From: Kern Sibbald Date: Wed, 12 Dec 2007 20:38:18 +0000 (+0000) Subject: Add first cut reorganization X-Git-Tag: Release-3.0.0~2159 X-Git-Url: https://git.sur5r.net/?p=bacula%2Fdocs;a=commitdiff_plain;h=68c20a3269c5caf3bcf190d52e78135f1b0fcb26 Add first cut reorganization --- diff --git a/docs/Makefile.in b/docs/Makefile.in index 7a7428ac..1e16864b 100644 --- a/docs/Makefile.in +++ b/docs/Makefile.in @@ -10,28 +10,35 @@ BACULASRC = @bacula@ basedir = .. topdir = .. -thisdir = doc +thisdir = docs # # Distribution variables # +en_dirs = manuals/en/catalog manuals/en/concepts manuals/en/console \ + manuals/en/developers manuals/en/install manuals/en/problems \ + manuals/en/utility DIST = Makefile.in #------------------------------------------------------------------------- all: - (cd manual; make) - (cd developers; make) + @for I in ${en_dirs}; \ + do (cd $$I; echo "==>Entering directory `pwd`"; \ + $(MAKE) $@ || (echo ""; echo ""; echo " ====== Error in `pwd` ======"; \ + echo ""; echo "";)); \ + done (cd bacula-web; make) + @echo "All manuals built ..." 
-configure: autoconf/configure.in autoconf/aclocal.m4 autoconf/acconfig.h autoconf/config.h.in +configure: autoconf/configure.in autoconf/aclocal.m4 autoconf/acconfig.h cd $(srcdir); ${RMF} -f config.cache config.log config.out config.status src/config.h autoconf --prepend-include=$(srcdir)/autoconf \ - autoconf/configure.in > configure + autoconf/configure.in > configure chmod 755 configure Makefile: Makefile.in @@ -63,10 +70,13 @@ clean: (cd manual-de; make clean) (cd manual-fr; make clean) (cd bacula-web; make clean) + @for I in ${en_dirs}; \ + do (cd $$I; echo "==>Entering directory `pwd`"; ${MAKE} $@ || exit 1); done + realclean: clean -distclean: +distclean: clean $(RMF) Makefile $(RMF) -r CVS html-manual/CVS home-page/CVS techlogs/CVS $(RMF) -rf autom4te.cache bacula-doc-* config.log config.out @@ -75,6 +85,8 @@ distclean: (cd manual-de; make distclean) (cd manual-fr; make distclean) (cd bacula-web; make distclean) + @for I in ${en_dirs}; \ + do (cd $$I; echo "==>Entering directory `pwd`"; ${MAKE} $@ || exit 1); done devclean: diff --git a/docs/autoconf/configure.in b/docs/autoconf/configure.in index 54044eb4..2e4d616b 100644 --- a/docs/autoconf/configure.in +++ b/docs/autoconf/configure.in @@ -90,6 +90,27 @@ AC_SUBST_FILE(MCOMMON) AC_OUTPUT([ \ autoconf/Make.common \ Makefile \ + manuals/en/catalog/Makefile \ + manuals/en/catalog/update_version \ + manuals/en/catalog/version.tex \ + manuals/en/concepts/Makefile \ + manuals/en/concepts/update_version \ + manuals/en/concepts/version.tex \ + manuals/en/console/Makefile \ + manuals/en/console/update_version \ + manuals/en/console/version.tex \ + manuals/en/developers/Makefile \ + manuals/en/developers/update_version \ + manuals/en/developers/version.tex \ + manuals/en/install/Makefile \ + manuals/en/install/update_version \ + manuals/en/install/version.tex \ + manuals/en/problems/Makefile \ + manuals/en/problems/update_version \ + manuals/en/problems/version.tex \ + manuals/en/utility/Makefile \ + 
manuals/en/utility/update_version \ + manuals/en/utility/version.tex \ manual/Makefile \ manual/update_version \ manual/version.tex \ @@ -107,7 +128,14 @@ AC_OUTPUT([ \ [ ] ) -chmod 766 manual/update_version +chmod 766 manuals/en/catalog/update_version +chmod 766 manuals/en/concepts/update_version +chmod 766 manuals/en/console/update_version +chmod 766 manuals/en/developers/update_version +chmod 766 manuals/en/install/update_version +chmod 766 manuals/en/problems/update_version +chmod 766 manuals/en/utility/update_version +chmod 766 manuals//update_version chmod 766 manual-fr/update_version chmod 766 manual-de/update_version diff --git a/docs/configure b/docs/configure index 54abe447..6f210d57 100755 --- a/docs/configure +++ b/docs/configure @@ -1768,7 +1768,7 @@ MCOMMON=./autoconf/Make.common - ac_config_files="$ac_config_files autoconf/Make.common Makefile manual/Makefile manual/update_version manual/version.tex manual-de/Makefile manual-de/version.tex manual-de/update_version manual-fr/Makefile manual-fr/version.tex manual-fr/update_version developers/Makefile developers/version.tex bacula-web/Makefile bacula-web/version.tex $PFILES" + ac_config_files="$ac_config_files autoconf/Make.common Makefile manuals/en/catalog/Makefile manuals/en/catalog/update_version manuals/en/catalog/version.tex manuals/en/concepts/Makefile manuals/en/concepts/update_version manuals/en/concepts/version.tex manuals/en/console/Makefile manuals/en/console/update_version manuals/en/console/version.tex manuals/en/developers/Makefile manuals/en/developers/update_version manuals/en/developers/version.tex manuals/en/install/Makefile manuals/en/install/update_version manuals/en/install/version.tex manuals/en/problems/Makefile manuals/en/problems/update_version manuals/en/problems/version.tex manuals/en/utility/Makefile manuals/en/utility/update_version manuals/en/utility/version.tex manual/Makefile manual/update_version manual/version.tex manual-de/Makefile manual-de/version.tex 
manual-de/update_version manual-fr/Makefile manual-fr/version.tex manual-fr/update_version developers/Makefile developers/version.tex bacula-web/Makefile bacula-web/version.tex $PFILES" ac_config_commands="$ac_config_commands default" cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure @@ -2326,6 +2326,27 @@ do # Handling of arguments. "autoconf/Make.common" ) CONFIG_FILES="$CONFIG_FILES autoconf/Make.common" ;; "Makefile" ) CONFIG_FILES="$CONFIG_FILES Makefile" ;; + "manuals/en/catalog/Makefile" ) CONFIG_FILES="$CONFIG_FILES manuals/en/catalog/Makefile" ;; + "manuals/en/catalog/update_version" ) CONFIG_FILES="$CONFIG_FILES manuals/en/catalog/update_version" ;; + "manuals/en/catalog/version.tex" ) CONFIG_FILES="$CONFIG_FILES manuals/en/catalog/version.tex" ;; + "manuals/en/concepts/Makefile" ) CONFIG_FILES="$CONFIG_FILES manuals/en/concepts/Makefile" ;; + "manuals/en/concepts/update_version" ) CONFIG_FILES="$CONFIG_FILES manuals/en/concepts/update_version" ;; + "manuals/en/concepts/version.tex" ) CONFIG_FILES="$CONFIG_FILES manuals/en/concepts/version.tex" ;; + "manuals/en/console/Makefile" ) CONFIG_FILES="$CONFIG_FILES manuals/en/console/Makefile" ;; + "manuals/en/console/update_version" ) CONFIG_FILES="$CONFIG_FILES manuals/en/console/update_version" ;; + "manuals/en/console/version.tex" ) CONFIG_FILES="$CONFIG_FILES manuals/en/console/version.tex" ;; + "manuals/en/developers/Makefile" ) CONFIG_FILES="$CONFIG_FILES manuals/en/developers/Makefile" ;; + "manuals/en/developers/update_version" ) CONFIG_FILES="$CONFIG_FILES manuals/en/developers/update_version" ;; + "manuals/en/developers/version.tex" ) CONFIG_FILES="$CONFIG_FILES manuals/en/developers/version.tex" ;; + "manuals/en/install/Makefile" ) CONFIG_FILES="$CONFIG_FILES manuals/en/install/Makefile" ;; + "manuals/en/install/update_version" ) CONFIG_FILES="$CONFIG_FILES manuals/en/install/update_version" ;; + "manuals/en/install/version.tex" ) CONFIG_FILES="$CONFIG_FILES 
manuals/en/install/version.tex" ;; + "manuals/en/problems/Makefile" ) CONFIG_FILES="$CONFIG_FILES manuals/en/problems/Makefile" ;; + "manuals/en/problems/update_version" ) CONFIG_FILES="$CONFIG_FILES manuals/en/problems/update_version" ;; + "manuals/en/problems/version.tex" ) CONFIG_FILES="$CONFIG_FILES manuals/en/problems/version.tex" ;; + "manuals/en/utility/Makefile" ) CONFIG_FILES="$CONFIG_FILES manuals/en/utility/Makefile" ;; + "manuals/en/utility/update_version" ) CONFIG_FILES="$CONFIG_FILES manuals/en/utility/update_version" ;; + "manuals/en/utility/version.tex" ) CONFIG_FILES="$CONFIG_FILES manuals/en/utility/version.tex" ;; "manual/Makefile" ) CONFIG_FILES="$CONFIG_FILES manual/Makefile" ;; "manual/update_version" ) CONFIG_FILES="$CONFIG_FILES manual/update_version" ;; "manual/version.tex" ) CONFIG_FILES="$CONFIG_FILES manual/version.tex" ;; @@ -2832,7 +2853,14 @@ if test "$no_create" != yes; then fi -chmod 766 manual/update_version +chmod 766 manuals/en/catalog/update_version +chmod 766 manuals/en/concepts/update_version +chmod 766 manuals/en/console/update_version +chmod 766 manuals/en/developers/update_version +chmod 766 manuals/en/install/update_version +chmod 766 manuals/en/problems/update_version +chmod 766 manuals/en/utility/update_version +chmod 766 manuals//update_version chmod 766 manual-fr/update_version chmod 766 manual-de/update_version diff --git a/docs/manual/install.tex b/docs/manual/install.tex index 68bb4132..953fa440 100644 --- a/docs/manual/install.tex +++ b/docs/manual/install.tex @@ -772,6 +772,16 @@ $ nm /usr/local/lib/libpq.a | grep mutex security, please modify src/version.h appropriately (it should be obvious when you look at the file). + Running with Batch Insert turned on is recommended because it can + significantly improve attribute insertion times. However, it does + put a significantly larger part of the work on your SQL engine, so + you may need to pay more attention to tuning it. 
In particular, + Batch Insert can require large temporary table space, and consequently, + the default location (often /tmp) may run out of space causing errors. + For MySQL, the location is set in my.conf with "tmpdir". You may also + want to increase the memory available to your SQL engine to further + improve performance during Batch Inserts. + \item [ {-}{-}enable-gnome ] \index[general]{{-}{-}enable-gnome} If you have GNOME installed on your computer including the diff --git a/docs/manual/postgresql.tex b/docs/manual/postgresql.tex index 835f7732..15be98e9 100644 --- a/docs/manual/postgresql.tex +++ b/docs/manual/postgresql.tex @@ -128,7 +128,8 @@ user). not set UTF8 as your default character set or because you have imported files from elsewhere (e.g. MacOS X). For this reason, Bacula uses SQL\_ASCII as the default encoding. If you want to change this, - please modify the script before running it. + please modify the script before running it, but be forewarned that + Bacula backups will fail if PostgreSQL finds any non-UTF8 sequences. If running the script fails, it is probably because the database is owned by a user other than yourself. On many systems, the database diff --git a/docs/manual/tls.tex b/docs/manual/tls.tex index c5068d1a..6c90e110 100644 --- a/docs/manual/tls.tex +++ b/docs/manual/tls.tex @@ -249,12 +249,25 @@ for bconsole to the Director. TLS Verify Peer = yes # Allow only the Director to connect TLS Allowed CN = "bacula@backup1.example.com" - TLS CA Certificate File = /usr/local/etc/ssl/ca.pem\ + TLS CA Certificate File = /usr/local/etc/ssl/ca.pem # This is a server certificate. It is used by connecting # directors to verify the authenticity of this file daemon TLS Certificate = /usr/local/etc/ssl/server1/cert.pem TLS Key = /usr/local/etc/ssl/server1/key.pem } + + FileDaemon { + Name = backup1-fd + ... 
+ # you need these TLS entries so the SD and FD can + # communicate + TLS Enable = yes + TLS Require = yes + + TLS CA Certificate File = /usr/local/etc/ssl/ca.pem + TLS Certificate = /usr/local/etc/ssl/server1/cert.pem + TLS Key = /usr/local/etc/ssl/server1/key.pem +} \end{verbatim} \normalsize diff --git a/docs/manuals/en/catalog/Makefile.in b/docs/manuals/en/catalog/Makefile.in new file mode 100644 index 00000000..7f8c78fa --- /dev/null +++ b/docs/manuals/en/catalog/Makefile.in @@ -0,0 +1,135 @@ +# +# +# Makefile for LaTeX +# +# To build everything do +# make tex +# make web +# make html +# make dvipdf +# +# or simply +# +# make +# +# for rapid development do: +# make tex +# make show +# +# +# If you are having problems getting "make" to work, debugging it is +# easier if can see the output from latex, which is normally redirected +# to /dev/null. To see it, do the following: +# +# cd docs/manual +# make tex +# latex bacula.tex +# +# typically the latex command will stop indicating the error (e.g. a +# missing \ in front of a _ or a missing { or ] ... +# +# The following characters must be preceded by a backslash +# to be entered as printable characters: +# +# # $ % & ~ _ ^ \ { } +# + +IMAGES=../../../images + +DOC=catalog + +first_rule: all + +all: tex web dvipdf mini-clean + +.SUFFIXES: .tex .html +.PHONY: +.DONTCARE: + + +tex: + @./update_version + @echo "Making version `cat version.tex`" + @cp -fp ${IMAGES}/hires/*.eps . + @touch ${DOC}i-dir.tex ${DOC}i-fd.tex ${DOC}i-sd.tex \ + ${DOC}i-console.tex ${DOC}i-general.tex + latex -interaction=batchmode ${DOC}.tex + makeindex ${DOC}.idx -o ${DOC}.ind 2>/dev/null + latex -interaction=batchmode ${DOC}.tex + +pdf: + @echo "Making pdfm" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdfm -p a4 ${DOC}.dvi + +dvipdf: + @echo "Making dvi to pdf" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdf ${DOC}.dvi ${DOC}.pdf + +html: + @echo " " + @echo "Making html" + @cp -fp ${IMAGES}/*.eps . 
+ @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @(if [ -f imagename_translations ] ; then \ + ./translate_images.pl --from_meaningful_names ${DOC}.html; \ + fi) + latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \ + -init_file latex2html-init.pl ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}.html + @echo "Done making html" + +web: + @echo "Making web" + @mkdir -p ${DOC} + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @cp -fp ${IMAGES}/*.eps ${DOC}/ + @cp -fp ${IMAGES}/*.eps ${IMAGES}/*.png ${DOC}/ + @rm -f ${DOC}/xp-*.png + @rm -f ${DOC}/next.eps ${DOC}/next.png ${DOC}/prev.eps ${DOC}/prev.png ${DOC}/up.eps ${DOC}/up.png + @rm -rf ${DOC}/*.html + latex2html -split 3 -local_icons -t "Bacula Catalog Database Guide" -long_titles 4 \ + -toc_stars -contents_in_nav -init_file latex2html-init.pl -white -notransparent ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}/Bacula_Catalo*.html + @echo "Done making web" +show: + xdvi ${DOC} + +texcheck: + ./check_tex.pl ${DOC}.tex + +main_configs: + pic2graph -density 100 main_configs.png + +mini-clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.gif *.jpg *.eps + @rm -f *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.backup *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd *.old *.out + @rm -f ${DOC}/*.gif ${DOC}/*.jpg ${DOC}/*.eps + @rm -f ${DOC}/*.aux ${DOC}/*.cp ${DOC}/*.fn ${DOC}/*.ky ${DOC}/*.log ${DOC}/*.pg + @rm -f ${DOC}/*.backup ${DOC}/*.ilg ${DOC}/*.lof ${DOC}/*.lot + @rm -f ${DOC}/*.cdx ${DOC}/*.cnd ${DOC}/*.ddx ${DOC}/*.ddn ${DOC}/*.fdx ${DOC}/*.fnd ${DOC}/*.ind ${DOC}/*.sdx ${DOC}/*.snd + @rm -f ${DOC}/*.dnd ${DOC}/*.old ${DOC}/*.out + @rm -f ${DOC}/WARNINGS + + +clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.png *.gif *.jpg *.eps + @rm -f *.pdf *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.html *.backup *.ps *.dvi *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx 
*.fnd *.ind *.sdx *.snd + @rm -f *.dnd imagename_translations + @rm -f *.old WARNINGS *.out *.toc *.idx + @rm -f ${DOC}i-*.tex + @rm -rf ${DOC} + + +distclean: clean + @rm -f images.pl labels.pl internals.pl + @rm -f Makefile version.tex diff --git a/docs/manuals/en/catalog/catalog.css b/docs/manuals/en/catalog/catalog.css new file mode 100644 index 00000000..d1824aff --- /dev/null +++ b/docs/manuals/en/catalog/catalog.css @@ -0,0 +1,30 @@ +/* Century Schoolbook font is very similar to Computer Modern Math: cmmi */ +.MATH { font-family: "Century Schoolbook", serif; } +.MATH I { font-family: "Century Schoolbook", serif; font-style: italic } +.BOLDMATH { font-family: "Century Schoolbook", serif; font-weight: bold } + +/* implement both fixed-size and relative sizes */ +SMALL.XTINY { font-size : xx-small } +SMALL.TINY { font-size : x-small } +SMALL.SCRIPTSIZE { font-size : smaller } +SMALL.FOOTNOTESIZE { font-size : small } +SMALL.SMALL { } +BIG.LARGE { } +BIG.XLARGE { font-size : large } +BIG.XXLARGE { font-size : x-large } +BIG.HUGE { font-size : larger } +BIG.XHUGE { font-size : xx-large } + +/* heading styles */ +H1 { } +H2 { } +H3 { } +H4 { } +H5 { } + +/* mathematics styles */ +DIV.displaymath { } /* math displays */ +TD.eqno { } /* equation-number cells */ + + +/* document-specific styles come next */ diff --git a/docs/manuals/en/catalog/catalog.tex b/docs/manuals/en/catalog/catalog.tex new file mode 100644 index 00000000..4a6ad9f5 --- /dev/null +++ b/docs/manuals/en/catalog/catalog.tex @@ -0,0 +1,81 @@ +%% +%% +%% The following characters must be preceded by a backslash +%% to be entered as printable characters: +%% +%% # $ % & ~ _ ^ \ { } +%% + +\documentclass[11pt,a4paper]{book} +\usepackage{html} +\usepackage{float} +\usepackage{graphicx} +\usepackage{bacula} +\usepackage{longtable} +\usepackage{makeidx} +\usepackage{index} +\usepackage{setspace} +\usepackage{hyperref} +\usepackage{url} + + +\makeindex +\newindex{general}{idx}{ind}{General Index} + +\sloppy 
+ +\begin{document} +\sloppy + +\newfont{\bighead}{cmr17 at 36pt} +\parskip 10pt +\parindent 0pt + +\title{\includegraphics{./bacula-logo.eps} \\ \bigskip + \Huge{Bacula Catalog Database Guide} + \begin{center} + \large{It comes in the night and sucks + the essence from your computers. } + \end{center} +} + + +\author{Kern Sibbald} +\date{\vspace{1.0in}\today \\ + This manual documents Bacula version \input{version} \\ + \vspace{0.2in} + Copyright \copyright 1999-2007, Free Software Foundation Europe + e.V. \\ + \vspace{0.2in} + Permission is granted to copy, distribute and/or modify this document under the terms of the + GNU Free Documentation License, Version 1.2 published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU Free Documentation License". +} + +\maketitle + +\clearpage +\tableofcontents +\clearpage +\listoffigures +\clearpage +\listoftables +\clearpage + +\include{catmaintenance} +\include{mysql} +\include{postgresql} +\include{sqlite} +\include{internaldb} +\include{fdl} + + +% The following line tells link_resolver.pl to not include these files: +% nolinks developersi baculai-dir baculai-fd baculai-sd baculai-console baculai-main + +% pull in the index +\clearpage +\printindex[general] + +\end{document} diff --git a/docs/manuals/en/catalog/catmaintenance.tex b/docs/manuals/en/catalog/catmaintenance.tex new file mode 100644 index 00000000..bbdf013b --- /dev/null +++ b/docs/manuals/en/catalog/catmaintenance.tex @@ -0,0 +1,762 @@ +%% +%% + +\chapter{Catalog Maintenance} +\label{CatMaintenanceChapter} +\index[general]{Maintenance!Catalog } +\index[general]{Catalog Maintenance } + +Without proper setup and maintenance, your Catalog may continue to grow +indefinitely as you run Jobs and backup Files, and/or it may become +very inefficient and slow. 
How fast the size of your +Catalog grows depends on the number of Jobs you run and how many files they +backup. By deleting records within the database, you can make space available +for the new records that will be added during the next Job. By constantly +deleting old expired records (dates older than the Retention period), your +database size will remain constant. + +If you started with the default configuration files, they already contain +reasonable defaults for a small number of machines (less than 5), so if you +fall into that case, catalog maintenance will not be urgent if you have a few +hundred megabytes of disk space free. Whatever the case may be, some knowledge +of retention periods will be useful. +\label{Retention} + +\section{Setting Retention Periods} +\index[general]{Setting Retention Periods } +\index[general]{Periods!Setting Retention } + +{\bf Bacula} uses three Retention periods: the {\bf File Retention} period, +the {\bf Job Retention} period, and the {\bf Volume Retention} period. Of +these three, the File Retention period is by far the most important in +determining how large your database will become. + +The {\bf File Retention} and the {\bf Job Retention} are specified in each +Client resource as is shown below. The {\bf Volume Retention} period is +specified in the Pool resource, and the details are given in the next chapter +of this manual. + +\begin{description} + +\item [File Retention = \lt{}time-period-specification\gt{}] + \index[general]{File Retention } + The File Retention record defines the length of time that Bacula will keep +File records in the Catalog database. When this time period expires, and if +{\bf AutoPrune} is set to {\bf yes}, Bacula will prune (remove) File records +that are older than the specified File Retention period. The pruning will +occur at the end of a backup Job for the given Client. 
Note that the Client +database record contains a copy of the File and Job retention periods, but +Bacula uses the current values found in the Director's Client resource to do +the pruning. + +Since File records in the database account for probably 80 percent of the +size of the database, you should carefully determine exactly what File +Retention period you need. Once the File records have been removed from +the database, you will no longer be able to restore individual files +in a Job. However, with Bacula version 1.37 and later, as long as the +Job record still exists, you will be able to restore all files in the +job. + +Retention periods are specified in seconds, but as a convenience, there are +a number of modifiers that permit easy specification in terms of minutes, +hours, days, weeks, months, quarters, or years on the record. See the +\ilink{ Configuration chapter}{Time} of this manual for additional details +of modifier specification. + +The default File retention period is 60 days. + +\item [Job Retention = \lt{}time-period-specification\gt{}] + \index[general]{Job Retention } + The Job Retention record defines the length of time that {\bf Bacula} +will keep Job records in the Catalog database. When this time period +expires, and if {\bf AutoPrune} is set to {\bf yes} Bacula will prune +(remove) Job records that are older than the specified Job Retention +period. Note, if a Job record is selected for pruning, all associated File +and JobMedia records will also be pruned regardless of the File Retention +period set. As a consequence, you normally will set the File retention +period to be less than the Job retention period. + +As mentioned above, once the File records are removed from the database, +you will no longer be able to restore individual files from the Job. +However, as long as the Job record remains in the database, you will be +able to restore all the files backuped for the Job (on version 1.37 and +later). 
As a consequence, it is generally a good idea to retain the Job +records much longer than the File records. + +The retention period is specified in seconds, but as a convenience, there +are a number of modifiers that permit easy specification in terms of +minutes, hours, days, weeks, months, quarters, or years. See the \ilink{ +Configuration chapter}{Time} of this manual for additional details of +modifier specification. + +The default Job Retention period is 180 days. + +\item [AutoPrune = \lt{}yes/no\gt{}] + \index[general]{AutoPrune } + If AutoPrune is set to {\bf yes} (default), Bacula will automatically apply +the File retention period and the Job retention period for the Client at the +end of the Job. + +If you turn this off by setting it to {\bf no}, your Catalog will grow each +time you run a Job. +\end{description} + +\label{CompactingMySQL} +\section{Compacting Your MySQL Database} +\index[general]{Database!Compacting Your MySQL } +\index[general]{Compacting Your MySQL Database } + +Over time, as noted above, your database will tend to grow. I've noticed that +even though Bacula regularly prunes files, {\bf MySQL} does not effectively +use the space, and instead continues growing. To avoid this, from time to +time, you must compact your database. Normally, large commercial database such +as Oracle have commands that will compact a database to reclaim wasted file +space. MySQL has the {\bf OPTIMIZE TABLE} command that you can use, and SQLite +version 2.8.4 and greater has the {\bf VACUUM} command. We leave it to you to +explore the utility of the {\bf OPTIMIZE TABLE} command in MySQL. + +All database programs have some means of writing the database out in ASCII +format and then reloading it. Doing so will re-create the database from +scratch producing a compacted result, so below, we show you how you can do +this for MySQL, PostgreSQL and SQLite. 
+ +For a {\bf MySQL} database, you could write the Bacula database as an ASCII +file (bacula.sql) then reload it by doing the following: + +\footnotesize +\begin{verbatim} +mysqldump -f --opt bacula > bacula.sql +mysql bacula < bacula.sql +rm -f bacula.sql +\end{verbatim} +\normalsize + +Depending on the size of your database, this will take more or less time and a +fair amount of disk space. For example, if I cd to the location of the MySQL +Bacula database (typically /opt/mysql/var or something similar) and enter: + +\footnotesize +\begin{verbatim} +du bacula +\end{verbatim} +\normalsize + +I get {\bf 620,644} which means there are that many blocks containing 1024 +bytes each or approximately 635 MB of data. After doing the {\bf mysqldump}, I +had a bacula.sql file that had {\bf 174,356} blocks, and after doing the {\bf +mysql} command to recreate the database, I ended up with a total of {\bf +210,464} blocks rather than the original {\bf 629,644}. In other words, the +compressed version of the database took approximately one third of the space +of the database that had been in use for about a year. + +As a consequence, I suggest you monitor the size of your database and from +time to time (once every six months or year), compress it. + +\label{DatabaseRepair} +\label{RepairingMySQL} +\section{Repairing Your MySQL Database} +\index[general]{Database!Repairing Your MySQL } +\index[general]{Repairing Your MySQL Database } + +If you find that you are getting errors writing to your MySQL database, or +Bacula hangs each time it tries to access the database, you should consider +running MySQL's database check and repair routines. The program you need to +run depends on the type of database indexing you are using. If you are using +the default, you will probably want to use {\bf myisamchk}. For more details +on how to do this, please consult the MySQL document at: +\elink{ +http://www.mysql.com/doc/en/Repair.html} +{http://www.mysql.com/doc/en/Repair.html}. 
+ +If the errors you are getting are simply SQL warnings, then you might try +running dbcheck before (or possibly after) using the MySQL database repair +program. It can clean up many of the orphaned record problems, and certain +other inconsistencies in the Bacula database. + +A typical cause of MySQL database problems is if your partition fills. In +such a case, you will need to create additional space on the partition or +free up some space then repair the database probably using {\bf myisamchk}. +Recently my root partition filled and the MySQL database was corrupted. +Simply running {\bf myisamchk -r} did not fix the problem. However, +the following script did the trick for me: + +\footnotesize +\begin{verbatim} +#!/bin/sh +for i in *.MYD ; do + mv $i x${i} + t=`echo $i | cut -f 1 -d '.' -` + mysql bacula <bacula.db +select * from sqlite_master where type='index' and tbl_name='File'; +\end{verbatim} +\normalsize + +If the indexes are not present, especially the JobId index, you can +create them with the following commands: + +\footnotesize +\begin{verbatim} +mysql bacula +CREATE INDEX file_jobid_idx on File (JobId); +CREATE INDEX file_jfp_idx on File (Job, FilenameId, PathId); +\end{verbatim} +\normalsize + + + +\label{CompactingPostgres} +\section{Compacting Your PostgreSQL Database} +\index[general]{Database!Compacting Your PostgreSQL } +\index[general]{Compacting Your PostgreSQL Database } + +Over time, as noted above, your database will tend to grow. I've noticed that +even though Bacula regularly prunes files, PostgreSQL has a {\bf VACUUM} +command that will compact your database for you. Alternatively you may want to +use the {\bf vacuumdb} command, which can be run from a cron job. + +All database programs have some means of writing the database out in ASCII +format and then reloading it. Doing so will re-create the database from +scratch producing a compacted result, so below, we show you how you can do +this for PostgreSQL. 
+ +For a {\bf PostgreSQL} database, you could write the Bacula database as an +ASCII file (bacula.sql) then reload it by doing the following: + +\footnotesize +\begin{verbatim} +pg_dump -c bacula > bacula.sql +cat bacula.sql | psql bacula +rm -f bacula.sql +\end{verbatim} +\normalsize + +Depending on the size of your database, this will take more or less time and a +fair amount of disk space. For example, you can {\bf cd} to the location of +the Bacula database (typically /usr/local/pgsql/data or possible +/var/lib/pgsql/data) and check the size. + +There are certain PostgreSQL users who do not recommend the above +procedure. They have the following to say: +PostgreSQL does not +need to be dumped/restored to keep the database efficient. A normal +process of vacuuming will prevent the database from every getting too +large. If you want to fine-tweak the database storage, commands such +as VACUUM FULL, REINDEX, and CLUSTER exist specifically to keep you +from having to do a dump/restore. + +Finally, you might want to look at the PostgreSQL documentation on +this subject at +\elink{http://www.postgresql.org/docs/8.1/interactive/maintenance.html} +{http://www.postgresql.org/docs/8.1/interactive/maintenance.html}. + +\section{Compacting Your SQLite Database} +\index[general]{Compacting Your SQLite Database } +\index[general]{Database!Compacting Your SQLite } + +First please read the previous section that explains why it is necessary to +compress a database. SQLite version 2.8.4 and greater have the {\bf Vacuum} +command for compacting the database. 
+ +\footnotesize +\begin{verbatim} +cd {\bf working-directory} +echo 'vacuum;' | sqlite bacula.db +\end{verbatim} +\normalsize + +As an alternative, you can use the following commands, adapted to your system: + + +\footnotesize +\begin{verbatim} +cd {\bf working-directory} +echo '.dump' | sqlite bacula.db > bacula.sql +rm -f bacula.db +sqlite bacula.db < bacula.sql +rm -f bacula.sql +\end{verbatim} +\normalsize + +Where {\bf working-directory} is the directory that you specified in the +Director's configuration file. Note, in the case of SQLite, it is necessary to +completely delete (rm) the old database before creating a new compressed +version. + +\section{Migrating from SQLite to MySQL} +\index[general]{MySQL!Migrating from SQLite to } +\index[general]{Migrating from SQLite to MySQL } + +You may begin using Bacula with SQLite then later find that you want to switch +to MySQL for any of a number of reasons: SQLite tends to use more disk than +MySQL; when the database is corrupted it is often more catastrophic than +with MySQL or PostgreSQL. +Several users have succeeded in converting from SQLite to MySQL by +exporting the MySQL data and then processing it with Perl scripts +prior to putting it into MySQL. This is, however, not a simple +process. + +\label{BackingUpBacula} +\section{Backing Up Your Bacula Database} +\index[general]{Backing Up Your Bacula Database } +\index[general]{Database!Backing Up Your Bacula } + +If ever the machine on which your Bacula database crashes, and you need to +restore from backup tapes, one of your first priorities will probably be to +recover the database. Although Bacula will happily backup your catalog +database if it is specified in the FileSet, this is not a very good way to do +it, because the database will be saved while Bacula is modifying it. Thus the +database may be in an instable state. Worse yet, you will backup the database +before all the Bacula updates have been applied. 
+ +To resolve these problems, you need to backup the database after all the backup +jobs have been run. In addition, you will want to make a copy while Bacula is +not modifying it. To do so, you can use two scripts provided in the release +{\bf make\_catalog\_backup} and {\bf delete\_catalog\_backup}. These files +will be automatically generated along with all the other Bacula scripts. The +first script will make an ASCII copy of your Bacula database into {\bf +bacula.sql} in the working directory you specified in your configuration, and +the second will delete the {\bf bacula.sql} file. + +The basic sequence of events to make this work correctly is as follows: + +\begin{itemize} +\item Run all your nightly backups +\item After running your nightly backups, run a Catalog backup Job +\item The Catalog backup job must be scheduled after your last nightly backup + +\item You use {\bf RunBeforeJob} to create the ASCII backup file and {\bf + RunAfterJob} to clean up +\end{itemize} + +Assuming that you start all your nightly backup jobs at 1:05 am (and that they +run one after another), you can do the catalog backup with the following +additional Director configuration statements: + +\footnotesize +\begin{verbatim} +# Backup the catalog database (after the nightly save) +Job { + Name = "BackupCatalog" + Type = Backup + Client=rufus-fd + FileSet="Catalog" + Schedule = "WeeklyCycleAfterBackup" + Storage = DLTDrive + Messages = Standard + Pool = Default + # WARNING!!! Passing the password via the command line is insecure. + # see comments in make_catalog_backup for details. + RunBeforeJob = "/home/kern/bacula/bin/make_catalog_backup" + RunAfterJob = "/home/kern/bacula/bin/delete_catalog_backup" + Write Bootstrap = "/home/kern/bacula/working/BackupCatalog.bsr" +} +# This schedule does the catalog. 
It starts after the WeeklyCycle +Schedule { + Name = "WeeklyCycleAfterBackup" + Run = Level=Full sun-sat at 1:10 +} +# This is the backup of the catalog +FileSet { + Name = "Catalog" + Include { + Options { + signature=MD5 + } + File = \lt{}working_directory\gt{}/bacula.sql + } +} +\end{verbatim} +\normalsize + +Be sure to write a bootstrap file as in the above example. However, it is preferable +to write or copy the bootstrap file to another computer. It will allow +you to quickly recover the database backup should that be necessary. If +you do not have a bootstrap file, it is still possible to recover your +database backup, but it will be more work and take longer. + + +\label{BackingUpBaculaSecurityConsiderations} +\section{Security considerations} +\index[general]{Backing Up Your Bacula Database - Security Considerations } +\index[general]{Database!Backing Up Your Bacula Database - Security Considerations } + +We provide make\_catalog\_backup as an example of what can be used to backup +your Bacula database. We expect you to take security precautions relevant +to your situation. make\_catalog\_backup is designed to take a password on +the command line. This is fine on machines with only trusted users. It is +not acceptable on machines without trusted users. Most database systems +provide an alternative method, which does not place the password on the +command line. + +The make\_catalog\_backup script contains some warnings about how to use it. Please +read those tips. + +To help you get started, we know PostgreSQL has a password file, +\elink{ +.pgpass}{http://www.postgresql.org/docs/8.2/static/libpq-pgpass.html}, and +we know MySQL has +\elink{ .my.cnf}{http://dev.mysql.com/doc/refman/4.1/en/password-security.html}. + +Only you can decide what is appropriate for your situation. We have provided +you with a starting point. We hope it helps. 
+ + +\label{BackingUPOtherDBs} +\section{Backing Up Third Party Databases} +\index[general]{Backing Up Third Party Databases } +\index[general]{Databases!Backing Up Third Party } + +If you are running a database in production mode on your machine, Bacula will +happily backup the files, but if the database is in use while Bacula is +reading it, you may back it up in an unstable state. + +The best solution is to shutdown your database before backing it up, or use +some tool specific to your database to make a valid live copy perhaps by +dumping the database in ASCII format. I am not a database expert, so I cannot +provide you advice on how to do this, but if you are unsure about how to +backup your database, you might try visiting the Backup Central site, which +has been renamed Storage Mountain (www.backupcentral.com). In particular, +their +\elink{ Free Backup and Recovery +Software}{http://www.backupcentral.com/toc-free-backup-software.html} page has +links to scripts that show you how to shutdown and backup most major +databases. +\label{Size} + +\section{Database Size} +\index[general]{Size!Database } +\index[general]{Database Size } + +As mentioned above, if you do not do automatic pruning, your Catalog will grow +each time you run a Job. Normally, you should decide how long you want File +records to be maintained in the Catalog and set the {\bf File Retention} +period to that time. Then you can either wait and see how big your Catalog +gets or make a calculation assuming approximately 154 bytes for each File +saved and knowing the number of Files that are saved during each backup and +the number of Clients you backup. + +For example, suppose you do a backup of two systems, each with 100,000 files. +Suppose further that you do a Full backup weekly and an Incremental every day, +and that the Incremental backup typically saves 10,000 files. The size of your +database after a month can roughly be calculated as: + +\footnotesize +\begin{verbatim} + Size = 154 * No. 
Systems * (100,000 * 4 + 10,000 * 26) +\end{verbatim} +\normalsize + +where we have assumed four weeks in a month and 26 incremental backups per month. +This would give the following: + +\footnotesize +\begin{verbatim} + Size = 154 * 2 * (100,000 * 4 + 10,000 * 26) +or + Size = 308 * (400,000 + 260,000) +or + Size = 203,280,000 bytes +\end{verbatim} +\normalsize + +So for the above two systems, we should expect to have a database size of +approximately 200 Megabytes. Of course, this will vary according to how many +files are actually backed up. + +Below are some statistics for a MySQL database containing Job records for five +Clients beginning September 2001 through May 2002 (8.5 months) and File +records for the last 80 days. (Older File records have been pruned). For these +systems, only the user files and system files that change are backed up. The +core part of the system is assumed to be easily reloaded from the Red Hat rpms. + + +In the list below, the files (corresponding to Bacula Tables) with the +extension .MYD contain the data records whereas files with the extension .MYI +contain indexes. + +You will note that the File records (containing the file attributes) make up +the large bulk of the number of records as well as the space used (459 Mega +Bytes including the indexes). As a consequence, the most important Retention +period will be the {\bf File Retention} period. A quick calculation shows that +for each File that is saved, the database grows by approximately 150 bytes. 
+ +\footnotesize +\begin{verbatim} + Size in + Bytes Records File + ============ ========= =========== + 168 5 Client.MYD + 3,072 Client.MYI + 344,394,684 3,080,191 File.MYD + 115,280,896 File.MYI + 2,590,316 106,902 Filename.MYD + 3,026,944 Filename.MYI + 184 4 FileSet.MYD + 2,048 FileSet.MYI + 49,062 1,326 JobMedia.MYD + 30,720 JobMedia.MYI + 141,752 1,378 Job.MYD + 13,312 Job.MYI + 1,004 11 Media.MYD + 3,072 Media.MYI + 1,299,512 22,233 Path.MYD + 581,632 Path.MYI + 36 1 Pool.MYD + 3,072 Pool.MYI + 5 1 Version.MYD + 1,024 Version.MYI +\end{verbatim} +\normalsize + +This database has a total size of approximately 450 Megabytes. + +If we were using SQLite, the determination of the total database size would be +much easier since it is a single file, but we would have less insight to the +size of the individual tables as we have in this case. + +Note, SQLite databases may be as much as 50\% larger than MySQL databases due +to the fact that all data is stored as ASCII strings. That is even binary +integers are stored as ASCII strings, and this seems to increase the space +needed. diff --git a/docs/manuals/en/catalog/check_tex.pl b/docs/manuals/en/catalog/check_tex.pl new file mode 100755 index 00000000..e12d51be --- /dev/null +++ b/docs/manuals/en/catalog/check_tex.pl @@ -0,0 +1,152 @@ +#!/usr/bin/perl -w +# Finds potential problems in tex files, and issues warnings to the console +# about what it finds. Takes a list of files as its only arguments, +# and does checks on all the files listed. The assumption is that these are +# valid (or close to valid) LaTeX files. It follows \include statements +# recursively to pick up any included tex files. +# +# +# +# Currently the following checks are made: +# +# -- Multiple hyphens not inside a verbatim environment (or \verb). These +# should be placed inside a \verb{} contruct so they will not be converted +# to single hyphen by latex and latex2html. 
+ + +# Original creation 3-8-05 by Karl Cunningham karlc -at- keckec -dot- com +# +# + +use strict; + +# The following builds the test string to identify and change multiple +# hyphens in the tex files. Several constructs are identified but only +# multiple hyphens are changed; the others are fed to the output +# unchanged. +my $b = '\\\\begin\\*?\\s*\\{\\s*'; # \begin{ +my $e = '\\\\end\\*?\\s*\\{\\s*'; # \end{ +my $c = '\\s*\\}'; # closing curly brace + +# This captures entire verbatim environments. These are passed to the output +# file unchanged. +my $verbatimenv = $b . "verbatim" . $c . ".*?" . $e . "verbatim" . $c; + +# This captures \verb{..{ constructs. They are passed to the output unchanged. +my $verb = '\\\\verb\\*?(.).*?\\1'; + +# This captures multiple hyphens with a leading and trailing space. These are not changed. +my $hyphsp = '\\s\\-{2,}\\s'; + +# This identifies other multiple hyphens. +my $hyphens = '\\-{2,}'; + +# This identifies \hyperpage{..} commands, which should be ignored. +my $hyperpage = '\\\\hyperpage\\*?\\{.*?\\}'; + +# This builds the actual test string from the above strings. +#my $teststr = "$verbatimenv|$verb|$tocentry|$hyphens"; +my $teststr = "$verbatimenv|$verb|$hyphsp|$hyperpage|$hyphens"; + + +sub get_includes { + # Get a list of include files from the top-level tex file. The first + # argument is a pointer to the list of files found. The rest of the + # arguments is a list of filenames to check for includes. + my $files = shift; + my ($fileline,$includefile,$includes); + + while (my $filename = shift) { + # Get a list of all the html files in the directory. + open my $if,"<$filename" or die "Cannot open input file $filename\n"; + $fileline = 0; + $includes = 0; + while (<$if>) { + chomp; + $fileline++; + # If a file is found in an include, process it. 
+ if (($includefile) = /\\include\s*\{(.*?)\}/) { + $includes++; + # Append .tex to the filename + $includefile .= '.tex'; + + # If the include file has already been processed, issue a warning + # and don't do it again. + my $found = 0; + foreach (@$files) { + if ($_ eq $includefile) { + $found = 1; + last; + } + } + if ($found) { + print "$includefile found at line $fileline in $filename was previously included\n"; + } else { + # The file has not been previously found. Save it and + # recursively process it. + push (@$files,$includefile); + get_includes($files,$includefile); + } + } + } + close $if; + } +} + + +sub check_hyphens { + my (@files) = @_; + my ($filedata,$this,$linecnt,$before); + + # Build the test string to check for the various environments. + # We only do the conversion if the multiple hyphens are outside of a + # verbatim environment (either \begin{verbatim}...\end{verbatim} or + # \verb{--}). Capture those environments and pass them to the output + # unchanged. + + foreach my $file (@files) { + # Open the file and load the whole thing into $filedata. A bit wasteful but + # easier to deal with, and we don't have a problem with speed here. + $filedata = ""; + open IF,"<$file" or die "Cannot open input file $file"; + while (<IF>) { + $filedata .= $_; + } + close IF; + + # Set up to process the file data. + $linecnt = 1; + + # Go through the file data from beginning to end. For each match, save what + # came before it and what matched. $filedata now becomes only what came + # after the match. + # Check the match to see if it starts with a multiple-hyphen. If so + # warn the user. Keep track of line numbers so they can be output + # with the warning message. + while ($filedata =~ /$teststr/os) { + $this = $&; + $before = $`; + $filedata = $'; + $linecnt += $before =~ tr/\n/\n/; + + # Check if the multiple hyphen is present outside of one of the + # acceptable constructs. 
+ if ($this =~ /^\-+/) { + print "Possible unwanted multiple hyphen found in line ", + "$linecnt of file $file\n"; + } + $linecnt += $this =~ tr/\n/\n/; + } + } +} +################################################################## +# MAIN #### +################################################################## + +my (@includes,$cnt); + +# Examine the file pointed to by the first argument to get a list of +# includes to test. +get_includes(\@includes,@ARGV); + +check_hyphens(@includes); diff --git a/docs/manuals/en/catalog/do_echo b/docs/manuals/en/catalog/do_echo new file mode 100644 index 00000000..04b9f79a --- /dev/null +++ b/docs/manuals/en/catalog/do_echo @@ -0,0 +1,6 @@ +# +# Avoid that @VERSION@ and @DATE@ are changed by configure +# This file is sourced by update_version +# +echo "s%@VERSION@%${VERSION}%g" >${out} +echo "s%@DATE@%${DATE}%g" >>${out} diff --git a/docs/manuals/en/catalog/fdl.tex b/docs/manuals/en/catalog/fdl.tex new file mode 100644 index 00000000..b46cd990 --- /dev/null +++ b/docs/manuals/en/catalog/fdl.tex @@ -0,0 +1,485 @@ +% TODO: maybe get rid of centering + +\chapter{GNU Free Documentation License} +\index[general]{GNU Free Documentation License} +\index[general]{License!GNU Free Documentation} + +\label{label_fdl} + + \begin{center} + + Version 1.2, November 2002 + + + Copyright \copyright 2000,2001,2002 Free Software Foundation, Inc. + + \bigskip + + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + \bigskip + + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. +\end{center} + + +\begin{center} +{\bf\large Preamble} +\end{center} + +The purpose of this License is to make a manual, textbook, or other +functional and useful document "free" in the sense of freedom: to +assure everyone the effective freedom to copy and redistribute it, +with or without modifying it, either commercially or noncommercially. 
+Secondarily, this License preserves for the author and publisher a way +to get credit for their work, while not being considered responsible +for modifications made by others. + +This License is a kind of "copyleft", which means that derivative +works of the document must themselves be free in the same sense. It +complements the GNU General Public License, which is a copyleft +license designed for free software. + +We have designed this License in order to use it for manuals for free +software, because free software needs free documentation: a free +program should come with manuals providing the same freedoms that the +software does. But this License is not limited to software manuals; +it can be used for any textual work, regardless of subject matter or +whether it is published as a printed book. We recommend this License +principally for works whose purpose is instruction or reference. + + +\begin{center} +{\Large\bf 1. APPLICABILITY AND DEFINITIONS} +\end{center} + +This License applies to any manual or other work, in any medium, that +contains a notice placed by the copyright holder saying it can be +distributed under the terms of this License. Such a notice grants a +world-wide, royalty-free license, unlimited in duration, to use that +work under the conditions stated herein. The \textbf{"Document"}, below, +refers to any such manual or work. Any member of the public is a +licensee, and is addressed as \textbf{"you"}. You accept the license if you +copy, modify or distribute the work in a way requiring permission +under copyright law. + +A \textbf{"Modified Version"} of the Document means any work containing the +Document or a portion of it, either copied verbatim, or with +modifications and/or translated into another language. 
+ +A \textbf{"Secondary Section"} is a named appendix or a front-matter section of +the Document that deals exclusively with the relationship of the +publishers or authors of the Document to the Document's overall subject +(or to related matters) and contains nothing that could fall directly +within that overall subject. (Thus, if the Document is in part a +textbook of mathematics, a Secondary Section may not explain any +mathematics.) The relationship could be a matter of historical +connection with the subject or with related matters, or of legal, +commercial, philosophical, ethical or political position regarding +them. + +The \textbf{"Invariant Sections"} are certain Secondary Sections whose titles +are designated, as being those of Invariant Sections, in the notice +that says that the Document is released under this License. If a +section does not fit the above definition of Secondary then it is not +allowed to be designated as Invariant. The Document may contain zero +Invariant Sections. If the Document does not identify any Invariant +Sections then there are none. + +The \textbf{"Cover Texts"} are certain short passages of text that are listed, +as Front-Cover Texts or Back-Cover Texts, in the notice that says that +the Document is released under this License. A Front-Cover Text may +be at most 5 words, and a Back-Cover Text may be at most 25 words. + +A \textbf{"Transparent"} copy of the Document means a machine-readable copy, +represented in a format whose specification is available to the +general public, that is suitable for revising the document +straightforwardly with generic text editors or (for images composed of +pixels) generic paint programs or (for drawings) some widely available +drawing editor, and that is suitable for input to text formatters or +for automatic translation to a variety of formats suitable for input +to text formatters. 
A copy made in an otherwise Transparent file +format whose markup, or absence of markup, has been arranged to thwart +or discourage subsequent modification by readers is not Transparent. +An image format is not Transparent if used for any substantial amount +of text. A copy that is not "Transparent" is called \textbf{"Opaque"}. + +Examples of suitable formats for Transparent copies include plain +ASCII without markup, Texinfo input format, LaTeX input format, SGML +or XML using a publicly available DTD, and standard-conforming simple +HTML, PostScript or PDF designed for human modification. Examples of +transparent image formats include PNG, XCF and JPG. Opaque formats +include proprietary formats that can be read and edited only by +proprietary word processors, SGML or XML for which the DTD and/or +processing tools are not generally available, and the +machine-generated HTML, PostScript or PDF produced by some word +processors for output purposes only. + +The \textbf{"Title Page"} means, for a printed book, the title page itself, +plus such following pages as are needed to hold, legibly, the material +this License requires to appear in the title page. For works in +formats which do not have any title page as such, "Title Page" means +the text near the most prominent appearance of the work's title, +preceding the beginning of the body of the text. + +A section \textbf{"Entitled XYZ"} means a named subunit of the Document whose +title either is precisely XYZ or contains XYZ in parentheses following +text that translates XYZ in another language. (Here XYZ stands for a +specific section name mentioned below, such as \textbf{"Acknowledgements"}, +\textbf{"Dedications"}, \textbf{"Endorsements"}, or \textbf{"History"}.) +To \textbf{"Preserve the Title"} +of such a section when you modify the Document means that it remains a +section "Entitled XYZ" according to this definition. 
+ +The Document may include Warranty Disclaimers next to the notice which +states that this License applies to the Document. These Warranty +Disclaimers are considered to be included by reference in this +License, but only as regards disclaiming warranties: any other +implication that these Warranty Disclaimers may have is void and has +no effect on the meaning of this License. + + +\begin{center} +{\Large\bf 2. VERBATIM COPYING} +\end{center} + +You may copy and distribute the Document in any medium, either +commercially or noncommercially, provided that this License, the +copyright notices, and the license notice saying this License applies +to the Document are reproduced in all copies, and that you add no other +conditions whatsoever to those of this License. You may not use +technical measures to obstruct or control the reading or further +copying of the copies you make or distribute. However, you may accept +compensation in exchange for copies. If you distribute a large enough +number of copies you must also follow the conditions in section 3. + +You may also lend copies, under the same conditions stated above, and +you may publicly display copies. + + +\begin{center} +{\Large\bf 3. COPYING IN QUANTITY} +\end{center} + + +If you publish printed copies (or copies in media that commonly have +printed covers) of the Document, numbering more than 100, and the +Document's license notice requires Cover Texts, you must enclose the +copies in covers that carry, clearly and legibly, all these Cover +Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on +the back cover. Both covers must also clearly and legibly identify +you as the publisher of these copies. The front cover must present +the full title with all words of the title equally prominent and +visible. You may add other material on the covers in addition. 
+Copying with changes limited to the covers, as long as they preserve +the title of the Document and satisfy these conditions, can be treated +as verbatim copying in other respects. + +If the required texts for either cover are too voluminous to fit +legibly, you should put the first ones listed (as many as fit +reasonably) on the actual cover, and continue the rest onto adjacent +pages. + +If you publish or distribute Opaque copies of the Document numbering +more than 100, you must either include a machine-readable Transparent +copy along with each Opaque copy, or state in or with each Opaque copy +a computer-network location from which the general network-using +public has access to download using public-standard network protocols +a complete Transparent copy of the Document, free of added material. +If you use the latter option, you must take reasonably prudent steps, +when you begin distribution of Opaque copies in quantity, to ensure +that this Transparent copy will remain thus accessible at the stated +location until at least one year after the last time you distribute an +Opaque copy (directly or through your agents or retailers) of that +edition to the public. + +It is requested, but not required, that you contact the authors of the +Document well before redistributing any large number of copies, to give +them a chance to provide you with an updated version of the Document. + + +\begin{center} +{\Large\bf 4. MODIFICATIONS} +\end{center} + +You may copy and distribute a Modified Version of the Document under +the conditions of sections 2 and 3 above, provided that you release +the Modified Version under precisely this License, with the Modified +Version filling the role of the Document, thus licensing distribution +and modification of the Modified Version to whoever possesses a copy +of it. In addition, you must do these things in the Modified Version: + +\begin{itemize} +\item[A.] 
+ Use in the Title Page (and on the covers, if any) a title distinct + from that of the Document, and from those of previous versions + (which should, if there were any, be listed in the History section + of the Document). You may use the same title as a previous version + if the original publisher of that version gives permission. + +\item[B.] + List on the Title Page, as authors, one or more persons or entities + responsible for authorship of the modifications in the Modified + Version, together with at least five of the principal authors of the + Document (all of its principal authors, if it has fewer than five), + unless they release you from this requirement. + +\item[C.] + State on the Title page the name of the publisher of the + Modified Version, as the publisher. + +\item[D.] + Preserve all the copyright notices of the Document. + +\item[E.] + Add an appropriate copyright notice for your modifications + adjacent to the other copyright notices. + +\item[F.] + Include, immediately after the copyright notices, a license notice + giving the public permission to use the Modified Version under the + terms of this License, in the form shown in the Addendum below. + +\item[G.] + Preserve in that license notice the full lists of Invariant Sections + and required Cover Texts given in the Document's license notice. + +\item[H.] + Include an unaltered copy of this License. + +\item[I.] + Preserve the section Entitled "History", Preserve its Title, and add + to it an item stating at least the title, year, new authors, and + publisher of the Modified Version as given on the Title Page. If + there is no section Entitled "History" in the Document, create one + stating the title, year, authors, and publisher of the Document as + given on its Title Page, then add an item describing the Modified + Version as stated in the previous sentence. + +\item[J.] 
+ Preserve the network location, if any, given in the Document for + public access to a Transparent copy of the Document, and likewise + the network locations given in the Document for previous versions + it was based on. These may be placed in the "History" section. + You may omit a network location for a work that was published at + least four years before the Document itself, or if the original + publisher of the version it refers to gives permission. + +\item[K.] + For any section Entitled "Acknowledgements" or "Dedications", + Preserve the Title of the section, and preserve in the section all + the substance and tone of each of the contributor acknowledgements + and/or dedications given therein. + +\item[L.] + Preserve all the Invariant Sections of the Document, + unaltered in their text and in their titles. Section numbers + or the equivalent are not considered part of the section titles. + +\item[M.] + Delete any section Entitled "Endorsements". Such a section + may not be included in the Modified Version. + +\item[N.] + Do not retitle any existing section to be Entitled "Endorsements" + or to conflict in title with any Invariant Section. + +\item[O.] + Preserve any Warranty Disclaimers. +\end{itemize} + +If the Modified Version includes new front-matter sections or +appendices that qualify as Secondary Sections and contain no material +copied from the Document, you may at your option designate some or all +of these sections as invariant. To do this, add their titles to the +list of Invariant Sections in the Modified Version's license notice. +These titles must be distinct from any other section titles. + +You may add a section Entitled "Endorsements", provided it contains +nothing but endorsements of your Modified Version by various +parties--for example, statements of peer review or that the text has +been approved by an organization as the authoritative definition of a +standard. 
+ +You may add a passage of up to five words as a Front-Cover Text, and a +passage of up to 25 words as a Back-Cover Text, to the end of the list +of Cover Texts in the Modified Version. Only one passage of +Front-Cover Text and one of Back-Cover Text may be added by (or +through arrangements made by) any one entity. If the Document already +includes a cover text for the same cover, previously added by you or +by arrangement made by the same entity you are acting on behalf of, +you may not add another; but you may replace the old one, on explicit +permission from the previous publisher that added the old one. + +The author(s) and publisher(s) of the Document do not by this License +give permission to use their names for publicity for or to assert or +imply endorsement of any Modified Version. + + +\begin{center} +{\Large\bf 5. COMBINING DOCUMENTS} +\end{center} + + +You may combine the Document with other documents released under this +License, under the terms defined in section 4 above for modified +versions, provided that you include in the combination all of the +Invariant Sections of all of the original documents, unmodified, and +list them all as Invariant Sections of your combined work in its +license notice, and that you preserve all their Warranty Disclaimers. + +The combined work need only contain one copy of this License, and +multiple identical Invariant Sections may be replaced with a single +copy. If there are multiple Invariant Sections with the same name but +different contents, make the title of each such section unique by +adding at the end of it, in parentheses, the name of the original +author or publisher of that section if known, or else a unique number. +Make the same adjustment to the section titles in the list of +Invariant Sections in the license notice of the combined work. 
+ +In the combination, you must combine any sections Entitled "History" +in the various original documents, forming one section Entitled +"History"; likewise combine any sections Entitled "Acknowledgements", +and any sections Entitled "Dedications". You must delete all sections +Entitled "Endorsements". + +\begin{center} +{\Large\bf 6. COLLECTIONS OF DOCUMENTS} +\end{center} + +You may make a collection consisting of the Document and other documents +released under this License, and replace the individual copies of this +License in the various documents with a single copy that is included in +the collection, provided that you follow the rules of this License for +verbatim copying of each of the documents in all other respects. + +You may extract a single document from such a collection, and distribute +it individually under this License, provided you insert a copy of this +License into the extracted document, and follow this License in all +other respects regarding verbatim copying of that document. + + +\begin{center} +{\Large\bf 7. AGGREGATION WITH INDEPENDENT WORKS} +\end{center} + + +A compilation of the Document or its derivatives with other separate +and independent documents or works, in or on a volume of a storage or +distribution medium, is called an "aggregate" if the copyright +resulting from the compilation is not used to limit the legal rights +of the compilation's users beyond what the individual works permit. +When the Document is included in an aggregate, this License does not +apply to the other works in the aggregate which are not themselves +derivative works of the Document. + +If the Cover Text requirement of section 3 is applicable to these +copies of the Document, then if the Document is less than one half of +the entire aggregate, the Document's Cover Texts may be placed on +covers that bracket the Document within the aggregate, or the +electronic equivalent of covers if the Document is in electronic form. 
+Otherwise they must appear on printed covers that bracket the whole +aggregate. + + +\begin{center} +{\Large\bf 8. TRANSLATION} +\end{center} + + +Translation is considered a kind of modification, so you may +distribute translations of the Document under the terms of section 4. +Replacing Invariant Sections with translations requires special +permission from their copyright holders, but you may include +translations of some or all Invariant Sections in addition to the +original versions of these Invariant Sections. You may include a +translation of this License, and all the license notices in the +Document, and any Warranty Disclaimers, provided that you also include +the original English version of this License and the original versions +of those notices and disclaimers. In case of a disagreement between +the translation and the original version of this License or a notice +or disclaimer, the original version will prevail. + +If a section in the Document is Entitled "Acknowledgements", +"Dedications", or "History", the requirement (section 4) to Preserve +its Title (section 1) will typically require changing the actual +title. + + +\begin{center} +{\Large\bf 9. TERMINATION} +\end{center} + + +You may not copy, modify, sublicense, or distribute the Document except +as expressly provided for under this License. Any other attempt to +copy, modify, sublicense or distribute the Document is void, and will +automatically terminate your rights under this License. However, +parties who have received copies, or rights, from you under this +License will not have their licenses terminated so long as such +parties remain in full compliance. + + +\begin{center} +{\Large\bf 10. FUTURE REVISIONS OF THIS LICENSE} +\end{center} + + +The Free Software Foundation may publish new, revised versions +of the GNU Free Documentation License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. 
See +http://www.gnu.org/copyleft/. + +Each version of the License is given a distinguishing version number. +If the Document specifies that a particular numbered version of this +License "or any later version" applies to it, you have the option of +following the terms and conditions either of that specified version or +of any later version that has been published (not as a draft) by the +Free Software Foundation. If the Document does not specify a version +number of this License, you may choose any version ever published (not +as a draft) by the Free Software Foundation. + + +\begin{center} +{\Large\bf ADDENDUM: How to use this License for your documents} +% TODO: this is too long for table of contents +\end{center} + +To use this License in a document you have written, include a copy of +the License in the document and put the following copyright and +license notices just after the title page: + +\bigskip +\begin{quote} + Copyright \copyright YEAR YOUR NAME. + Permission is granted to copy, distribute and/or modify this document + under the terms of the GNU Free Documentation License, Version 1.2 + or any later version published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU + Free Documentation License". +\end{quote} +\bigskip + +If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts, +replace the "with...Texts." line with this: + +\bigskip +\begin{quote} + with the Invariant Sections being LIST THEIR TITLES, with the + Front-Cover Texts being LIST, and with the Back-Cover Texts being LIST. +\end{quote} +\bigskip + +If you have Invariant Sections without Cover Texts, or some other +combination of the three, merge those two alternatives to suit the +situation. 
+ +If your document contains nontrivial examples of program code, we +recommend releasing these examples in parallel under your choice of +free software license, such as the GNU General Public License, +to permit their use in free software. + +%--------------------------------------------------------------------- diff --git a/docs/manuals/en/catalog/fix_tex.pl b/docs/manuals/en/catalog/fix_tex.pl new file mode 100755 index 00000000..98657576 --- /dev/null +++ b/docs/manuals/en/catalog/fix_tex.pl @@ -0,0 +1,184 @@ +#!/usr/bin/perl -w +# Fixes various things within tex files. + +use strict; + +my %args; + + +sub get_includes { + # Get a list of include files from the top-level tex file. + my (@list,$file); + + foreach my $filename (@_) { + $filename or next; + # Start with the top-level latex file so it gets checked too. + push (@list,$filename); + + # Get a list of all the html files in the directory. + open IF,"<$filename" or die "Cannot open input file $filename"; + while () { + chomp; + push @list,"$1.tex" if (/\\include\{(.*?)\}/); + } + + close IF; + } + return @list; +} + +sub convert_files { + my (@files) = @_; + my ($linecnt,$filedata,$output,$itemcnt,$indentcnt,$cnt); + + $cnt = 0; + foreach my $file (@files) { + # Open the file and load the whole thing into $filedata. A bit wasteful but + # easier to deal with, and we don't have a problem with speed here. + $filedata = ""; + open IF,"<$file" or die "Cannot open input file $file"; + while () { + $filedata .= $_; + } + close IF; + + # We look for a line that starts with \item, and indent the two next lines (if not blank) + # by three spaces. + my $linecnt = 3; + $indentcnt = 0; + $output = ""; + # Process a line at a time. + foreach (split(/\n/,$filedata)) { + $_ .= "\n"; # Put back the return. + # If this line is less than the third line past the \item command, + # and the line isn't blank and doesn't start with whitespace + # add three spaces to the start of the line. 
Keep track of the number + # of lines changed. + if ($linecnt < 3 and !/^\\item/) { + if (/^[^\n\s]/) { + $output .= " " . $_; + $indentcnt++; + } else { + $output .= $_; + } + $linecnt++; + } else { + $linecnt = 3; + $output .= $_; + } + /^\\item / and $linecnt = 1; + } + + + # This is an item line. We need to process it too. If inside a \begin{description} environment, convert + # \item {\bf xxx} to \item [xxx] or \item [{xxx}] (if xxx contains '[' or ']'. + $itemcnt = 0; + $filedata = $output; + $output = ""; + my ($before,$descrip,$this,$between); + + # Find any \begin{description} environment + while ($filedata =~ /(\\begin[\s\n]*\{[\s\n]*description[\s\n]*\})(.*?)(\\end[\s\n]*\{[\s\n]*description[\s\n]*\})/s) { + $output .= $` . $1; + $filedata = $3 . $'; + $descrip = $2; + + # Search for \item {\bf xxx} + while ($descrip =~ /\\item[\s\n]*\{[\s\n]*\\bf[\s\n]*/s) { + $descrip = $'; + $output .= $`; + ($between,$descrip) = find_matching_brace($descrip); + if (!$descrip) { + $linecnt = $output =~ tr/\n/\n/; + print STDERR "Missing matching curly brace at line $linecnt in $file\n" if (!$descrip); + } + + # Now do the replacement. + $between = '{' . $between . '}' if ($between =~ /\[|\]/); + $output .= "\\item \[$between\]"; + $itemcnt++; + } + $output .= $descrip; + } + $output .= $filedata; + + # If any hyphens or \item commnads were converted, save the file. + if ($indentcnt or $itemcnt) { + open OF,">$file" or die "Cannot open output file $file"; + print OF $output; + close OF; + print "$indentcnt indent", ($indentcnt == 1) ? "" : "s"," added in $file\n"; + print "$itemcnt item", ($itemcnt == 1) ? "" : "s"," Changed in $file\n"; + } + + $cnt += $indentcnt + $itemcnt; + } + return $cnt; +} + +sub find_matching_brace { + # Finds text up to the next matching brace. Assumes that the input text doesn't contain + # the opening brace, but we want to find text up to a matching closing one. 
+ # Returns the text between the matching braces, followed by the rest of the text following + # (which does not include the matching brace). + # + my $str = shift; + my ($this,$temp); + my $cnt = 1; + + while ($cnt) { + # Ignore verbatim constructs involving curly braces, or if the character preceding + # the curly brace is a backslash. + if ($str =~ /\\verb\*?\{.*?\{|\\verb\*?\}.*?\}|\{|\}/s) { + $this .= $`; + $str = $'; + $temp = $&; + + if ((substr($this,-1,1) eq '\\') or + $temp =~ /^\\verb/) { + $this .= $temp; + next; + } + + $cnt += ($temp eq '{') ? 1 : -1; + # If this isn't the matching curly brace ($cnt > 0), include the brace. + $this .= $temp if ($cnt); + } else { + # No matching curly brace found. + return ($this . $str,''); + } + } + return ($this,$str); +} + +sub check_arguments { + # Checks command-line arguments for ones starting with -- puts them into + # a hash called %args and removes them from @ARGV. + my $args = shift; + my $i; + + for ($i = 0; $i < $#ARGV; $i++) { + $ARGV[$i] =~ /^\-+/ or next; + $ARGV[$i] =~ s/^\-+//; + $args{$ARGV[$i]} = ""; + delete ($ARGV[$i]); + + } +} + +################################################################## +# MAIN #### +################################################################## + +my @includes; +my $cnt; + +check_arguments(\%args); +die "No Files given to Check\n" if ($#ARGV < 0); + +# Examine the file pointed to by the first argument to get a list of +# includes to test. +@includes = get_includes(@ARGV); + +$cnt = convert_files(@includes); +print "No lines changed\n" unless $cnt; diff --git a/docs/manuals/en/catalog/index.perl b/docs/manuals/en/catalog/index.perl new file mode 100644 index 00000000..bc4e1b60 --- /dev/null +++ b/docs/manuals/en/catalog/index.perl @@ -0,0 +1,564 @@ +# This module does multiple indices, supporting the style of the LaTex 'index' +# package. + +# Version Information: +# 16-Feb-2005 -- Original Creation. Karl E. 
Cunningham +# 14-Mar-2005 -- Clarified and Consolodated some of the code. +# Changed to smoothly handle single and multiple indices. + +# Two LaTeX index formats are supported... +# --- SINGLE INDEX --- +# \usepackage{makeidx} +# \makeindex +# \index{entry1} +# \index{entry2} +# \index{entry3} +# ... +# \printindex +# +# --- MULTIPLE INDICES --- +# +# \usepackage{makeidx} +# \usepackage{index} +# \makeindex -- latex2html doesn't care but LaTeX does. +# \newindex{ref1}{ext1}{ext2}{title1} +# \newindex{ref2}{ext1}{ext2}{title2} +# \newindex{ref3}{ext1}{ext2}{title3} +# \index[ref1]{entry1} +# \index[ref1]{entry2} +# \index[ref3]{entry3} +# \index[ref2]{entry4} +# \index{entry5} +# \index[ref3]{entry6} +# ... +# \printindex[ref1] +# \printindex[ref2] +# \printindex[ref3] +# \printindex +# ___________________ +# +# For the multiple-index style, each index is identified by the ref argument to \newindex, \index, +# and \printindex. A default index is allowed, which is indicated by omitting the optional +# argument. The default index does not require a \newindex command. As \index commands +# are encountered, their entries are stored according +# to the ref argument. When the \printindex command is encountered, the stored index +# entries for that argument are retrieved and printed. The title for each index is taken +# from the last argument in the \newindex command. +# While processing \index and \printindex commands, if no argument is given the index entries +# are built into a default index. The title of the default index is simply "Index". +# This makes the difference between single- and multiple-index processing trivial. +# +# Another method can be used by omitting the \printindex command and just using \include to +# pull in index files created by the makeindex program. These files will start with +# \begin{theindex}. This command is used to determine where to print the index. 
Using this +# approach, the indices will be output in the same order as the newindex commands were +# originally found (see below). Using a combination of \printindex and \include{indexfile} has not +# been tested and may produce undesireable results. +# +# The index data are stored in a hash for later sorting and output. As \printindex +# commands are handled, the order in which they were found in the tex filea is saved, +# associated with the ref argument to \printindex. +# +# We use the original %index hash to store the index data into. We append a \002 followed by the +# name of the index to isolate the entries in different indices from each other. This is necessary +# so that different indices can have entries with the same name. For the default index, the \002 is +# appended without the name. +# +# Since the index order in the output cannot be determined if the \include{indexfile} +# command is used, the order will be assumed from the order in which the \newindex +# commands were originally seen in the TeX files. This order is saved as well as the +# order determined from any printindex{ref} commands. If \printindex commnads are used +# to specify the index output, that order will be used. If the \include{idxfile} command +# is used, the order of the original newindex commands will be used. In this case the +# default index will be printed last since it doesn't have a corresponding \newindex +# command and its order cannot be determined. Mixing \printindex and \include{idxfile} +# commands in the same file is likely to produce less than satisfactory results. +# +# +# The hash containing index data is named %indices. It contains the following data: +#{ +# 'title' => { +# $ref1 => $indextitle , +# $ref2 => $indextitle , +# ... +# }, +# 'newcmdorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index. +# 'printindorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index. 
+#} + + +# Globals to handle multiple indices. +my %indices; + +# This tells the system to use up to 7 words in index entries. +$WORDS_IN_INDEX = 10; + +# KEC 2-18-05 +# Handles the \newindex command. This is called if the \newindex command is +# encountered in the LaTex source. Gets the index ref and title from the arguments. +# Saves the index ref and title. +# Note that we are called once to handle multiple \newindex commands that are +# newline-separated. +sub do_cmd_newindex { + my $data = shift; + # The data is sent to us as fields delimited by their ID #'s. We extract the + # fields. + foreach my $line (split("\n",$data)) { + my @fields = split (/(?:\<\#\d+?\#\>)+/,$line); + + # The index name and title are the second and fourth fields in the data. + if ($line =~ /^ \001 + # @ -> \002 + # | -> \003 + $* = 1; $str =~ s/\n\s*/ /g; $* = 0; # remove any newlines + # protect \001 occurring with images + $str =~ s/\001/\016/g; # 0x1 to 0xF + $str =~ s/\\\\/\011/g; # Double backslash -> 0xB + $str =~ s/\\;SPMquot;/\012/g; # \;SPMquot; -> 0xC + $str =~ s/;SPMquot;!/\013/g; # ;SPMquot; -> 0xD + $str =~ s/!/\001/g; # Exclamation point -> 0x1 + $str =~ s/\013/!/g; # 0xD -> Exclaimation point + $str =~ s/;SPMquot;@/\015/g; # ;SPMquot;@ to 0xF + $str =~ s/@/\002/g; # At sign -> 0x2 + $str =~ s/\015/@/g; # 0xF to At sign + $str =~ s/;SPMquot;\|/\017/g; # ;SMPquot;| to 0x11 + $str =~ s/\|/\003/g; # Vertical line to 0x3 + $str =~ s/\017/|/g; # 0x11 to vertical line + $str =~ s/;SPMquot;(.)/\1/g; # ;SPMquot; -> whatever the next character is + $str =~ s/\012/;SPMquot;/g; # 0x12 to ;SPMquot; + $str =~ s/\011/\\\\/g; # 0x11 to double backslash + local($key_part, $pageref) = split("\003", $str, 2); + + # For any keys of the form: blablabla!blablabla, which want to be split at the + # exclamation point, replace the ! with a comma and a space. We don't do it + # that way for this index. 
+ $key_part =~ s/\001/, /g; + local(@keys) = split("\001", $key_part); + # If TITLE is not yet available use $before. + $TITLE = $saved_title if (($saved_title)&&(!($TITLE)||($TITLE eq $default_title))); + $TITLE = $before unless $TITLE; + # Save the reference + local($words) = ''; + if ($SHOW_SECTION_NUMBERS) { $words = &make_idxnum; } + elsif ($SHORT_INDEX) { $words = &make_shortidxname; } + else { $words = &make_idxname; } + local($super_key) = ''; + local($sort_key, $printable_key, $cur_key); + foreach $key (@keys) { + $key =~ s/\016/\001/g; # revert protected \001s + ($sort_key, $printable_key) = split("\002", $key); + # + # RRM: 16 May 1996 + # any \label in the printable-key will have already + # created a label where the \index occurred. + # This has to be removed, so that the desired label + # will be found on the Index page instead. + # + if ($printable_key =~ /tex2html_anchor_mark/ ) { + $printable_key =~ s/><\/A>$cross_ref_mark/ + $printable_key =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/ + do { ($label,$id) = ($1,$2); + $ref_label = $external_labels{$label} unless + ($ref_label = $ref_files{$label}); + '"' . "$ref_label#$label" . '">' . + &get_ref_mark($label,$id)} + /geo; + } + $printable_key =~ s/<\#[^\#>]*\#>//go; + #RRM + # recognise \char combinations, for a \backslash + # + $printable_key =~ s/\&\#;\'134/\\/g; # restore \\s + $printable_key =~ s/\&\#;\`
/\\/g; # ditto + $printable_key =~ s/\&\#;*SPMquot;92/\\/g; # ditto + # + # $sort_key .= "@$printable_key" if !($printable_key); # RRM + $sort_key .= "@$printable_key" if !($sort_key); # RRM + $sort_key =~ tr/A-Z/a-z/; + if ($super_key) { + $cur_key = $super_key . "\001" . $sort_key; + $sub_index{$super_key} .= $cur_key . "\004"; + } else { + $cur_key = $sort_key; + } + + # Append the $index_name to the current key with a \002 delimiter. This will + # allow the same index entry to appear in more than one index. + $index_key = $cur_key . "\002$index_name"; + + $index{$index_key} .= ""; + + # + # RRM, 15 June 1996 + # if there is no printable key, but one is known from + # a previous index-entry, then use it. + # + if (!($printable_key) && ($printable_key{$index_key})) + { $printable_key = $printable_key{$index_key}; } +# if (!($printable_key) && ($printable_key{$cur_key})) +# { $printable_key = $printable_key{$cur_key}; } + # + # do not overwrite the printable_key if it contains an anchor + # + if (!($printable_key{$index_key} =~ /tex2html_anchor_mark/ )) + { $printable_key{$index_key} = $printable_key || $key; } +# if (!($printable_key{$cur_key} =~ /tex2html_anchor_mark/ )) +# { $printable_key{$cur_key} = $printable_key || $key; } + + $super_key = $cur_key; + } + # + # RRM + # page-ranges, from |( and |) and |see + # + if ($pageref) { + if ($pageref eq "\(" ) { + $pageref = ''; + $next .= " from "; + } elsif ($pageref eq "\)" ) { + $pageref = ''; + local($next) = $index{$index_key}; +# local($next) = $index{$cur_key}; + # $next =~ s/[\|] *$//; + $next =~ s/(\n )?\| $//; + $index{$index_key} = "$next to "; +# $index{$cur_key} = "$next to "; + } + } + + if ($pageref) { + $pageref =~ s/\s*$//g; # remove trailing spaces + if (!$pageref) { $pageref = ' ' } + $pageref =~ s/see/see <\/i> /g; + # + # RRM: 27 Dec 1996 + # check if $pageref corresponds to a style command. + # If so, apply it to the $words. 
+ # + local($tmp) = "do_cmd_$pageref"; + if (defined &$tmp) { + $words = &$tmp("<#0#>$words<#0#>"); + $words =~ s/<\#[^\#]*\#>//go; + $pageref = ''; + } + } + # + # RRM: 25 May 1996 + # any \label in the pageref section will have already + # created a label where the \index occurred. + # This has to be removed, so that the desired label + # will be found on the Index page instead. + # + if ($pageref) { + if ($pageref =~ /tex2html_anchor_mark/ ) { + $pageref =~ s/><\/A>
$cross_ref_mark/ + $pageref =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/ + do { ($label,$id) = ($1,$2); + $ref_files{$label} = ''; # ???? RRM + if ($index_labels{$label}) { $ref_label = ''; } + else { $ref_label = $external_labels{$label} + unless ($ref_label = $ref_files{$label}); + } + '"' . "$ref_label#$label" . '">' . &get_ref_mark($label,$id)}/geo; + } + $pageref =~ s/<\#[^\#>]*\#>//go; + + if ($pageref eq ' ') { $index{$index_key}='@'; } + else { $index{$index_key} .= $pageref . "\n | "; } + } else { + local($thisref) = &make_named_href('',"$CURRENT_FILE#$br_id",$words); + $thisref =~ s/\n//g; + $index{$index_key} .= $thisref."\n | "; + } + #print "\nREF: $sort_key : $index_key :$index{$index_key}"; + + #join('',"$anchor_invisible_mark<\/A>",$_); + + "$anchor_invisible_mark<\/A>"; +} + + +# KEC. -- Copied from makeidx.perl, then modified to do multiple indices. +# Feeds the index entries to the output. This is called for each index to be built. +# +# Generates a list of lookup keys for index entries, from both %printable_keys +# and %index keys. +# Sorts the keys according to index-sorting rules. +# Removes keys with a 0x01 token. (duplicates?) +# Builds a string to go to the index file. +# Adds the index entries to the string if they belong in this index. +# Keeps track of which index is being worked on, so only the proper entries +# are included. +# Places the index just built in to the output at the proper place. +{ my $index_number = 0; +sub add_real_idx { + print "\nDoing the index ... Index Number $index_number\n"; + local($key, @keys, $next, $index, $old_key, $old_html); + my ($idx_ref,$keyref); + # RRM, 15.6.96: index constructed from %printable_key, not %index + @keys = keys %printable_key; + + while (/$idx_mark/) { + # Get the index reference from what follows the $idx_mark and + # remove it from the string. 
+ s/$idxmark\002(.*?)\002/$idxmark/; + $idx_ref = $1; + $index = ''; + # include non- makeidx index-entries + foreach $key (keys %index) { + next if $printable_key{$key}; + $old_key = $key; + if ($key =~ s/###(.*)$//) { + next if $printable_key{$key}; + push (@keys, $key); + $printable_key{$key} = $key; + if ($index{$old_key} =~ /HREF="([^"]*)"/i) { + $old_html = $1; + $old_html =~ /$dd?([^#\Q$dd\E]*)#/; + $old_html = $1; + } else { $old_html = '' } + $index{$key} = $index{$old_key} . $old_html."\n | "; + }; + } + @keys = sort makeidx_keysort @keys; + @keys = grep(!/\001/, @keys); + my $cnt = 0; + foreach $key (@keys) { + my ($keyref) = $key =~ /.*\002(.*)/; + next unless ($idx_ref eq $keyref); # KEC. + $index .= &add_idx_key($key); + $cnt++; + } + print "$cnt Index Entries Added\n"; + $index = '
'.$index unless ($index =~ /^\s*/); + $index_number++; # KEC. + if ($SHORT_INDEX) { + print "(compact version with Legend)"; + local($num) = ( $index =~ s/\ 50 ) { + s/$idx_mark/$preindex
\n$index\n<\/DL>$preindex/o; + } else { + s/$idx_mark/$preindex
\n$index\n<\/DL>/o; + } + } else { + s/$idx_mark/
\n$index\n<\/DL>/o; } + } +} +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# The bibliography and the index should be treated as separate sections +# in their own HTML files. The \bibliography{} command acts as a sectioning command +# that has the desired effect. But when the bibliography is constructed +# manually using the thebibliography environment, or when using the +# theindex environment it is not possible to use the normal sectioning +# mechanism. This subroutine inserts a \bibliography{} or a dummy +# \textohtmlindex command just before the appropriate environments +# to force sectioning. +sub add_bbl_and_idx_dummy_commands { + local($id) = $global{'max_id'}; + + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$bbl_cnt++; $1/eg; + ## if ($bbl_cnt == 1) { + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$id++; "\\bibliography$O$id$C$O$id$C $1"/geo; + #} + $global{'max_id'} = $id; + # KEC. Modified to global substitution to place multiple index tokens. + s/[\\]begin\s*($O\d+$C)\s*theindex/\\textohtmlindex$1/go; + # KEC. Modified to pick up the optional argument to \printindex + s/[\\]printindex\s*(\[.*?\])?/ + do { (defined $1) ? "\\textohtmlindex $1" : "\\textohtmlindex []"; } /ego; + &lib_add_bbl_and_idx_dummy_commands() if defined(&lib_add_bbl_and_idx_dummy_commands); +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# For each textohtmlindex mark found, determine the index titles and headers. +# We place the index ref in the header so the proper index can be generated later. +# For the default index, the index ref is blank. +# +# One problem is that this routine is called twice.. Once for processing the +# command as originally seen, and once for processing the command when +# doing the name for the index file. We can detect that by looking at the +# id numbers (or ref) surrounding the \theindex command, and not incrementing +# index_number unless a new id (or ref) is seen. 
This has the side effect of +# having to unconventionally start the index_number at -1. But it works. +# +# Gets the title from the list of indices. +# If this is the first index, save the title in $first_idx_file. This is what's referenced +# in the navigation buttons. +# Increment the index_number for next time. +# If the indexname command is defined or a newcommand defined for indexname, do it. +# Save the index TITLE in the toc +# Save the first_idx_file into the idxfile. This goes into the nav buttons. +# Build index_labels if needed. +# Create the index headings and put them in the output stream. + +{ my $index_number = 0; # Will be incremented before use. + my $first_idx_file; # Static + my $no_increment = 0; + +sub do_cmd_textohtmlindex { + local($_) = @_; + my ($idxref,$idxnum,$index_name); + + # We get called from make_name with the first argument = "\001noincrement". This is a sign + # to not increment $index_number the next time we are called. We get called twice, once + # my make_name and once by process_command. Unfortunately, make_name calls us just to set the name + # but doesn't use the result so we get called a second time by process_command. This works fine + # except for cases where there are multiple indices except if they aren't named, which is the case + # when the index is inserted by an include command in latex. In these cases we are only able to use + # the index number to decide which index to draw from, and we don't know how to increment that index + # number if we get called a variable number of times for the same index, as is the case between + # making html (one output file) and web (multiple output files) output formats. + if (/\001noincrement/) { + $no_increment = 1; + return; + } + + # Remove (but save) the index reference + s/^\s*\[(.*?)\]/{$idxref = $1; "";}/e; + + # If we have an $idxref, the index name was specified. In this case, we have all the + # information we need to carry on. 
Otherwise, we need to get the idxref + # from the $index_number and set the name to "Index". + if ($idxref) { + $index_name = $indices{'title'}{$idxref}; + } else { + if (defined ($idxref = $indices{'newcmdorder'}->[$index_number])) { + $index_name = $indices{'title'}{$idxref}; + } else { + $idxref = ''; + $index_name = "Index"; + } + } + + $idx_title = "Index"; # The name displayed in the nav bar text. + + # Only set $idxfile if we are at the first index. This will point the + # navigation panel to the first index file rather than the last. + $first_idx_file = $CURRENT_FILE if ($index_number == 0); + $idxfile = $first_idx_file; # Pointer for the Index button in the nav bar. + $toc_sec_title = $index_name; # Index link text in the toc. + $TITLE = $toc_sec_title; # Title for this index, from which its filename is built. + if (%index_labels) { &make_index_labels(); } + if (($SHORT_INDEX) && (%index_segment)) { &make_preindex(); } + else { $preindex = ''; } + local $idx_head = $section_headings{'textohtmlindex'}; + local($heading) = join('' + , &make_section_heading($TITLE, $idx_head) + , $idx_mark, "\002", $idxref, "\002" ); + local($pre,$post) = &minimize_open_tags($heading); + $index_number++ unless ($no_increment); + $no_increment = 0; + join('',"
\n" , $pre, $_); +} +} + +# Returns an index key, given the key passed as the first argument. +# Not modified for multiple indices. +sub add_idx_key { + local($key) = @_; + local($index, $next); + if (($index{$key} eq '@' )&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { $index .= "

\n
".&print_key."\n
"; } + else { $index .= "

\n
".&print_key."\n
"; } + } elsif (($index{$key})&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { + $next = "
".&print_key."\n : ". &print_idx_links; + } else { + $next = "
".&print_key."\n
". &print_idx_links; + } + $index .= $next."\n"; + $index_printed{$key} = 1; + } + + if ($sub_index{$key}) { + local($subkey, @subkeys, $subnext, $subindex); + @subkeys = sort(split("\004", $sub_index{$key})); + if ($SHORT_INDEX) { + $index .= "
".&print_key unless $index_printed{$key}; + $index .= "
\n"; + } else { + $index .= "
".&print_key."\n
" unless $index_printed{$key}; + $index .= "
\n"; + } + foreach $subkey (@subkeys) { + $index .= &add_sub_idx_key($subkey) unless ($index_printed{$subkey}); + } + $index .= "
\n"; + } + return $index; +} + +1; # Must be present as the last line. diff --git a/docs/manuals/en/catalog/internaldb.tex b/docs/manuals/en/catalog/internaldb.tex new file mode 100644 index 00000000..65cd0ea0 --- /dev/null +++ b/docs/manuals/en/catalog/internaldb.tex @@ -0,0 +1,76 @@ +%% +%% + +\chapter{The internal database is not supported, please do not +use it.} +\label{InternalDbChapter} +\index[general]{Use it!The internal database is not supported please +do not } +\index[general]{The internal database is not supported, please do not +use it. } + +\section{Internal Bacula Database} +\index[general]{Internal Bacula Database } +\index[general]{Database!Internal Bacula } + +Previously it was intended to be used primarily by Bacula developers for +testing; although SQLite is also a good choice for this. We do not recommend +its use in general. + +This database is simplistic in that it consists entirely of Bacula's internal +structures appended sequentially to a file. Consequently, it is in most cases +inappropriate for sites with many clients or systems with large numbers of +files, or long-term production environments. + +Below, you will find a table comparing the features available with SQLite and +MySQL and with the internal Bacula database. At the current time, you cannot +dynamically switch from one to the other, but must rebuild the Bacula source +code. If you wish to experiment with both, it is possible to build both +versions of Bacula and install them into separate directories. 
+ +\addcontentsline{lot}{table}{SQLite vs MySQL Database Comparison} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{1}{|c| }{\bf Feature } & \multicolumn{1}{c| }{\bf SQLite or MySQL + } & \multicolumn{1}{c| }{\bf Bacula } \\ + \hline +{Job Record } & {Yes } & {Yes } \\ + \hline +{Media Record } & {Yes } & {Yes } \\ + \hline +{FileName Record } & {Yes } & {No } \\ + \hline +{File Record } & {Yes } & {No } \\ + \hline +{FileSet Record } & {Yes } & {Yes } \\ + \hline +{Pool Record } & {Yes } & {Yes } \\ + \hline +{Client Record } & {Yes } & {Yes } \\ + \hline +{JobMedia Record } & {Yes } & {Yes } \\ + \hline +{List Job Records } & {Yes } & {Yes } \\ + \hline +{List Media Records } & {Yes } & {Yes } \\ + \hline +{List Pool Records } & {Yes } & {Yes } \\ + \hline +{List JobMedia Records } & {Yes } & {Yes } \\ + \hline +{Delete Pool Record } & {Yes } & {Yes } \\ + \hline +{Delete Media Record } & {Yes } & {Yes } \\ + \hline +{Update Pool Record } & {Yes } & {Yes } \\ + \hline +{Implement Verify } & {Yes } & {No } \\ + \hline +{MD5 Signatures } & {Yes } & {No } +\\ \hline + +\end{longtable} + +In addition, since there is no SQL available, the Console commands: {\bf +sqlquery}, {\bf query}, {\bf retention}, and any other command that directly +uses SQL are not available with the Internal database. diff --git a/docs/manuals/en/catalog/latex2html-init.pl b/docs/manuals/en/catalog/latex2html-init.pl new file mode 100644 index 00000000..14b5c319 --- /dev/null +++ b/docs/manuals/en/catalog/latex2html-init.pl @@ -0,0 +1,10 @@ +# This file serves as a place to put initialization code and constants to +# affect the behavior of latex2html for generating the bacula manuals. + +# $LINKPOINT specifies what filename to use to link to when creating +# index.html. Not that this is a hard link. +$LINKPOINT='"$OVERALL_TITLE"'; + + +# The following must be the last line of this file. 
+1; diff --git a/docs/manuals/en/catalog/mysql.tex b/docs/manuals/en/catalog/mysql.tex new file mode 100644 index 00000000..75cc6f0e --- /dev/null +++ b/docs/manuals/en/catalog/mysql.tex @@ -0,0 +1,286 @@ +%% +%% + +\chapter{Installing and Configuring MySQL} +\label{MySqlChapter} +\index[general]{MySQL!Installing and Configuring } +\index[general]{Installing and Configuring MySQL } + +\section{Installing and Configuring MySQL -- Phase I} +\index[general]{Installing and Configuring MySQL -- Phase I } +\index[general]{Phase I!Installing and Configuring MySQL -- } + +If you use the ./configure \verb:--:with-mysql=mysql-directory statement for +configuring {\bf Bacula}, you will need MySQL version 4.1 or later installed +in the {\bf mysql-directory}. If you are using one of the new modes such as +ANSI/ISO compatibility, you may experience problems. + +If MySQL is installed in the standard system location, you need only enter +{\bf \verb:--:with-mysql} since the configure program will search all the +standard locations. If you install MySQL in your home directory or some +other non-standard directory, you will need to provide the full path to it. + +Installing and Configuring MySQL is not difficult but can be confusing the +first time. As a consequence, below, we list the steps that we used to install +it on our machines. Please note that our configuration leaves MySQL without +any user passwords. This may be an undesirable situation if you have other +users on your system. + +The notes below describe how to build MySQL from the source tar files. If +you have a pre-installed MySQL, you can return to complete the installation +of Bacula, then come back to Phase II of the MySQL installation. If you +wish to install MySQL from rpms, you will probably need to install +the following: + +\footnotesize +\begin{verbatim} +mysql-.rpm +mysql-server-.rpm +mysql-devel-.rpm +\end{verbatim} +\normalsize +The names of the packages may vary from distribution to +distribution. 
It is important to have the devel package loaded as +it contains the libraries and header files necessary to build +Bacula. There may be additional packages that are required to +install the above, for example, zlib and openssl. + +Once these packages are installed, you will be able to build Bacula (using +the files installed with the mysql package, then run MySQL using the +files installed with mysql-server. If you have installed MySQL by rpms, +please skip Phase I below, and return to complete the installation of +Bacula, then come back to Phase II of the MySQL installation when indicated +to do so. + +Beginning with Bacula version 1.31, the thread safe version of the +MySQL client library is used, and hence you should add the {\bf +\verb:--:enable-thread-safe-client} option to the {\bf +./configure} as shown below: + +\begin{enumerate} +\item Download MySQL source code from + \elink{www.mysql.com/downloads}{http://www.mysql.com/downloads} + +\item Detar it with something like: + + {\bf tar xvfz mysql-filename} + +Note, the above command requires GNU tar. If you do not have GNU tar, a +command such as: + +{\bf zcat mysql-filename | tar xvf - } + +will probably accomplish the same thing. + +\item cd {\bf mysql-source-directory} + + where you replace {\bf mysql-source-directory} with the directory name where + you put the MySQL source code. + +\item ./configure \verb:--:enable-thread-safe-client \verb:--:prefix=mysql-directory + + where you replace {\bf mysql-directory} with the directory name where you + want to install mysql. Normally for system wide use this is /usr/local/mysql. + In my case, I use \~{}kern/mysql. + +\item make + + This takes a bit of time. + +\item make install + + This will put all the necessary binaries, libraries and support files into + the {\bf mysql-directory} that you specified above. + +\item ./scripts/mysql\_install\_db + + This will create the necessary MySQL databases for controlling user access. 
+Note, this script can also be found in the {\bf bin} directory in the +installation directory + +\end{enumerate} + +The MySQL client library {\bf mysqlclient} requires the gzip compression +library {\bf libz.a} or {\bf libz.so}. If you are using rpm packages, these +libraries are in the {\bf libz-devel} package. On Debian systems, you will +need to load the {\bf zlib1g-dev} package. If you are not using rpms or debs, +you will need to find the appropriate package for your system. + +At this point, you should return to completing the installation of {\bf +Bacula}. Later after Bacula is installed, come back to this chapter to +complete the installation. Please note, the installation files used in the +second phase of the MySQL installation are created during the Bacula +Installation. + +\label{mysql_phase2} +\section{Installing and Configuring MySQL -- Phase II} +\index[general]{Installing and Configuring MySQL -- Phase II } +\index[general]{Phase II!Installing and Configuring MySQL -- } + +At this point, you should have built and installed MySQL, or already have a +running MySQL, and you should have configured, built and installed {\bf +Bacula}. If not, please complete these items before proceeding. + +Please note that the {\bf ./configure} used to build {\bf Bacula} will need to +include {\bf \verb:--:with-mysql=mysql-directory}, where {\bf mysql-directory} is the +directory name that you specified on the ./configure command for configuring +MySQL. This is needed so that Bacula can find the necessary include headers +and library files for interfacing to MySQL. + +{\bf Bacula} will install scripts for manipulating the database (create, +delete, make tables etc) into the main installation directory. These files +will be of the form *\_bacula\_* (e.g. create\_bacula\_database). These files +are also available in the \lt{}bacula-src\gt{}/src/cats directory after +running ./configure. 
If you inspect create\_bacula\_database, you will see +that it calls create\_mysql\_database. The *\_bacula\_* files are provided for +convenience. It doesn't matter what database you have chosen; +create\_bacula\_database will always create your database. + +Now you will create the Bacula MySQL database and the tables that Bacula uses. + + +\begin{enumerate} +\item Start {\bf mysql}. You might want to use the {\bf startmysql} script + provided in the Bacula release. + +\item cd \lt{}install-directory\gt{} + This directory contains the Bacula catalog interface routines. + +\item ./grant\_mysql\_privileges + This script creates unrestricted access rights for the user {\bf bacula}. + You may want to modify it to suit your situation. Please + note that none of the userids, including root, are password protected. + If you need more security, please assign a password to the root user + and to bacula. The program {\bf mysqladmin} can be used for this. + +\item ./create\_mysql\_database + This script creates the MySQL {\bf bacula} database. The databases you + create as well as the access databases will be located in + \lt{}install-dir\gt{}/var/ in a subdirectory with the name of the + database, where \lt{}install-dir\gt{} is the directory name that you + specified on the {\bf \verb:--:prefix} option. This can be important to + know if you want to make a special backup of the Bacula database or to + check its size. + +\item ./make\_mysql\_tables + This script creates the MySQL tables used by {\bf Bacula}. +\end{enumerate} + +Each of the three scripts (grant\_mysql\_privileges, create\_mysql\_database +and make\_mysql\_tables) allows the addition of a command line argument. This +can be useful for specifying the user and or password. For example, you might +need to add {\bf -u root} to the command line to have sufficient privilege to +create the Bacula tables. 
+ +To take a closer look at the access privileges that you have setup with the +above, you can do: + +\footnotesize +\begin{verbatim} +mysql-directory/bin/mysql -u root mysql +select * from user; +\end{verbatim} +\normalsize + +\section{Re-initializing the Catalog Database} +\index[general]{Database!Re-initializing the Catalog } +\index[general]{Re-initializing the Catalog Database } + +After you have done some initial testing with {\bf Bacula}, you will probably +want to re-initialize the catalog database and throw away all the test Jobs +that you ran. To do so, you can do the following: + +\footnotesize +\begin{verbatim} + cd + ./drop_mysql_tables + ./make_mysql_tables +\end{verbatim} +\normalsize + +Please note that all information in the database will be lost and you will be +starting from scratch. If you have written on any Volumes, you must write an +end of file mark on the volume so that Bacula can reuse it. Do so with: + +\footnotesize +\begin{verbatim} + (stop Bacula or unmount the drive) + mt -f /dev/nst0 rewind + mt -f /dev/nst0 weof +\end{verbatim} +\normalsize + +Where you should replace {\bf /dev/nst0} with the appropriate tape drive +device name for your machine. + +\section{Linking Bacula with MySQL} +\index[general]{Linking Bacula with MySQL } +\index[general]{MySQL!Linking Bacula with } +\index[general]{Upgrading} + +After configuring Bacula with + +./configure \verb:--:enable-thread-safe-client \verb:--:prefix=\lt{}mysql-directory\gt{} +where \lt{}mysql-directory\gt{} is in my case {\bf /home/kern/mysql}, you may +have to configure the loader so that it can find the MySQL shared libraries. +If you have previously followed this procedure and later add the {\bf +\verb:--:enable-thread-safe-client} options, you will need to rerun the {\bf +ldconfig} program shown below. If you put MySQL in a standard place such as +{\bf /usr/lib} or {\bf /usr/local/lib} this will not be necessary, but in my +case it is. 
The description that follows is Linux specific. For other +operating systems, please consult your manuals on how to do the same thing: + +First edit: {\bf /etc/ld.so.conf} and add a new line to the end of the file +with the name of the mysql-directory. In my case, it is: + +/home/kern/mysql/lib/mysql then rebuild the loader's cache with: + +/sbin/ldconfig If you upgrade to a new version of {\bf MySQL}, the shared +library names will probably change, and you must re-run the {\bf +/sbin/ldconfig} command so that the runtime loader can find them. + +Alternatively, your system my have a loader environment variable that can be +set. For example, on a Solaris system where I do not have root permission, I +use: + +LD\_LIBRARY\_PATH=/home/kern/mysql/lib/mysql + +Finally, if you have encryption enabled in MySQL, you may need to add {\bf +-lssl -lcrypto} to the link. In that case, you can either export the +appropriate LDFLAGS definition, or alternatively, you can include them +directly on the ./configure line as in: + +\footnotesize +\begin{verbatim} +LDFLAGS="-lssl -lcyrpto" \ + ./configure \ + +\end{verbatim} +\normalsize + +\section{Installing MySQL from RPMs} +\index[general]{MySQL!Installing from RPMs} +\index[general]{Installing MySQL from RPMs} +If you are installing MySQL from RPMs, you will need to install +both the MySQL binaries and the client libraries. The client +libraries are usually found in a devel package, so you must +install: + +\footnotesize +\begin{verbatim} + mysql + mysql-devel +\end{verbatim} +\normalsize + +This will be the same with most other package managers too. + +\section{Upgrading MySQL} +\index[general]{Upgrading MySQL } +\index[general]{Upgrading!MySQL } +\index[general]{Upgrading} +If you upgrade MySQL, you must reconfigure, rebuild, and re-install +Bacula otherwise you are likely to get bizarre failures. If you +install from rpms and you upgrade MySQL, you must also rebuild Bacula. +You can do so by rebuilding from the source rpm. 
To do so, you may need +to modify the bacula.spec file to account for the new MySQL version. diff --git a/docs/manuals/en/catalog/postgresql.tex b/docs/manuals/en/catalog/postgresql.tex new file mode 100644 index 00000000..15be98e9 --- /dev/null +++ b/docs/manuals/en/catalog/postgresql.tex @@ -0,0 +1,460 @@ +%% +%% + +\chapter{Installing and Configuring PostgreSQL} +\label{PostgreSqlChapter} +\index[general]{PostgreSQL!Installing and Configuring } +\index[general]{Installing and Configuring PostgreSQL } +\index[general]{Upgrading} + +If you are considering using PostreSQL, you should be aware +of their philosophy of upgrades, which could be +destabilizing for a production shop. Basically at every major version +upgrade, you are required to dump your database in an ASCII format, +do the upgrade, and then reload your database (or databases). This is +because they frequently update the "data format" from version to +version, and they supply no tools to automatically do the conversion. +If you forget to do the ASCII dump, your database may become totally +useless because none of the new tools can access it due to the format +change, and the PostgreSQL server will not be able to start. + +If you are building PostgreSQL from source, please be sure to add +the {\bf \verb:--:enable-thread-safety} option when doing the ./configure +for PostgreSQL. + +\section{Installing PostgreSQL} +\index[general]{PostgreSQL!Installing } + +If you use the {\bf ./configure \verb:--:with-postgresql=PostgreSQL-Directory} +statement for configuring {\bf Bacula}, you will need PostgreSQL version 7.4 +or later installed. NOTE! PostgreSQL versions earlier than 7.4 do not work +with Bacula. If PostgreSQL is installed in the standard system location, you +need only enter {\bf \verb:--:with-postgresql} since the configure program will +search all the standard locations. 
If you install PostgreSQL in your home +directory or some other non-standard directory, you will need to provide the +full path with the {\bf \verb:--:with-postgresql} option. + +Installing and configuring PostgreSQL is not difficult but can be confusing +the first time. If you prefer, you may want to use a package provided by your +chosen operating system. Binary packages are available on most PostgreSQL +mirrors. + +If you prefer to install from source, we recommend following the instructions +found in the +\elink{PostgreSQL documentation}{http://www.postgresql.org/docs/}. + +If you are using FreeBSD, +\elink{this FreeBSD Diary article}{http://www.freebsddiary.org/postgresql.php} +will be useful. Even if you are not using FreeBSD, the article will contain +useful configuration and setup information. + +If you configure the Batch Insert code in Bacula (attribute inserts are +10 times faster), you {\bf must} be using a PostgreSQL that was built with +the {\bf \verb:--:enable-thread-safety} option, otherwise you will get +data corruption. Most major Linux distros have thread safety turned on, but +it is better to check. One way is to see if the PostgreSQL library that +Bacula will be linked against references pthreads. This can be done +with a command such as: + +\footnotesize +\begin{verbatim} + nm /usr/lib/libpq.a | grep pthread_mutex_lock +\end{verbatim} +\normalsize + +The above command should print a line that looks like: + +\footnotesize +\begin{verbatim} + U pthread_mutex_lock +\end{verbatim} +\normalsize + +if does, then everything is OK. If it prints nothing, do not enable batch +inserts when building Bacula. + +After installing PostgreSQL, you should return to completing the installation +of {\bf Bacula}. Later, after Bacula is installed, come back to this chapter +to complete the installation. Please note, the installation files used in the +second phase of the PostgreSQL installation are created during the Bacula +Installation. 
You must still come back to complete the second phase of the +PostgreSQL installation even if you installed binaries (e.g. rpm, deb, +...). + + +\label{PostgreSQL_configure} +\section{Configuring PostgreSQL} +\index[general]{PostgreSQL!Configuring PostgreSQL -- } + +At this point, you should have built and installed PostgreSQL, or already have +a running PostgreSQL, and you should have configured, built and installed {\bf +Bacula}. If not, please complete these items before proceeding. + +Please note that the {\bf ./configure} used to build {\bf Bacula} will need to +include {\bf \verb:--:with-postgresql=PostgreSQL-directory}, where {\bf +PostgreSQL-directory} is the directory name that you specified on the +./configure command for configuring PostgreSQL (if you didn't specify a +directory or PostgreSQL is installed in a default location, you do not need to +specify the directory). This is needed so that Bacula can find the necessary +include headers and library files for interfacing to PostgreSQL. + +{\bf Bacula} will install scripts for manipulating the database (create, +delete, make tables etc) into the main installation directory. These files +will be of the form *\_bacula\_* (e.g. create\_bacula\_database). These files +are also available in the \lt{}bacula-src\gt{}/src/cats directory after +running ./configure. If you inspect create\_bacula\_database, you will see +that it calls create\_postgresql\_database. The *\_bacula\_* files are +provided for convenience. It doesn't matter what database you have chosen; +create\_bacula\_database will always create your database. + +Now you will create the Bacula PostgreSQL database and the tables that Bacula +uses. These instructions assume that you already have PostgreSQL running. You +will need to perform these steps as a user that is able to create new +databases. This can be the PostgreSQL user (on most systems, this is the pgsql +user). 
+ +\begin{enumerate} +\item cd \lt{}install-directory\gt{} + + This directory contains the Bacula catalog interface routines. + +\item ./create\_bacula\_database + + This script creates the PostgreSQL {\bf bacula} database. + Before running this command, you should carefully think about + what encoding sequence you want for the text fields (paths, files, ...). + Ideally, the encoding should be set to UTF8. However, many Unix systems + have filenames that are not encoded in UTF8, either because you have + not set UTF8 as your default character set or because you have imported + files from elsewhere (e.g. MacOS X). For this reason, Bacula uses + SQL\_ASCII as the default encoding. If you want to change this, + please modify the script before running it, but be forewarned that + Bacula backups will fail if PostgreSQL finds any non-UTF8 sequences. + + If running the script fails, it is probably because the database is + owned by a user other than yourself. On many systems, the database + owner is {\bf pgsql} and on others such as Red Hat and Fedora it is {\bf + postgres}. You can find out which it is by examining your /etc/passwd + file. To create a new user under either your name or with say the name + {\bf bacula}, you can do the following: + +\begin{verbatim} + su + (enter root password) + su pgsql (or postgres) + createuser kern (or perhaps bacula) + Shall the new user be allowed to create databases? (y/n) y + Shall the new user be allowed to create more new users? (y/n) (choose + what you want) + exit +\end{verbatim} + + At this point, you should be able to execute the + ./create\_bacula\_database command. + +\item ./make\_bacula\_tables + + This script creates the PostgreSQL tables used by {\bf Bacula}. +\item ./grant\_bacula\_privileges + + This script creates the database user {\bf bacula} with restricted access +rights. You may want to modify it to suit your situation. Please note that +this database is not password protected. 
+ +\end{enumerate} + +Each of the three scripts (create\_bacula\_database, make\_bacula\_tables, and +grant\_bacula\_privileges) allows the addition of a command line argument. +This can be useful for specifying the user name. For example, you might need +to add {\bf -h hostname} to the command line to specify a remote database +server. + +To take a closer look at the access privileges that you have setup with the +above, you can do: + +\footnotesize +\begin{verbatim} +PostgreSQL-directory/bin/psql --command \\dp bacula +\end{verbatim} +\normalsize + +Also, I had an authorization problem with the password. In the end, +I had to modify my {\bf pg\_hba.conf} file (in /var/lib/pgsql/data on my machine) +from: + +\footnotesize +\begin{verbatim} + local all all ident sameuser +to + local all all trust sameuser +\end{verbatim} +\normalsize + +This solved the problem for me, but it is not always a good thing +to do from a security standpoint. However, it allowed me to run +my regression scripts without having a password. + +A more secure way to perform database authentication is with md5 +password hashes. Begin by editing the {\bf pg\_hba.conf} file, and +just prior the the existing ``local'' and ``host'' lines, add the line: + +\footnotesize +\begin{verbatim} + local bacula bacula md5 +\end{verbatim} +\normalsize + +and restart the Postgres database server (frequently, this can be done +using "/etc/init.d/postgresql restart" or "service postgresql restart") to +put this new authentication rule into effect. + +Next, become the Postgres administrator, postgres, either by logging +on as the postgres user, or by using su to become root and then using +su - postgres to become postgres. 
Add a password to the bacula +database for the bacula user using: + +\footnotesize +\begin{verbatim} + \$ psql bacula + bacula=# alter user bacula with password 'secret'; + ALTER USER + bacula=# \\q +\end{verbatim} +\normalsize + +You'll have to add this password to two locations in the +bacula-dir.conf file: once to the Catalog resource and once to the +RunBeforeJob entry in the BackupCatalog Job resource. With the +password in place, these two lines should look something like: + +\footnotesize +\begin{verbatim} + dbname = bacula; user = bacula; password = "secret" + ... and ... + # WARNING!!! Passing the password via the command line is insecure. + # see comments in make_catalog_backup for details. + RunBeforeJob = "/etc/make_catalog_backup bacula bacula secret" +\end{verbatim} +\normalsize + +Naturally, you should choose your own significantly more random +password, and ensure that the bacula-dir.conf file containing this +password is readable only by the root. + +Even with the files containing the database password properly +restricted, there is still a security problem with this approach: on +some platforms, the environment variable that is used to supply the +password to Postgres is available to all users of the +local system. To eliminate this problem, the Postgres team have +deprecated the use of the environment variable password-passing +mechanism and recommend the use of a .pgpass file instead. To use +this mechanism, create a file named .pgpass containing the single +line: + +\footnotesize +\begin{verbatim} + localhost:5432:bacula:bacula:secret +\end{verbatim} +\normalsize + +This file should be copied into the home directory of all accounts +that will need to gain access to the database: typically, root, +bacula, and any users who will make use of any of the console +programs. 
The files must then have the owner and group set to match +the user (so root:root for the copy in ~root, and so on), and the mode +set to 600, limiting access to the owner of the file. + +\section{Re-initializing the Catalog Database} +\index[general]{Database!Re-initializing the Catalog } +\index[general]{Re-initializing the Catalog Database } + +After you have done some initial testing with {\bf Bacula}, you will probably +want to re-initialize the catalog database and throw away all the test Jobs +that you ran. To do so, you can do the following: + +\footnotesize +\begin{verbatim} + cd + ./drop_bacula_tables + ./make_bacula_tables + ./grant_bacula_privileges +\end{verbatim} +\normalsize + +Please note that all information in the database will be lost and you will be +starting from scratch. If you have written on any Volumes, you must write an +end of file mark on the volume so that Bacula can reuse it. Do so with: + +\footnotesize +\begin{verbatim} + (stop Bacula or unmount the drive) + mt -f /dev/nst0 rewind + mt -f /dev/nst0 weof +\end{verbatim} +\normalsize + +Where you should replace {\bf /dev/nst0} with the appropriate tape drive +device name for your machine. + +\section{Installing PostgreSQL from RPMs} +\index[general]{PostgreSQL!Installing from RPMs} +\index[general]{Installing PostgreSQL from RPMs} +If you are installing PostgreSQL from RPMs, you will need to install +both the PostgreSQL binaries and the client libraries. The client +libraries are usually found in a devel package, so you must +install: + +\footnotesize +\begin{verbatim} + postgresql + postgresql-devel + postgresql-server + postgresql-libs +\end{verbatim} +\normalsize + +These will be similar with most other package managers too. After +installing from rpms, you will still need to run the scripts that set up +the database and create the tables as described above. 
+ + +\section{Converting from MySQL to PostgreSQL} +\index[general]{PostgreSQL!Converting from MySQL to } +\index[general]{Converting from MySQL to PostgreSQL } + +The conversion procedure presented here was worked out by Norm Dressler +\lt{}ndressler at dinmar dot com\gt{} + +This process was tested using the following software versions: + +\begin{itemize} +\item Linux Mandrake 10/Kernel 2.4.22-10 SMP +\item Mysql Ver 12.21 Distrib 4.0.15, for mandrake-linux-gnu (i586) +\item PostgreSQL 7.3.4 +\item Bacula 1.34.5 + \end{itemize} + +WARNING: Always as a precaution, take a complete backup of your databases +before proceeding with this process! + +\begin{enumerate} +\item Shutdown bacula (cd /etc/bacula;./bacula stop) +\item Run the following command to dump your Mysql database: + + \footnotesize +\begin{verbatim} + mysqldump -f -t -n >bacula-backup.dmp + +\end{verbatim} +\normalsize + +\item Make a backup of your /etc/bacula directory (but leave the original in + place). +\item Go to your Bacula source directory and rebuild it to include PostgreSQL + support rather then Mysql support. Check the config.log file for your + original configure command and replace enable-mysql with enable-postgresql. +\item Recompile Bacula with a make and if everything compiles completely, + perform a make install. +\item Shutdown Mysql. +\item Start PostgreSQL on your system. +\item Create a bacula user in Postgres with the createuser command. Depending on + your Postgres install, you may have to SU to the user who has privileges to + create a user. +\item Verify your pg\_hba.conf file contains sufficient permissions to allow + bacula to access the server. 
Mine has the following since it's on a secure + network: + +\footnotesize +\begin{verbatim} +local all all trust + +host all all 127.0.0.1 255.255.255.255 trust + +NOTE: you should restart your postgres server if you + made changes + +\end{verbatim} +\normalsize + +\item Change into the /etc/bacula directory and prepare the database and + tables with the following commands: + +\footnotesize +\begin{verbatim} +./create_postgresql_database + +./make_postgresql_tables + +./grant_postgresql_privileges + +\end{verbatim} +\normalsize + +\item Verify you have access to the database: + + \footnotesize +\begin{verbatim} + +psql -Ubacula bacula + +\end{verbatim} +\normalsize + +You should not get any errors. +\item Load your database from the Mysql database dump with: + + \footnotesize +\begin{verbatim} +psql -Ubacula bacula + +\end{verbatim} +\normalsize + +\item Resequence your tables with the following commands: + + \footnotesize +\begin{verbatim} +psql -Ubacula bacula + +SELECT SETVAL('basefiles_baseid_seq', (SELECT +MAX(baseid) FROM basefiles)); +SELECT SETVAL('client_clientid_seq', (SELECT +MAX(clientid) FROM client)); +SELECT SETVAL('file_fileid_seq', (SELECT MAX(fileid) +FROM file)); +SELECT SETVAL('filename_filenameid_seq', (SELECT +MAX(filenameid) FROM filename)); + +SELECT SETVAL('fileset_filesetid_seq', (SELECT +MAX(filesetid) FROM fileset)); + +SELECT SETVAL('job_jobid_seq', (SELECT MAX(jobid) FROM job)); +SELECT SETVAL('jobmedia_jobmediaid_seq', (SELECT +MAX(jobmediaid) FROM jobmedia)); +SELECT SETVAL('media_mediaid_seq', (SELECT MAX(mediaid) FROM media)); +SELECT SETVAL('path_pathid_seq', (SELECT MAX(pathid) FROM path)); + +SELECT SETVAL('pool_poolid_seq', (SELECT MAX(poolid) FROM pool)); + +\end{verbatim} +\normalsize + +\item At this point, start up Bacula, verify your volume library and perform + a test backup to make sure everything is working properly. 
+\end{enumerate} + +\section{Upgrading PostgreSQL} +\index[general]{Upgrading PostgreSQL } +\index[general]{Upgrading!PostgreSQL } +\index[general]{Upgrading} +If you upgrade PostgreSQL, you must reconfigure, rebuild, and re-install +Bacula otherwise you are likely to get bizarre failures. If you +to modify the bacula.spec file to account for the new PostgreSQL version. +You can do so by rebuilding from the source rpm. To do so, you may need +install from rpms and you upgrade PostgreSQL, you must also rebuild Bacula. + + +\section{Credits} +\index[general]{Credits } +Many thanks to Dan Langille for writing the PostgreSQL driver. This will +surely become the most popular database that Bacula supports. diff --git a/docs/manuals/en/catalog/setup.sm b/docs/manuals/en/catalog/setup.sm new file mode 100644 index 00000000..7c88dc61 --- /dev/null +++ b/docs/manuals/en/catalog/setup.sm @@ -0,0 +1,23 @@ +/* + * html2latex + */ + +available { + sun4_sunos.4 + sun4_solaris.2 + rs_aix.3 + rs_aix.4 + sgi_irix +} + +description { + From Jeffrey Schaefer, Geometry Center. 
Translates HTML document to LaTeX +} + +install { + bin/html2latex /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex + bin/html2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.tag + bin/html2latex-local.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex-local.tag + bin/webtex2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/webtex2latex.tag + man/man1/html2latex.1 /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.1 +} diff --git a/docs/manuals/en/catalog/sqlite.tex b/docs/manuals/en/catalog/sqlite.tex new file mode 100644 index 00000000..a5ef8790 --- /dev/null +++ b/docs/manuals/en/catalog/sqlite.tex @@ -0,0 +1,168 @@ +%% +%% + +\chapter{Installing and Configuring SQLite} +\label{SqlLiteChapter} +\index[general]{Installing and Configuring SQLite } +\index[general]{SQLite!Installing and Configuring } + +Please note that SQLite both versions 2 and 3 are not network enabled, +which means that they must be linked into the Director rather than accessed +by the network as MySQL and PostgreSQL are. This has two consequences: +\begin{enumerate} +\item SQLite cannot be used in the {\bf bweb} web GUI package. +\item If you use SQLite, and your Storage daemon is not on the same +machine as your Director, you will need to transfer your database +to the Storage daemon's machine before you can use any of the SD tools +such as {\bf bscan}, ... +\end{enumerate} + +\section{Installing and Configuring SQLite -- Phase I} +\index[general]{Phase I!Installing and Configuring SQLite -- } +\index[general]{Installing and Configuring SQLite -- Phase I } + +If you use the {\bf ./configure \verb:--:with-sqlite} statement for configuring {\bf +Bacula}, you will need SQLite version 2.8.16 or later installed. Our standard +location (for the moment) for SQLite is in the dependency package {\bf +depkgs/sqlite-2.8.16}. Please note that the version will be updated as new +versions are available and tested. 
+ +Installing and Configuring is quite easy. + +\begin{enumerate} +\item Download the Bacula dependency packages +\item Detar it with something like: + + {\bf tar xvfz depkgs.tar.gz} + + Note, the above command requires GNU tar. If you do not have GNU tar, a + command such as: + + {\bf zcat depkgs.tar.gz | tar xvf -} + + will probably accomplish the same thing. + +\item {\bf cd depkgs} + +\item {\bf make sqlite} + +\end{enumerate} + + +Please note that the {\bf ./configure} used to build {\bf Bacula} will need to +include {\bf \verb:--:with-sqlite} or {\bf \verb:--:with-sqlite3} depending +one which version of SQLite you are using. You should not use the {\bf +\verb:--:enable-batch-insert} configuration parameter for Bacula if you +are using SQLite version 2 as it is probably not thread safe. If you +are using SQLite version 3, you may use the {\bf \verb:--:enable-batch-insert} +configuration option with Bacula, but when building SQLite3 you MUST +configure it with {\bf \verb:--:enable-threadsafe} and +{\bf \verb:--:enable-cross-thread-connections}. + +By default, SQLite3 is now run with {\bf PRAGMA synchronous=OFF} this +increases the speed by more than 30 time, but it also increases the +possibility of a corrupted database if your server crashes (power failure +or kernel bug). If you want more security, you can change the PRAGMA +that is used in the file src/version.h. + + +At this point, you should return to completing the installation of {\bf +Bacula}. + + +\section{Installing and Configuring SQLite -- Phase II} +\label{phase2} +\index[general]{Phase II!Installing and Configuring SQLite -- } +\index[general]{Installing and Configuring SQLite -- Phase II } + +This phase is done {\bf after} you have run the {\bf ./configure} command to +configure {\bf Bacula}. + +{\bf Bacula} will install scripts for manipulating the database (create, +delete, make tables etc) into the main installation directory. These files +will be of the form *\_bacula\_* (e.g. 
create\_bacula\_database). These files +are also available in the \lt{}bacula-src\gt{}/src/cats directory after +running ./configure. If you inspect create\_bacula\_database, you will see +that it calls create\_sqlite\_database. The *\_bacula\_* files are provided +for convenience. It doesn't matter what database you have chosen; +create\_bacula\_database will always create your database. + +At this point, you can create the SQLite database and tables: + +\begin{enumerate} +\item cd \lt{}install-directory\gt{} + + This directory contains the Bacula catalog interface routines. + +\item ./make\_sqlite\_tables + + This script creates the SQLite database as well as the tables used by {\bf + Bacula}. This script will be automatically setup by the {\bf ./configure} + program to create a database named {\bf bacula.db} in {\bf Bacula's} working + directory. +\end{enumerate} + +\section{Linking Bacula with SQLite} +\index[general]{SQLite!Linking Bacula with } +\index[general]{Linking Bacula with SQLite } + +If you have followed the above steps, this will all happen automatically and +the SQLite libraries will be linked into {\bf Bacula}. + +\section{Testing SQLite} +\index[general]{SQLite!Testing } +\index[general]{Testing SQLite } + +We have much less "production" experience using SQLite than using MySQL. +SQLite has performed flawlessly for us in all our testing. However, +several users have reported corrupted databases while using SQLite. For +that reason, we do not recommend it for production use. + +If Bacula crashes with the following type of error when it is started: +\footnotesize +\begin{verbatim} +Using default Catalog name=MyCatalog DB=bacula +Could not open database "bacula". +sqlite.c:151 Unable to open Database=/var/lib/bacula/bacula.db. 
+ERR=malformed database schema - unable to open a temporary database file +for storing temporary tables +\end{verbatim} +\normalsize + +this is most likely caused by the fact that some versions of +SQLite attempt to create a temporary file in the current directory. +If that fails, because Bacula does not have write permission on +the current directory, then you may get this errr. The solution is +to start Bacula in a current directory where it has write permission. + + +\section{Re-initializing the Catalog Database} +\index[general]{Database!Re-initializing the Catalog } +\index[general]{Re-initializing the Catalog Database } + +After you have done some initial testing with {\bf Bacula}, you will probably +want to re-initialize the catalog database and throw away all the test Jobs +that you ran. To do so, you can do the following: + +\footnotesize +\begin{verbatim} + cd + ./drop_sqlite_tables + ./make_sqlite_tables +\end{verbatim} +\normalsize + +Please note that all information in the database will be lost and you will be +starting from scratch. If you have written on any Volumes, you must write an +end of file mark on the volume so that Bacula can reuse it. Do so with: + +\footnotesize +\begin{verbatim} + (stop Bacula or unmount the drive) + mt -f /dev/nst0 rewind + mt -f /dev/nst0 weof +\end{verbatim} +\normalsize + +Where you should replace {\bf /dev/nst0} with the appropriate tape drive +device name for your machine. diff --git a/docs/manuals/en/catalog/translate_images.pl b/docs/manuals/en/catalog/translate_images.pl new file mode 100755 index 00000000..c7225118 --- /dev/null +++ b/docs/manuals/en/catalog/translate_images.pl @@ -0,0 +1,185 @@ +#!/usr/bin/perl -w +# +use strict; + +# Used to change the names of the image files generated by latex2html from imgxx.png +# to meaningful names. Provision is made to go either from or to the meaningful names. 
+# The meaningful names are obtained from a file called imagename_translations, which +# is generated by extensions to latex2html in the make_image_file subroutine in +# bacula.perl. + +# Opens the file imagename_translations and reads the contents into a hash. +# The hash is creaed with the imgxx.png files as the key if processing TO +# meaningful filenames, and with the meaningful filenames as the key if +# processing FROM meaningful filenames. +# Then opens the html file(s) indicated in the command-line arguments and +# changes all image references according to the translations described in the +# above file. Finally, it renames the image files. +# +# Original creation: 3-27-05 by Karl Cunningham. +# Modified 5-21-05 to go FROM and TO meaningful filenames. +# +my $TRANSFILE = "imagename_translations"; +my $path; + +# Loads the contents of $TRANSFILE file into the hash referenced in the first +# argument. The hash is loaded to translate old to new if $direction is 0, +# otherwise it is loaded to translate new to old. In this context, the +# 'old' filename is the meaningful name, and the 'new' filename is the +# imgxx.png filename. It is assumed that the old image is the one that +# latex2html has used as the source to create the imgxx.png filename. +# The filename extension is taken from the file +sub read_transfile { + my ($trans,$direction) = @_; + + if (!open IN,"<$path$TRANSFILE") { + print "WARNING: Cannot open image translation file $path$TRANSFILE for reading\n"; + print " Image filename translation aborted\n\n"; + exit 0; + } + + while () { + chomp; + my ($new,$old) = split(/\001/); + + # Old filenames will usually have a leading ./ which we don't need. + $old =~ s/^\.\///; + + # The filename extension of the old filename must be made to match + # the new filename because it indicates the encoding format of the image. 
+ my ($ext) = $new =~ /(\.[^\.]*)$/; + $old =~ s/\.[^\.]*$/$ext/; + if ($direction == 0) { + $trans->{$new} = $old; + } else { + $trans->{$old} = $new; + } + } + close IN; +} + +# Translates the image names in the file given as the first argument, according to +# the translations in the hash that is given as the second argument. +# The file contents are read in entirely into a string, the string is processed, and +# the file contents are then written. No particular care is taken to ensure that the +# file is not lost if a system failure occurs at an inopportune time. It is assumed +# that the html files being processed here can be recreated on demand. +# +# Links to other files are added to the %filelist for processing. That way, +# all linked files will be processed (assuming they are local). +sub translate_html { + my ($filename,$trans,$filelist) = @_; + my ($contents,$out,$this,$img,$dest); + my $cnt = 0; + + # If the filename is an external link ignore it. And drop any file:// from + # the filename. + $filename =~ /^(http|ftp|mailto)\:/ and return 0; + $filename =~ s/^file\:\/\///; + # Load the contents of the html file. + if (!open IF,"<$path$filename") { + print "WARNING: Cannot open $path$filename for reading\n"; + print " Image Filename Translation aborted\n\n"; + exit 0; + } + + while () { + $contents .= $_; + } + close IF; + + # Now do the translation... + # First, search for an image filename. + while ($contents =~ /\<\s*IMG[^\>]*SRC=\"/si) { + $contents = $'; + $out .= $` . $&; + + # The next thing is an image name. Get it and translate it. + $contents =~ /^(.*?)\"/s; + $contents = $'; + $this = $&; + $img = $1; + # If the image is in our list of ones to be translated, do it + # and feed the result to the output. + $cnt += $this =~ s/$img/$trans->{$img}/ if (defined($trans->{$img})); + $out .= $this; + } + $out .= $contents; + + # Now send the translated text to the html file, overwriting what's there. 
+ open OF,">$path$filename" or die "Cannot open $path$filename for writing\n"; + print OF $out; + close OF; + + # Now look for any links to other files and add them to the list of files to do. + while ($out =~ /\<\s*A[^\>]*HREF=\"(.*?)\"/si) { + $out = $'; + $dest = $1; + # Drop an # and anything after it. + $dest =~ s/\#.*//; + $filelist->{$dest} = '' if $dest; + } + return $cnt; +} + +# REnames the image files spefified in the %translate hash. +sub rename_images { + my $translate = shift; + my ($response); + + foreach (keys(%$translate)) { + if (! $translate->{$_}) { + print " WARNING: No destination Filename for $_\n"; + } else { + $response = `mv -f $path$_ $path$translate->{$_} 2>&1`; + $response and print "ERROR from system $response\n"; + } + } +} + +################################################# +############# MAIN ############################# +################################################ + +# %filelist starts out with keys from the @ARGV list. As files are processed, +# any links to other files are added to the %filelist. A hash of processed +# files is kept so we don't do any twice. + +# The first argument must be either --to_meaningful_names or --from_meaningful_names + +my (%translate,$search_regex,%filelist,%completed,$thisfile); +my ($cnt,$direction); + +my $arg0 = shift(@ARGV); +$arg0 =~ /^(--to_meaningful_names|--from_meaningful_names)$/ or + die "ERROR: First argument must be either \'--to_meaningful_names\' or \'--from_meaningful_names\'\n"; + +$direction = ($arg0 eq '--to_meaningful_names') ? 0 : 1; + +(@ARGV) or die "ERROR: Filename(s) to process must be given as arguments\n"; + +# Use the first argument to get the path to the file of translations. +my $tmp = $ARGV[0]; +($path) = $tmp =~ /(.*\/)/; +$path = '' unless $path; + +read_transfile(\%translate,$direction); + +foreach (@ARGV) { + # Strip the path from the filename, and use it later on. 
+ if (s/(.*\/)//) { + $path = $1; + } else { + $path = ''; + } + $filelist{$_} = ''; + + while ($thisfile = (keys(%filelist))[0]) { + $cnt += translate_html($thisfile,\%translate,\%filelist) if (!exists($completed{$thisfile})); + delete($filelist{$thisfile}); + $completed{$thisfile} = ''; + } + print "translate_images.pl: $cnt image filenames translated ",($direction)?"from":"to"," meaningful names\n"; +} + +rename_images(\%translate); diff --git a/docs/manuals/en/catalog/update_version b/docs/manuals/en/catalog/update_version new file mode 100755 index 00000000..5c2e0092 --- /dev/null +++ b/docs/manuals/en/catalog/update_version @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +. ./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/en/catalog/update_version.in b/docs/manuals/en/catalog/update_version.in new file mode 100644 index 00000000..2766245f --- /dev/null +++ b/docs/manuals/en/catalog/update_version.in @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +. 
./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/en/catalog/version.tex.in b/docs/manuals/en/catalog/version.tex.in new file mode 100644 index 00000000..ff66dfc6 --- /dev/null +++ b/docs/manuals/en/catalog/version.tex.in @@ -0,0 +1 @@ +@VERSION@ (@DATE@) diff --git a/docs/manuals/en/concepts/Makefile.in b/docs/manuals/en/concepts/Makefile.in new file mode 100644 index 00000000..61b86ed0 --- /dev/null +++ b/docs/manuals/en/concepts/Makefile.in @@ -0,0 +1,139 @@ +# +# +# Makefile for LaTeX +# +# To build everything do +# make tex +# make web +# make html +# make dvipdf +# +# or simply +# +# make +# +# for rapid development do: +# make tex +# make show +# +# +# If you are having problems getting "make" to work, debugging it is +# easier if can see the output from latex, which is normally redirected +# to /dev/null. To see it, do the following: +# +# cd docs/manual +# make tex +# latex bacula.tex +# +# typically the latex command will stop indicating the error (e.g. a +# missing \ in front of a _ or a missing { or ] ... +# +# The following characters must be preceded by a backslash +# to be entered as printable characters: +# +# # $ % & ~ _ ^ \ { } +# + +IMAGES=../../../images + +DOC=concepts + +first_rule: all + +all: tex web dvipdf mini-clean + +.SUFFIXES: .tex .html +.PHONY: +.DONTCARE: + + +tex: + @./update_version + @echo "Making version `cat version.tex`" + @cp -fp ${IMAGES}/hires/*.eps . + @touch ${DOC}i-dir.tex ${DOC}i-fd.tex ${DOC}i-sd.tex \ + ${DOC}i-console.tex ${DOC}i-general.tex + latex -interaction=batchmode ${DOC}.tex + makeindex ${DOC}.idx -o ${DOC}.ind 2>/dev/null + makeindex ${DOC}.ddx -o ${DOC}.dnd >/dev/null 2>/dev/null + makeindex ${DOC}.fdx -o ${DOC}.fnd >/dev/null 2>/dev/null + makeindex ${DOC}.sdx -o ${DOC}.snd >/dev/null 2>/dev/null + makeindex ${DOC}.cdx -o ${DOC}.cnd >/dev/null 2>/dev/null + latex -interaction=batchmode ${DOC}.tex + +pdf: + @echo "Making pdfm" + @cp -fp ${IMAGES}/hires/*.eps . 
+ dvipdfm -p a4 ${DOC}.dvi + +dvipdf: + @echo "Making dvi to pdf" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdf ${DOC}.dvi ${DOC}.pdf + +html: + @echo " " + @echo "Making html" + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @(if [ -f imagename_translations ] ; then \ + ./translate_images.pl --from_meaningful_names ${DOC}.html; \ + fi) + latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \ + -init_file latex2html-init.pl ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}.html + @echo "Done making html" + +web: + @echo "Making web" + @mkdir -p ${DOC} + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @cp -fp ${IMAGES}/*.eps ${DOC}/ + @cp -fp ${IMAGES}/*.eps ${IMAGES}/*.png ${DOC}/ + @rm -f ${DOC}/xp-*.png + @rm -f ${DOC}/next.eps ${DOC}/next.png ${DOC}/prev.eps ${DOC}/prev.png ${DOC}/up.eps ${DOC}/up.png + @rm -rf ${DOC}/*.html + latex2html -split 3 -local_icons -t "Bacula Concepts and Overview Guide" -long_titles 4 \ + -toc_stars -contents_in_nav -init_file latex2html-init.pl -white -notransparent ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}/Bacula_Concep_Overvi_Guide.html + @echo "Done making web" +show: + xdvi ${DOC} + +texcheck: + ./check_tex.pl ${DOC}.tex + +main_configs: + pic2graph -density 100 main_configs.png + +mini-clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.gif *.jpg *.eps + @rm -f *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.backup *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd *.old *.out + @rm -f ${DOC}/*.gif ${DOC}/*.jpg ${DOC}/*.eps + @rm -f ${DOC}/*.aux ${DOC}/*.cp ${DOC}/*.fn ${DOC}/*.ky ${DOC}/*.log ${DOC}/*.pg + @rm -f ${DOC}/*.backup ${DOC}/*.ilg ${DOC}/*.lof ${DOC}/*.lot + @rm -f ${DOC}/*.cdx ${DOC}/*.cnd ${DOC}/*.ddx ${DOC}/*.ddn ${DOC}/*.fdx ${DOC}/*.fnd ${DOC}/*.ind ${DOC}/*.sdx ${DOC}/*.snd + @rm -f ${DOC}/*.dnd ${DOC}/*.old ${DOC}/*.out + @rm -f 
${DOC}/WARNINGS + + +clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.png *.gif *.jpg *.eps + @rm -f *.pdf *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.html *.backup *.ps *.dvi *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd imagename_translations + @rm -f *.old WARNINGS *.out *.toc *.idx + @rm -f ${DOC}i-*.tex + @rm -rf ${DOC} + + +distclean: clean + @rm -f images.pl labels.pl internals.pl + @rm -f Makefile version.tex diff --git a/docs/manuals/en/concepts/STYLE b/docs/manuals/en/concepts/STYLE new file mode 100644 index 00000000..6cd70564 --- /dev/null +++ b/docs/manuals/en/concepts/STYLE @@ -0,0 +1,76 @@ +TODO + +maybe spell out "config" to "configuration" as appropriate + +Use American versus British spelling + +not critical, but for later consider cleaning out some use of +"there" and rewrite to not be so passive. + +make sure use of \elink shows URL in printed book + +get rid of many references of "Red Hat" -- too platform specific? + +remove references to names, like "Dan Langille shared ..." +just put their names in credits for book + +don't refer to very old software by specific version such as +"Red Hat 7" or FreeBSD 4.9 because is too old to put in book. It may be +relevant, but may be confusing. Maybe just remove the version number +if applicable. + +maybe fine, but discuss point-of-view: don't use personal "I" or +possessive "my" unless that is consistent style for book. + +replace "32 bit" and "64 bit" with "32-bit" and "64-bit" respectively. +It seems like more popular style standard + +be consistent with "Note" and "NOTE". maybe use tex header for this + +get rid of redundant or noisy exclamation marks + +style for "ctl-alt-del" and "ctl-d"? and be consisten with formatting + +be consistent for case for ext3, ext2, EXT3, or EXT2. 
+ +fix spelling of "inspite" in source and in docs (maybe use "regardless +in one place where I already changed to "in spite" + +be consistent with software names, like postgres, postgresql, PostreSQL +and others + +instead of using whitehouse for examples, use example.org (as that is defined +for that usage); also check other hostnames and maybe IPs and networks + +use section numbers and cross reference by section number or page number +no underlining in book (this is not the web :) + +some big gaps between paragraphs or between section headers and paragraphs +-- due to tex -- adjust as necessary to look nice + +don't include the GPL and LGPL in book. This will save 19 (A4) pages. +For 6x9 book this will save 30 pages. (Keep GFDL though.) + +many index items are too long + +appendices not listed as appendix + +some how consolidate indexes into one? on 6x9, the indexes are over 30 pages + +don't refer to some website without including URL also +(such as "this FreeBSD Diary article") + +get rid of (R) trademark symbols -- only use on first use; for example +don't put on the RPM Packaging FAQ + +split up very long paragraphs, such as "As mentioned above, you will need ..." +(on my page 783). + +use smaller font or split up long lines (especially from +console output which is wider than printed page) + +don't assume all BSD is "FreeBSD" + +don't assume all "kernel" is Linux. If it is Linux, be clear. + + diff --git a/docs/manuals/en/concepts/ansi-labels.tex b/docs/manuals/en/concepts/ansi-labels.tex new file mode 100644 index 00000000..7d6e14fe --- /dev/null +++ b/docs/manuals/en/concepts/ansi-labels.tex @@ -0,0 +1,58 @@ + +\chapter{ANSI and IBM Tape Labels} +\label{AnsiLabelsChapter} +\index[general]{ANSI and IBM Tape Labels} +\index[general]{Labels!Tape} + +Bacula supports ANSI or IBM tape labels as long as you +enable it. In fact, with the proper configuration, you can +force Bacula to require ANSI or IBM labels. 
+ +Bacula can create an ANSI or IBM label, but if Check Labels is +enabled (see below), Bacula will look for an existing label, and +if it is found, it will keep the label. Consequently, you +can label the tapes with programs other than Bacula, and Bacula +will recognize and support them. + +Even though Bacula will recognize and write ANSI and IBM labels, +it always writes its own tape labels as well. + +When using ANSI or IBM tape labeling, you must restrict your Volume +names to a maximum of six characters. + +If you have labeled your Volumes outside of Bacula, then the +ANSI/IBM label will be recognized by Bacula only if you have created +the HDR1 label with {\bf BACULA.DATA} in the Filename field (starting +with character 5). If Bacula writes the labels, it will use +this information to recognize the tape as a Bacula tape. This allows +ANSI/IBM labeled tapes to be used at sites with multiple machines +and multiple backup programs. + + +\section{Director Pool Directive} + +\begin{description} +\item [ Label Type = ANSI | IBM | Bacula] + This directive is implemented in the Director Pool resource and in the SD Device + resource. If it is specified in the SD Device resource, it will take + precedence over the value passed from the Director to the SD. The default + is Label Type = Bacula. +\end{description} + +\section{Storage Daemon Device Directives} + +\begin{description} +\item [ Label Type = ANSI | IBM | Bacula] + This directive is implemented in the Director Pool resource and in the SD Device + resource. If it is specified in the the SD Device resource, it will take + precedence over the value passed from the Director to the SD. + +\item [Check Labels = yes | no] + This directive is implemented in the the SD Device resource. If you intend + to read ANSI or IBM labels, this *must* be set. Even if the volume is + not ANSI labeled, you can set this to yes, and Bacula will check the + label type. 
Without this directive set to yes, Bacula will assume that + labels are of Bacula type and will not check for ANSI or IBM labels. + In other words, if there is a possibility of Bacula encountering an + ANSI/IBM label, you must set this to yes. +\end{description} diff --git a/docs/manuals/en/concepts/autochangerres.tex b/docs/manuals/en/concepts/autochangerres.tex new file mode 100644 index 00000000..98563c77 --- /dev/null +++ b/docs/manuals/en/concepts/autochangerres.tex @@ -0,0 +1,107 @@ +%% +\chapter{Autochanger Resource} +\index[sd]{Autochanger Resource} +\index[sd]{Resource!Autochanger} + +The Autochanger resource supports single or multiple drive +autochangers by grouping one or more Device resources +into one unit called an autochanger in Bacula (often referred to +as a "tape library" by autochanger manufacturers). + +If you have an Autochanger, and you want it to function correctly, +you {\bf must} have an Autochanger resource in your Storage +conf file, and your Director's Storage directives that want to +use an Autochanger {\bf must} refer to the Autochanger resource name. +In previous versions of Bacula, the Director's Storage directives +referred directly to Device resources that were autochangers. +In version 1.38.0 and later, referring directly to Device resources +will not work for Autochangers. + +\begin{description} +\item [Name = \lt{}Autochanger-Name\gt{}] + \index[sd]{Name} + Specifies the Name of the Autochanger. This name is used in the + Director's Storage definition to refer to the autochanger. This + directive is required. + +\item [Device = \lt{}Device-name1, device-name2, ...\gt{}] + Specifies the names of the Device resource or resources that correspond + to the autochanger drive. If you have a multiple drive autochanger, you + must specify multiple Device names, each one referring to a separate + Device resource that contains a Drive Index specification that + corresponds to the drive number base zero. 
You may specify multiple + device names on a single line separated by commas, and/or you may + specify multiple Device directives. This directive is required. + +\item [Changer Device = {\it name-string}] + \index[sd]{Changer Device} + The specified {\bf name-string} gives the system file name of the autochanger + device name. If specified in this resource, the Changer Device name + is not needed in the Device resource. If it is specified in the Device + resource (see above), it will take precedence over one specified in + the Autochanger resource. + +\item [Changer Command = {\it name-string}] + \index[sd]{Changer Command } + The {\bf name-string} specifies an external program to be called that will + automatically change volumes as required by {\bf Bacula}. Most frequently, + you will specify the Bacula supplied {\bf mtx-changer} script as follows. + If it is specified here, it need not be specified in the Device + resource. If it is also specified in the Device resource, it will take + precedence over the one specified in the Autochanger resource. + +\end{description} + +The following is an example of a valid Autochanger resource definition: + +\footnotesize +\begin{verbatim} +Autochanger { + Name = "DDS-4-changer" + Device = DDS-4-1, DDS-4-2, DDS-4-3 + Changer Device = /dev/sg0 + Changer Command = "/etc/bacula/mtx-changer %c %o %S %a %d" +} +Device { + Name = "DDS-4-1" + Drive Index = 0 + Autochanger = yes + ... +} +Device { + Name = "DDS-4-2" + Drive Index = 1 + Autochanger = yes + ... +Device { + Name = "DDS-4-3" + Drive Index = 2 + Autochanger = yes + Autoselect = no + ... +} +\end{verbatim} +\normalsize + +Please note that it is important to include the {\bf Autochanger = yes} directive +in each Device definition that belongs to an Autochanger. A device definition +should not belong to more than one Autochanger resource. 
Also, your Device +directive in the Storage resource of the Director's conf file should have +the Autochanger's resource name rather than a name of one of the Devices. + +If you have a drive that physically belongs to an Autochanger but you don't want +to have it automatically used when Bacula references the Autochanger for backups, +for example, you want to reserve it for restores, you can add the directive: + +\footnotesize +\begin{verbatim} +Autoselect = no +\end{verbatim} +\normalsize + +to the Device resource for that drive. In that case, Bacula will not automatically +select that drive when accessing the Autochanger. You can, still use the drive +by referencing it by the Device name directly rather than the Autochanger name. An example +of such a definition is shown above for the Device DDS-4-3, which will not be +selected when the name DDS-4-changer is used in a Storage definition, but will +be used if DDS-4-3 is used. diff --git a/docs/manuals/en/concepts/autochangers.tex b/docs/manuals/en/concepts/autochangers.tex new file mode 100644 index 00000000..154306c4 --- /dev/null +++ b/docs/manuals/en/concepts/autochangers.tex @@ -0,0 +1,981 @@ +%% +%% + +\chapter{Autochanger Support} +\label{AutochangersChapter} +\index[general]{Support!Autochanger } +\index[general]{Autochanger Support } + +Bacula provides autochanger support for reading and writing tapes. In +order to work with an autochanger, Bacula requires a number of things, each of +which is explained in more detail after this list: + +\begin{itemize} +\item A script that actually controls the autochanger according to commands + sent by Bacula. We furnish such a script that works with {\bf mtx} found in + the {\bf depkgs} distribution. + +\item That each Volume (tape) to be used must be defined in the Catalog and + have a Slot number assigned to it so that Bacula knows where the Volume is in + the autochanger. 
This is generally done with the {\bf label} command, + but can also done after the tape is labeled using the {\bf update slots} + command. See + below for more details. You must pre-label the tapes manually before + using them. + +\item Modifications to your Storage daemon's Device configuration resource to + identify that the device is a changer, as well as a few other parameters. + +\item You should also modify your Storage resource definition in the + Director's configuration file so that you are automatically prompted for the + Slot when labeling a Volume. + +\item You need to ensure that your Storage daemon (if not running as root) + has access permissions to both the tape drive and the control device. + +\item You need to have {\bf Autochanger = yes} in your Storage resource + in your bacula-dir.conf file so that you will be prompted for the + slot number when you label Volumes. +\end{itemize} + +In version 1.37 and later, there is a new \ilink{Autochanger +resource}{AutochangerRes} that permits you to group Device resources thus +creating a multi-drive autochanger. If you have an autochanger, +you {\bf must} use this new resource. + +Bacula uses its own {\bf mtx-changer} script to interface with a program +that actually does the tape changing. Thus in principle, {\bf mtx-changer} +can be adapted to function with any autochanger program, or you can +call any other script or program. The current +version of {\bf mtx-changer} works with the {\bf mtx} program. However, +FreeBSD users have provided a script in the {\bf examples/autochangers} +directory that allows Bacula to use the {\bf chio} program. + +Bacula also supports autochangers with barcode +readers. This support includes two Console commands: {\bf label barcodes} +and {\bf update slots}. For more details on these commands, see the "Barcode +Support" section below. + +Current Bacula autochanger support does not include cleaning, stackers, or +silos. 
Stackers and silos are not supported because Bacula expects to +be able to access the Slots randomly. +However, if you are very careful to setup Bacula to access the Volumes +in the autochanger sequentially, you may be able to make Bacula +work with stackers (gravity feed and such). + +Support for multi-drive +autochangers requires the \ilink{Autochanger resource}{AutochangerRes} +introduced in version 1.37. This resource is also recommended for single +drive autochangers. + +In principle, if {\bf mtx} will operate your changer correctly, then it is +just a question of adapting the {\bf mtx-changer} script (or selecting one +already adapted) for proper interfacing. You can find a list of autochangers +supported by {\bf mtx} at the following link: +\elink{http://mtx.opensource-sw.net/compatibility.php} +{http://mtx.opensource-sw.net/compatibility.php}. +The home page for the {\bf mtx} project can be found at: +\elink{http://mtx.opensource-sw.net/}{http://mtx.opensource-sw.net/}. + +Note, we have feedback from some users that there are certain +incompatibilities between the Linux kernel and mtx. For example between +kernel 2.6.18-8.1.8.el5 of CentOS and RedHat and version 1.3.10 and 1.3.11 +of mtx. This was fixed by upgrading to a version 2.6.22 kernel. + +In addition, apparently certain versions of mtx, for example, version +1.3.11 limit the number of slots to a maximum of 64. The solution was to +use version 1.3.10. + +If you are having troubles, please use the {\bf auto} command in the {\bf +btape} program to test the functioning of your autochanger with Bacula. When +Bacula is running, please remember that for many distributions (e.g. FreeBSD, +Debian, ...) the Storage daemon runs as {\bf bacula.tape} rather than {\bf +root.root}, so you will need to ensure that the Storage daemon has sufficient +permissions to access the autochanger. 
Some users have reported that the Storage daemon blocks
+To get around this just edit the /etc/devfs.conf file and add the +following to the bottom: +\footnotesize +\begin{verbatim} +own pass0 root:bacula +perm pass0 0666 +own nsa0.0 root:bacula +perm nsa0.0 0666 +\end{verbatim} +\normalsize + +This gives the bacula group permission to write to the nsa0.0 device +too just to be on the safe side. To bring these changes into effect +just run:- + +/etc/rc.d/devfs restart + +Basically this will stop you having to manually change permissions on these +devices to make Bacula work when operating the AutoChanger after a reboot. + +\label{scripts} +\section{Example Scripts} +\index[general]{Scripts!Example } +\index[general]{Example Scripts } + +Please read the sections below so that you understand how autochangers work +with Bacula. Although we supply a default {\bf mtx-changer} script, your +autochanger may require some additional changes. If you want to see examples +of configuration files and scripts, please look in the {\bf +\lt{}bacula-src\gt{}/examples/devices} directory where you will find an +example {\bf HP-autoloader.conf} Bacula Device resource, and several {\bf +mtx-changer} scripts that have been modified to work with different +autochangers. + +\label{Slots} + +\section{Slots} +\index[general]{Slots } + +To properly address autochangers, Bacula must know which Volume is in each +{\bf slot} of the autochanger. Slots are where the changer cartridges reside +when not loaded into the drive. Bacula numbers these slots from one to the +number of cartridges contained in the autochanger. + +Bacula will not automatically use a Volume in your autochanger unless it is +labeled and the slot number is stored in the catalog and the Volume is marked +as InChanger. This is because it must know where each volume is (slot) to +be able to load the volume. +For each Volume in your +changer, you will, using the Console program, assign a slot. 
This information +is kept in {\bf Bacula's} catalog database along with the other data for the +volume. If no slot is given, or the slot is set to zero, Bacula will not +attempt to use the autochanger even if all the necessary configuration records +are present. When doing a {\bf mount} command on an autochanger, you must +specify which slot you want mounted. If the drive is loaded with a tape +from another slot, it will unload it and load the correct tape, but +normally, no tape will be loaded because an {\bf unmount} command causes +Bacula to unload the tape in the drive. + + +You can check if the Slot number and InChanger flag are set by doing a: +\begin{verbatim} +list Volumes +\end{verbatim} + +in the Console program. + +\label{mult} +\section{Multiple Devices} +\index[general]{Devices!Multiple} +\index[general]{Multiple Devices} + +Some autochangers have more than one read/write device (drive). The +new \ilink{Autochanger resource}{AutochangerRes} introduced in version +1.37 permits you to group Device resources, where each device +represents a drive. The Director may still reference the Devices (drives) +directly, but doing so, bypasses the proper functioning of the +drives together. Instead, the Director (in the Storage resource) +should reference the Autochanger resource name. Doing so permits +the Storage daemon to ensure that only one drive uses the mtx-changer +script at a time, and also that two drives don't reference the +same Volume. + +Multi-drive requires the use of the {\bf +Drive Index} directive in the Device resource of the Storage daemon's +configuration file. Drive numbers or the Device Index are numbered beginning +at zero, which is the default. To use the second Drive in an autochanger, you +need to define a second Device resource and set the Drive Index to 1 for +that device. In general, the second device will have the same {\bf Changer +Device} (control channel) as the first drive, but a different {\bf Archive +Device}. 
+ +As a default, Bacula jobs will prefer to write to a Volume that is +already mounted. If you have a multiple drive autochanger and you want +Bacula to write to more than one Volume in the same Pool at the same +time, you will need to set \ilink{Prefer Mounted Volumes} {PreferMountedVolumes} +in the Directors Job resource to {\bf no}. This will cause +the Storage daemon to maximize the use of drives. + + +\label{ConfigRecords} +\section{Device Configuration Records} +\index[general]{Records!Device Configuration } +\index[general]{Device Configuration Records } + +Configuration of autochangers within Bacula is done in the Device resource of +the Storage daemon. Four records: {\bf Autochanger}, {\bf Changer Device}, +{\bf Changer Command}, and {\bf Maximum Changer Wait} control how Bacula uses +the autochanger. + +These four records, permitted in {\bf Device} resources, are described in +detail below. Note, however, that the {\bf Changer Device} and the +{\bf Changer Command} directives are not needed in the Device resource +if they are present in the {\bf Autochanger} resource. + +\begin{description} + +\item [Autochanger = {\it Yes|No} ] + \index[sd]{Autochanger } + The {\bf Autochanger} record specifies that the current device is or is not +an autochanger. The default is {\bf no}. + +\item [Changer Device = \lt{}device-name\gt{}] + \index[sd]{Changer Device } + In addition to the Archive Device name, you must specify a {\bf Changer +Device} name. This is because most autochangers are controlled through a +different device than is used for reading and writing the cartridges. For +example, on Linux, one normally uses the generic SCSI interface for +controlling the autochanger, but the standard SCSI interface for reading and +writing the tapes. On Linux, for the {\bf Archive Device = /dev/nst0}, you +would typically have {\bf Changer Device = /dev/sg0}. Note, some of the more +advanced autochangers will locate the changer device on {\bf /dev/sg1}. 
Such +devices typically have several drives and a large number of tapes. + +On FreeBSD systems, the changer device will typically be on {\bf /dev/pass0} +through {\bf /dev/passn}. + +On Solaris, the changer device will typically be some file under {\bf +/dev/rdsk}. + +Please ensure that your Storage daemon has permission to access this +device. + +\item [Changer Command = \lt{}command\gt{}] + \index[sd]{Changer Command } + This record is used to specify the external program to call and what +arguments to pass to it. The command is assumed to be a standard program or +shell script that can be executed by the operating system. This command is +invoked each time that Bacula wishes to manipulate the autochanger. The +following substitutions are made in the {\bf command} before it is sent to +the operating system for execution: + +\footnotesize +\begin{verbatim} + %% = % + %a = archive device name + %c = changer device name + %d = changer drive index base 0 + %f = Client's name + %j = Job name + %o = command (loaded, load, or unload) + %s = Slot base 0 + %S = Slot base 1 + %v = Volume name +\end{verbatim} +\normalsize + +An actual example for using {\bf mtx} with the {\bf mtx-changer} script (part +of the Bacula distribution) is: + +\footnotesize +\begin{verbatim} +Changer Command = "/etc/bacula/mtx-changer %c %o %S %a %d" +\end{verbatim} +\normalsize + +Where you will need to adapt the {\bf /etc/bacula} to be the actual path on +your system where the mtx-changer script resides. Details of the three +commands currently used by Bacula (loaded, load, unload) as well as the +output expected by Bacula are given in the {\bf Bacula Autochanger Interface} +section below. + +\item [Maximum Changer Wait = \lt{}time\gt{}] + \index[sd]{Maximum Changer Wait } + This record is used to define the maximum amount of time that Bacula + will wait for an autoloader to respond to a command (e.g. load). The + default is set to 120 seconds. 
If you have a slow autoloader you may + want to set it longer. + +If the autoloader program fails to respond in this time, it will be killed +and Bacula will request operator intervention. + +\item [Drive Index = \lt{}number\gt{}] + \index[sd]{Drive Index } + This record allows you to tell Bacula to use the second or subsequent + drive in an autochanger with multiple drives. Since the drives are + numbered from zero, the second drive is defined by + +\footnotesize +\begin{verbatim} +Drive Index = 1 + +\end{verbatim} +\normalsize + +To use the second drive, you need a second Device resource definition in the +Bacula configuration file. See the Multiple Drive section above in this +chapter for more information. +\end{description} + +In addition, for proper functioning of the Autochanger, you must +define an Autochanger resource. +\input{autochangerres} + +\label{example} +\section{An Example Configuration File} +\index[general]{Example Configuration File } +\index[general]{File!Example Configuration } + +The following two resources implement an autochanger: + +\footnotesize +\begin{verbatim} +Autochanger { + Name = "Autochanger" + Device = DDS-4 + Changer Device = /dev/sg0 + Changer Command = "/etc/bacula/mtx-changer %c %o %S %a %d" +} + +Device { + Name = DDS-4 + Media Type = DDS-4 + Archive Device = /dev/nst0 # Normal archive device + Autochanger = yes + LabelMedia = no; + AutomaticMount = yes; + AlwaysOpen = yes; +} +\end{verbatim} +\normalsize + +where you will adapt the {\bf Archive Device}, the {\bf Changer Device}, and +the path to the {\bf Changer Command} to correspond to the values used on your +system. 
+ +\section{A Multi-drive Example Configuration File} +\index[general]{Multi-drive Example Configuration File } + +The following resources implement a multi-drive autochanger: + +\footnotesize +\begin{verbatim} +Autochanger { + Name = "Autochanger" + Device = Drive-1, Drive-2 + Changer Device = /dev/sg0 + Changer Command = "/etc/bacula/mtx-changer %c %o %S %a %d" +} + +Device { + Name = Drive-1 + Drive Index = 0 + Media Type = DDS-4 + Archive Device = /dev/nst0 # Normal archive device + Autochanger = yes + LabelMedia = no; + AutomaticMount = yes; + AlwaysOpen = yes; +} + +Device { + Name = Drive-2 + Drive Index = 1 + Media Type = DDS-4 + Archive Device = /dev/nst1 # Normal archive device + Autochanger = yes + LabelMedia = no; + AutomaticMount = yes; + AlwaysOpen = yes; +} + +\end{verbatim} +\normalsize + +where you will adapt the {\bf Archive Device}, the {\bf Changer Device}, and +the path to the {\bf Changer Command} to correspond to the values used on your +system. + +\label{SpecifyingSlots} +\section{Specifying Slots When Labeling} +\index[general]{Specifying Slots When Labeling } +\index[general]{Labeling!Specifying Slots When } + +If you add an {\bf Autochanger = yes} record to the Storage resource in your +Director's configuration file, the Bacula Console will automatically prompt +you for the slot number when the Volume is in the changer when +you {\bf add} or {\bf label} tapes for that Storage device. If your +{\bf mtx-changer} script is properly installed, Bacula will automatically +load the correct tape during the label command. + +You must also set +{\bf Autochanger = yes} in the Storage daemon's Device resource +as we have described above in +order for the autochanger to be used. Please see the +\ilink{Storage Resource}{Autochanger1} in the Director's chapter +and the +\ilink{Device Resource}{Autochanger} in the Storage daemon +chapter for more details on these records. + +Thus all stages of dealing with tapes can be totally automated. 
It is also +possible to set or change the Slot using the {\bf update} command in the +Console and selecting {\bf Volume Parameters} to update. + +Even though all the above configuration statements are specified and correct, +Bacula will attempt to access the autochanger only if a {\bf slot} is non-zero +in the catalog Volume record (with the Volume name). + +If your autochanger has barcode labels, you can label all the Volumes in +your autochanger one after another by using the {\bf label barcodes} command. +For each tape in the changer containing a barcode, Bacula will mount the tape +and then label it with the same name as the barcode. An appropriate Media +record will also be created in the catalog. Any barcode that begins with the +same characters as specified on the "CleaningPrefix=xxx" command, will be +treated as a cleaning tape, and will not be labeled. For example with: + +Please note that Volumes must be pre-labeled to be automatically used in +the autochanger during a backup. If you do not have a barcode reader, this +is done manually (or via a script). + +\footnotesize +\begin{verbatim} +Pool { + Name ... + Cleaning Prefix = "CLN" +} +\end{verbatim} +\normalsize + +Any slot containing a barcode of CLNxxxx will be treated as a cleaning tape +and will not be mounted. + +\section{Changing Cartridges} +\index[general]{Changing Cartridges } +If you wish to insert or remove cartridges in your autochanger or +you manually run the {\bf mtx} program, you must first tell Bacula +to release the autochanger by doing: + +\footnotesize +\begin{verbatim} +unmount +(change cartridges and/or run mtx) +mount +\end{verbatim} +\normalsize + +If you do not do the unmount before making such a change, Bacula +will become completely confused about what is in the autochanger +and may stop functioning because it expects to have exclusive use +of the autochanger while it has the drive mounted. 
+ + +\label{Magazines} +\section{Dealing with Multiple Magazines} +\index[general]{Dealing with Multiple Magazines } +\index[general]{Magazines!Dealing with Multiple } + +If you have several magazines or if you insert or remove cartridges from a +magazine, you should notify Bacula of this. By doing so, Bacula will as +a preference, use Volumes that it knows to be in the autochanger before +accessing Volumes that are not in the autochanger. This prevents unneeded +operator intervention. + +If your autochanger has barcodes (machine readable tape labels), the task of +informing Bacula is simple. Every time, you change a magazine, or add or +remove a cartridge from the magazine, simply do + +\footnotesize +\begin{verbatim} +unmount +(remove magazine) +(insert new magazine) +update slots +mount +\end{verbatim} +\normalsize + +in the Console program. This will cause Bacula to request the autochanger to +return the current Volume names in the magazine. This will be done without +actually accessing or reading the Volumes because the barcode reader does this +during inventory when the autochanger is first turned on. Bacula will ensure +that any Volumes that are currently marked as being in the magazine are marked +as no longer in the magazine, and the new list of Volumes will be marked as +being in the magazine. In addition, the Slot numbers of the Volumes will be +corrected in Bacula's catalog if they are incorrect (added or moved). + +If you do not have a barcode reader on your autochanger, you have several +alternatives. + +\begin{enumerate} +\item You can manually set the Slot and InChanger flag using the {\bf update + volume} command in the Console (quite painful). + +\item You can issue a + +\footnotesize +\begin{verbatim} +update slots scan +\end{verbatim} +\normalsize + + command that will cause Bacula to read the label on each of the cartridges in + the magazine in turn and update the information (Slot, InChanger flag) in the + catalog. 
This is quite effective but does take time to load each cartridge + into the drive in turn and read the Volume label. + +\item You can modify the mtx-changer script so that it simulates an + autochanger with barcodes. See below for more details. +\end{enumerate} + +\label{simulating} +\section{Simulating Barcodes in your Autochanger} +\index[general]{Autochanger!Simulating Barcodes in your } +\index[general]{Simulating Barcodes in your Autochanger } + +You can simulate barcodes in your autochanger by making the {\bf mtx-changer} +script return the same information that an autochanger with barcodes would do. +This is done by commenting out the one and only line in the {\bf list)} case, +which is: + +\footnotesize +\begin{verbatim} + ${MTX} -f $ctl status | grep " *Storage Element [0-9]*:.*Full" | awk "{print \$3 \$4}" | sed "s/Full *\(:VolumeTag=\)*//" +\end{verbatim} +\normalsize + +at approximately line 99 by putting a \# in column one of that line, or by +simply deleting it. Then in its place add a new line that prints the contents +of a file. For example: + +\footnotesize +\begin{verbatim} +cat /etc/bacula/changer.volumes +\end{verbatim} +\normalsize + +Be sure to include a full path to the file, which can have any name. The +contents of the file must be of the following format: + +\footnotesize +\begin{verbatim} +1:Volume1 +2:Volume2 +3:Volume3 +... +\end{verbatim} +\normalsize + +Where the 1, 2, 3 are the slot numbers and Volume1, Volume2, ... are the +Volume names in those slots. You can have multiple files that represent the +Volumes in different magazines, and when you change magazines, simply copy the +contents of the correct file into your {\bf /etc/bacula/changer.volumes} file. +There is no need to stop and start Bacula when you change magazines, simply +put the correct data in the file, then run the {\bf update slots} command, and +your autochanger will appear to Bacula to be an autochanger with barcodes. 
+\label{updateslots} + +\section{The Full Form of the Update Slots Command} +\index[general]{Full Form of the Update Slots Command } +\index[general]{Command!Full Form of the Update Slots } + +If you change only one cartridge in the magazine, you may not want to scan all +Volumes, so the {\bf update slots} command (as well as the {\bf update slots +scan} command) has the additional form: + +\footnotesize +\begin{verbatim} +update slots=n1,n2,n3-n4, ... +\end{verbatim} +\normalsize + +where the keyword {\bf scan} can be appended or not. The n1,n2, ... represent +Slot numbers to be updated and the form n3-n4 represents a range of Slot +numbers to be updated (e.g. 4-7 will update Slots 4,5,6, and 7). + +This form is particularly useful if you want to do a scan (time expensive) and +restrict the update to one or two slots. + +For example, the command: + +\footnotesize +\begin{verbatim} +update slots=1,6 scan +\end{verbatim} +\normalsize + +will cause Bacula to load the Volume in Slot 1, read its Volume label and +update the Catalog. It will do the same for the Volume in Slot 6. The command: + + +\footnotesize +\begin{verbatim} +update slots=1-3,6 +\end{verbatim} +\normalsize + +will read the barcoded Volume names for slots 1,2,3 and 6 and make the +appropriate updates in the Catalog. If you don't have a barcode reader or have +not modified the mtx-changer script as described above, the above command will +not find any Volume names so will do nothing. +\label{FreeBSD} + +\section{FreeBSD Issues} +\index[general]{Issues!FreeBSD } +\index[general]{FreeBSD Issues } + +If you are having problems on FreeBSD when Bacula tries to select a tape, and +the message is {\bf Device not configured}, this is because FreeBSD has made +the tape device {\bf /dev/nsa1} disappear when there is no tape mounted in the +autochanger slot. As a consequence, Bacula is unable to open the device. 
The +solution to the problem is to make sure that some tape is loaded into the tape +drive before starting Bacula. This problem is corrected in Bacula versions +1.32f-5 and later. + +Please see the +\ilink{ Tape Testing}{FreeBSDTapes} chapter of this manual for +{\bf important} information concerning your tape drive before doing the +autochanger testing. +\label{AutochangerTesting} + +\section{Testing Autochanger and Adapting mtx-changer script} +\index[general]{Testing the Autochanger } +\index[general]{Adapting Your mtx-changer script} + + +Before attempting to use the autochanger with Bacula, it is preferable to +"hand-test" that the changer works. To do so, we suggest you do the +following commands (assuming that the {\bf mtx-changer} script is installed in +{\bf /etc/bacula/mtx-changer}): + +\begin{description} + +\item [Make sure Bacula is not running.] + +\item [/etc/bacula/mtx-changer \ /dev/sg0 \ list \ 0 \ /dev/nst0 \ 0] +\index[sd]{mtx-changer list} + +This command should print: + +\footnotesize +\begin{verbatim} + 1: + 2: + 3: + ... + +\end{verbatim} +\normalsize + +or one number per line for each slot that is occupied in your changer, and +the number should be terminated by a colon ({\bf :}). If your changer has +barcodes, the barcode will follow the colon. If an error message is printed, +you must resolve the problem (e.g. try a different SCSI control device name +if {\bf /dev/sg0} is incorrect). For example, on FreeBSD systems, the +autochanger SCSI control device is generally {\bf /dev/pass2}. + +\item [/etc/bacula/mtx-changer \ /dev/sg0 \ slots ] +\index[sd]{mtx-changer slots} + +This command should return the number of slots in your autochanger. + +\item [/etc/bacula/mtx-changer \ /dev/sg0 \ unload \ 1 \ /dev/nst0 \ 0 ] +\index[sd]{mtx-changer unload} + + If a tape is loaded from slot 1, this should cause it to be unloaded. 
+ +\item [/etc/bacula/mtx-changer \ /dev/sg0 \ load \ 3 \ /dev/nst0 \ 0 ] +\index[sd]{mtx-changer load} + +Assuming you have a tape in slot 3, it will be loaded into drive (0). + + +\item [/etc/bacula/mtx-changer \ /dev/sg0 \ loaded \ 0 \ /dev/nst0 \ 0] +\index[sd]{mtx-changer loaded} + +It should print "3" +Note, we have used an "illegal" slot number 0. In this case, it is simply +ignored because the slot number is not used. However, it must be specified +because the drive parameter at the end of the command is needed to select +the correct drive. + +\item [/etc/bacula/mtx-changer \ /dev/sg0 \ unload \ 3 /dev/nst0 \ 0] + +will unload the tape into slot 3. + +\end{description} + +Once all the above commands work correctly, assuming that you have the right +{\bf Changer Command} in your configuration, Bacula should be able to operate +the changer. The only remaining area of problems will be if your autoloader +needs some time to get the tape loaded after issuing the command. After the +{\bf mtx-changer} script returns, Bacula will immediately rewind and read the +tape. If Bacula gets rewind I/O errors after a tape change, you will probably +need to insert a {\bf sleep 20} after the {\bf mtx} command, but be careful to +exit the script with a zero status by adding {\bf exit 0} after any additional +commands you add to the script. This is because Bacula checks the return +status of the script, which should be zero if all went well. + +You can test whether or not you need a {\bf sleep} by putting the following +commands into a file and running it as a script: + +\footnotesize +\begin{verbatim} +#!/bin/sh +/etc/bacula/mtx-changer /dev/sg0 unload 1 /dev/nst0 0 +/etc/bacula/mtx-changer /dev/sg0 load 3 /dev/nst0 0 +mt -f /dev/st0 rewind +mt -f /dev/st0 weof +\end{verbatim} +\normalsize + +If the above script runs, you probably have no timing problems. 
If it does not +run, start by putting a {\bf sleep 30} or possibly a {\bf sleep 60} in the +script just after the mtx-changer load command. If that works, then you should +move the sleep into the actual {\bf mtx-changer} script so that it will be +effective when Bacula runs. + +A second problem that comes up with a small number of autochangers is that +they need to have the cartridge ejected before it can be removed. If this is +the case, the {\bf load 3} will never succeed regardless of how long you wait. +If this seems to be your problem, you can insert an eject just after the +unload so that the script looks like: + +\footnotesize +\begin{verbatim} +#!/bin/sh +/etc/bacula/mtx-changer /dev/sg0 unload 1 /dev/nst0 0 +mt -f /dev/st0 offline +/etc/bacula/mtx-changer /dev/sg0 load 3 /dev/nst0 0 +mt -f /dev/st0 rewind +mt -f /dev/st0 weof +\end{verbatim} +\normalsize + +Obviously, if you need the {\bf offline} command, you should move it into the +mtx-changer script ensuring that you save the status of the {\bf mtx} command +or always force an {\bf exit 0} from the script, because Bacula checks the +return status of the script. + +As noted earlier, there are several scripts in {\bf +\lt{}bacula-source\gt{}/examples/devices} that implement the above features, +so they may be a help to you in getting your script to work. + +If Bacula complains "Rewind error on /dev/nst0. ERR=Input/output error." you +most likely need more sleep time in your {\bf mtx-changer} before returning to +Bacula after a load command has been completed. + +\label{using} + +\section{Using the Autochanger} +\index[general]{Using the Autochanger } +\index[general]{Autochanger!Using the } + +Let's assume that you have properly defined the necessary Storage daemon +Device records, and you have added the {\bf Autochanger = yes} record to the +Storage resource in your Director's configuration file. + +Now you fill your autochanger with say six blank tapes. 
+ +What do you do to make Bacula access those tapes? + +One strategy is to prelabel each of the tapes. Do so by starting Bacula, then +with the Console program, enter the {\bf label} command: + +\footnotesize +\begin{verbatim} +./bconsole +Connecting to Director rufus:8101 +1000 OK: rufus-dir Version: 1.26 (4 October 2002) +*label +\end{verbatim} +\normalsize + +it will then print something like: + +\footnotesize +\begin{verbatim} +Using default Catalog name=BackupDB DB=bacula +The defined Storage resources are: + 1: Autochanger + 2: File +Select Storage resource (1-2): 1 +\end{verbatim} +\normalsize + +I select the autochanger (1), and it prints: + +\footnotesize +\begin{verbatim} +Enter new Volume name: TestVolume1 +Enter slot (0 for none): 1 +\end{verbatim} +\normalsize + +where I entered {\bf TestVolume1} for the tape name, and slot {\bf 1} for the +slot. It then asks: + +\footnotesize +\begin{verbatim} +Defined Pools: + 1: Default + 2: File +Select the Pool (1-2): 1 +\end{verbatim} +\normalsize + +I select the Default pool. This will be automatically done if you only have a +single pool, then Bacula will proceed to unload any loaded volume, load the +volume in slot 1 and label it. In this example, nothing was in the drive, so +it printed: + +\footnotesize +\begin{verbatim} +Connecting to Storage daemon Autochanger at localhost:9103 ... +Sending label command ... +3903 Issuing autochanger "load slot 1" command. +3000 OK label. Volume=TestVolume1 Device=/dev/nst0 +Media record for Volume=TestVolume1 successfully created. +Requesting mount Autochanger ... +3001 Device /dev/nst0 is mounted with Volume TestVolume1 +You have messages. +* +\end{verbatim} +\normalsize + +You may then proceed to label the other volumes. The messages will change +slightly because Bacula will unload the volume (just labeled TestVolume1) +before loading the next volume to be labeled. + +Once all your Volumes are labeled, Bacula will automatically load them as they +are needed. 
+ +To "see" how you have labeled your Volumes, simply enter the {\bf list +volumes} command from the Console program, which should print something like +the following: + +\footnotesize +\begin{verbatim} +*{\bf list volumes} +Using default Catalog name=BackupDB DB=bacula +Defined Pools: + 1: Default + 2: File +Select the Pool (1-2): 1 ++-------+----------+--------+---------+-------+--------+----------+-------+------+ +| MedId | VolName | MedTyp | VolStat | Bites | LstWrt | VolReten | Recyc | Slot | ++-------+----------+--------+---------+-------+--------+----------+-------+------+ +| 1 | TestVol1 | DDS-4 | Append | 0 | 0 | 30672000 | 0 | 1 | +| 2 | TestVol2 | DDS-4 | Append | 0 | 0 | 30672000 | 0 | 2 | +| 3 | TestVol3 | DDS-4 | Append | 0 | 0 | 30672000 | 0 | 3 | +| ... | ++-------+----------+--------+---------+-------+--------+----------+-------+------+ +\end{verbatim} +\normalsize + +\label{Barcodes} + +\section{Barcode Support} +\index[general]{Support!Barcode } +\index[general]{Barcode Support } + +Bacula provides barcode support with two Console commands, {\bf label +barcodes} and {\bf update slots}. + +The {\bf label barcodes} will cause Bacula to read the barcodes of all the +cassettes that are currently installed in the magazine (cassette holder) using +the {\bf mtx-changer} {\bf list} command. Each cassette is mounted in turn and +labeled with the same Volume name as the barcode. + +The {\bf update slots} command will first obtain the list of cassettes and +their barcodes from {\bf mtx-changer}. Then it will find each volume in turn +in the catalog database corresponding to the barcodes and set its Slot to +correspond to the value just read. If the Volume is not in the catalog, then +nothing will be done. This command is useful for synchronizing Bacula with the +current magazine in case you have changed magazines or in case you have moved +cassettes from one slot to another. 
+ +The {\bf Cleaning Prefix} statement can be used in the Pool resource to define +a Volume name prefix, which if it matches that of the Volume (barcode) will +cause that Volume to be marked with a VolStatus of {\bf Cleaning}. This will +prevent Bacula from attempting to write on the Volume. + +\label{interface} + +\section{Bacula Autochanger Interface} +\index[general]{Interface!Bacula Autochanger } +\index[general]{Bacula Autochanger Interface } + +Bacula calls the autochanger script that you specify on the {\bf Changer +Command} statement. Normally this script will be the {\bf mtx-changer} script +that we provide, but it can in fact be any program. The only requirement +for the script is that it must understand the commands that +Bacula uses, which are {\bf loaded}, {\bf load}, {\bf +unload}, {\bf list}, and {\bf slots}. In addition, +each of those commands must return the information in the precise format as +specified below: + +\footnotesize +\begin{verbatim} +- Currently the changer commands used are: + loaded -- returns number of the slot that is loaded, base 1, + in the drive or 0 if the drive is empty. + load -- loads a specified slot (note, some autochangers + require a 30 second pause after this command) into + the drive. + unload -- unloads the device (returns cassette to its slot). + list -- returns one line for each cassette in the autochanger + in the format slot:barcode. Where + the {\bf slot} is the non-zero integer representing + the slot number, and {\bf barcode} is the barcode + associated with the cassette if it exists and if your + autoloader supports barcodes. Otherwise the barcode + field is blank. + slots -- returns total number of slots in the autochanger. +\end{verbatim} +\normalsize + +Bacula checks the exit status of the program called, and if it is zero, the +data is accepted. If the exit status is non-zero, Bacula will print an +error message and request the tape be manually mounted on the drive. 
diff --git a/docs/manuals/en/concepts/bimagemgr.bix b/docs/manuals/en/concepts/bimagemgr.bix new file mode 100644 index 00000000..8c18e201 --- /dev/null +++ b/docs/manuals/en/concepts/bimagemgr.bix @@ -0,0 +1,7 @@ +\indexentry {Bimagemgr }{2} +\indexentry {bimagemgr!Installation }{2} +\indexentry {bimagemgr Installation }{2} +\indexentry {bimagemgr!Usage }{4} +\indexentry {bimagemgr Usage }{4} +\indexentry {GNU Free Documentation License}{7} +\indexentry {License!GNU Free Documentation}{7} diff --git a/docs/manuals/en/concepts/bootstrap.tex b/docs/manuals/en/concepts/bootstrap.tex new file mode 100644 index 00000000..b69cdfbf --- /dev/null +++ b/docs/manuals/en/concepts/bootstrap.tex @@ -0,0 +1,418 @@ +%% +%% + +\chapter{The Bootstrap File} +\label{BootstrapChapter} +\index[general]{File!Bootstrap } +\index[general]{Bootstrap File } + +The information in this chapter is provided so that you may either create your +own bootstrap files, or so that you can edit a bootstrap file produced by {\bf +Bacula}. However, normally the bootstrap file will be automatically created +for you during the +\ilink{restore\_command}{_ConsoleChapter} command in the Console program, or +by using a +\ilink{ Write Bootstrap}{writebootstrap} record in your Backup +Jobs, and thus you will never need to know the details of this file. + +The {\bf bootstrap} file contains ASCII information that permits precise +specification of what files should be restored, what volume they are on, +and where they are on the volume. It is a relatively compact +form of specifying the information, is human readable, and can be edited with +any text editor. + +\section{Bootstrap File Format} +\index[general]{Format!Bootstrap} +\index[general]{Bootstrap File Format } + +The general format of a {\bf bootstrap} file is: + +{\bf \lt{}keyword\gt{}= \lt{}value\gt{}} + +Where each {\bf keyword} and the {\bf value} specify which files to restore. 
+ +More precisely the {\bf keywords} and their {\bf values} serve to limit which +files will be restored and thus act as a filter. The absence of a keyword +means that all records will be accepted. + +Blank lines and lines beginning with a pound sign (\#) in the bootstrap file +are ignored. + +There are keywords which permit filtering by Volume, Client, Job, FileIndex, +Session Id, Session Time, ... + +The more keywords that are specified, the more selective the specification of +which files to restore will be. In fact, each keyword is {\bf AND}ed with +other keywords that may be present. + +For example, + +\footnotesize +\begin{verbatim} +Volume = Test-001 +VolSessionId = 1 +VolSessionTime = 108927638 +\end{verbatim} +\normalsize + +directs the Storage daemon (or the {\bf bextract} program) to restore only +those files on Volume Test-001 {\bf AND} having VolumeSessionId equal to one +{\bf AND} having VolumeSession time equal to 108927638. + +The full set of permitted keywords presented in the order in which they are +matched against the Volume records are: + +\begin{description} + +\item [Volume] + \index[general]{Volume } + The value field specifies what Volume the following commands apply to. + Each Volume specification becomes the current Volume, to which all the + following commands apply until a new current Volume (if any) is + specified. If the Volume name contains spaces, it should be enclosed in + quotes. At least one Volume specification is required. + +\item [Count] + \index[general]{Count} + The value is the total number of files that will be restored for this Volume. + This allows the Storage daemon to know when to stop reading the Volume. + This value is optional. + +\item [VolFile] + \index[general]{VolFile} + The value is a file number, a list of file numbers, or a range of file + numbers to match on the current Volume. The file number represents the + physical file on the Volume where the data is stored. 
For a tape + volume, this record is used to position to the correct starting file, + and once the tape is past the last specified file, reading will stop. + +\item [VolBlock] + \index[general]{VolBlock} + The value is a block number, a list of block numbers, or a range of + block numbers to match on the current Volume. The block number + represents the physical block within the file on the Volume where the + data is stored. + + +\item [VolSessionTime] + \index[general]{VolSessionTime } + The value specifies a Volume Session Time to be matched from the current + volume. + +\item [VolSessionId] + \index[general]{VolSessionId } + The value specifies a VolSessionId, a list of volume session ids, or a + range of volume session ids to be matched from the current Volume. Each + VolSessionId and VolSessionTime pair corresponds to a unique Job that is + backed up on the Volume. + +\item [JobId] + \index[general]{JobId } + The value specifies a JobId, list of JobIds, or range of JobIds to be + selected from the current Volume. Note, the JobId may not be unique if you + have multiple Directors, or if you have reinitialized your database. The + JobId filter works only if you do not run multiple simultaneous jobs. + This value is optional and not used by Bacula to restore files. + +\item [Job] + \index[general]{Job } + The value specifies a Job name or list of Job names to be matched on the + current Volume. The Job corresponds to a unique VolSessionId and + VolSessionTime pair. However, the Job is perhaps a bit more readable by + humans. Standard regular expressions (wildcards) may be used to match Job + names. The Job filter works only if you do not run multiple simultaneous + jobs. + This value is optional and not used by Bacula to restore files. + +\item [Client] + \index[general]{Client } + The value specifies a Client name or list of Clients that will be matched on + the current Volume. Standard regular expressions (wildcards) may be used to + match Client names. 
The Client filter works only if you do not run multiple + simultaneous jobs. + This value is optional and not used by Bacula to restore files. + +\item [FileIndex] + \index[general]{FileIndex } + The value specifies a FileIndex, list of FileIndexes, or range of FileIndexes + to be selected from the current Volume. Each file (data) stored on a Volume + within a Session has a unique FileIndex. For each Session, the first file + written is assigned FileIndex equal to one and incremented for each file + backed up. + + This for a given Volume, the triple VolSessionId, VolSessionTime, and + FileIndex uniquely identifies a file stored on the Volume. Multiple copies of + the same file may be stored on the same Volume, but for each file, the triple + VolSessionId, VolSessionTime, and FileIndex will be unique. This triple is + stored in the Catalog database for each file. + + To restore a particular file, this value (or a range of FileIndexes) is + required. + +\item [Slot] + \index[general]{Slot } + The value specifies the autochanger slot. There may be only a single {\bf + Slot} specification for each Volume. + +\item [Stream] + \index[general]{Stream } + The value specifies a Stream, a list of Streams, or a range of Streams to be + selected from the current Volume. Unless you really know what you are doing + (the internals of {\bf Bacula}), you should avoid this specification. + This value is optional and not used by Bacula to restore files. + +\item [*JobType] + \index[general]{*JobType } + Not yet implemented. + +\item [*JobLevel] + \index[general]{*JobLevel } + Not yet implemented. +\end{description} + +The {\bf Volume} record is a bit special in that it must be the first record. +The other keyword records may appear in any order and any number following a +Volume record. + +Multiple Volume records may be specified in the same bootstrap file, but each +one starts a new set of filter criteria for the Volume. 
+ +In processing the bootstrap file within the current Volume, each filter +specified by a keyword is {\bf AND}ed with the next. Thus, + +\footnotesize +\begin{verbatim} +Volume = Test-01 +Client = "My machine" +FileIndex = 1 +\end{verbatim} +\normalsize + +will match records on Volume {\bf Test-01} {\bf AND} Client records for {\bf +My machine} {\bf AND} FileIndex equal to {\bf one}. + +Multiple occurrences of the same record are {\bf OR}ed together. Thus, + +\footnotesize +\begin{verbatim} +Volume = Test-01 +Client = "My machine" +Client = "Backup machine" +FileIndex = 1 +\end{verbatim} +\normalsize + +will match records on Volume {\bf Test-01} {\bf AND} (Client records for {\bf +My machine} {\bf OR} {\bf Backup machine}) {\bf AND} FileIndex equal to {\bf +one}. + +For integer values, you may supply a range or a list, and for all other values +except Volumes, you may specify a list. A list is equivalent to multiple +records of the same keyword. For example, + +\footnotesize +\begin{verbatim} +Volume = Test-01 +Client = "My machine", "Backup machine" +FileIndex = 1-20, 35 +\end{verbatim} +\normalsize + +will match records on Volume {\bf Test-01} {\bf AND} {\bf (}Client records for +{\bf My machine} {\bf OR} {\bf Backup machine}{\bf )} {\bf AND} {\bf +(}FileIndex 1 {\bf OR} 2 {\bf OR} 3 ... {\bf OR} 20 {\bf OR} 35{\bf )}. + +As previously mentioned above, there may be multiple Volume records in the +same bootstrap file. Each new Volume definition begins a new set of filter +conditions that apply to that Volume and will be {\bf OR}ed with any other +Volume definitions. 
+ +As an example, suppose we query for the current set of tapes to restore all +files on Client {\bf Rufus} using the {\bf query} command in the console +program: + +\footnotesize +\begin{verbatim} +Using default Catalog name=MySQL DB=bacula +*query +Available queries: + 1: List Job totals: + 2: List where a file is saved: + 3: List where the most recent copies of a file are saved: + 4: List total files/bytes by Job: + 5: List total files/bytes by Volume: + 6: List last 10 Full Backups for a Client: + 7: List Volumes used by selected JobId: + 8: List Volumes to Restore All Files: +Choose a query (1-8): 8 +Enter Client Name: Rufus ++-------+------------------+------------+-----------+----------+------------+ +| JobId | StartTime | VolumeName | StartFile | VolSesId | VolSesTime | ++-------+------------------+------------+-----------+----------+------------+ +| 154 | 2002-05-30 12:08 | test-02 | 0 | 1 | 1022753312 | +| 202 | 2002-06-15 10:16 | test-02 | 0 | 2 | 1024128917 | +| 203 | 2002-06-15 11:12 | test-02 | 3 | 1 | 1024132350 | +| 204 | 2002-06-18 08:11 | test-02 | 4 | 1 | 1024380678 | ++-------+------------------+------------+-----------+----------+------------+ +\end{verbatim} +\normalsize + +The output shows us that there are four Jobs that must be restored. The first +one is a Full backup, and the following three are all Incremental backups. + +The following bootstrap file will restore those files: + +\footnotesize +\begin{verbatim} +Volume=test-02 +VolSessionId=1 +VolSessionTime=1022753312 +Volume=test-02 +VolSessionId=2 +VolSessionTime=1024128917 +Volume=test-02 +VolSessionId=1 +VolSessionTime=1024132350 +Volume=test-02 +VolSessionId=1 +VolSessionTime=1024380678 +\end{verbatim} +\normalsize + +As a final example, assume that the initial Full save spanned two Volumes. 
The +output from {\bf query} might look like: + +\footnotesize +\begin{verbatim} ++-------+------------------+------------+-----------+----------+------------+ +| JobId | StartTime | VolumeName | StartFile | VolSesId | VolSesTime | ++-------+------------------+------------+-----------+----------+------------+ +| 242 | 2002-06-25 16:50 | File0003 | 0 | 1 | 1025016612 | +| 242 | 2002-06-25 16:50 | File0004 | 0 | 1 | 1025016612 | +| 243 | 2002-06-25 16:52 | File0005 | 0 | 2 | 1025016612 | +| 246 | 2002-06-25 19:19 | File0006 | 0 | 2 | 1025025494 | ++-------+------------------+------------+-----------+----------+------------+ +\end{verbatim} +\normalsize + +and the following bootstrap file would restore those files: + +\footnotesize +\begin{verbatim} +Volume=File0003 +VolSessionId=1 +VolSessionTime=1025016612 +Volume=File0004 +VolSessionId=1 +VolSessionTime=1025016612 +Volume=File0005 +VolSessionId=2 +VolSessionTime=1025016612 +Volume=File0006 +VolSessionId=2 +VolSessionTime=1025025494 +\end{verbatim} +\normalsize + +\section{Automatic Generation of Bootstrap Files} +\index[general]{Files!Automatic Generation of Bootstrap } +\index[general]{Automatic Generation of Bootstrap Files } + +One thing that is probably worth knowing: the bootstrap files that are +generated automatically at the end of the job are not as optimized as those +generated by the restore command. This is because during Incremental and +Differential jobs, the records pertaining to the files written for the +Job are appended to the end of the bootstrap file. +As consequence, all the files saved to an Incremental or Differential job will be +restored first by the Full save, then by any Incremental or Differential +saves. + +When the bootstrap file is generated for the restore command, only one copy +(the most recent) of each file is restored. 
+ +So if you have spare cycles on your machine, you could optimize the bootstrap +files by doing the following: + +\footnotesize +\begin{verbatim} + ./bconsole + restore client=xxx select all + done + no + quit + Backup bootstrap file. +\end{verbatim} +\normalsize + +The above will not work if you have multiple FileSets because that will be an +extra prompt. However, the {\bf restore client=xxx select all} builds the +in-memory tree, selecting everything and creates the bootstrap file. + +The {\bf no} answers the {\bf Do you want to run this (yes/mod/no)} question. + +\label{bscanBootstrap} +\section{Bootstrap for bscan} +\index[general]{bscan} +\index[general]{bscan!bootstrap} +\index[general]{bscan bootstrap} +If you have a very large number of Volumes to scan with {\bf bscan}, +you may exceed the command line limit (511 characters). I that case, +you can create a simple bootstrap file that consists of only the +volume names. An example might be: + +\footnotesize +\begin{verbatim} +Volume="Vol001" +Volume="Vol002" +Volume="Vol003" +Volume="Vol004" +Volume="Vol005" +\end{verbatim} +\normalsize + + +\section{A Final Bootstrap Example} +\index[general]{Bootstrap Example} +\index[general]{Example!Bootstrap} + +If you want to extract or copy a single Job, you can do it by selecting by +JobId (code not tested) or better yet, if you know the VolSessionTime and the +VolSessionId (printed on Job report and in Catalog), specifying this is by far +the best. Using the VolSessionTime and VolSessionId is the way Bacula does +restores. 
A bsr file might look like the following: + +\footnotesize +\begin{verbatim} +Volume="Vol001" +VolSessionId=10 +VolSessionTime=1080847820 +\end{verbatim} +\normalsize + +If you know how many files are backed up (on the job report), you can +enormously speed up the selection by adding (let's assume there are 157 +files): + +\footnotesize +\begin{verbatim} +FileIndex=1-157 +Count=157 +\end{verbatim} +\normalsize + +Finally, if you know the File number where the Job starts, you can also cause +bcopy to forward space to the right file without reading every record: + +\footnotesize +\begin{verbatim} +VolFile=20 +\end{verbatim} +\normalsize + +There is nothing magic or complicated about a BSR file. Parsing it and +properly applying it within Bacula *is* magic, but you don't need to worry +about that. + +If you want to see a *real* bsr file, simply fire up the {\bf restore} command +in the console program, select something, then answer no when it prompts to +run the job. Then look at the file {\bf restore.bsr} in your working +directory. diff --git a/docs/manuals/en/concepts/bugs.tex b/docs/manuals/en/concepts/bugs.tex new file mode 100644 index 00000000..42df829d --- /dev/null +++ b/docs/manuals/en/concepts/bugs.tex @@ -0,0 +1,21 @@ +%% +%% + +\section{Bacula Bugs} +\label{BugsChapter} +\index[general]{Bacula Bugs } +\index[general]{Bugs!Bacula } + +Well fortunately there are not too many bugs, but thanks to Dan Langille, we +have a +\elink{bugs database}{http://bugs.bacula.org} where bugs are reported. +Generally, when a bug is fixed, a patch for the currently released version will +be attached to the bug report. + +The directory {\bf patches} in the current SVN always contains a list of +the patches that have been created for the previously released version +of Bacula. In addition, the file {\bf patches-version-number} in the +{\bf patches} directory contains a summary of each of the patches. 
+ +A "raw" list of the current task list and known issues can be found in {\bf +kernstodo} in the main Bacula source directory. diff --git a/docs/manuals/en/concepts/catmaintenance.tex b/docs/manuals/en/concepts/catmaintenance.tex new file mode 100644 index 00000000..eeb36b8b --- /dev/null +++ b/docs/manuals/en/concepts/catmaintenance.tex @@ -0,0 +1,762 @@ +%% +%% + +\chapter{Catalog Maintenance} +\label{CatMaintenanceChapter} +\index[general]{Maintenance!Catalog } +\index[general]{Catalog Maintenance } + +Without proper setup and maintenance, your Catalog may continue to grow +indefinitely as you run Jobs and backup Files, and/or it may become +very inefficient and slow. How fast the size of your +Catalog grows depends on the number of Jobs you run and how many files they +backup. By deleting records within the database, you can make space available +for the new records that will be added during the next Job. By constantly +deleting old expired records (dates older than the Retention period), your +database size will remain constant. + +If you started with the default configuration files, they already contain +reasonable defaults for a small number of machines (less than 5), so if you +fall into that case, catalog maintenance will not be urgent if you have a few +hundred megabytes of disk space free. Whatever the case may be, some knowledge +of retention periods will be useful. +\label{Retention} + +\section{Setting Retention Periods} +\index[general]{Setting Retention Periods } +\index[general]{Periods!Setting Retention } + +{\bf Bacula} uses three Retention periods: the {\bf File Retention} period, +the {\bf Job Retention} period, and the {\bf Volume Retention} period. Of +these three, the File Retention period is by far the most important in +determining how large your database will become. + +The {\bf File Retention} and the {\bf Job Retention} are specified in each +Client resource as is shown below. 
The {\bf Volume Retention} period is +specified in the Pool resource, and the details are given in the next chapter +of this manual. + +\begin{description} + +\item [File Retention = \lt{}time-period-specification\gt{}] + \index[dir]{File Retention } + The File Retention record defines the length of time that Bacula will keep +File records in the Catalog database. When this time period expires, and if +{\bf AutoPrune} is set to {\bf yes}, Bacula will prune (remove) File records +that are older than the specified File Retention period. The pruning will +occur at the end of a backup Job for the given Client. Note that the Client +database record contains a copy of the File and Job retention periods, but +Bacula uses the current values found in the Director's Client resource to do +the pruning. + +Since File records in the database account for probably 80 percent of the +size of the database, you should carefully determine exactly what File +Retention period you need. Once the File records have been removed from +the database, you will no longer be able to restore individual files +in a Job. However, with Bacula version 1.37 and later, as long as the +Job record still exists, you will be able to restore all files in the +job. + +Retention periods are specified in seconds, but as a convenience, there are +a number of modifiers that permit easy specification in terms of minutes, +hours, days, weeks, months, quarters, or years on the record. See the +\ilink{ Configuration chapter}{Time} of this manual for additional details +of modifier specification. + +The default File retention period is 60 days. + +\item [Job Retention = \lt{}time-period-specification\gt{}] + \index[dir]{Job Retention } + The Job Retention record defines the length of time that {\bf Bacula} +will keep Job records in the Catalog database. 
When this time period +expires, and if {\bf AutoPrune} is set to {\bf yes} Bacula will prune +(remove) Job records that are older than the specified Job Retention +period. Note, if a Job record is selected for pruning, all associated File +and JobMedia records will also be pruned regardless of the File Retention +period set. As a consequence, you normally will set the File retention +period to be less than the Job retention period. + +As mentioned above, once the File records are removed from the database, +you will no longer be able to restore individual files from the Job. +However, as long as the Job record remains in the database, you will be +able to restore all the files backuped for the Job (on version 1.37 and +later). As a consequence, it is generally a good idea to retain the Job +records much longer than the File records. + +The retention period is specified in seconds, but as a convenience, there +are a number of modifiers that permit easy specification in terms of +minutes, hours, days, weeks, months, quarters, or years. See the \ilink{ +Configuration chapter}{Time} of this manual for additional details of +modifier specification. + +The default Job Retention period is 180 days. + +\item [AutoPrune = \lt{}yes/no\gt{}] + \index[dir]{AutoPrune } + If AutoPrune is set to {\bf yes} (default), Bacula will automatically apply +the File retention period and the Job retention period for the Client at the +end of the Job. + +If you turn this off by setting it to {\bf no}, your Catalog will grow each +time you run a Job. +\end{description} + +\label{CompactingMySQL} +\section{Compacting Your MySQL Database} +\index[general]{Database!Compacting Your MySQL } +\index[general]{Compacting Your MySQL Database } + +Over time, as noted above, your database will tend to grow. I've noticed that +even though Bacula regularly prunes files, {\bf MySQL} does not effectively +use the space, and instead continues growing. 
To avoid this, from time to +time, you must compact your database. Normally, large commercial database such +as Oracle have commands that will compact a database to reclaim wasted file +space. MySQL has the {\bf OPTIMIZE TABLE} command that you can use, and SQLite +version 2.8.4 and greater has the {\bf VACUUM} command. We leave it to you to +explore the utility of the {\bf OPTIMIZE TABLE} command in MySQL. + +All database programs have some means of writing the database out in ASCII +format and then reloading it. Doing so will re-create the database from +scratch producing a compacted result, so below, we show you how you can do +this for MySQL, PostgreSQL and SQLite. + +For a {\bf MySQL} database, you could write the Bacula database as an ASCII +file (bacula.sql) then reload it by doing the following: + +\footnotesize +\begin{verbatim} +mysqldump -f --opt bacula > bacula.sql +mysql bacula < bacula.sql +rm -f bacula.sql +\end{verbatim} +\normalsize + +Depending on the size of your database, this will take more or less time and a +fair amount of disk space. For example, if I cd to the location of the MySQL +Bacula database (typically /opt/mysql/var or something similar) and enter: + +\footnotesize +\begin{verbatim} +du bacula +\end{verbatim} +\normalsize + +I get {\bf 620,644} which means there are that many blocks containing 1024 +bytes each or approximately 635 MB of data. After doing the {\bf mysqldump}, I +had a bacula.sql file that had {\bf 174,356} blocks, and after doing the {\bf +mysql} command to recreate the database, I ended up with a total of {\bf +210,464} blocks rather than the original {\bf 629,644}. In other words, the +compressed version of the database took approximately one third of the space +of the database that had been in use for about a year. + +As a consequence, I suggest you monitor the size of your database and from +time to time (once every six months or year), compress it. 
+ +\label{DatabaseRepair} +\label{RepairingMySQL} +\section{Repairing Your MySQL Database} +\index[general]{Database!Repairing Your MySQL } +\index[general]{Repairing Your MySQL Database } + +If you find that you are getting errors writing to your MySQL database, or +Bacula hangs each time it tries to access the database, you should consider +running MySQL's database check and repair routines. The program you need to +run depends on the type of database indexing you are using. If you are using +the default, you will probably want to use {\bf myisamchk}. For more details +on how to do this, please consult the MySQL document at: +\elink{ +http://www.mysql.com/doc/en/Repair.html} +{http://www.mysql.com/doc/en/Repair.html}. + +If the errors you are getting are simply SQL warnings, then you might try +running dbcheck before (or possibly after) using the MySQL database repair +program. It can clean up many of the orphaned record problems, and certain +other inconsistencies in the Bacula database. + +A typical cause of MySQL database problems is if your partition fills. In +such a case, you will need to create additional space on the partition or +free up some space then repair the database probably using {\bf myisamchk}. +Recently my root partition filled and the MySQL database was corrupted. +Simply running {\bf myisamchk -r} did not fix the problem. However, +the following script did the trick for me: + +\footnotesize +\begin{verbatim} +#!/bin/sh +for i in *.MYD ; do + mv $i x${i} + t=`echo $i | cut -f 1 -d '.' 
-` + mysql bacula <bacula.db +select * from sqlite_master where type='index' and tbl_name='File'; +\end{verbatim} +\normalsize + +If the indexes are not present, especially the JobId index, you can +create them with the following commands: + +\footnotesize +\begin{verbatim} +mysql bacula +CREATE INDEX file_jobid_idx on File (JobId); +CREATE INDEX file_jfp_idx on File (Job, FilenameId, PathId); +\end{verbatim} +\normalsize + + + +\label{CompactingPostgres} +\section{Compacting Your PostgreSQL Database} +\index[general]{Database!Compacting Your PostgreSQL } +\index[general]{Compacting Your PostgreSQL Database } + +Over time, as noted above, your database will tend to grow. I've noticed that +even though Bacula regularly prunes files, PostgreSQL has a {\bf VACUUM} +command that will compact your database for you. Alternatively you may want to +use the {\bf vacuumdb} command, which can be run from a cron job. + +All database programs have some means of writing the database out in ASCII +format and then reloading it. Doing so will re-create the database from +scratch producing a compacted result, so below, we show you how you can do +this for PostgreSQL. + +For a {\bf PostgreSQL} database, you could write the Bacula database as an +ASCII file (bacula.sql) then reload it by doing the following: + +\footnotesize +\begin{verbatim} +pg_dump -c bacula > bacula.sql +cat bacula.sql | psql bacula +rm -f bacula.sql +\end{verbatim} +\normalsize + +Depending on the size of your database, this will take more or less time and a +fair amount of disk space. For example, you can {\bf cd} to the location of +the Bacula database (typically /usr/local/pgsql/data or possible +/var/lib/pgsql/data) and check the size. + +There are certain PostgreSQL users who do not recommend the above +procedure. They have the following to say: +PostgreSQL does not +need to be dumped/restored to keep the database efficient. 
A normal +process of vacuuming will prevent the database from every getting too +large. If you want to fine-tweak the database storage, commands such +as VACUUM FULL, REINDEX, and CLUSTER exist specifically to keep you +from having to do a dump/restore. + +Finally, you might want to look at the PostgreSQL documentation on +this subject at +\elink{http://www.postgresql.org/docs/8.1/interactive/maintenance.html} +{http://www.postgresql.org/docs/8.1/interactive/maintenance.html}. + +\section{Compacting Your SQLite Database} +\index[general]{Compacting Your SQLite Database } +\index[general]{Database!Compacting Your SQLite } + +First please read the previous section that explains why it is necessary to +compress a database. SQLite version 2.8.4 and greater have the {\bf Vacuum} +command for compacting the database. + +\footnotesize +\begin{verbatim} +cd {\bf working-directory} +echo 'vacuum;' | sqlite bacula.db +\end{verbatim} +\normalsize + +As an alternative, you can use the following commands, adapted to your system: + + +\footnotesize +\begin{verbatim} +cd {\bf working-directory} +echo '.dump' | sqlite bacula.db > bacula.sql +rm -f bacula.db +sqlite bacula.db < bacula.sql +rm -f bacula.sql +\end{verbatim} +\normalsize + +Where {\bf working-directory} is the directory that you specified in the +Director's configuration file. Note, in the case of SQLite, it is necessary to +completely delete (rm) the old database before creating a new compressed +version. + +\section{Migrating from SQLite to MySQL} +\index[general]{MySQL!Migrating from SQLite to } +\index[general]{Migrating from SQLite to MySQL } + +You may begin using Bacula with SQLite then later find that you want to switch +to MySQL for any of a number of reasons: SQLite tends to use more disk than +MySQL; when the database is corrupted it is often more catastrophic than +with MySQL or PostgreSQL. 
+Several users have succeeded in converting from SQLite to MySQL by +exporting the MySQL data and then processing it with Perl scripts +prior to putting it into MySQL. This is, however, not a simple +process. + +\label{BackingUpBacula} +\section{Backing Up Your Bacula Database} +\index[general]{Backing Up Your Bacula Database } +\index[general]{Database!Backing Up Your Bacula } + +If ever the machine on which your Bacula database crashes, and you need to +restore from backup tapes, one of your first priorities will probably be to +recover the database. Although Bacula will happily backup your catalog +database if it is specified in the FileSet, this is not a very good way to do +it, because the database will be saved while Bacula is modifying it. Thus the +database may be in an instable state. Worse yet, you will backup the database +before all the Bacula updates have been applied. + +To resolve these problems, you need to backup the database after all the backup +jobs have been run. In addition, you will want to make a copy while Bacula is +not modifying it. To do so, you can use two scripts provided in the release +{\bf make\_catalog\_backup} and {\bf delete\_catalog\_backup}. These files +will be automatically generated along with all the other Bacula scripts. The +first script will make an ASCII copy of your Bacula database into {\bf +bacula.sql} in the working directory you specified in your configuration, and +the second will delete the {\bf bacula.sql} file. 
+ +The basic sequence of events to make this work correctly is as follows: + +\begin{itemize} +\item Run all your nightly backups +\item After running your nightly backups, run a Catalog backup Job +\item The Catalog backup job must be scheduled after your last nightly backup + +\item You use {\bf RunBeforeJob} to create the ASCII backup file and {\bf + RunAfterJob} to clean up +\end{itemize} + +Assuming that you start all your nightly backup jobs at 1:05 am (and that they +run one after another), you can do the catalog backup with the following +additional Director configuration statements: + +\footnotesize +\begin{verbatim} +# Backup the catalog database (after the nightly save) +Job { + Name = "BackupCatalog" + Type = Backup + Client=rufus-fd + FileSet="Catalog" + Schedule = "WeeklyCycleAfterBackup" + Storage = DLTDrive + Messages = Standard + Pool = Default + # WARNING!!! Passing the password via the command line is insecure. + # see comments in make_catalog_backup for details. + RunBeforeJob = "/home/kern/bacula/bin/make_catalog_backup" + RunAfterJob = "/home/kern/bacula/bin/delete_catalog_backup" + Write Bootstrap = "/home/kern/bacula/working/BackupCatalog.bsr" +} +# This schedule does the catalog. It starts after the WeeklyCycle +Schedule { + Name = "WeeklyCycleAfterBackup + Run = Level=Full sun-sat at 1:10 +} +# This is the backup of the catalog +FileSet { + Name = "Catalog" + Include { + Options { + signature=MD5 + } + File = \lt{}working_directory\gt{}/bacula.sql + } +} +\end{verbatim} +\normalsize + +Be sure to write a bootstrap file as in the above example. However, it is preferable +to write or copy the bootstrap file to another computer. It will allow +you to quickly recover the database backup should that be necessary. If +you do not have a bootstrap file, it is still possible to recover your +database backup, but it will be more work and take longer. 
+ + +\label{BackingUpBaculaSecurityConsiderations} +\section{Security considerations} +\index[general]{Backing Up Your Bacula Database - Security Considerations } +\index[general]{Database!Backing Up Your Bacula Database - Security Considerations } + +We provide make\_catalog\_backup as an example of what can be used to backup +your Bacula database. We expect you to take security precautions relevant +to your situation. make\_catalog\_backup is designed to take a password on +the command line. This is fine on machines with only trusted users. It is +not acceptable on machines without trusted users. Most database systems +provide a alternative method, which does not place the password on the +command line. + +The make\_catalog\_backup script contains some warnings about how to use it. Please +read those tips. + +To help you get started, we know PostgreSQL has a password file, +\elink{ +.pgpass}{http://www.postgresql.org/docs/8.2/static/libpq-pgpass.html}, and +we know MySQL has +\elink{ .my.cnf}{http://dev.mysql.com/doc/refman/4.1/en/password-security.html}. + +Only you can decide what is appropriate for your situation. We have provided +you with a starting point. We hope it helps. + + +\label{BackingUPOtherDBs} +\section{Backing Up Third Party Databases} +\index[general]{Backing Up Third Party Databases } +\index[general]{Databases!Backing Up Third Party } + +If you are running a database in production mode on your machine, Bacula will +happily backup the files, but if the database is in use while Bacula is +reading it, you may back it up in an unstable state. + +The best solution is to shutdown your database before backing it up, or use +some tool specific to your database to make a valid live copy perhaps by +dumping the database in ASCII format. 
I am not a database expert, so I cannot +provide you advice on how to do this, but if you are unsure about how to +backup your database, you might try visiting the Backup Central site, which +has been renamed Storage Mountain (www.backupcentral.com). In particular, +their +\elink{ Free Backup and Recovery +Software}{http://www.backupcentral.com/toc-free-backup-software.html} page has +links to scripts that show you how to shutdown and backup most major +databases. +\label{Size} + +\section{Database Size} +\index[general]{Size!Database } +\index[general]{Database Size } + +As mentioned above, if you do not do automatic pruning, your Catalog will grow +each time you run a Job. Normally, you should decide how long you want File +records to be maintained in the Catalog and set the {\bf File Retention} +period to that time. Then you can either wait and see how big your Catalog +gets or make a calculation assuming approximately 154 bytes for each File +saved and knowing the number of Files that are saved during each backup and +the number of Clients you backup. + +For example, suppose you do a backup of two systems, each with 100,000 files. +Suppose further that you do a Full backup weekly and an Incremental every day, +and that the Incremental backup typically saves 4,000 files. The size of your +database after a month can roughly be calculated as: + +\footnotesize +\begin{verbatim} + Size = 154 * No. Systems * (100,000 * 4 + 10,000 * 26) +\end{verbatim} +\normalsize + +where we have assumed four weeks in a month and 26 incremental backups per month. +This would give the following: + +\footnotesize +\begin{verbatim} + Size = 154 * 2 * (100,000 * 4 + 10,000 * 26) +or + Size = 308 * (400,000 + 260,000) +or + Size = 203,280,000 bytes +\end{verbatim} +\normalsize + +So for the above two systems, we should expect to have a database size of +approximately 200 Megabytes. Of course, this will vary according to how many +files are actually backed up. 
+ +Below are some statistics for a MySQL database containing Job records for five +Clients beginning September 2001 through May 2002 (8.5 months) and File +records for the last 80 days. (Older File records have been pruned). For these +systems, only the user files and system files that change are backed up. The +core part of the system is assumed to be easily reloaded from the Red Hat rpms. + + +In the list below, the files (corresponding to Bacula Tables) with the +extension .MYD contain the data records whereas files with the extension .MYI +contain indexes. + +You will note that the File records (containing the file attributes) make up +the large bulk of the number of records as well as the space used (459 Mega +Bytes including the indexes). As a consequence, the most important Retention +period will be the {\bf File Retention} period. A quick calculation shows that +for each File that is saved, the database grows by approximately 150 bytes. + +\footnotesize +\begin{verbatim} + Size in + Bytes Records File + ============ ========= =========== + 168 5 Client.MYD + 3,072 Client.MYI + 344,394,684 3,080,191 File.MYD + 115,280,896 File.MYI + 2,590,316 106,902 Filename.MYD + 3,026,944 Filename.MYI + 184 4 FileSet.MYD + 2,048 FileSet.MYI + 49,062 1,326 JobMedia.MYD + 30,720 JobMedia.MYI + 141,752 1,378 Job.MYD + 13,312 Job.MYI + 1,004 11 Media.MYD + 3,072 Media.MYI + 1,299,512 22,233 Path.MYD + 581,632 Path.MYI + 36 1 Pool.MYD + 3,072 Pool.MYI + 5 1 Version.MYD + 1,024 Version.MYI +\end{verbatim} +\normalsize + +This database has a total size of approximately 450 Megabytes. + +If we were using SQLite, the determination of the total database size would be +much easier since it is a single file, but we would have less insight to the +size of the individual tables as we have in this case. + +Note, SQLite databases may be as much as 50\% larger than MySQL databases due +to the fact that all data is stored as ASCII strings. 
That is, even binary +integers are stored as ASCII strings, and this seems to increase the space +needed. diff --git a/docs/manuals/en/concepts/check_tex.pl b/docs/manuals/en/concepts/check_tex.pl new file mode 100755 index 00000000..e12d51be --- /dev/null +++ b/docs/manuals/en/concepts/check_tex.pl @@ -0,0 +1,152 @@ +#!/usr/bin/perl -w +# Finds potential problems in tex files, and issues warnings to the console +# about what it finds. Takes a list of files as its only arguments, +# and does checks on all the files listed. The assumption is that these are +# valid (or close to valid) LaTeX files. It follows \include statements +# recursively to pick up any included tex files. +# +# +# +# Currently the following checks are made: +# +# -- Multiple hyphens not inside a verbatim environment (or \verb). These +# should be placed inside a \verb{} construct so they will not be converted +# to single hyphen by latex and latex2html. + + +# Original creation 3-8-05 by Karl Cunningham karlc -at- keckec -dot- com +# +# + +use strict; + +# The following builds the test string to identify and change multiple +# hyphens in the tex files. Several constructs are identified but only +# multiple hyphens are changed; the others are fed to the output +# unchanged. +my $b = '\\\\begin\\*?\\s*\\{\\s*'; # \begin{ +my $e = '\\\\end\\*?\\s*\\{\\s*'; # \end{ +my $c = '\\s*\\}'; # closing curly brace + +# This captures entire verbatim environments. These are passed to the output +# file unchanged. +my $verbatimenv = $b . "verbatim" . $c . ".*?" . $e . "verbatim" . $c; + +# This captures \verb{..{ constructs. They are passed to the output unchanged. +my $verb = '\\\\verb\\*?(.).*?\\1'; + +# This captures multiple hyphens with a leading and trailing space. These are not changed. +my $hyphsp = '\\s\\-{2,}\\s'; + +# This identifies other multiple hyphens. +my $hyphens = '\\-{2,}'; + +# This identifies \hyperpage{..} commands, which should be ignored. 
+my $hyperpage = '\\\\hyperpage\\*?\\{.*?\\}'; + +# This builds the actual test string from the above strings. +#my $teststr = "$verbatimenv|$verb|$tocentry|$hyphens"; +my $teststr = "$verbatimenv|$verb|$hyphsp|$hyperpage|$hyphens"; + + +sub get_includes { + # Get a list of include files from the top-level tex file. The first + # argument is a pointer to the list of files found. The rest of the + # arguments is a list of filenames to check for includes. + my $files = shift; + my ($fileline,$includefile,$includes); + + while (my $filename = shift) { + # Get a list of all the html files in the directory. + open my $if,"<$filename" or die "Cannot open input file $filename\n"; + $fileline = 0; + $includes = 0; + while (<$if>) { + chomp; + $fileline++; + # If a file is found in an include, process it. + if (($includefile) = /\\include\s*\{(.*?)\}/) { + $includes++; + # Append .tex to the filename + $includefile .= '.tex'; + + # If the include file has already been processed, issue a warning + # and don't do it again. + my $found = 0; + foreach (@$files) { + if ($_ eq $includefile) { + $found = 1; + last; + } + } + if ($found) { + print "$includefile found at line $fileline in $filename was previously included\n"; + } else { + # The file has not been previously found. Save it and + # recursively process it. + push (@$files,$includefile); + get_includes($files,$includefile); + } + } + } + close $if; + } +} + + +sub check_hyphens { + my (@files) = @_; + my ($filedata,$this,$linecnt,$before); + + # Build the test string to check for the various environments. + # We only do the conversion if the multiple hyphens are outside of a + # verbatim environment (either \begin{verbatim}...\end{verbatim} or + # \verb{--}). Capture those environments and pass them to the output + # unchanged. + + foreach my $file (@files) { + # Open the file and load the whole thing into $filedata. A bit wasteful but + # easier to deal with, and we don't have a problem with speed here. 
+ $filedata = ""; + open IF,"<$file" or die "Cannot open input file $file"; + while (<IF>) { + $filedata .= $_; + } + close IF; + + # Set up to process the file data. + $linecnt = 1; + + # Go through the file data from beginning to end. For each match, save what + # came before it and what matched. $filedata now becomes only what came + # after the match. + # Check the match to see if it starts with a multiple-hyphen. If so + # warn the user. Keep track of line numbers so they can be output + # with the warning message. + while ($filedata =~ /$teststr/os) { + $this = $&; + $before = $`; + $filedata = $'; + $linecnt += $before =~ tr/\n/\n/; + + # Check if the multiple hyphen is present outside of one of the + # acceptable constructs. + if ($this =~ /^\-+/) { + print "Possible unwanted multiple hyphen found in line ", + "$linecnt of file $file\n"; + } + $linecnt += $this =~ tr/\n/\n/; + } + } +} +################################################################## +# MAIN #### +################################################################## + +my (@includes,$cnt); + +# Examine the file pointed to by the first argument to get a list of +# includes to test. 
+get_includes(\@includes,@ARGV); + +check_hyphens(@includes); diff --git a/docs/manuals/en/concepts/concepts.tex b/docs/manuals/en/concepts/concepts.tex new file mode 100644 index 00000000..c4f4b08c --- /dev/null +++ b/docs/manuals/en/concepts/concepts.tex @@ -0,0 +1,115 @@ +%% +%% +%% The following characters must be preceded by a backslash +%% to be entered as printable characters: +%% +%% # $ % & ~ _ ^ \ { } +%% + +\documentclass[11pt,a4paper]{book} +\usepackage{html} +\usepackage{float} +\usepackage{graphicx} +\usepackage{bacula} +\usepackage{longtable} +\usepackage{makeidx} +\usepackage{index} +\usepackage{setspace} +\usepackage{hyperref} +\usepackage{url} + + +\makeindex +\newindex{dir}{ddx}{dnd}{Director Index} +\newindex{fd}{fdx}{fnd}{File Daemon Index} +\newindex{sd}{sdx}{snd}{Storage Daemon Index} +\newindex{console}{cdx}{cnd}{Console Index} +\newindex{general}{idx}{ind}{General Index} + +\sloppy + +\begin{document} +\sloppy + +\newfont{\bighead}{cmr17 at 36pt} +\parskip 10pt +\parindent 0pt + +\title{\includegraphics{./bacula-logo.eps} \\ \bigskip + \Huge{Bacula Concepts and Overview Guide} + \begin{center} + \large{It comes in the night and sucks + the essence from your computers. } + \end{center} +} + + +\author{Kern Sibbald} +\date{\vspace{1.0in}\today \\ + This manual documents Bacula version \input{version} \\ + \vspace{0.2in} + Copyright \copyright 1999-2007, Free Software Foundation Europe + e.V. \\ + \vspace{0.2in} + Permission is granted to copy, distribute and/or modify this document under the terms of the + GNU Free Documentation License, Version 1.2 published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU Free Documentation License". 
+} + +\maketitle + +\clearpage +\pagenumbering{roman} +\tableofcontents +\clearpage +\listoffigures +\clearpage +\listoftables +\clearpage + +\markboth{Bacula Manual}{} +\pagenumbering{arabic} +\include{general} +\include{state} +\include{requirements} +\include{supportedoses} +\include{supporteddrives} +\include{tutorial} +\include{restore} +\include{recycling} +\include{disk} +\include{dvd} +\include{pools} +\include{migration} +\include{strategies} +\include{autochangers} +\include{supportedchangers} +\include{spooling} +\include{python} +\include{ansi-labels} +\include{win32} +\include{rescue} +\include{tls} +\include{dataencryption} +\include{verify} +\include{bootstrap} +\include{license} +\include{fdl} +\include{gpl} +\include{lesser} +\include{projects} +\include{thanks} +\include{bugs} +\include{vars} +\include{stunnel} + +% pull in the index +\clearpage +\printindex[general] +\printindex[dir] +\printindex[fd] +\printindex[sd] +\printindex[console] + +\end{document} diff --git a/docs/manuals/en/concepts/dataencryption.tex b/docs/manuals/en/concepts/dataencryption.tex new file mode 100644 index 00000000..34b050fe --- /dev/null +++ b/docs/manuals/en/concepts/dataencryption.tex @@ -0,0 +1,195 @@ + +\chapter{Data Encryption} +\label{DataEncryption} +\index[general]{Data Encryption} +\index[general]{Encryption!Data} +\index[general]{Data Encryption} + +Bacula permits file data encryption and signing within the File Daemon (or +Client) prior to sending data to the Storage Daemon. Upon restoration, +file signatures are validated and any mismatches are reported. At no time +does the Director or the Storage Daemon have access to unencrypted file +contents. 
+ + +It is very important to specify what this implementation does NOT +do: +\begin{itemize} +\item There is one important restore problem to be aware of, namely, it's + possible for the director to restore new keys or a Bacula configuration + file to the client, and thus force later backups to be made with a + compromised key and/or with no encryption at all. You can avoid this by + not changing the location of the keys in your Bacula File daemon + configuration file, and not changing your File daemon keys. If you do + change either one, you must ensure that no restore is done that restores + the old configuration or the old keys. In general, the worst effect of + this will be that you can no longer connect the File daemon. + +\item The implementation does not encrypt file metadata such as file path + names, permissions, and ownership. Extended attributes are also currently + not encrypted. However, Mac OS X resource forks are encrypted. +\end{itemize} + +Encryption and signing are implemented using RSA private keys coupled with +self-signed x509 public certificates. This is also sometimes known as PKI +or Public Key Infrastructure. + +Each File Daemon should be given its own unique private/public key pair. +In addition to this key pair, any number of "Master Keys" may be specified +-- these are key pairs that may be used to decrypt any backups should the +File Daemon key be lost. Only the Master Key's public certificate should +be made available to the File Daemon. Under no circumstances should the +Master Private Key be shared or stored on the Client machine. + +The Master Keys should be backed up to a secure location, such as a CD +placed in a fire-proof safe or bank safety deposit box. The Master +Keys should never be kept on the same machine as the Storage Daemon or +Director if you are worried about an unauthorized party compromising either +machine and accessing your encrypted backups. 
+ +While less critical than the Master Keys, File Daemon Keys are also a prime +candidate for off-site backups; burn the key pair to a CD and send the CD +home with the owner of the machine. + +NOTE!!! If you lose your encryption keys, backups will be unrecoverable. +{\bf ALWAYS} store a copy of your master keys in a secure, off-site location. + +The basic algorithm used for each backup session (Job) is: +\begin{enumerate} +\item The File daemon generates a session key. +\item The FD encrypts that session key via PKE for all recipients (the file +daemon, any master keys). +\item The FD uses that session key to perform symmetric encryption on the data. +\end{enumerate} + + +\section{Building Bacula with Encryption Support} +\index[general]{Building Bacula with Encryption Support} + +The configuration option for enabling OpenSSL encryption support has not changed +since Bacula 1.38. To build Bacula with encryption support, you will need +the OpenSSL libraries and headers installed. When configuring Bacula, use: + +\begin{verbatim} + ./configure --with-openssl ... +\end{verbatim} + +\section{Encryption Technical Details} +\index[general]{Encryption Technical Details} + +The implementation uses 128bit AES-CBC, with RSA encrypted symmetric +session keys. The RSA key is user supplied. +If you are running OpenSSL 0.9.8 or later, the signed file hash uses +SHA-256 -- otherwise, SHA-1 is used. + +End-user configuration settings for the algorithms are not currently +exposed -- only the algorithms listed above are used. 
However, the +data written to Volume supports arbitrary symmetric, asymmetric, and +digest algorithms for future extensibility, and the back-end +implementation currently supports: + +\begin{verbatim} +Symmetric Encryption: + - 128, 192, and 256-bit AES-CBC + - Blowfish-CBC + +Asymmetric Encryption (used to encrypt symmetric session keys): + - RSA + +Digest Algorithms: + - MD5 + - SHA1 + - SHA256 + - SHA512 +\end{verbatim} + +The various algorithms are exposed via an entirely re-usable, +OpenSSL-agnostic API (ie, it is possible to drop in a new encryption +backend). The Volume format is DER-encoded ASN.1, modeled after the +Cryptographic Message Syntax from RFC 3852. Unfortunately, using CMS +directly was not possible, as at the time of coding a free software +streaming DER decoder/encoder was not available. + + +\section{Decrypting with a Master Key} +\index[general]{Decrypting with a Master Key} + +It is preferable to retain a secure, non-encrypted copy of the +client's own encryption keypair. However, should you lose the +client's keypair, recovery with the master keypair is possible. + +You must: +\begin{itemize} +\item Concatenate the master private and public key into a single + keypair file, ie: + cat master.key master.cert >master.keypair + +\item Set the PKI Keypair statement in your bacula configuration file: + +\begin{verbatim} + PKI Keypair = master.keypair +\end{verbatim} + +\item Start the restore. The master keypair will be used to decrypt + the file data. 
+ +\end{itemize} + + +\section{Generating Private/Public Encryption Keys} +\index[general]{Generating Private/Public Encryption Keypairs} + +Generate a Master Key Pair with: + +\footnotesize +\begin{verbatim} + openssl genrsa -out master.key 2048 + openssl req -new -key master.key -x509 -out master.cert +\end{verbatim} +\normalsize + +Generate a File Daemon Key Pair for each FD: + +\footnotesize +\begin{verbatim} + openssl genrsa -out fd-example.key 2048 + openssl req -new -key fd-example.key -x509 -out fd-example.cert + cat fd-example.key fd-example.cert >fd-example.pem +\end{verbatim} +\normalsize + +Note, there seems to be a lot of confusion around the file extensions given +to these keys. For example, a .pem file can contain all the following: +private keys (RSA and DSA), public keys (RSA and DSA) and (x509) certificates. +It is the default format for OpenSSL. It stores data Base64 encoded DER format, +surrounded by ASCII headers, so is suitable for text mode transfers between +systems. A .pem file may contain any number of keys either public or +private. We use it in cases where there is both a public and a private +key. + +Typically, above we have used the .cert extension to refer to X509 +certificate encoding that contains only a single public key. 
+ + +\section{Example Data Encryption Configuration} +\index[general]{Example!File Daemon Configuration File} +\index[general]{Example!Data Encryption Configuration File} +\index[general]{Example Data Encryption Configuration} + +{\bf bacula-fd.conf} +\footnotesize +\begin{verbatim} +FileDaemon { + Name = example-fd + FDport = 9102 # where we listen for the director + WorkingDirectory = /var/bacula/working + Pid Directory = /var/run + Maximum Concurrent Jobs = 20 + + PKI Signatures = Yes # Enable Data Signing + PKI Encryption = Yes # Enable Data Encryption + PKI Keypair = "/etc/bacula/fd-example.pem" # Public and Private Keys + PKI Master Key = "/etc/bacula/master.cert" # ONLY the Public Key +} +\end{verbatim} +\normalsize diff --git a/docs/manuals/en/concepts/disk.tex b/docs/manuals/en/concepts/disk.tex new file mode 100644 index 00000000..3f38be9f --- /dev/null +++ b/docs/manuals/en/concepts/disk.tex @@ -0,0 +1,789 @@ +%% +%% + +\chapter{Basic Volume Management} +\label{DiskChapter} +\index[general]{Basic Volume Management} +\index[general]{Management!Basic Volume} +\index[general]{Disk Volumes} + +This chapter presents most all the features needed to do Volume management. +Most of the concepts apply equally well to both tape and disk Volumes. +However, the chapter was originally written to explain backing up to disk, so +you will see it is slanted in that direction, but all the directives +presented here apply equally well whether your volume is disk or tape. + +If you have a lot of hard disk storage or you absolutely must have your +backups run within a small time window, you may want to direct Bacula to +backup to disk Volumes rather than tape Volumes. This chapter is intended to +give you some of the options that are available to you so that you can manage +either disk or tape volumes. 
+ +\label{Concepts} +\section{Key Concepts and Resource Records} +\index[general]{Key Concepts and Resource Records } +\index[general]{Records!Key Concepts and Resource } + +Getting Bacula to write to disk rather than tape in the simplest case is +rather easy. In the Storage daemon's configuration file, you simply define an +{\bf Archive Device} to be a directory. For example, if you want your disk +backups to go into the directory {\bf /home/bacula/backups}, you could use the +following: + +\footnotesize +\begin{verbatim} +Device { + Name = FileBackup + Media Type = File + Archive Device = /home/bacula/backups + Random Access = Yes; + AutomaticMount = yes; + RemovableMedia = no; + AlwaysOpen = no; +} +\end{verbatim} +\normalsize + +Assuming you have the appropriate {\bf Storage} resource in your Director's +configuration file that references the above Device resource, + +\footnotesize +\begin{verbatim} +Storage { + Name = FileStorage + Address = ... + Password = ... + Device = FileBackup + Media Type = File +} +\end{verbatim} +\normalsize + +Bacula will then write the archive to the file {\bf +/home/bacula/backups/\lt{}volume-name\gt{}} where \lt{}volume-name\gt{} is the +volume name of a Volume defined in the Pool. For example, if you have labeled +a Volume named {\bf Vol001}, Bacula will write to the file {\bf +/home/bacula/backups/Vol001}. Although you can later move the archive file to +another directory, you should not rename it or it will become unreadable by +Bacula. This is because each archive has the filename as part of the internal +label, and the internal label must agree with the system filename before +Bacula will use it. + +Although this is quite simple, there are a number of problems. The first is +that unless you specify otherwise, Bacula will always write to the same volume +until you run out of disk space. This problem is addressed below. 
+ +In addition, if you want to use concurrent jobs that write to several +different volumes at the same time, you will need to understand a number +of other details. An example of such a configuration is given +at the end of this chapter under \ilink{Concurrent Disk +Jobs}{ConcurrentDiskJobs}. + +\subsection{Pool Options to Limit the Volume Usage} +\index[general]{Usage!Pool Options to Limit the Volume } +\index[general]{Pool Options to Limit the Volume Usage } + +Some of the options you have, all of which are specified in the Pool record, +are: + +\begin{itemize} +\item To write each Volume only once (i.e. one Job per Volume or file in this + case), use: + +{\bf UseVolumeOnce = yes}. + +\item To write nnn Jobs to each Volume, use: + + {\bf Maximum Volume Jobs = nnn}. + +\item To limit the maximum size of each Volume, use: + + {\bf Maximum Volume Bytes = mmmm}. + + Note, if you use disk volumes, with all versions up to and including + 1.39.28, you should probably limit the Volume size to some reasonable + value such as say 5GB. This is because during a restore, Bacula is + currently unable to seek to the proper place in a disk volume to restore + a file, which means that it must read all records up to where the + restore begins. If your Volumes are 50GB, reading half or more of the + volume could take quite a bit of time. Also, if you ever have a partial + hard disk failure, you are more likely to be able to recover more data + if they are in smaller Volumes. + +\item To limit the use time (i.e. write the Volume for a maximum of five days), + use: + +{\bf Volume Use Duration = ttt}. +\end{itemize} + +Note that although you probably would not want to limit the number of bytes on +a tape as you would on a disk Volume, the other options can be very useful in +limiting the time Bacula will use a particular Volume (be it tape or disk). +For example, the above directives can allow you to ensure that you rotate +through a set of daily Volumes if you wish. 
+ +As mentioned above, each of those directives is specified in the Pool or +Pools that you use for your Volumes. In the case of {\bf Maximum Volume Job}, +{\bf Maximum Volume Bytes}, and {\bf Volume Use Duration}, you can actually +specify the desired value on a Volume by Volume basis. The value specified in +the Pool record becomes the default when labeling new Volumes. Once a Volume +has been created, it gets its own copy of the Pool defaults, and subsequently +changing the Pool will have no effect on existing Volumes. You can either +manually change the Volume values, or refresh them from the Pool defaults using +the {\bf update volume} command in the Console. As an example +of the use of one of the above, suppose your Pool resource contains: + +\footnotesize +\begin{verbatim} +Pool { + Name = File + Pool Type = Backup + Volume Use Duration = 23h +} +\end{verbatim} +\normalsize + +then if you run a backup once a day (every 24 hours), Bacula will use a new +Volume for each backup, because each Volume it writes can only be used for 23 hours +after the first write. Note, setting the use duration to 23 hours is not a very +good solution for tapes unless you have someone on-site during the weekends, +because Bacula will want a new Volume and no one will be present to mount it, +so no weekend backups will be done until Monday morning. + +\label{AutomaticLabeling} +\subsection{Automatic Volume Labeling} +\index[general]{Automatic Volume Labeling } +\index[general]{Labeling!Automatic Volume } + +Use of the above records brings up another problem -- that of labeling your +Volumes. For automated disk backup, you can either manually label each of your +Volumes, or you can have Bacula automatically label new Volumes when they are +needed. 
While, the automatic Volume labeling in version 1.30 and prior is a +bit simplistic, but it does allow for automation, the features added in +version 1.31 permit automatic creation of a wide variety of labels including +information from environment variables and special Bacula Counter variables. +In version 1.37 and later, it is probably much better to use Python scripting +and the NewVolume event since generating Volume labels in a Python script is +much easier than trying to figure out Counter variables. See the +\ilink{Python Scripting}{PythonChapter} chapter of this manual for more +details. + +Please note that automatic Volume labeling can also be used with tapes, but +it is not nearly so practical since the tapes must be pre-mounted. This +requires some user interaction. Automatic labeling from templates does NOT +work with autochangers since Bacula will not access unknown slots. There +are several methods of labeling all volumes in an autochanger magazine. +For more information on this, please see the \ilink{ +Autochanger}{AutochangersChapter} chapter of this manual. + +Automatic Volume labeling is enabled by making a change to both the Pool +resource (Director) and to the Device resource (Storage daemon) shown above. +In the case of the Pool resource, you must provide Bacula with a label format +that it will use to create new names. In the simplest form, the label format +is simply the Volume name, to which Bacula will append a four digit number. +This number starts at 0001 and is incremented for each Volume the catalog +contains. Thus if you modify your Pool resource to be: + +\footnotesize +\begin{verbatim} +Pool { + Name = File + Pool Type = Backup + Volume Use Duration = 23h + LabelFormat = "Vol" +} +\end{verbatim} +\normalsize + +Bacula will create Volume names Vol0001, Vol0002, and so on when new Volumes +are needed. 
Much more complex and elaborate labels can be created using +variable expansion defined in the +\ilink{Variable Expansion}{VarsChapter} chapter of this manual. + +The second change that is necessary to make automatic labeling work is to give +the Storage daemon permission to automatically label Volumes. Do so by adding +{\bf LabelMedia = yes} to the Device resource as follows: + +\footnotesize +\begin{verbatim} +Device { + Name = File + Media Type = File + Archive Device = /home/bacula/backups + Random Access = Yes; + AutomaticMount = yes; + RemovableMedia = no; + AlwaysOpen = no; + LabelMedia = yes +} +\end{verbatim} +\normalsize + +You can find more details of the {\bf Label Format} Pool record in +\ilink{Label Format}{Label} description of the Pool resource +records. + +\label{Recycling1} +\subsection{Restricting the Number of Volumes and Recycling} +\index[general]{Recycling!Restricting the Number of Volumes and Recycling} +\index[general]{Restricting the Number of Volumes and Recycling} + +Automatic labeling discussed above brings up the problem of Volume management. +With the above scheme, a new Volume will be created every day. If you have not +specified Retention periods, your Catalog will continue to fill keeping track +of all the files Bacula has backed up, and this procedure will create one new +archive file (Volume) every day. + +The tools Bacula gives you to help automatically manage these problems are the +following: + +\begin{enumerate} +\item Catalog file record retention periods, the + \ilink{File Retention = ttt}{FileRetention} record in the Client + resource. +\item Catalog job record retention periods, the + \ilink{Job Retention = ttt}{JobRetention} record in the Client + resource. +\item The + \ilink{ AutoPrune = yes}{AutoPrune} record in the Client resource + to permit application of the above two retention periods. +\item The + \ilink{ Volume Retention = ttt}{VolRetention} record in the Pool + resource. 
+\item The + \ilink{ AutoPrune = yes}{PoolAutoPrune} record in the Pool + resource to permit application of the Volume retention period. +\item The + \ilink{ Recycle = yes}{PoolRecycle} record in the Pool resource + to permit automatic recycling of Volumes whose Volume retention period has + expired. +\item The + \ilink{ Recycle Oldest Volume = yes}{RecycleOldest} record in the + Pool resource tells Bacula to Prune the oldest volume in the Pool, and if all + files were pruned to recycle this volume and use it. +\item The + \ilink{ Recycle Current Volume = yes}{RecycleCurrent} record in + the Pool resource tells Bacula to Prune the currently mounted volume in the + Pool, and if all files were pruned to recycle this volume and use it. +\item The + \ilink{ Purge Oldest Volume = yes}{PurgeOldest} record in the + Pool resource permits a forced recycling of the oldest Volume when a new one + is needed. {\bf N.B. This record ignores retention periods! We highly + recommend not to use this record, but instead use Recycle Oldest Volume} +\item The + \ilink{ Maximum Volumes = nnn}{MaxVolumes} record in the Pool + resource to limit the number of Volumes that can be created. +\end{enumerate} + +The first three records (File Retention, Job Retention, and AutoPrune) +determine the amount of time that Job and File records will remain in your +Catalog, and they are discussed in detail in the +\ilink{Automatic Volume Recycling}{RecyclingChapter} chapter of +this manual. + +Volume Retention, AutoPrune, and Recycle determine how long Bacula will keep +your Volumes before reusing them, and they are also discussed in detail in the +\ilink{Automatic Volume Recycling}{RecyclingChapter} chapter of +this manual. + +The Maximum Volumes record can also be used in conjunction with the Volume +Retention period to limit the total number of archive Volumes (files) that +Bacula will create. 
By setting an appropriate Volume Retention period, a +Volume will be purged just before it is needed and thus Bacula can cycle +through a fixed set of Volumes. Cycling through a fixed set of Volumes can +also be done by setting {\bf Recycle Oldest Volume = yes} or {\bf Recycle +Current Volume = yes}. In this case, when Bacula needs a new Volume, it will +prune the specified volume. + +\label{ConcurrentDiskJobs} +\section{Concurrent Disk Jobs} +\index[general]{Concurrent Disk Jobs} +Above, we discussed how you could have a single device named {\bf +FileBackup} that writes to volumes in {\bf /home/bacula/backups}. +You can, in fact, run multiple concurrent jobs using the +Storage definition given with this example, and all the jobs will +simultaneously write into the Volume that is being written. + +Now suppose you want to use multiple Pools, which means multiple +Volumes, or suppose you want each client to have its own Volume +and perhaps its own directory such as {\bf /home/bacula/client1} +and {\bf /home/bacula/client2} ... With the single Storage and Device +definition above, neither of these two is possible. Why? Because +Bacula disk storage follows the same rules as tape devices. Only +one Volume can be mounted on any Device at any time. If you want +to simultaneously write multiple Volumes, you will need multiple +Device resources in your bacula-sd.conf file, and thus multiple +Storage resources in your bacula-dir.conf. + +OK, so now you should understand that you need multiple Device definitions +in the case of different directories or different Pools, but you also +need to know that the catalog data that Bacula keeps contains only +the Media Type and not the specific storage device. This permits a tape +for example to be re-read on any compatible tape drive. The compatibility +being determined by the Media Type. The same applies to disk storage. 
+Since a volume that is written by a Device in say directory {\bf +/home/bacula/backups} cannot be read by a Device with an Archive Device +definition of {\bf /home/bacula/client1}, you will not be able to +restore all your files if you give both those devices +{\bf Media Type = File}. During the restore, Bacula will simply choose +the first available device, which may not be the correct one. If this +is confusing, just remember that the Directory has only the Media Type +and the Volume name. It does not know the {\bf Archive Device} (or the +full path) that is specified in the Storage daemon. Thus you must +explicitly tie your Volumes to the correct Device by using the Media Type. + +The example shown below shows a case where there are two clients, each +using its own Pool and storing their Volumes in different directories. + + +\label{Example2} +\section{An Example} +\index[general]{Example } + +The following example is not very practical, but can be used to demonstrate +the proof of concept in a relatively short period of time. The example +consists of a two clients that are backed up to a set of 12 archive files +(Volumes) for each client into different directories on the Storage +machine. Each Volume is used (written) only once, and there are four Full +saves done every hour (so the whole thing cycles around after three hours). + +What is key here is that each physical device on the Storage daemon +has a different Media Type. This allows the Director to choose the +correct device for restores ... 
+ +The Director's configuration file is as follows: + +\footnotesize +\begin{verbatim} +Director { + Name = my-dir + QueryFile = "~/bacula/bin/query.sql" + PidDirectory = "~/bacula/working" + WorkingDirectory = "~/bacula/working" + Password = dir_password +} +Schedule { + Name = "FourPerHour" + Run = Level=Full hourly at 0:05 + Run = Level=Full hourly at 0:20 + Run = Level=Full hourly at 0:35 + Run = Level=Full hourly at 0:50 +} +Job { + Name = "RecycleExample" + Type = Backup + Level = Full + Client = Rufus + FileSet= "Example FileSet" + Messages = Standard + Storage = FileStorage + Pool = Recycle + Schedule = FourPerHour +} + +Job { + Name = "RecycleExample2" + Type = Backup + Level = Full + Client = Roxie + FileSet= "Example FileSet" + Messages = Standard + Storage = FileStorage1 + Pool = Recycle1 + Schedule = FourPerHour +} + +FileSet { + Name = "Example FileSet" + Include = compression=GZIP signature=SHA1 { + /home/kern/bacula/bin + } +} +Client { + Name = Rufus + Address = rufus + Catalog = BackupDB + Password = client_password +} + +Client { + Name = Roxie + Address = roxie + Catalog = BackupDB + Password = client1_password +} + +Storage { + Name = FileStorage + Address = rufus + Password = local_storage_password + Device = RecycleDir + Media Type = File +} + +Storage { + Name = FileStorage1 + Address = rufus + Password = local_storage_password + Device = RecycleDir1 + Media Type = File1 +} + +Catalog { + Name = BackupDB + dbname = bacula; user = bacula; password = "" +} +Messages { + Name = Standard + ... 
+} +Pool { + Name = Recycle + Use Volume Once = yes + Pool Type = Backup + LabelFormat = "Recycle-" + AutoPrune = yes + VolumeRetention = 2h + Maximum Volumes = 12 + Recycle = yes +} + +Pool { + Name = Recycle1 + Use Volume Once = yes + Pool Type = Backup + LabelFormat = "Recycle1-" + AutoPrune = yes + VolumeRetention = 2h + Maximum Volumes = 12 + Recycle = yes +} + +\end{verbatim} +\normalsize + +and the Storage daemon's configuration file is: + +\footnotesize +\begin{verbatim} +Storage { + Name = my-sd + WorkingDirectory = "~/bacula/working" + Pid Directory = "~/bacula/working" + MaximumConcurrentJobs = 10 +} +Director { + Name = my-dir + Password = local_storage_password +} +Device { + Name = RecycleDir + Media Type = File + Archive Device = /home/bacula/backups + LabelMedia = yes; + Random Access = Yes; + AutomaticMount = yes; + RemovableMedia = no; + AlwaysOpen = no; +} + +Device { + Name = RecycleDir1 + Media Type = File1 + Archive Device = /home/bacula/backups1 + LabelMedia = yes; + Random Access = Yes; + AutomaticMount = yes; + RemovableMedia = no; + AlwaysOpen = no; +} + +Messages { + Name = Standard + director = my-dir = all +} +\end{verbatim} +\normalsize + +With a little bit of work, you can change the above example into a weekly or +monthly cycle (take care about the amount of archive disk space used). + +\label{MultipleDisks} +\section{Backing up to Multiple Disks} +\index[general]{Disks!Backing up to Multiple } +\index[general]{Backing up to Multiple Disks } + +Bacula can, of course, use multiple disks, but in general, each disk must be a +separate Device specification in the Storage daemon's conf file, and you must +then select what clients to backup to each disk. You will also want to +give each Device specification a different Media Type so that during +a restore, Bacula will be able to find the appropriate drive. 
+ +The situation is a bit more complicated if you want to treat two different +physical disk drives (or partitions) logically as a single drive, which +Bacula does not directly support. However, it is possible to back up your +data to multiple disks as if they were a single drive by linking the +Volumes from the first disk to the second disk. + +For example, assume that you have two disks named {\bf /disk1} and {\bf +/disk2}. If you then create a standard Storage daemon Device resource for +backing up to the first disk, it will look like the following: + +\footnotesize +\begin{verbatim} +Device { + Name = client1 + Media Type = File + Archive Device = /disk1 + LabelMedia = yes; + Random Access = Yes; + AutomaticMount = yes; + RemovableMedia = no; + AlwaysOpen = no; +} +\end{verbatim} +\normalsize + +Since there is no way to get the above Device resource to reference both {\bf +/disk1} and {\bf /disk2} we do it by pre-creating Volumes on /disk2 with the +following: + +\footnotesize +\begin{verbatim} +ln -s /disk2/Disk2-vol001 /disk1/Disk2-vol001 +ln -s /disk2/Disk2-vol002 /disk1/Disk2-vol002 +ln -s /disk2/Disk2-vol003 /disk1/Disk2-vol003 +... +\end{verbatim} +\normalsize + +At this point, you can label the Volumes as Volume {\bf Disk2-vol001}, {\bf +Disk2-vol002}, ... and Bacula will use them as if they were on /disk1 but +actually write the data to /disk2. The only minor inconvenience with this +method is that you must explicitly name the disks and cannot use automatic +labeling unless you arrange to have the labels exactly match the links you +have created. + +An important thing to know is that Bacula treats disks like tape drives +as much as it can. This means that you can only have a single Volume +mounted at one time on a disk as defined in your Device resource in +the Storage daemon's conf file. 
You can have multiple concurrent
+jobs running that all write to the one Volume that is being used, but
+if you want to have multiple concurrent jobs that are writing to
+separate disk drives (or partitions), you will need to define
+separate Device resources for each one, exactly as you would do for
+two different tape drives. There is one fundamental difference, however.
+The Volumes that you create on the two drives cannot be easily exchanged
+as they can for a tape drive, because they are physically resident (already
+mounted in a sense) on the particular drive. As a consequence, you will
+probably want to give them different Media Types so that Bacula can
+distinguish what Device resource to use during a restore.
+An example would be the following:
+
+\footnotesize
+\begin{verbatim}
+Device {
+  Name = Disk1
+  Media Type = File1
+  Archive Device = /disk1
+  LabelMedia = yes;
+  Random Access = Yes;
+  AutomaticMount = yes;
+  RemovableMedia = no;
+  AlwaysOpen = no;
+}
+
+Device {
+  Name = Disk2
+  Media Type = File2
+  Archive Device = /disk2
+  LabelMedia = yes;
+  Random Access = Yes;
+  AutomaticMount = yes;
+  RemovableMedia = no;
+  AlwaysOpen = no;
+}
+\end{verbatim}
+\normalsize
+
+With the above device definitions, you can run two concurrent
+jobs each writing at the same time, one to {\bf /disk1} and the
+other to {\bf /disk2}.  The fact that you have given them different
+Media Types will allow Bacula to quickly choose the correct
+Storage resource in the Director when doing a restore.
+
+\label{MultipleClients}
+\section{Considerations for Multiple Clients}
+\index[general]{Clients!Considerations for Multiple }
+\index[general]{Multiple Clients}
+
+If we take the above example and add a second Client, here are a few
+considerations:
+
+\begin{itemize}
+\item Although the second client can write to the same set of Volumes, you
+  will probably want to write to a different set. 
+\item You can write to a different set of Volumes by defining a second Pool, + which has a different name and a different {\bf LabelFormat}. +\item If you wish the Volumes for the second client to go into a different + directory (perhaps even on a different filesystem to spread the load), you + would do so by defining a second Device resource in the Storage daemon. The +{\bf Name} must be different, and the {\bf Archive Device} could be +different. To ensure that Volumes are never mixed from one pool to another, +you might also define a different MediaType (e.g. {\bf File1}). +\end{itemize} + +In this example, we have two clients, each with a different Pool and a +different number of archive files retained. They also write to different +directories with different Volume labeling. + +The Director's configuration file is as follows: + +\footnotesize +\begin{verbatim} +Director { + Name = my-dir + QueryFile = "~/bacula/bin/query.sql" + PidDirectory = "~/bacula/working" + WorkingDirectory = "~/bacula/working" + Password = dir_password +} +# Basic weekly schedule +Schedule { + Name = "WeeklySchedule" + Run = Level=Full fri at 1:30 + Run = Level=Incremental sat-thu at 1:30 +} +FileSet { + Name = "Example FileSet" + Include = compression=GZIP signature=SHA1 { + /home/kern/bacula/bin + } +} +Job { + Name = "Backup-client1" + Type = Backup + Level = Full + Client = client1 + FileSet= "Example FileSet" + Messages = Standard + Storage = File1 + Pool = client1 + Schedule = "WeeklySchedule" +} +Job { + Name = "Backup-client2" + Type = Backup + Level = Full + Client = client2 + FileSet= "Example FileSet" + Messages = Standard + Storage = File2 + Pool = client2 + Schedule = "WeeklySchedule" +} +Client { + Name = client1 + Address = client1 + Catalog = BackupDB + Password = client1_password + File Retention = 7d +} +Client { + Name = client2 + Address = client2 + Catalog = BackupDB + Password = client2_password +} +# Two Storage definitions with different Media Types +# permits 
different directories +Storage { + Name = File1 + Address = rufus + Password = local_storage_password + Device = client1 + Media Type = File1 +} +Storage { + Name = File2 + Address = rufus + Password = local_storage_password + Device = client2 + Media Type = File2 +} +Catalog { + Name = BackupDB + dbname = bacula; user = bacula; password = "" +} +Messages { + Name = Standard + ... +} +# Two pools permits different cycling periods and Volume names +# Cycle through 15 Volumes (two weeks) +Pool { + Name = client1 + Use Volume Once = yes + Pool Type = Backup + LabelFormat = "Client1-" + AutoPrune = yes + VolumeRetention = 13d + Maximum Volumes = 15 + Recycle = yes +} +# Cycle through 8 Volumes (1 week) +Pool { + Name = client2 + Use Volume Once = yes + Pool Type = Backup + LabelFormat = "Client2-" + AutoPrune = yes + VolumeRetention = 6d + Maximum Volumes = 8 + Recycle = yes +} +\end{verbatim} +\normalsize + +and the Storage daemon's configuration file is: + +\footnotesize +\begin{verbatim} +Storage { + Name = my-sd + WorkingDirectory = "~/bacula/working" + Pid Directory = "~/bacula/working" + MaximumConcurrentJobs = 10 +} +Director { + Name = my-dir + Password = local_storage_password +} +# Archive directory for Client1 +Device { + Name = client1 + Media Type = File1 + Archive Device = /home/bacula/client1 + LabelMedia = yes; + Random Access = Yes; + AutomaticMount = yes; + RemovableMedia = no; + AlwaysOpen = no; +} +# Archive directory for Client2 +Device { + Name = client2 + Media Type = File2 + Archive Device = /home/bacula/client2 + LabelMedia = yes; + Random Access = Yes; + AutomaticMount = yes; + RemovableMedia = no; + AlwaysOpen = no; +} +Messages { + Name = Standard + director = my-dir = all +} +\end{verbatim} +\normalsize diff --git a/docs/manuals/en/concepts/do_echo b/docs/manuals/en/concepts/do_echo new file mode 100644 index 00000000..04b9f79a --- /dev/null +++ b/docs/manuals/en/concepts/do_echo @@ -0,0 +1,6 @@ +# +# Avoid that @VERSION@ and @DATE@ are 
changed by configure
+# This file is sourced by update_version
+#
+echo "s%@VERSION@%${VERSION}%g" >${out}
+echo "s%@DATE@%${DATE}%g" >>${out}
diff --git a/docs/manuals/en/concepts/dvd.tex b/docs/manuals/en/concepts/dvd.tex
new file mode 100644
index 00000000..f11e70d6
--- /dev/null
+++ b/docs/manuals/en/concepts/dvd.tex
@@ -0,0 +1,329 @@
+%%
+%%
+
+\chapter{DVD Volumes}
+\label{_DVDChapterStart}
+\index[general]{DVD Volumes}
+\index[general]{Writing DVDs}
+\index[general]{DVD Writing}
+\index[general]{Volumes!DVD}
+
+Bacula allows you to specify that you want to write to DVD. However,
+this feature is implemented only in version 1.37 or later.
+You may in fact write to DVD+RW, DVD+R, DVD-R, or DVD-RW
+media. The actual process used by Bacula is to first write
+the image to a spool directory, then when the Volume reaches
+a certain size or, at your option, at the end of a Job, Bacula
+will transfer the image from the spool directory to the
+DVD. The actual work of transferring the image is done
+by a script {\bf dvd-handler}, and the heart of that
+script is a program called {\bf growisofs} which allows
+creating or adding to a DVD ISO filesystem.
+
+You must have {\bf dvd+rw-tools} loaded on your system for DVD writing to
+work. Please note that the original {\bf dvd+rw-tools} package does {\bf
+NOT} work with Bacula. You must apply a patch which can be found in the
+{\bf patches} directory of Bacula sources with the name
+{\bf dvd+rw-tools-5.21.4.10.8.bacula.patch} for version 5.21 of the tools,
+or patch {\bf dvd+rw-tools-6.1.bacula.patch} if you have version 6.1
+on your system. Unfortunately, this requires you to build the dvd+rw-tools
+from source.
+
+Note, some Linux distros such as Debian dvd+rw-tools-7.0-4 package already
+have the patch applied, so please check. 
+ +The fact that Bacula cannot use the OS to write directly +to the DVD makes the whole process a bit more error prone than +writing to a disk or a tape, but nevertheless, it does work if you +use some care to set it up properly. However, at the current time +(version 1.39.30 -- 12 December 2006) we still consider this code to be +BETA quality. As a consequence, please do careful testing before relying +on DVD backups in production. + +The remainder of this chapter explains the various directives that you can +use to control the DVD writing. + +\label{DVDdirectives} +\section{DVD Specific SD Directives} +\index[general]{Directives!DVD} +\index[general]{DVD Specific SD Directives } + +The following directives are added to the Storage daemon's +Device resource. + +\begin{description} + +\item [Requires Mount = {\it Yes|No}] + \index[sd]{Requires Mount } + You must set this directive to {\bf yes} for DVD-writers, and to {\bf no} for + all other devices (tapes/files). This directive indicates if the device + requires to be mounted using the {\bf Mount Command}. + To be able to write a DVD, the following directives must also be + defined: {\bf Mount Point}, {\bf Mount Command}, {\bf Unmount Command} and + {\bf Write Part Command}. + +\item [Mount Point = {\it directory}] + \index[sd]{Mount Point} + Directory where the device can be mounted. + +\item [Mount Command = {\it name-string}] + \index[sd]{Mount Command} + Command that must be executed to mount the device. Although the + device is written directly, the mount command is necessary in + order to determine the free space left on the DVD. Before the command is + executed, \%a is replaced with the Archive Device, and \%m with the Mount + Point. 
+ + Most frequently, you will define it as follows: + +\footnotesize +\begin{verbatim} + Mount Command = "/bin/mount -t iso9660 -o ro %a %m" +\end{verbatim} +\normalsize + +However, if you have defined a mount point in /etc/fstab, you might be +able to use a mount command such as: + +\footnotesize +\begin{verbatim} + Mount Command = "/bin/mount /media/dvd" +\end{verbatim} +\normalsize + + +\item [Unmount Command = {\it name-string}] + \index[sd]{Unmount Command} + Command that must be executed to unmount the device. Before the command is + executed, \%a is replaced with the Archive Device, and \%m with the Mount + Point. + + Most frequently, you will define it as follows: + +\footnotesize +\begin{verbatim} + Unmount Command = "/bin/umount %m" +\end{verbatim} +\normalsize + +\item [Write Part Command = {\it name-string}] + \index[sd]{Write Part Command } + Command that must be executed to write a part to the device. Before the + command is executed, \%a is replaced with the Archive Device, \%m with the + Mount Point, \%e is replaced with 1 if we are writing the first part, + and with 0 otherwise, and \%v with the current part filename. + + For a DVD, you will most frequently specify the Bacula supplied {\bf + dvd-handler} script as follows: + +\footnotesize +\begin{verbatim} + Write Part Command = "/path/dvd-handler %a write %e %v" +\end{verbatim} +\normalsize + + Where {\bf /path} is the path to your scripts install directory, and + dvd-handler is the Bacula supplied script file. + This command will already be present, but commented out, + in the default bacula-sd.conf file. To use it, simply remove + the comment (\#) symbol. + + +\item [Free Space Command = {\it name-string}] + \index[sd]{Free Space Command } + Command that must be executed to check how much free space is left on the + device. Before the command is executed,\%a is replaced with the Archive + Device. 
+
+  For a DVD, you will most frequently specify the Bacula supplied {\bf
+  dvd-handler} script as follows:
+
+\footnotesize
+\begin{verbatim}
+  Free Space Command = "/path/dvd-handler %a free"
+\end{verbatim}
+\normalsize
+
+  Where {\bf /path} is the path to your scripts install directory, and
+  dvd-handler is the Bacula supplied script file.
+  If you want to specify your own command, please look at the code in
+  dvd-handler to see what output Bacula expects from this command.
+  This command will already be present, but commented out,
+  in the default bacula-sd.conf file. To use it, simply remove
+  the comment (\#) symbol.
+
+  If you do not set it, Bacula will expect there is always free space on the
+  device.
+
+\end{description}
+
+In addition to the directives specified above, you must also
+specify the other standard Device resource directives. Please see the
+sample DVD Device resource in the default bacula-sd.conf file. Be sure
+to specify the raw device name for {\bf Archive Device}. It should
+be a name such as {\bf /dev/cdrom} or {\bf /media/cdrecorder} or
+{\bf /dev/dvd} depending on your system. It will not be a name such
+as {\bf /mnt/cdrom}.
+
+Finally, for {\bf growisofs} to work, it must be able to lock
+a certain amount of memory in RAM. If you have restrictions on
+this function, you may have failures. 
Under {\bf bash}, you can
+set this with the following command:
+
+\footnotesize
+\begin{verbatim}
+ulimit -l unlimited
+\end{verbatim}
+\normalsize
+
+\section{Edit Codes for DVD Directives}
+\index[general]{Directives!DVD Edit Codes}
+\index[general]{Edit Codes for DVD Directives }
+
+Before submitting the {\bf Mount Command}, {\bf Unmount Command},
+{\bf Write Part Command}, or {\bf Free Space Command} directives
+to the operating system, Bacula performs character substitution of the
+following characters:
+
+\footnotesize
+\begin{verbatim}
+    %% = %
+    %a = Archive device name
+    %e = erase (set if cannot mount and first part)
+    %n = part number
+    %m = mount point
+    %v = last part name (i.e. filename)
+\end{verbatim}
+\normalsize
+
+
+
+\section{DVD Specific Director Directives}
+\index[general]{Directives!DVD}
+\index[general]{DVD Specific Director Directives }
+
+The following directives are added to the Director's Job resource.
+
+\label{WritePartAfterJob}
+\begin{description}
+\item [Write Part After Job = \lt{}yes|no\gt{}]
+  \index[dir]{Write Part After Job }
+  If this directive is set to {\bf yes} (default {\bf no}), the
+  Volume written to a temporary spool file for the current Job will
+  be written to the DVD as a new part file
+  after the job is finished.
+
+  It should be set to {\bf yes} when writing to devices that require a mount
+  (for example DVD), so you are sure that the current part, containing
+  this job's data, is written to the device, and that no data is left in
+  the temporary file on the hard disk. However, on some media, like DVD+R
+  and DVD-R, a lot of space (about 10Mb) is lost every time a part is
+  written. So, if you run several jobs one after another, you could set
+  this directive to {\bf no} for all jobs, except the last one, to avoid
+  wasting too much space, but to ensure that the data is written to the
+  medium when all jobs are finished.
+
+  This directive is ignored for devices other than DVDs. 
+\end{description}
+
+
+
+\label{DVDpoints}
+\section{Other Points}
+\index[general]{Points!Other }
+\index[general]{Other Points }
+
+\begin{itemize}
+\item Please be sure that you have any automatic DVD mounting
+  disabled before running Bacula -- this includes auto mounting
+  in /etc/fstab, hotplug, ...  If the DVD is automatically
+  mounted by the OS, it will cause problems when Bacula tries
+  to mount/unmount the DVD.
+\item Please be sure that you have the directive {\bf Write Part After Job}
+  set to {\bf yes}, otherwise the last part of the data to be
+  written will be left in the DVD spool file and not written to
+  the DVD. The DVD will then be unreadable until this last part
+  is written. If you have a series of jobs that are run one at
+  a time, you can turn this off until the last job is run.
+\item The current code is not designed to have multiple simultaneous
+  jobs writing to the DVD. As a consequence, please ensure that
+  only one DVD backup job runs at any time.
+\item Writing and reading of DVD+RW seems to work quite reliably
+  provided you are using the patched dvd+rw-mediainfo programs.
+  On the other hand, we do not have enough information to ensure
+  that DVD-RW or other forms of DVDs work correctly.
+\item DVD+RW supports only about 1000 overwrites. Every time you
+  mount the filesystem read/write, it will count as one write. This can
+  add up quickly, so it is best to mount your DVD+RW filesystem read-only.
+  Bacula does not need the DVD to be mounted read-write, since it uses
+  the raw device for writing.
+\item Reformatting DVD+RW 10-20 times can apparently make the medium
+  unusable. Normally you should not have to format or reformat
+  DVD+RW media. If it is necessary, current versions of growisofs will
+  do so automatically.
+\item We have had several problems writing to DVD-RWs (this does NOT
+  concern DVD+RW), because these media have two writing-modes: {\bf
+  Incremental Sequential} and {\bf Restricted Overwrite}. 
Depending on
+  your device and the media you use, one of these modes may not work
+  correctly (e.g. {\bf Incremental Sequential} does not work with my NEC
+  DVD-writer and Verbatim DVD-RW).
+
+  To retrieve the current mode of a DVD-RW, run:
+\begin{verbatim}
+  dvd+rw-mediainfo /dev/xxx
+\end{verbatim}
+  where you replace xxx with your DVD device name.
+
+  {\bf Mounted Media} line should give you the information.
+
+  To set the device to {\bf Restricted Overwrite} mode, run:
+\begin{verbatim}
+  dvd+rw-format /dev/xxx
+\end{verbatim}
+  If you want to set it back to the default {\bf Incremental Sequential} mode, run:
+\begin{verbatim}
+  dvd+rw-format -blank /dev/xxx
+\end{verbatim}
+
+\item Bacula will only write to blank DVDs. To quickly blank a DVD+/-RW, run
+  this command:
+\begin{verbatim}
+  dd if=/dev/zero bs=1024 count=512 | growisofs -Z /dev/xxx=/dev/fd/0
+\end{verbatim}
+  Then, try to mount the device, if it cannot be mounted, it will be considered
+  as blank by Bacula, if it can be mounted, try a full blank (see below).
+
+\item If you wish to completely blank a DVD+/-RW, use the following:
+\begin{verbatim}
+  growisofs -Z /dev/xxx=/dev/zero
+\end{verbatim}
+  where you replace xxx with your DVD device name. However, note that this
+  blanks the whole DVD, which takes quite a long time (16 minutes on mine).
+\item DVD+RW and DVD-RW support only about 1000 overwrites (i.e. don't use the
+same medium for years if you don't want to have problems...).
+
+To write to the DVD the first time use:
+\begin{verbatim}
+  growisofs -Z /dev/xxx filename
+\end{verbatim}
+
+To add additional files (more parts), use:
+
+\begin{verbatim}
+  growisofs -M /dev/xxx filename
+\end{verbatim}
+
+The option {\bf -use-the-force-luke=4gms} was added in growisofs 5.20 to
+override growisofs' behavior of always checking for the 4GB limit.
+Normally, this option is recommended for all Linux 2.6.8 kernels or
+greater, since these newer kernels can handle writing more than 4GB. 
+See below for more details on this subject. + +\item For more information about DVD writing, please look at the +\elink{dvd+rw-tools homepage}{http://fy.chalmers.se/~appro/linux/DVD+RW/}. + +\item According to bug \#912, bscan cannot read multi-volume DVDs. This is +on our TODO list, but unless someone submits a patch it is not likely to be +done any time in the near future. (9 Sept 2007). + +\end{itemize} diff --git a/docs/manuals/en/concepts/fdl.tex b/docs/manuals/en/concepts/fdl.tex new file mode 100644 index 00000000..b46cd990 --- /dev/null +++ b/docs/manuals/en/concepts/fdl.tex @@ -0,0 +1,485 @@ +% TODO: maybe get rid of centering + +\chapter{GNU Free Documentation License} +\index[general]{GNU Free Documentation License} +\index[general]{License!GNU Free Documentation} + +\label{label_fdl} + + \begin{center} + + Version 1.2, November 2002 + + + Copyright \copyright 2000,2001,2002 Free Software Foundation, Inc. + + \bigskip + + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + \bigskip + + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. +\end{center} + + +\begin{center} +{\bf\large Preamble} +\end{center} + +The purpose of this License is to make a manual, textbook, or other +functional and useful document "free" in the sense of freedom: to +assure everyone the effective freedom to copy and redistribute it, +with or without modifying it, either commercially or noncommercially. +Secondarily, this License preserves for the author and publisher a way +to get credit for their work, while not being considered responsible +for modifications made by others. + +This License is a kind of "copyleft", which means that derivative +works of the document must themselves be free in the same sense. It +complements the GNU General Public License, which is a copyleft +license designed for free software. 
+ +We have designed this License in order to use it for manuals for free +software, because free software needs free documentation: a free +program should come with manuals providing the same freedoms that the +software does. But this License is not limited to software manuals; +it can be used for any textual work, regardless of subject matter or +whether it is published as a printed book. We recommend this License +principally for works whose purpose is instruction or reference. + + +\begin{center} +{\Large\bf 1. APPLICABILITY AND DEFINITIONS} +\end{center} + +This License applies to any manual or other work, in any medium, that +contains a notice placed by the copyright holder saying it can be +distributed under the terms of this License. Such a notice grants a +world-wide, royalty-free license, unlimited in duration, to use that +work under the conditions stated herein. The \textbf{"Document"}, below, +refers to any such manual or work. Any member of the public is a +licensee, and is addressed as \textbf{"you"}. You accept the license if you +copy, modify or distribute the work in a way requiring permission +under copyright law. + +A \textbf{"Modified Version"} of the Document means any work containing the +Document or a portion of it, either copied verbatim, or with +modifications and/or translated into another language. + +A \textbf{"Secondary Section"} is a named appendix or a front-matter section of +the Document that deals exclusively with the relationship of the +publishers or authors of the Document to the Document's overall subject +(or to related matters) and contains nothing that could fall directly +within that overall subject. (Thus, if the Document is in part a +textbook of mathematics, a Secondary Section may not explain any +mathematics.) The relationship could be a matter of historical +connection with the subject or with related matters, or of legal, +commercial, philosophical, ethical or political position regarding +them. 
+ +The \textbf{"Invariant Sections"} are certain Secondary Sections whose titles +are designated, as being those of Invariant Sections, in the notice +that says that the Document is released under this License. If a +section does not fit the above definition of Secondary then it is not +allowed to be designated as Invariant. The Document may contain zero +Invariant Sections. If the Document does not identify any Invariant +Sections then there are none. + +The \textbf{"Cover Texts"} are certain short passages of text that are listed, +as Front-Cover Texts or Back-Cover Texts, in the notice that says that +the Document is released under this License. A Front-Cover Text may +be at most 5 words, and a Back-Cover Text may be at most 25 words. + +A \textbf{"Transparent"} copy of the Document means a machine-readable copy, +represented in a format whose specification is available to the +general public, that is suitable for revising the document +straightforwardly with generic text editors or (for images composed of +pixels) generic paint programs or (for drawings) some widely available +drawing editor, and that is suitable for input to text formatters or +for automatic translation to a variety of formats suitable for input +to text formatters. A copy made in an otherwise Transparent file +format whose markup, or absence of markup, has been arranged to thwart +or discourage subsequent modification by readers is not Transparent. +An image format is not Transparent if used for any substantial amount +of text. A copy that is not "Transparent" is called \textbf{"Opaque"}. + +Examples of suitable formats for Transparent copies include plain +ASCII without markup, Texinfo input format, LaTeX input format, SGML +or XML using a publicly available DTD, and standard-conforming simple +HTML, PostScript or PDF designed for human modification. Examples of +transparent image formats include PNG, XCF and JPG. 
Opaque formats +include proprietary formats that can be read and edited only by +proprietary word processors, SGML or XML for which the DTD and/or +processing tools are not generally available, and the +machine-generated HTML, PostScript or PDF produced by some word +processors for output purposes only. + +The \textbf{"Title Page"} means, for a printed book, the title page itself, +plus such following pages as are needed to hold, legibly, the material +this License requires to appear in the title page. For works in +formats which do not have any title page as such, "Title Page" means +the text near the most prominent appearance of the work's title, +preceding the beginning of the body of the text. + +A section \textbf{"Entitled XYZ"} means a named subunit of the Document whose +title either is precisely XYZ or contains XYZ in parentheses following +text that translates XYZ in another language. (Here XYZ stands for a +specific section name mentioned below, such as \textbf{"Acknowledgements"}, +\textbf{"Dedications"}, \textbf{"Endorsements"}, or \textbf{"History"}.) +To \textbf{"Preserve the Title"} +of such a section when you modify the Document means that it remains a +section "Entitled XYZ" according to this definition. + +The Document may include Warranty Disclaimers next to the notice which +states that this License applies to the Document. These Warranty +Disclaimers are considered to be included by reference in this +License, but only as regards disclaiming warranties: any other +implication that these Warranty Disclaimers may have is void and has +no effect on the meaning of this License. + + +\begin{center} +{\Large\bf 2. 
VERBATIM COPYING} +\end{center} + +You may copy and distribute the Document in any medium, either +commercially or noncommercially, provided that this License, the +copyright notices, and the license notice saying this License applies +to the Document are reproduced in all copies, and that you add no other +conditions whatsoever to those of this License. You may not use +technical measures to obstruct or control the reading or further +copying of the copies you make or distribute. However, you may accept +compensation in exchange for copies. If you distribute a large enough +number of copies you must also follow the conditions in section 3. + +You may also lend copies, under the same conditions stated above, and +you may publicly display copies. + + +\begin{center} +{\Large\bf 3. COPYING IN QUANTITY} +\end{center} + + +If you publish printed copies (or copies in media that commonly have +printed covers) of the Document, numbering more than 100, and the +Document's license notice requires Cover Texts, you must enclose the +copies in covers that carry, clearly and legibly, all these Cover +Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on +the back cover. Both covers must also clearly and legibly identify +you as the publisher of these copies. The front cover must present +the full title with all words of the title equally prominent and +visible. You may add other material on the covers in addition. +Copying with changes limited to the covers, as long as they preserve +the title of the Document and satisfy these conditions, can be treated +as verbatim copying in other respects. + +If the required texts for either cover are too voluminous to fit +legibly, you should put the first ones listed (as many as fit +reasonably) on the actual cover, and continue the rest onto adjacent +pages. 
+ +If you publish or distribute Opaque copies of the Document numbering +more than 100, you must either include a machine-readable Transparent +copy along with each Opaque copy, or state in or with each Opaque copy +a computer-network location from which the general network-using +public has access to download using public-standard network protocols +a complete Transparent copy of the Document, free of added material. +If you use the latter option, you must take reasonably prudent steps, +when you begin distribution of Opaque copies in quantity, to ensure +that this Transparent copy will remain thus accessible at the stated +location until at least one year after the last time you distribute an +Opaque copy (directly or through your agents or retailers) of that +edition to the public. + +It is requested, but not required, that you contact the authors of the +Document well before redistributing any large number of copies, to give +them a chance to provide you with an updated version of the Document. + + +\begin{center} +{\Large\bf 4. MODIFICATIONS} +\end{center} + +You may copy and distribute a Modified Version of the Document under +the conditions of sections 2 and 3 above, provided that you release +the Modified Version under precisely this License, with the Modified +Version filling the role of the Document, thus licensing distribution +and modification of the Modified Version to whoever possesses a copy +of it. In addition, you must do these things in the Modified Version: + +\begin{itemize} +\item[A.] + Use in the Title Page (and on the covers, if any) a title distinct + from that of the Document, and from those of previous versions + (which should, if there were any, be listed in the History section + of the Document). You may use the same title as a previous version + if the original publisher of that version gives permission. + +\item[B.] 
+ List on the Title Page, as authors, one or more persons or entities + responsible for authorship of the modifications in the Modified + Version, together with at least five of the principal authors of the + Document (all of its principal authors, if it has fewer than five), + unless they release you from this requirement. + +\item[C.] + State on the Title page the name of the publisher of the + Modified Version, as the publisher. + +\item[D.] + Preserve all the copyright notices of the Document. + +\item[E.] + Add an appropriate copyright notice for your modifications + adjacent to the other copyright notices. + +\item[F.] + Include, immediately after the copyright notices, a license notice + giving the public permission to use the Modified Version under the + terms of this License, in the form shown in the Addendum below. + +\item[G.] + Preserve in that license notice the full lists of Invariant Sections + and required Cover Texts given in the Document's license notice. + +\item[H.] + Include an unaltered copy of this License. + +\item[I.] + Preserve the section Entitled "History", Preserve its Title, and add + to it an item stating at least the title, year, new authors, and + publisher of the Modified Version as given on the Title Page. If + there is no section Entitled "History" in the Document, create one + stating the title, year, authors, and publisher of the Document as + given on its Title Page, then add an item describing the Modified + Version as stated in the previous sentence. + +\item[J.] + Preserve the network location, if any, given in the Document for + public access to a Transparent copy of the Document, and likewise + the network locations given in the Document for previous versions + it was based on. These may be placed in the "History" section. + You may omit a network location for a work that was published at + least four years before the Document itself, or if the original + publisher of the version it refers to gives permission. 
+ +\item[K.] + For any section Entitled "Acknowledgements" or "Dedications", + Preserve the Title of the section, and preserve in the section all + the substance and tone of each of the contributor acknowledgements + and/or dedications given therein. + +\item[L.] + Preserve all the Invariant Sections of the Document, + unaltered in their text and in their titles. Section numbers + or the equivalent are not considered part of the section titles. + +\item[M.] + Delete any section Entitled "Endorsements". Such a section + may not be included in the Modified Version. + +\item[N.] + Do not retitle any existing section to be Entitled "Endorsements" + or to conflict in title with any Invariant Section. + +\item[O.] + Preserve any Warranty Disclaimers. +\end{itemize} + +If the Modified Version includes new front-matter sections or +appendices that qualify as Secondary Sections and contain no material +copied from the Document, you may at your option designate some or all +of these sections as invariant. To do this, add their titles to the +list of Invariant Sections in the Modified Version's license notice. +These titles must be distinct from any other section titles. + +You may add a section Entitled "Endorsements", provided it contains +nothing but endorsements of your Modified Version by various +parties--for example, statements of peer review or that the text has +been approved by an organization as the authoritative definition of a +standard. + +You may add a passage of up to five words as a Front-Cover Text, and a +passage of up to 25 words as a Back-Cover Text, to the end of the list +of Cover Texts in the Modified Version. Only one passage of +Front-Cover Text and one of Back-Cover Text may be added by (or +through arrangements made by) any one entity. 
If the Document already +includes a cover text for the same cover, previously added by you or +by arrangement made by the same entity you are acting on behalf of, +you may not add another; but you may replace the old one, on explicit +permission from the previous publisher that added the old one. + +The author(s) and publisher(s) of the Document do not by this License +give permission to use their names for publicity for or to assert or +imply endorsement of any Modified Version. + + +\begin{center} +{\Large\bf 5. COMBINING DOCUMENTS} +\end{center} + + +You may combine the Document with other documents released under this +License, under the terms defined in section 4 above for modified +versions, provided that you include in the combination all of the +Invariant Sections of all of the original documents, unmodified, and +list them all as Invariant Sections of your combined work in its +license notice, and that you preserve all their Warranty Disclaimers. + +The combined work need only contain one copy of this License, and +multiple identical Invariant Sections may be replaced with a single +copy. If there are multiple Invariant Sections with the same name but +different contents, make the title of each such section unique by +adding at the end of it, in parentheses, the name of the original +author or publisher of that section if known, or else a unique number. +Make the same adjustment to the section titles in the list of +Invariant Sections in the license notice of the combined work. + +In the combination, you must combine any sections Entitled "History" +in the various original documents, forming one section Entitled +"History"; likewise combine any sections Entitled "Acknowledgements", +and any sections Entitled "Dedications". You must delete all sections +Entitled "Endorsements". + +\begin{center} +{\Large\bf 6. 
COLLECTIONS OF DOCUMENTS} +\end{center} + +You may make a collection consisting of the Document and other documents +released under this License, and replace the individual copies of this +License in the various documents with a single copy that is included in +the collection, provided that you follow the rules of this License for +verbatim copying of each of the documents in all other respects. + +You may extract a single document from such a collection, and distribute +it individually under this License, provided you insert a copy of this +License into the extracted document, and follow this License in all +other respects regarding verbatim copying of that document. + + +\begin{center} +{\Large\bf 7. AGGREGATION WITH INDEPENDENT WORKS} +\end{center} + + +A compilation of the Document or its derivatives with other separate +and independent documents or works, in or on a volume of a storage or +distribution medium, is called an "aggregate" if the copyright +resulting from the compilation is not used to limit the legal rights +of the compilation's users beyond what the individual works permit. +When the Document is included in an aggregate, this License does not +apply to the other works in the aggregate which are not themselves +derivative works of the Document. + +If the Cover Text requirement of section 3 is applicable to these +copies of the Document, then if the Document is less than one half of +the entire aggregate, the Document's Cover Texts may be placed on +covers that bracket the Document within the aggregate, or the +electronic equivalent of covers if the Document is in electronic form. +Otherwise they must appear on printed covers that bracket the whole +aggregate. + + +\begin{center} +{\Large\bf 8. TRANSLATION} +\end{center} + + +Translation is considered a kind of modification, so you may +distribute translations of the Document under the terms of section 4. 
+Replacing Invariant Sections with translations requires special +permission from their copyright holders, but you may include +translations of some or all Invariant Sections in addition to the +original versions of these Invariant Sections. You may include a +translation of this License, and all the license notices in the +Document, and any Warranty Disclaimers, provided that you also include +the original English version of this License and the original versions +of those notices and disclaimers. In case of a disagreement between +the translation and the original version of this License or a notice +or disclaimer, the original version will prevail. + +If a section in the Document is Entitled "Acknowledgements", +"Dedications", or "History", the requirement (section 4) to Preserve +its Title (section 1) will typically require changing the actual +title. + + +\begin{center} +{\Large\bf 9. TERMINATION} +\end{center} + + +You may not copy, modify, sublicense, or distribute the Document except +as expressly provided for under this License. Any other attempt to +copy, modify, sublicense or distribute the Document is void, and will +automatically terminate your rights under this License. However, +parties who have received copies, or rights, from you under this +License will not have their licenses terminated so long as such +parties remain in full compliance. + + +\begin{center} +{\Large\bf 10. FUTURE REVISIONS OF THIS LICENSE} +\end{center} + + +The Free Software Foundation may publish new, revised versions +of the GNU Free Documentation License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. See +http://www.gnu.org/copyleft/. + +Each version of the License is given a distinguishing version number. 
+If the Document specifies that a particular numbered version of this +License "or any later version" applies to it, you have the option of +following the terms and conditions either of that specified version or +of any later version that has been published (not as a draft) by the +Free Software Foundation. If the Document does not specify a version +number of this License, you may choose any version ever published (not +as a draft) by the Free Software Foundation. + + +\begin{center} +{\Large\bf ADDENDUM: How to use this License for your documents} +% TODO: this is too long for table of contents +\end{center} + +To use this License in a document you have written, include a copy of +the License in the document and put the following copyright and +license notices just after the title page: + +\bigskip +\begin{quote} + Copyright \copyright YEAR YOUR NAME. + Permission is granted to copy, distribute and/or modify this document + under the terms of the GNU Free Documentation License, Version 1.2 + or any later version published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU + Free Documentation License". +\end{quote} +\bigskip + +If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts, +replace the "with...Texts." line with this: + +\bigskip +\begin{quote} + with the Invariant Sections being LIST THEIR TITLES, with the + Front-Cover Texts being LIST, and with the Back-Cover Texts being LIST. +\end{quote} +\bigskip + +If you have Invariant Sections without Cover Texts, or some other +combination of the three, merge those two alternatives to suit the +situation. + +If your document contains nontrivial examples of program code, we +recommend releasing these examples in parallel under your choice of +free software license, such as the GNU General Public License, +to permit their use in free software. 
+ +%--------------------------------------------------------------------- diff --git a/docs/manuals/en/concepts/fix_tex.pl b/docs/manuals/en/concepts/fix_tex.pl new file mode 100755 index 00000000..98657576 --- /dev/null +++ b/docs/manuals/en/concepts/fix_tex.pl @@ -0,0 +1,184 @@ +#!/usr/bin/perl -w +# Fixes various things within tex files. + +use strict; + +my %args; + + +sub get_includes { + # Get a list of include files from the top-level tex file. + my (@list,$file); + + foreach my $filename (@_) { + $filename or next; + # Start with the top-level latex file so it gets checked too. + push (@list,$filename); + + # Get a list of all the html files in the directory. + open IF,"<$filename" or die "Cannot open input file $filename"; + while () { + chomp; + push @list,"$1.tex" if (/\\include\{(.*?)\}/); + } + + close IF; + } + return @list; +} + +sub convert_files { + my (@files) = @_; + my ($linecnt,$filedata,$output,$itemcnt,$indentcnt,$cnt); + + $cnt = 0; + foreach my $file (@files) { + # Open the file and load the whole thing into $filedata. A bit wasteful but + # easier to deal with, and we don't have a problem with speed here. + $filedata = ""; + open IF,"<$file" or die "Cannot open input file $file"; + while () { + $filedata .= $_; + } + close IF; + + # We look for a line that starts with \item, and indent the two next lines (if not blank) + # by three spaces. + my $linecnt = 3; + $indentcnt = 0; + $output = ""; + # Process a line at a time. + foreach (split(/\n/,$filedata)) { + $_ .= "\n"; # Put back the return. + # If this line is less than the third line past the \item command, + # and the line isn't blank and doesn't start with whitespace + # add three spaces to the start of the line. Keep track of the number + # of lines changed. + if ($linecnt < 3 and !/^\\item/) { + if (/^[^\n\s]/) { + $output .= " " . 
$_; + $indentcnt++; + } else { + $output .= $_; + } + $linecnt++; + } else { + $linecnt = 3; + $output .= $_; + } + /^\\item / and $linecnt = 1; + } + + + # This is an item line. We need to process it too. If inside a \begin{description} environment, convert + # \item {\bf xxx} to \item [xxx] or \item [{xxx}] (if xxx contains '[' or ']'. + $itemcnt = 0; + $filedata = $output; + $output = ""; + my ($before,$descrip,$this,$between); + + # Find any \begin{description} environment + while ($filedata =~ /(\\begin[\s\n]*\{[\s\n]*description[\s\n]*\})(.*?)(\\end[\s\n]*\{[\s\n]*description[\s\n]*\})/s) { + $output .= $` . $1; + $filedata = $3 . $'; + $descrip = $2; + + # Search for \item {\bf xxx} + while ($descrip =~ /\\item[\s\n]*\{[\s\n]*\\bf[\s\n]*/s) { + $descrip = $'; + $output .= $`; + ($between,$descrip) = find_matching_brace($descrip); + if (!$descrip) { + $linecnt = $output =~ tr/\n/\n/; + print STDERR "Missing matching curly brace at line $linecnt in $file\n" if (!$descrip); + } + + # Now do the replacement. + $between = '{' . $between . '}' if ($between =~ /\[|\]/); + $output .= "\\item \[$between\]"; + $itemcnt++; + } + $output .= $descrip; + } + $output .= $filedata; + + # If any hyphens or \item commnads were converted, save the file. + if ($indentcnt or $itemcnt) { + open OF,">$file" or die "Cannot open output file $file"; + print OF $output; + close OF; + print "$indentcnt indent", ($indentcnt == 1) ? "" : "s"," added in $file\n"; + print "$itemcnt item", ($itemcnt == 1) ? "" : "s"," Changed in $file\n"; + } + + $cnt += $indentcnt + $itemcnt; + } + return $cnt; +} + +sub find_matching_brace { + # Finds text up to the next matching brace. Assumes that the input text doesn't contain + # the opening brace, but we want to find text up to a matching closing one. + # Returns the text between the matching braces, followed by the rest of the text following + # (which does not include the matching brace). 
+ # + my $str = shift; + my ($this,$temp); + my $cnt = 1; + + while ($cnt) { + # Ignore verbatim constructs involving curly braces, or if the character preceding + # the curly brace is a backslash. + if ($str =~ /\\verb\*?\{.*?\{|\\verb\*?\}.*?\}|\{|\}/s) { + $this .= $`; + $str = $'; + $temp = $&; + + if ((substr($this,-1,1) eq '\\') or + $temp =~ /^\\verb/) { + $this .= $temp; + next; + } + + $cnt += ($temp eq '{') ? 1 : -1; + # If this isn't the matching curly brace ($cnt > 0), include the brace. + $this .= $temp if ($cnt); + } else { + # No matching curly brace found. + return ($this . $str,''); + } + } + return ($this,$str); +} + +sub check_arguments { + # Checks command-line arguments for ones starting with -- puts them into + # a hash called %args and removes them from @ARGV. + my $args = shift; + my $i; + + for ($i = 0; $i < $#ARGV; $i++) { + $ARGV[$i] =~ /^\-+/ or next; + $ARGV[$i] =~ s/^\-+//; + $args{$ARGV[$i]} = ""; + delete ($ARGV[$i]); + + } +} + +################################################################## +# MAIN #### +################################################################## + +my @includes; +my $cnt; + +check_arguments(\%args); +die "No Files given to Check\n" if ($#ARGV < 0); + +# Examine the file pointed to by the first argument to get a list of +# includes to test. +@includes = get_includes(@ARGV); + +$cnt = convert_files(@includes); +print "No lines changed\n" unless $cnt; diff --git a/docs/manuals/en/concepts/general.tex b/docs/manuals/en/concepts/general.tex new file mode 100644 index 00000000..b8dc2147 --- /dev/null +++ b/docs/manuals/en/concepts/general.tex @@ -0,0 +1,523 @@ +%% +%% + +\chapter{What is Bacula?} +\label{GeneralChapter} +\index[general]{Bacula!What is } +\index[general]{What is Bacula? } + +Bacula is a set of computer programs that permits the system +administrator to manage backup, recovery, and verification of computer data +across a network of computers of different kinds. 
Bacula can also run entirely
+upon a single computer and can backup to various types of media, including tape
+and disk.
+
+In technical terms, it is a
+network Client/Server based backup program. Bacula is relatively easy to use
+and efficient, while offering many advanced storage management features that
+make it easy to find and recover lost or damaged files. Due to its modular
+design, Bacula is scalable from small single computer systems to systems
+consisting of hundreds of computers located over a large network.
+
+\section{Who Needs Bacula?}
+\index[general]{Who Needs Bacula? }
+\index[general]{Bacula!Who Needs }
+
+If you are currently using a program such as tar, dump, or
+bru to backup your computer data, and you would like a network solution, more
+flexibility, or catalog services, Bacula will most likely provide the
+additional features you want. However, if you are new to Unix systems or do
+not have offsetting experience with a sophisticated backup package, the Bacula project does not
+recommend using Bacula as it is much more difficult to setup and use than
+tar or dump.
+
+If you want Bacula to behave like the above mentioned simple
+programs and write over any tape that you put in the drive, then you will find
+working with Bacula difficult. Bacula is designed to protect your data
+following the rules you specify, and this means reusing a tape only
+as the last resort. It is possible to "force" Bacula to write
+over any tape in the drive, but it is easier and more efficient to use a
+simpler program for that kind of operation.
+
+If you are running Amanda and would like a backup program that can write
+to multiple volumes (i.e. is not limited by your tape drive capacity), Bacula
+can most likely fill your needs. In addition, quite a number of Bacula users
+report that Bacula is simpler to setup and use than other equivalent programs.
+
+If you are currently using a sophisticated commercial package such as Legato
+Networker, 
ARCserveIT, Arkeia, or PerfectBackup+, you may be interested in
+Bacula, which provides many of the same features and is free software
+available under the GNU Version 2 software license.
+
+\section{Bacula Components or Services}
+\index[general]{Bacula Components or Services }
+\index[general]{Services!Bacula Components or }
+
+Bacula is made up of the following five major components or services:
+Director, Console, File, Storage, and Monitor services.
+
+
+\addcontentsline{lof}{figure}{Bacula Applications}
+\includegraphics{./bacula-applications.eps}
+(thanks to Aristedes Maniatis for this graphic and the one below)
+% TODO: move the thanks to Credits section in preface
+
+\subsection*{Bacula Director}
+ \label{DirDef}
+ The Bacula Director service is the program that supervises
+ all the backup, restore, verify and archive operations. The system
+ administrator uses the Bacula Director to schedule backups and to
+ recover files. For more details see the Director Services Daemon Design
+ Document in the Bacula Developer's Guide. The Director runs as a daemon
+ (or service) in the background.
+% TODO: tell reader where this Developer's Guide is at?
+ \label{UADef}
+
+\subsection*{Bacula Console}
+
+ The Bacula Console service is the program that allows the
+ administrator or user to communicate with the Bacula Director.
+ Currently, the Bacula Console is available in three versions:
+ text-based console interface, GNOME-based interface, and a
+ wxWidgets graphical interface.
+ The first and simplest is to run the Console program in a shell window
+ (i.e. TTY interface). Most system administrators will find this
+ completely adequate. The second version is a GNOME GUI interface that
+ is far from complete, but quite functional as it has most of the
+ capabilities of the shell Console. The third version is a wxWidgets GUI
+ with an interactive file restore. 
It also has most of the capabilities + of the shell console, allows command completion with tabulation, and + gives you instant help about the command you are typing. For more + details see the \ilink{Bacula Console Design Document}{_ConsoleChapter}. + +\subsection*{Bacula File} + \label{FDDef} + The Bacula File service (also known as the Client program) is the software + program that is installed on the machine to be backed up. + It is specific to the + operating system on which it runs and is responsible for providing the + file attributes and data when requested by the Director. The File + services are also responsible for the file system dependent part of + restoring the file attributes and data during a recovery operation. For + more details see the File Services Daemon Design Document in the Bacula + Developer's Guide. This program runs as a daemon on the machine to be + backed up. + In addition to Unix/Linux File daemons, there is a Windows File daemon + (normally distributed in binary format). The Windows File daemon runs + on current Windows versions (NT, 2000, XP, 2003, and possibly Me and + 98). +% TODO: maybe do not list Windows here as that is listed elsewhere +% TODO: remove "possibly"? +% TODO: mention Vista? + +\subsection*{Bacula Storage} + \label{SDDef} + The Bacula Storage services consist of the software programs that + perform the storage and recovery of the file attributes and data to the + physical backup media or volumes. In other words, the Storage daemon is + responsible for reading and writing your tapes (or other storage media, + e.g. files). For more details see the Storage Services Daemon Design + Document in the Bacula Developer's Guide. The Storage services runs as + a daemon on the machine that has the backup device (usually a tape + drive). +% TODO: may switch e.g. to "for example" or "such as" as appropriate +% TODO: is "usually" correct? Maybe "such as" instead? 
+
+\subsection*{Catalog}
+ \label{DBDefinition}
+ The Catalog services are comprised of the software programs
+ responsible for maintaining the file indexes and volume databases for
+ all files backed up. The Catalog services permit the system
+ administrator or user to quickly locate and restore any desired file.
+ The Catalog services sets Bacula apart from simple backup programs like
+ tar and bru, because the catalog maintains a record of all Volumes used,
+ all Jobs run, and all Files saved, permitting efficient restoration and
+ Volume management. Bacula currently supports three different databases,
+ MySQL, PostgreSQL, and SQLite, one of which must be chosen when building
+ Bacula.
+
+ The three SQL databases currently supported (MySQL, PostgreSQL or
+ SQLite) provide quite a number of features, including rapid indexing,
+ arbitrary queries, and security. Although the Bacula project plans to support other
+ major SQL databases, the current Bacula implementation interfaces only
+ to MySQL, PostgreSQL and SQLite. For the technical and porting details
+ see the Catalog Services Design Document in the developer's documentation.
+
+ The packages for MySQL and PostgreSQL are available for several operating
+ systems.
+ Alternatively, installing from the
+ source is quite easy, see the \ilink{ Installing and Configuring
+ MySQL}{MySqlChapter} chapter of this document for the details. For
+ more information on MySQL, please see:
+ \elink{www.mysql.com}{http://www.mysql.com}. Or see the \ilink{
+ Installing and Configuring PostgreSQL}{PostgreSqlChapter} chapter of this
+ document for the details. For more information on PostgreSQL, please
+ see: \elink{www.postgresql.org}{http://www.postgresql.org}.
+
+ Configuring and building SQLite is even easier. For the details of
+ configuring SQLite, please see the \ilink{ Installing and Configuring
+ SQLite}{SqlLiteChapter} chapter of this document. 
+ +\subsection*{Bacula Monitor} + \label{MonDef} + A Bacula Monitor service is the program that allows the + administrator or user to watch current status of Bacula Directors, + Bacula File Daemons and Bacula Storage Daemons. + Currently, only a GTK+ version is available, which works with GNOME, + KDE, or any window manager that supports the FreeDesktop.org system tray + standard. + + To perform a successful save or restore, the following four daemons must be + configured and running: the Director daemon, the File daemon, the Storage + daemon, and the Catalog service (MySQL, PostgreSQL or SQLite). + +\section{Bacula Configuration} +\index[general]{Configuration!Bacula } +\index[general]{Bacula Configuration } + +In order for Bacula to understand your system, what clients you want backed +up and how, you must create a number of configuration files containing +resources (or objects). The following presents an overall picture of this: + +\addcontentsline{lof}{figure}{Bacula Objects} +\includegraphics{./bacula-objects.eps} + +\section{Conventions Used in this Document} +\index[general]{Conventions Used in this Document } +\index[general]{Document!Conventions Used in this } + +Bacula is in a state of evolution, and as a consequence, this manual +will not always agree with the code. If an item in this manual is preceded by +an asterisk (*), it indicates that the particular feature is not implemented. +If it is preceded by a plus sign (+), it indicates that the feature may be +partially implemented. +% TODO: search for plus sign and asterisk and "IMPLEMENTED" and fix for printed book + +If you are reading this manual as supplied in a released version of the +software, the above paragraph holds true. If you are reading the online +version of the manual, +\elink{ www.bacula.org}{http://www.bacula.org}, please bear in +mind that this version describes the current version in development (in the +CVS) that may contain features not in the released version. 
Just the same, it +generally lags behind the code a bit. +% TODO: is this still true? there are separate websites + +\section{Quick Start} +\index[general]{Quick Start } +\index[general]{Start!Quick } + +To get Bacula up and running quickly, the author recommends +that you first scan the +Terminology section below, then quickly review the next chapter entitled +\ilink{The Current State of Bacula}{StateChapter}, then the +\ilink{Getting Started with Bacula}{QuickStartChapter}, which will +give you a quick overview of getting Bacula running. After which, you should +proceed to the chapter on +\ilink{Installing Bacula}{InstallChapter}, then +\ilink{How to Configure Bacula}{ConfigureChapter}, and finally the +chapter on +\ilink{ Running Bacula}{TutorialChapter}. + +\section{Terminology} +\index[general]{Terminology } + +\begin{description} + +\item [Administrator] + \index[fd]{Administrator } + The person or persons responsible for administrating the Bacula system. + +\item [Backup] + \index[fd]{Backup } + The term Backup refers to a Bacula Job that saves files. + +\item [Bootstrap File] + \index[fd]{Bootstrap File } + The bootstrap file is an ASCII file containing a compact form of + commands that allow Bacula or the stand-alone file extraction utility + (bextract) to restore the contents of one or more Volumes, for + example, the current state of a system just backed up. With a bootstrap + file, Bacula can restore your system without a Catalog. You can create + a bootstrap file from a Catalog to extract any file or files you wish. + +\item [Catalog] + \index[fd]{Catalog } + The Catalog is used to store summary information about the Jobs, + Clients, and Files that were backed up and on what Volume or Volumes. 
+
+ The information saved in the Catalog permits the administrator or user
+ to determine what jobs were run, their status as well as the important
+ characteristics of each file that was backed up, and most importantly,
+ it permits you to choose what files to restore.
+ The Catalog is an
+ online resource, but does not contain the data for the files backed up.
+ Most of the information stored in the catalog is also stored on the
+ backup volumes (i.e. tapes). Of course, the tapes will also have a
+ copy of the file data in addition to the File Attributes (see below).
+
+ The catalog feature is one part of Bacula that distinguishes it from
+ simple backup and archive programs such as dump and tar.
+
+\item [Client]
+ \index[fd]{Client }
+ In Bacula's terminology, the word Client refers to the machine being
+ backed up, and it is synonymous with the File services or File daemon,
+ and quite often, it is referred to as the FD. A Client is defined in a
+ configuration file resource.
+
+\item [Console]
+ \index[fd]{Console }
+ The program that interfaces to the Director allowing the user or system
+ administrator to control Bacula.
+
+\item [Daemon]
+ \index[fd]{Daemon }
+ Unix terminology for a program that is always present in the background to
+ carry out a designated task. On Windows systems, as well as some Unix
+ systems, daemons are called Services.
+
+\item [Directive]
+ \index[fd]{Directive }
+ The term directive is used to refer to a statement or a record within a
+ Resource in a configuration file that defines one specific setting. For
+ example, the {\bf Name} directive defines the name of the Resource.
+
+\item [Director]
+ \index[fd]{Director }
+ The main Bacula server daemon that schedules and directs all Bacula
+ operations. Occasionally, the project refers to the Director as DIR.
+
+\item [Differential]
+ \index[fd]{Differential }
+ A backup that includes all files changed since the last Full save started. 
+ Note, other backup programs may define this differently. + +\item [File Attributes] + \index[fd]{File Attributes } + The File Attributes are all the information necessary about a file to + identify it and all its properties such as size, creation date, modification + date, permissions, etc. Normally, the attributes are handled entirely by + Bacula so that the user never needs to be concerned about them. The + attributes do not include the file's data. + +\item [File Daemon] + \index[fd]{File Daemon } + The daemon running on the client computer to be backed up. This is also + referred to as the File services, and sometimes as the Client services or the + FD. + +\label{FileSetDef} +\item [FileSet] +\index[fd]{a name } + A FileSet is a Resource contained in a configuration file that defines + the files to be backed up. It consists of a list of included files or + directories, a list of excluded files, and how the file is to be stored + (compression, encryption, signatures). For more details, see the + \ilink{FileSet Resource definition}{FileSetResource} in the Director + chapter of this document. + +\item [Incremental] + \index[fd]{Incremental } + A backup that includes all files changed since the last Full, Differential, + or Incremental backup started. It is normally specified on the {\bf Level} + directive within the Job resource definition, or in a Schedule resource. + +\label{JobDef} +\item [Job] +\index[fd]{a name } + A Bacula Job is a configuration resource that defines the work that + Bacula must perform to backup or restore a particular Client. It + consists of the {\bf Type} (backup, restore, verify, etc), the {\bf + Level} (full, incremental,...), the {\bf FileSet}, and {\bf Storage} the + files are to be backed up (Storage device, Media Pool). For more + details, see the \ilink{Job Resource definition}{JobResource} in the + Director chapter of this document. +% TODO: clean up "..." 
for book + +\item [Monitor] + \index[fd]{Monitor } + The program that interfaces to all the daemons allowing the user or + system administrator to monitor Bacula status. + +\item [Resource] + \index[fd]{Resource } + A resource is a part of a configuration file that defines a specific + unit of information that is available to Bacula. It consists of several + directives (individual configuration statements). For example, the {\bf + Job} resource defines all the properties of a specific Job: name, + schedule, Volume pool, backup type, backup level, ... +% TODO: clean up "..." for book + +\item [Restore] + \index[fd]{Restore } + A restore is a configuration resource that describes the operation of + recovering a file from backup media. It is the inverse of a save, + except that in most cases, a restore will normally have a small set of + files to restore, while normally a Save backs up all the files on the + system. Of course, after a disk crash, Bacula can be called upon to do + a full Restore of all files that were on the system. +% TODO: Why? Why say "Of course"?? + +% TODO: define "Save" +% TODO: define "Full" + +\item [Schedule] + \index[fd]{Schedule } + A Schedule is a configuration resource that defines when the Bacula Job + will be scheduled for execution. To use the Schedule, the Job resource + will refer to the name of the Schedule. For more details, see the + \ilink{Schedule Resource definition}{ScheduleResource} in the Director + chapter of this document. + +\item [Service] + \index[fd]{Service } + This is a program that remains permanently in memory awaiting + instructions. In Unix environments, services are also known as + {\bf daemons}. + +\item [Storage Coordinates] + \index[fd]{Storage Coordinates } + The information returned from the Storage Services that uniquely locates + a file on a backup medium. It consists of two parts: one part pertains + to each file saved, and the other part pertains to the whole Job. 
+ Normally, this information is saved in the Catalog so that the user + doesn't need specific knowledge of the Storage Coordinates. The Storage + Coordinates include the File Attributes (see above) plus the unique + location of the information on the backup Volume. + +\item [Storage Daemon] + \index[fd]{Storage Daemon } + The Storage daemon, sometimes referred to as the SD, is the code that + writes the attributes and data to a storage Volume (usually a tape or + disk). + +\item [Session] + \index[sd]{Session } + Normally refers to the internal conversation between the File daemon and + the Storage daemon. The File daemon opens a {\bf session} with the + Storage daemon to save a FileSet or to restore it. A session has a + one-to-one correspondence to a Bacula Job (see above). + +\item [Verify] + \index[sd]{Verify } + A verify is a job that compares the current file attributes to the + attributes that have previously been stored in the Bacula Catalog. This + feature can be used for detecting changes to critical system files + similar to what a file integrity checker like Tripwire does. + One of the major advantages of + using Bacula to do this is that on the machine you want protected such + as a server, you can run just the File daemon, and the Director, Storage + daemon, and Catalog reside on a different machine. As a consequence, if + your server is ever compromised, it is unlikely that your verification + database will be tampered with. + + Verify can also be used to check that the most recent Job data written + to a Volume agrees with what is stored in the Catalog (i.e. it compares + the file attributes), *or it can check the Volume contents against the + original files on disk. + +\item [*Archive] + \index[fd]{*Archive } + An Archive operation is done after a Save, and it consists of removing the + Volumes on which data is saved from active use. These Volumes are marked as + Archived, and may no longer be used to save files. 
All the files contained
+ on an Archived Volume are removed from the Catalog. NOT YET IMPLEMENTED.
+
+\item [Retention Period]
+ \index[fd]{Retention Period }
+ There are various kinds of retention periods that Bacula recognizes.
+ The most important are the {\bf File} Retention Period, {\bf Job}
+ Retention Period, and the {\bf Volume} Retention Period. Each of these
+ retention periods applies to the time that specific records will be kept
+ in the Catalog database. This should not be confused with the time that
+ the data saved to a Volume is valid.
+
+ The File Retention Period determines the time that File records are kept
+ in the catalog database. This period is important for two reasons: the
+ first is that as long as File records remain in the database, you
+ can "browse" the database with a console program and restore any
+ individual file. Once the File records are removed or pruned from the
+ database, the individual files of a backup job can no longer be
+ "browsed". The second reason for carefully choosing the File Retention
+ Period is that the File records occupy the most storage space in the
+ database. As a consequence, you must ensure that regular "pruning" of
+ the database file records is done to keep your database from growing
+ too large. (See the Console {\bf prune}
+ command for more details on this subject).
+
+ The Job Retention Period is the length of time that Job records will be
+ kept in the database. Note, all the File records are tied to the Job
+ that saved those files. The File records can be purged leaving the Job
+ records. In this case, information will be available about the jobs
+ that ran, but not the details of the files that were backed up.
+ Normally, when a Job record is purged, all its File records will also be
+ purged.
+
+ The Volume Retention Period is the minimum length of time that a Volume will be
+ kept before it is reused. 
Bacula will normally never overwrite a Volume + that contains the only backup copy of a file. Under ideal conditions, + the Catalog would retain entries for all files backed up for all current + Volumes. Once a Volume is overwritten, the files that were backed up on + that Volume are automatically removed from the Catalog. However, if + there is a very large pool of Volumes or a Volume is never overwritten, + the Catalog database may become enormous. To keep the Catalog to a + manageable size, the backup information should be removed from the + Catalog after the defined File Retention Period. Bacula provides the + mechanisms for the catalog to be automatically pruned according to the + retention periods defined. + +\item [Scan] + \index[sd]{Scan } + A Scan operation causes the contents of a Volume or a series of Volumes + to be scanned. These Volumes with the information on which files they + contain are restored to the Bacula Catalog. Once the information is + restored to the Catalog, the files contained on those Volumes may be + easily restored. This function is particularly useful if certain + Volumes or Jobs have exceeded their retention period and have been + pruned or purged from the Catalog. Scanning data from Volumes into the + Catalog is done by using the {\bf bscan} program. See the \ilink{ bscan + section}{bscan} of the Bacula Utilities Chapter of this manual for more + details. + +\item [Volume] + \index[sd]{Volume } + A Volume is an archive unit, normally a tape or a named disk file where + Bacula stores the data from one or more backup jobs. All Bacula Volumes + have a software label written to the Volume by Bacula so that it + identifies what Volume it is really reading. (Normally there should be + no confusion with disk files, but with tapes, it is easy to mount the + wrong one.) 
+\end{description}
+
+\section{What Bacula is Not}
+\index[general]{What Bacula is Not}
+
+Bacula is a backup, restore and verification program and is not a
+complete disaster recovery system in itself, but it can be a key part of one
+if you plan carefully and follow the instructions included in the
+\ilink{ Disaster Recovery}{RescueChapter} Chapter of this manual.
+
+With proper planning, as mentioned in the Disaster Recovery chapter,
+Bacula can be a central component of your disaster recovery system. For
+example, if you have created an emergency boot disk, a Bacula Rescue disk to
+save the current partitioning information of your hard disk, and maintain a
+complete Bacula backup, it is possible to completely recover your system from
+"bare metal", that is, starting from an empty disk.
+% TODO: should it say "or" between boot disk and rescue disk?
+
+If you have used the {\bf WriteBootstrap} record in your job or some other
+means to save a valid bootstrap file, you will be able to use it to extract
+the necessary files (without using the catalog or manually searching for the
+files to restore).
+
+\section{Interactions Between the Bacula Services}
+\index[general]{Interactions Between the Bacula Services}
+\index[general]{Services!Interactions Between the Bacula}
+
+The following block diagram shows the typical interactions between the Bacula
+Services for a backup job. Each block represents in general a separate process
+(normally a daemon). In general, the Director oversees the flow of
+information. It also maintains the Catalog. 
+ +\addcontentsline{lof}{figure}{Interactions between Bacula Services} +\includegraphics{./flow.eps} diff --git a/docs/manuals/en/concepts/gpl.tex b/docs/manuals/en/concepts/gpl.tex new file mode 100644 index 00000000..a368afc7 --- /dev/null +++ b/docs/manuals/en/concepts/gpl.tex @@ -0,0 +1,420 @@ +%% +%% + +\section*{GNU General Public License} +\label{GplChapter} +\index[general]{GNU General Public License } +\index[general]{License!GNU General Public } + +\elink{image of a Philosophical +GNU}{http://www.gnu.org/graphics/philosophicalgnu.html} + +\begin{itemize} +\item + \elink{What to do if you see a possible GPL + violation}{http://www.gnu.org/copyleft/gpl-violation.html} +\item + \elink{Translations of the + GPL}{http://www.gnu.org/copyleft/copyleft.html\#translations} +\end{itemize} + + +\section{Table of Contents} +\index[general]{Table of Contents } +\index[general]{Contents!Table of } + +\begin{itemize} +\item + \label{TOC1} + \ilink{GNU GENERAL PUBLIC LICENSE}{SEC1} + +\begin{itemize} +\item + \label{TOC2} + \ilink{Preamble}{SEC2} +\item + \label{TOC3} + \ilink{TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND +MODIFICATION}{SEC3} +\item + \label{TOC4} + \ilink{How to Apply These Terms to Your New Programs}{SEC4} +\end{itemize} + +\end{itemize} + + +\section{GNU GENERAL PUBLIC LICENSE} +\label{SEC1} +\index[general]{GNU GENERAL PUBLIC LICENSE } +\index[general]{LICENSE!GNU GENERAL PUBLIC } + +Version 2, June 1991 + +\footnotesize +\begin{verbatim} +Copyright (C) 1989, 1991 Free Software Foundation, Inc. +51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +Everyone is permitted to copy and distribute verbatim copies +of this license document, but changing it is not allowed. +\end{verbatim} +\normalsize + +\section{Preamble} +\label{SEC2} +\index[general]{Preamble } + +The licenses for most software are designed to take away your freedom to share +and change it. 
By contrast, the GNU General Public License is intended to +guarantee your freedom to share and change free software\verb:--:to make sure the +software is free for all its users. This General Public License applies to +most of the Free Software Foundation's software and to any other program whose +authors commit to using it. (Some other Free Software Foundation software is +covered by the GNU Library General Public License instead.) You can apply it +to your programs, too. + +When we speak of free software, we are referring to freedom, not price. Our +General Public Licenses are designed to make sure that you have the freedom to +distribute copies of free software (and charge for this service if you wish), +that you receive source code or can get it if you want it, that you can change +the software or use pieces of it in new free programs; and that you know you +can do these things. + +To protect your rights, we need to make restrictions that forbid anyone to +deny you these rights or to ask you to surrender the rights. These +restrictions translate to certain responsibilities for you if you distribute +copies of the software, or if you modify it. + +For example, if you distribute copies of such a program, whether gratis or for +a fee, you must give the recipients all the rights that you have. You must +make sure that they, too, receive or can get the source code. And you must +show them these terms so they know their rights. + +We protect your rights with two steps: (1) copyright the software, and (2) +offer you this license which gives you legal permission to copy, distribute +and/or modify the software. + +Also, for each author's protection and ours, we want to make certain that +everyone understands that there is no warranty for this free software. 
If the +software is modified by someone else and passed on, we want its recipients to +know that what they have is not the original, so that any problems introduced +by others will not reflect on the original authors' reputations. + +Finally, any free program is threatened constantly by software patents. We +wish to avoid the danger that redistributors of a free program will +individually obtain patent licenses, in effect making the program proprietary. +To prevent this, we have made it clear that any patent must be licensed for +everyone's free use or not licensed at all. + +The precise terms and conditions for copying, distribution and modification +follow. + +\section{TERMS AND CONDITIONS} +\label{SEC3} +\index[general]{CONDITIONS!TERMS AND } +\index[general]{TERMS AND CONDITIONS } + +TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + +{\bf 0.} This License applies to any program or other work which contains a +notice placed by the copyright holder saying it may be distributed under the +terms of this General Public License. The "Program", below, refers to any +such program or work, and a "work based on the Program" means either the +Program or any derivative work under copyright law: that is to say, a work +containing the Program or a portion of it, either verbatim or with +modifications and/or translated into another language. (Hereinafter, +translation is included without limitation in the term "modification".) Each +licensee is addressed as "you". + +Activities other than copying, distribution and modification are not covered +by this License; they are outside its scope. The act of running the Program is +not restricted, and the output from the Program is covered only if its +contents constitute a work based on the Program (independent of having been +made by running the Program). Whether that is true depends on what the Program +does. 
+ +{\bf 1.} You may copy and distribute verbatim copies of the Program's source +code as you receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice and +disclaimer of warranty; keep intact all the notices that refer to this License +and to the absence of any warranty; and give any other recipients of the +Program a copy of this License along with the Program. + +You may charge a fee for the physical act of transferring a copy, and you may +at your option offer warranty protection in exchange for a fee. + +{\bf 2.} You may modify your copy or copies of the Program or any portion of +it, thus forming a work based on the Program, and copy and distribute such +modifications or work under the terms of Section 1 above, provided that you +also meet all of these conditions: + +\begin{itemize} +\item {\bf a)} You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + +\item {\bf b)} You must cause any work that you distribute or publish, that + in whole or in part contains or is derived from the Program or any part + thereof, to be licensed as a whole at no charge to all third parties under + the terms of this License. + +\item {\bf c)} If the modified program normally reads commands interactively + when run, you must cause it, when started running for such interactive use in + the most ordinary way, to print or display an announcement including an + appropriate copyright notice and a notice that there is no warranty (or else, + saying that you provide a warranty) and that users may redistribute the + program under these conditions, and telling the user how to view a copy of + this License. (Exception: if the Program itself is interactive but does not + normally print such an announcement, your work based on the Program is not + required to print an announcement.) 
+\end{itemize} + +These requirements apply to the modified work as a whole. If identifiable +sections of that work are not derived from the Program, and can be reasonably +considered independent and separate works in themselves, then this License, +and its terms, do not apply to those sections when you distribute them as +separate works. But when you distribute the same sections as part of a whole +which is a work based on the Program, the distribution of the whole must be on +the terms of this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest your +rights to work written entirely by you; rather, the intent is to exercise the +right to control the distribution of derivative or collective works based on +the Program. + +In addition, mere aggregation of another work not based on the Program with +the Program (or with a work based on the Program) on a volume of a storage or +distribution medium does not bring the other work under the scope of this +License. 
+ +{\bf 3.} You may copy and distribute the Program (or a work based on it, under +Section 2) in object code or executable form under the terms of Sections 1 and +2 above provided that you also do one of the following: + +\begin{itemize} +\item {\bf a)} Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections 1 and 2 + above on a medium customarily used for software interchange; or, + +\item {\bf b)} Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your cost of + physically performing source distribution, a complete machine-readable copy of + the corresponding source code, to be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + +\item {\bf c)} Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is allowed only + for noncommercial distribution and only if you received the program in object + code or executable form with such an offer, in accord with Subsection b + above.) +\end{itemize} + +The source code for a work means the preferred form of the work for making +modifications to it. For an executable work, complete source code means all +the source code for all modules it contains, plus any associated interface +definition files, plus the scripts used to control compilation and +installation of the executable. However, as a special exception, the source +code distributed need not include anything that is normally distributed (in +either source or binary form) with the major components (compiler, kernel, and +so on) of the operating system on which the executable runs, unless that +component itself accompanies the executable. 
+ +If distribution of executable or object code is made by offering access to +copy from a designated place, then offering equivalent access to copy the +source code from the same place counts as distribution of the source code, +even though third parties are not compelled to copy the source along with the +object code. + +{\bf 4.} You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt otherwise to +copy, modify, sublicense or distribute the Program is void, and will +automatically terminate your rights under this License. However, parties who +have received copies, or rights, from you under this License will not have +their licenses terminated so long as such parties remain in full compliance. + +{\bf 5.} You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or distribute +the Program or its derivative works. These actions are prohibited by law if +you do not accept this License. Therefore, by modifying or distributing the +Program (or any work based on the Program), you indicate your acceptance of +this License to do so, and all its terms and conditions for copying, +distributing or modifying the Program or works based on it. + +{\bf 6.} Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the original +licensor to copy, distribute or modify the Program subject to these terms and +conditions. You may not impose any further restrictions on the recipients' +exercise of the rights granted herein. You are not responsible for enforcing +compliance by third parties to this License. 
+ +{\bf 7.} If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or otherwise) +that contradict the conditions of this License, they do not excuse you from +the conditions of this License. If you cannot distribute so as to satisfy +simultaneously your obligations under this License and any other pertinent +obligations, then as a consequence you may not distribute the Program at all. +For example, if a patent license would not permit royalty-free redistribution +of the Program by all those who receive copies directly or indirectly through +you, then the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply and +the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any patents or +other property right claims or to contest validity of any such claims; this +section has the sole purpose of protecting the integrity of the free software +distribution system, which is implemented by public license practices. Many +people have made generous contributions to the wide range of software +distributed through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing to +distribute software through any other system and a licensee cannot impose that +choice. + +This section is intended to make thoroughly clear what is believed to be a +consequence of the rest of this License. 
+ +{\bf 8.} If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the original +copyright holder who places the Program under this License may add an explicit +geographical distribution limitation excluding those countries, so that +distribution is permitted only in or among countries not thus excluded. In +such case, this License incorporates the limitation as if written in the body +of this License. + +{\bf 9.} The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will be +similar in spirit to the present version, but may differ in detail to address +new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any later +version", you have the option of following the terms and conditions either of +that version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of this License, +you may choose any version ever published by the Free Software Foundation. + +{\bf 10.} If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author to +ask for permission. For software which is copyrighted by the Free Software +Foundation, write to the Free Software Foundation; we sometimes make +exceptions for this. Our decision will be guided by the two goals of +preserving the free status of all derivatives of our free software and of +promoting the sharing and reuse of software generally. + +{\bf NO WARRANTY} + +{\bf 11.} BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE +THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR +IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO +THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM +PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR +CORRECTION. + +{\bf 12.} IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO +LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR +THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + +END OF TERMS AND CONDITIONS + +\section{How to Apply These Terms to Your New Programs} +\label{SEC4} +\index[general]{Programs!How to Apply These Terms to Your New } +\index[general]{How to Apply These Terms to Your New Programs } + +If you develop a new program, and you want it to be of the greatest possible +use to the public, the best way to achieve this is to make it free software +which everyone can redistribute and change under these terms. + +To do so, attach the following notices to the program. It is safest to attach +them to the start of each source file to most effectively convey the exclusion +of warranty; and each file should have at least the "copyright" line and a +pointer to where the full notice is found. 
+ +\footnotesize +\begin{verbatim} +{\em one line to give the program's name and an idea of what it does.} +Copyright (C) {\em yyyy} {\em name of author} +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +as published by the Free Software Foundation; either version 2 +of the License, or (at your option) any later version. +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +02110-1301 USA +\end{verbatim} +\normalsize + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this when it +starts in an interactive mode: + +\footnotesize +\begin{verbatim} +Gnomovision version 69, Copyright (C) {\em year} {\em name of author} +Gnomovision comes with ABSOLUTELY NO WARRANTY; for details +type `show w'. This is free software, and you are welcome +to redistribute it under certain conditions; type `show c' +for details. +\end{verbatim} +\normalsize + +The hypothetical commands {\tt `show w'} and {\tt `show c'} should show the +appropriate parts of the General Public License. Of course, the commands you +use may be called something other than {\tt `show w'} and {\tt `show c'}; they +could even be mouse-clicks or menu items\verb:--:whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. 
Here is a sample; alter the names: + +\footnotesize +\begin{verbatim} +Yoyodyne, Inc., hereby disclaims all copyright +interest in the program `Gnomovision' +(which makes passes at compilers) written +by James Hacker. +{\em signature of Ty Coon}, 1 April 1989 +Ty Coon, President of Vice +\end{verbatim} +\normalsize + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General Public +License instead of this License. +Return to +\elink{GNU's home page}{http://www.gnu.org/home.html}. + +FSF \& GNU inquiries \& questions to +\elink{gnu@gnu.org}{mailto:gnu@gnu.org}. Other +\elink{ways to contact}{http://www.gnu.org/home.html\#ContactInfo} the FSF. + +Comments on these web pages to +\elink{webmasters@www.gnu.org}{mailto:webmasters@www.gnu.org}, send other +questions to +\elink{gnu@gnu.org}{mailto:gnu@gnu.org}. + +Copyright notice above. +Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +Boston, MA 02110-1301 USA + +Updated: 3 Jan 2000 rms diff --git a/docs/manuals/en/concepts/index.perl b/docs/manuals/en/concepts/index.perl new file mode 100644 index 00000000..bc4e1b60 --- /dev/null +++ b/docs/manuals/en/concepts/index.perl @@ -0,0 +1,564 @@ +# This module does multiple indices, supporting the style of the LaTex 'index' +# package. + +# Version Information: +# 16-Feb-2005 -- Original Creation. Karl E. Cunningham +# 14-Mar-2005 -- Clarified and Consolodated some of the code. +# Changed to smoothly handle single and multiple indices. + +# Two LaTeX index formats are supported... +# --- SINGLE INDEX --- +# \usepackage{makeidx} +# \makeindex +# \index{entry1} +# \index{entry2} +# \index{entry3} +# ... 
+# \printindex +# +# --- MULTIPLE INDICES --- +# +# \usepackage{makeidx} +# \usepackage{index} +# \makeindex -- latex2html doesn't care but LaTeX does. +# \newindex{ref1}{ext1}{ext2}{title1} +# \newindex{ref2}{ext1}{ext2}{title2} +# \newindex{ref3}{ext1}{ext2}{title3} +# \index[ref1]{entry1} +# \index[ref1]{entry2} +# \index[ref3]{entry3} +# \index[ref2]{entry4} +# \index{entry5} +# \index[ref3]{entry6} +# ... +# \printindex[ref1] +# \printindex[ref2] +# \printindex[ref3] +# \printindex +# ___________________ +# +# For the multiple-index style, each index is identified by the ref argument to \newindex, \index, +# and \printindex. A default index is allowed, which is indicated by omitting the optional +# argument. The default index does not require a \newindex command. As \index commands +# are encountered, their entries are stored according +# to the ref argument. When the \printindex command is encountered, the stored index +# entries for that argument are retrieved and printed. The title for each index is taken +# from the last argument in the \newindex command. +# While processing \index and \printindex commands, if no argument is given the index entries +# are built into a default index. The title of the default index is simply "Index". +# This makes the difference between single- and multiple-index processing trivial. +# +# Another method can be used by omitting the \printindex command and just using \include to +# pull in index files created by the makeindex program. These files will start with +# \begin{theindex}. This command is used to determine where to print the index. Using this +# approach, the indices will be output in the same order as the newindex commands were +# originally found (see below). Using a combination of \printindex and \include{indexfile} has not +# been tested and may produce undesireable results. +# +# The index data are stored in a hash for later sorting and output. 
As \printindex +# commands are handled, the order in which they were found in the tex filea is saved, +# associated with the ref argument to \printindex. +# +# We use the original %index hash to store the index data into. We append a \002 followed by the +# name of the index to isolate the entries in different indices from each other. This is necessary +# so that different indices can have entries with the same name. For the default index, the \002 is +# appended without the name. +# +# Since the index order in the output cannot be determined if the \include{indexfile} +# command is used, the order will be assumed from the order in which the \newindex +# commands were originally seen in the TeX files. This order is saved as well as the +# order determined from any printindex{ref} commands. If \printindex commnads are used +# to specify the index output, that order will be used. If the \include{idxfile} command +# is used, the order of the original newindex commands will be used. In this case the +# default index will be printed last since it doesn't have a corresponding \newindex +# command and its order cannot be determined. Mixing \printindex and \include{idxfile} +# commands in the same file is likely to produce less than satisfactory results. +# +# +# The hash containing index data is named %indices. It contains the following data: +#{ +# 'title' => { +# $ref1 => $indextitle , +# $ref2 => $indextitle , +# ... +# }, +# 'newcmdorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index. +# 'printindorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index. +#} + + +# Globals to handle multiple indices. +my %indices; + +# This tells the system to use up to 7 words in index entries. +$WORDS_IN_INDEX = 10; + +# KEC 2-18-05 +# Handles the \newindex command. This is called if the \newindex command is +# encountered in the LaTex source. Gets the index ref and title from the arguments. 
+# Saves the index ref and title. +# Note that we are called once to handle multiple \newindex commands that are +# newline-separated. +sub do_cmd_newindex { + my $data = shift; + # The data is sent to us as fields delimited by their ID #'s. We extract the + # fields. + foreach my $line (split("\n",$data)) { + my @fields = split (/(?:\<\#\d+?\#\>)+/,$line); + + # The index name and title are the second and fourth fields in the data. + if ($line =~ /^ \001 + # @ -> \002 + # | -> \003 + $* = 1; $str =~ s/\n\s*/ /g; $* = 0; # remove any newlines + # protect \001 occurring with images + $str =~ s/\001/\016/g; # 0x1 to 0xF + $str =~ s/\\\\/\011/g; # Double backslash -> 0xB + $str =~ s/\\;SPMquot;/\012/g; # \;SPMquot; -> 0xC + $str =~ s/;SPMquot;!/\013/g; # ;SPMquot; -> 0xD + $str =~ s/!/\001/g; # Exclamation point -> 0x1 + $str =~ s/\013/!/g; # 0xD -> Exclaimation point + $str =~ s/;SPMquot;@/\015/g; # ;SPMquot;@ to 0xF + $str =~ s/@/\002/g; # At sign -> 0x2 + $str =~ s/\015/@/g; # 0xF to At sign + $str =~ s/;SPMquot;\|/\017/g; # ;SMPquot;| to 0x11 + $str =~ s/\|/\003/g; # Vertical line to 0x3 + $str =~ s/\017/|/g; # 0x11 to vertical line + $str =~ s/;SPMquot;(.)/\1/g; # ;SPMquot; -> whatever the next character is + $str =~ s/\012/;SPMquot;/g; # 0x12 to ;SPMquot; + $str =~ s/\011/\\\\/g; # 0x11 to double backslash + local($key_part, $pageref) = split("\003", $str, 2); + + # For any keys of the form: blablabla!blablabla, which want to be split at the + # exclamation point, replace the ! with a comma and a space. We don't do it + # that way for this index. + $key_part =~ s/\001/, /g; + local(@keys) = split("\001", $key_part); + # If TITLE is not yet available use $before. 
+ $TITLE = $saved_title if (($saved_title)&&(!($TITLE)||($TITLE eq $default_title))); + $TITLE = $before unless $TITLE; + # Save the reference + local($words) = ''; + if ($SHOW_SECTION_NUMBERS) { $words = &make_idxnum; } + elsif ($SHORT_INDEX) { $words = &make_shortidxname; } + else { $words = &make_idxname; } + local($super_key) = ''; + local($sort_key, $printable_key, $cur_key); + foreach $key (@keys) { + $key =~ s/\016/\001/g; # revert protected \001s + ($sort_key, $printable_key) = split("\002", $key); + # + # RRM: 16 May 1996 + # any \label in the printable-key will have already + # created a label where the \index occurred. + # This has to be removed, so that the desired label + # will be found on the Index page instead. + # + if ($printable_key =~ /tex2html_anchor_mark/ ) { + $printable_key =~ s/><\/A>$cross_ref_mark/ + $printable_key =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/ + do { ($label,$id) = ($1,$2); + $ref_label = $external_labels{$label} unless + ($ref_label = $ref_files{$label}); + '"' . "$ref_label#$label" . '">' . + &get_ref_mark($label,$id)} + /geo; + } + $printable_key =~ s/<\#[^\#>]*\#>//go; + #RRM + # recognise \char combinations, for a \backslash + # + $printable_key =~ s/\&\#;\'134/\\/g; # restore \\s + $printable_key =~ s/\&\#;\`
/\\/g; # ditto + $printable_key =~ s/\&\#;*SPMquot;92/\\/g; # ditto + # + # $sort_key .= "@$printable_key" if !($printable_key); # RRM + $sort_key .= "@$printable_key" if !($sort_key); # RRM + $sort_key =~ tr/A-Z/a-z/; + if ($super_key) { + $cur_key = $super_key . "\001" . $sort_key; + $sub_index{$super_key} .= $cur_key . "\004"; + } else { + $cur_key = $sort_key; + } + + # Append the $index_name to the current key with a \002 delimiter. This will + # allow the same index entry to appear in more than one index. + $index_key = $cur_key . "\002$index_name"; + + $index{$index_key} .= ""; + + # + # RRM, 15 June 1996 + # if there is no printable key, but one is known from + # a previous index-entry, then use it. + # + if (!($printable_key) && ($printable_key{$index_key})) + { $printable_key = $printable_key{$index_key}; } +# if (!($printable_key) && ($printable_key{$cur_key})) +# { $printable_key = $printable_key{$cur_key}; } + # + # do not overwrite the printable_key if it contains an anchor + # + if (!($printable_key{$index_key} =~ /tex2html_anchor_mark/ )) + { $printable_key{$index_key} = $printable_key || $key; } +# if (!($printable_key{$cur_key} =~ /tex2html_anchor_mark/ )) +# { $printable_key{$cur_key} = $printable_key || $key; } + + $super_key = $cur_key; + } + # + # RRM + # page-ranges, from |( and |) and |see + # + if ($pageref) { + if ($pageref eq "\(" ) { + $pageref = ''; + $next .= " from "; + } elsif ($pageref eq "\)" ) { + $pageref = ''; + local($next) = $index{$index_key}; +# local($next) = $index{$cur_key}; + # $next =~ s/[\|] *$//; + $next =~ s/(\n )?\| $//; + $index{$index_key} = "$next to "; +# $index{$cur_key} = "$next to "; + } + } + + if ($pageref) { + $pageref =~ s/\s*$//g; # remove trailing spaces + if (!$pageref) { $pageref = ' ' } + $pageref =~ s/see/see <\/i> /g; + # + # RRM: 27 Dec 1996 + # check if $pageref corresponds to a style command. + # If so, apply it to the $words. 
+ # + local($tmp) = "do_cmd_$pageref"; + if (defined &$tmp) { + $words = &$tmp("<#0#>$words<#0#>"); + $words =~ s/<\#[^\#]*\#>//go; + $pageref = ''; + } + } + # + # RRM: 25 May 1996 + # any \label in the pageref section will have already + # created a label where the \index occurred. + # This has to be removed, so that the desired label + # will be found on the Index page instead. + # + if ($pageref) { + if ($pageref =~ /tex2html_anchor_mark/ ) { + $pageref =~ s/><\/A>
$cross_ref_mark/ + $pageref =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/ + do { ($label,$id) = ($1,$2); + $ref_files{$label} = ''; # ???? RRM + if ($index_labels{$label}) { $ref_label = ''; } + else { $ref_label = $external_labels{$label} + unless ($ref_label = $ref_files{$label}); + } + '"' . "$ref_label#$label" . '">' . &get_ref_mark($label,$id)}/geo; + } + $pageref =~ s/<\#[^\#>]*\#>//go; + + if ($pageref eq ' ') { $index{$index_key}='@'; } + else { $index{$index_key} .= $pageref . "\n | "; } + } else { + local($thisref) = &make_named_href('',"$CURRENT_FILE#$br_id",$words); + $thisref =~ s/\n//g; + $index{$index_key} .= $thisref."\n | "; + } + #print "\nREF: $sort_key : $index_key :$index{$index_key}"; + + #join('',"$anchor_invisible_mark<\/A>",$_); + + "$anchor_invisible_mark<\/A>"; +} + + +# KEC. -- Copied from makeidx.perl, then modified to do multiple indices. +# Feeds the index entries to the output. This is called for each index to be built. +# +# Generates a list of lookup keys for index entries, from both %printable_keys +# and %index keys. +# Sorts the keys according to index-sorting rules. +# Removes keys with a 0x01 token. (duplicates?) +# Builds a string to go to the index file. +# Adds the index entries to the string if they belong in this index. +# Keeps track of which index is being worked on, so only the proper entries +# are included. +# Places the index just built in to the output at the proper place. +{ my $index_number = 0; +sub add_real_idx { + print "\nDoing the index ... Index Number $index_number\n"; + local($key, @keys, $next, $index, $old_key, $old_html); + my ($idx_ref,$keyref); + # RRM, 15.6.96: index constructed from %printable_key, not %index + @keys = keys %printable_key; + + while (/$idx_mark/) { + # Get the index reference from what follows the $idx_mark and + # remove it from the string. 
+ s/$idxmark\002(.*?)\002/$idxmark/; + $idx_ref = $1; + $index = ''; + # include non- makeidx index-entries + foreach $key (keys %index) { + next if $printable_key{$key}; + $old_key = $key; + if ($key =~ s/###(.*)$//) { + next if $printable_key{$key}; + push (@keys, $key); + $printable_key{$key} = $key; + if ($index{$old_key} =~ /HREF="([^"]*)"/i) { + $old_html = $1; + $old_html =~ /$dd?([^#\Q$dd\E]*)#/; + $old_html = $1; + } else { $old_html = '' } + $index{$key} = $index{$old_key} . $old_html."\n | "; + }; + } + @keys = sort makeidx_keysort @keys; + @keys = grep(!/\001/, @keys); + my $cnt = 0; + foreach $key (@keys) { + my ($keyref) = $key =~ /.*\002(.*)/; + next unless ($idx_ref eq $keyref); # KEC. + $index .= &add_idx_key($key); + $cnt++; + } + print "$cnt Index Entries Added\n"; + $index = '
'.$index unless ($index =~ /^\s*/); + $index_number++; # KEC. + if ($SHORT_INDEX) { + print "(compact version with Legend)"; + local($num) = ( $index =~ s/\ 50 ) { + s/$idx_mark/$preindex
\n$index\n<\/DL>$preindex/o; + } else { + s/$idx_mark/$preindex
\n$index\n<\/DL>/o; + } + } else { + s/$idx_mark/
\n$index\n<\/DL>/o; } + } +} +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# The bibliography and the index should be treated as separate sections +# in their own HTML files. The \bibliography{} command acts as a sectioning command +# that has the desired effect. But when the bibliography is constructed +# manually using the thebibliography environment, or when using the +# theindex environment it is not possible to use the normal sectioning +# mechanism. This subroutine inserts a \bibliography{} or a dummy +# \textohtmlindex command just before the appropriate environments +# to force sectioning. +sub add_bbl_and_idx_dummy_commands { + local($id) = $global{'max_id'}; + + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$bbl_cnt++; $1/eg; + ## if ($bbl_cnt == 1) { + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$id++; "\\bibliography$O$id$C$O$id$C $1"/geo; + #} + $global{'max_id'} = $id; + # KEC. Modified to global substitution to place multiple index tokens. + s/[\\]begin\s*($O\d+$C)\s*theindex/\\textohtmlindex$1/go; + # KEC. Modified to pick up the optional argument to \printindex + s/[\\]printindex\s*(\[.*?\])?/ + do { (defined $1) ? "\\textohtmlindex $1" : "\\textohtmlindex []"; } /ego; + &lib_add_bbl_and_idx_dummy_commands() if defined(&lib_add_bbl_and_idx_dummy_commands); +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# For each textohtmlindex mark found, determine the index titles and headers. +# We place the index ref in the header so the proper index can be generated later. +# For the default index, the index ref is blank. +# +# One problem is that this routine is called twice.. Once for processing the +# command as originally seen, and once for processing the command when +# doing the name for the index file. We can detect that by looking at the +# id numbers (or ref) surrounding the \theindex command, and not incrementing +# index_number unless a new id (or ref) is seen. 
This has the side effect of +# having to unconventionally start the index_number at -1. But it works. +# +# Gets the title from the list of indices. +# If this is the first index, save the title in $first_idx_file. This is what's referenced +# in the navigation buttons. +# Increment the index_number for next time. +# If the indexname command is defined or a newcommand defined for indexname, do it. +# Save the index TITLE in the toc +# Save the first_idx_file into the idxfile. This goes into the nav buttons. +# Build index_labels if needed. +# Create the index headings and put them in the output stream. + +{ my $index_number = 0; # Will be incremented before use. + my $first_idx_file; # Static + my $no_increment = 0; + +sub do_cmd_textohtmlindex { + local($_) = @_; + my ($idxref,$idxnum,$index_name); + + # We get called from make_name with the first argument = "\001noincrement". This is a sign + # to not increment $index_number the next time we are called. We get called twice, once + # my make_name and once by process_command. Unfortunately, make_name calls us just to set the name + # but doesn't use the result so we get called a second time by process_command. This works fine + # except for cases where there are multiple indices except if they aren't named, which is the case + # when the index is inserted by an include command in latex. In these cases we are only able to use + # the index number to decide which index to draw from, and we don't know how to increment that index + # number if we get called a variable number of times for the same index, as is the case between + # making html (one output file) and web (multiple output files) output formats. + if (/\001noincrement/) { + $no_increment = 1; + return; + } + + # Remove (but save) the index reference + s/^\s*\[(.*?)\]/{$idxref = $1; "";}/e; + + # If we have an $idxref, the index name was specified. In this case, we have all the + # information we need to carry on. 
Otherwise, we need to get the idxref + # from the $index_number and set the name to "Index". + if ($idxref) { + $index_name = $indices{'title'}{$idxref}; + } else { + if (defined ($idxref = $indices{'newcmdorder'}->[$index_number])) { + $index_name = $indices{'title'}{$idxref}; + } else { + $idxref = ''; + $index_name = "Index"; + } + } + + $idx_title = "Index"; # The name displayed in the nav bar text. + + # Only set $idxfile if we are at the first index. This will point the + # navigation panel to the first index file rather than the last. + $first_idx_file = $CURRENT_FILE if ($index_number == 0); + $idxfile = $first_idx_file; # Pointer for the Index button in the nav bar. + $toc_sec_title = $index_name; # Index link text in the toc. + $TITLE = $toc_sec_title; # Title for this index, from which its filename is built. + if (%index_labels) { &make_index_labels(); } + if (($SHORT_INDEX) && (%index_segment)) { &make_preindex(); } + else { $preindex = ''; } + local $idx_head = $section_headings{'textohtmlindex'}; + local($heading) = join('' + , &make_section_heading($TITLE, $idx_head) + , $idx_mark, "\002", $idxref, "\002" ); + local($pre,$post) = &minimize_open_tags($heading); + $index_number++ unless ($no_increment); + $no_increment = 0; + join('',"
\n" , $pre, $_); +} +} + +# Returns an index key, given the key passed as the first argument. +# Not modified for multiple indices. +sub add_idx_key { + local($key) = @_; + local($index, $next); + if (($index{$key} eq '@' )&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { $index .= "

\n
".&print_key."\n
"; } + else { $index .= "

\n
".&print_key."\n
"; } + } elsif (($index{$key})&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { + $next = "
".&print_key."\n : ". &print_idx_links; + } else { + $next = "
".&print_key."\n
". &print_idx_links; + } + $index .= $next."\n"; + $index_printed{$key} = 1; + } + + if ($sub_index{$key}) { + local($subkey, @subkeys, $subnext, $subindex); + @subkeys = sort(split("\004", $sub_index{$key})); + if ($SHORT_INDEX) { + $index .= "
".&print_key unless $index_printed{$key}; + $index .= "
\n"; + } else { + $index .= "
".&print_key."\n
" unless $index_printed{$key}; + $index .= "
\n"; + } + foreach $subkey (@subkeys) { + $index .= &add_sub_idx_key($subkey) unless ($index_printed{$subkey}); + } + $index .= "
\n"; + } + return $index; +} + +1; # Must be present as the last line. diff --git a/docs/manuals/en/concepts/latex2html-init.pl b/docs/manuals/en/concepts/latex2html-init.pl new file mode 100644 index 00000000..14b5c319 --- /dev/null +++ b/docs/manuals/en/concepts/latex2html-init.pl @@ -0,0 +1,10 @@ +# This file serves as a place to put initialization code and constants to +# affect the behavior of latex2html for generating the bacula manuals. + +# $LINKPOINT specifies what filename to use to link to when creating +# index.html. Not that this is a hard link. +$LINKPOINT='"$OVERALL_TITLE"'; + + +# The following must be the last line of this file. +1; diff --git a/docs/manuals/en/concepts/lesser.tex b/docs/manuals/en/concepts/lesser.tex new file mode 100644 index 00000000..6fcc81ed --- /dev/null +++ b/docs/manuals/en/concepts/lesser.tex @@ -0,0 +1,573 @@ +%% +%% + +\section*{GNU Lesser General Public License} +\label{LesserChapter} +\index[general]{GNU Lesser General Public License } +\index[general]{License!GNU Lesser General Public } + +\elink{image of a Philosophical GNU} +{\url{http://www.gnu.org/graphics/philosophicalgnu.html}} [ +\elink{English}{\url{http://www.gnu.org/copyleft/lesser.html}} | +\elink{Japanese}{\url{http://www.gnu.org/copyleft/lesser.ja.html}} ] + +\begin{itemize} +\item + \elink{Why you shouldn't use the Lesser GPL for your next + library}{\url{http://www.gnu.org/philosophy/why-not-lgpl.html}} +\item + \elink{What to do if you see a possible LGPL + violation}{\url{http://www.gnu.org/copyleft/gpl-violation.html}} +\item + \elink{Translations of the LGPL} +{\url{http://www.gnu.org/copyleft/copyleft.html\#translationsLGPL}} +\item The GNU Lesser General Public License as a + \elink{text file}{\url{http://www.gnu.org/copyleft/lesser.txt}} +\item The GNU Lesser General Public License as a + \elink{Texinfo}{\url{http://www.gnu.org/copyleft/lesser.texi}} file + \end{itemize} + + +This GNU Lesser General Public License counts as the successor of the 
GNU +Library General Public License. For an explanation of why this change was +necessary, read the +\elink{Why you shouldn't use the Lesser GPL for your next +library}{\url{http://www.gnu.org/philosophy/why-not-lgpl.html}} article. + +\section{Table of Contents} +\index[general]{Table of Contents } +\index[general]{Contents!Table of } + +\begin{itemize} +\item + \label{TOC12} + \ilink{GNU LESSER GENERAL PUBLIC LICENSE}{SEC12} + +\begin{itemize} +\item + \label{TOC23} + \ilink{Preamble}{SEC23} +\item + \label{TOC34} + \ilink{TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND +MODIFICATION}{SEC34} +\item + \label{TOC45} + \ilink{How to Apply These Terms to Your New Libraries}{SEC45} +\end{itemize} + +\end{itemize} + + +\section{GNU LESSER GENERAL PUBLIC LICENSE} +\label{SEC12} +\index[general]{LICENSE!GNU LESSER GENERAL PUBLIC } +\index[general]{GNU LESSER GENERAL PUBLIC LICENSE } + +Version 2.1, February 1999 + +\footnotesize +\begin{verbatim} +Copyright (C) 1991, 1999 Free Software Foundation, Inc. +51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +Everyone is permitted to copy and distribute verbatim copies +of this license document, but changing it is not allowed. +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] +\end{verbatim} +\normalsize + +\section{Preamble} +\label{SEC23} +\index[general]{Preamble } + +The licenses for most software are designed to take away your freedom to share +and change it. By contrast, the GNU General Public Licenses are intended to +guarantee your freedom to share and change free software\verb:--:to make sure the +software is free for all its users. + +This license, the Lesser General Public License, applies to some specially +designated software packages\verb:--:typically libraries\verb:--:of the Free Software +Foundation and other authors who decide to use it. 
You can use it too, but we +suggest you first think carefully about whether this license or the ordinary +General Public License is the better strategy to use in any particular case, +based on the explanations below. + +When we speak of free software, we are referring to freedom of use, not price. +Our General Public Licenses are designed to make sure that you have the +freedom to distribute copies of free software (and charge for this service if +you wish); that you receive source code or can get it if you want it; that you +can change the software and use pieces of it in new free programs; and that +you are informed that you can do these things. + +To protect your rights, we need to make restrictions that forbid distributors +to deny you these rights or to ask you to surrender these rights. These +restrictions translate to certain responsibilities for you if you distribute +copies of the library or if you modify it. + +For example, if you distribute copies of the library, whether gratis or for a +fee, you must give the recipients all the rights that we gave you. You must +make sure that they, too, receive or can get the source code. If you link +other code with the library, you must provide complete object files to the +recipients, so that they can relink them with the library after making changes +to the library and recompiling it. And you must show them these terms so they +know their rights. + +We protect your rights with a two-step method: (1) we copyright the library, +and (2) we offer you this license, which gives you legal permission to copy, +distribute and/or modify the library. + +To protect each distributor, we want to make it very clear that there is no +warranty for the free library. Also, if the library is modified by someone +else and passed on, the recipients should know that what they have is not the +original version, so that the original author's reputation will not be +affected by problems that might be introduced by others. 
+ +Finally, software patents pose a constant threat to the existence of any free +program. We wish to make sure that a company cannot effectively restrict the +users of a free program by obtaining a restrictive license from a patent +holder. Therefore, we insist that any patent license obtained for a version of +the library must be consistent with the full freedom of use specified in this +license. + +Most GNU software, including some libraries, is covered by the ordinary GNU +General Public License. This license, the GNU Lesser General Public License, +applies to certain designated libraries, and is quite different from the +ordinary General Public License. We use this license for certain libraries in +order to permit linking those libraries into non-free programs. + +When a program is linked with a library, whether statically or using a shared +library, the combination of the two is legally speaking a combined work, a +derivative of the original library. The ordinary General Public License +therefore permits such linking only if the entire combination fits its +criteria of freedom. The Lesser General Public License permits more lax +criteria for linking other code with the library. + +We call this license the "Lesser" General Public License because it does +Less to protect the user's freedom than the ordinary General Public License. +It also provides other free software developers Less of an advantage over +competing non-free programs. These disadvantages are the reason we use the +ordinary General Public License for many libraries. However, the Lesser +license provides advantages in certain special circumstances. + +For example, on rare occasions, there may be a special need to encourage the +widest possible use of a certain library, so that it becomes a de-facto +standard. To achieve this, non-free programs must be allowed to use the +library. A more frequent case is that a free library does the same job as +widely used non-free libraries. 
In this case, there is little to gain by +limiting the free library to free software only, so we use the Lesser General +Public License. + +In other cases, permission to use a particular library in non-free programs +enables a greater number of people to use a large body of free software. For +example, permission to use the GNU C Library in non-free programs enables many +more people to use the whole GNU operating system, as well as its variant, the +GNU/Linux operating system. + +Although the Lesser General Public License is Less protective of the users' +freedom, it does ensure that the user of a program that is linked with the +Library has the freedom and the wherewithal to run that program using a +modified version of the Library. + +The precise terms and conditions for copying, distribution and modification +follow. Pay close attention to the difference between a "work based on the +library" and a "work that uses the library". The former contains code +derived from the library, whereas the latter must be combined with the library +in order to run. + +\section{TERMS AND CONDITIONS} +\label{SEC34} +\index[general]{CONDITIONS!TERMS AND } +\index[general]{TERMS AND CONDITIONS } + +TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + +{\bf 0.} This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or other +authorized party saying it may be distributed under the terms of this Lesser +General Public License (also called "this License"). Each licensee is +addressed as "you". + +A "library" means a collection of software functions and/or data prepared so +as to be conveniently linked with application programs (which use some of +those functions and data) to form executables. + +The "Library", below, refers to any such software library or work which has +been distributed under these terms. 
A "work based on the Library" means +either the Library or any derivative work under copyright law: that is to say, +a work containing the Library or a portion of it, either verbatim or with +modifications and/or translated straightforwardly into another language. +(Hereinafter, translation is included without limitation in the term +"modification".) + +"Source code" for a work means the preferred form of the work for making +modifications to it. For a library, complete source code means all the source +code for all modules it contains, plus any associated interface definition +files, plus the scripts used to control compilation and installation of the +library. + +Activities other than copying, distribution and modification are not covered +by this License; they are outside its scope. The act of running a program +using the Library is not restricted, and output from such a program is covered +only if its contents constitute a work based on the Library (independent of +the use of the Library in a tool for writing it). Whether that is true depends +on what the Library does and what the program that uses the Library does. + +{\bf 1.} You may copy and distribute verbatim copies of the Library's complete +source code as you receive it, in any medium, provided that you conspicuously +and appropriately publish on each copy an appropriate copyright notice and +disclaimer of warranty; keep intact all the notices that refer to this License +and to the absence of any warranty; and distribute a copy of this License +along with the Library. + +You may charge a fee for the physical act of transferring a copy, and you may +at your option offer warranty protection in exchange for a fee. 
+ +{\bf 2.} You may modify your copy or copies of the Library or any portion of +it, thus forming a work based on the Library, and copy and distribute such +modifications or work under the terms of Section 1 above, provided that you +also meet all of these conditions: + +\begin{itemize} +\item {\bf a)} The modified work must itself be a software library. +\item {\bf b)} You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. +\item {\bf c)} You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. +\item {\bf d)} If a facility in the modified Library refers to a function or + a table of data to be supplied by an application program that uses the + facility, other than as an argument passed when the facility is invoked, then +you must make a good faith effort to ensure that, in the event an application +does not supply such function or table, the facility still operates, and +performs whatever part of its purpose remains meaningful. + +(For example, a function in a library to compute square roots has a purpose +that is entirely well-defined independent of the application. Therefore, +Subsection 2d requires that any application-supplied function or table used +by this function must be optional: if the application does not supply it, the +square root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If identifiable +sections of that work are not derived from the Library, and can be reasonably +considered independent and separate works in themselves, then this License, +and its terms, do not apply to those sections when you distribute them as +separate works. 
But when you distribute the same sections as part of a whole +which is a work based on the Library, the distribution of the whole must be +on the terms of this License, whose permissions for other licensees extend to +the entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest your +rights to work written entirely by you; rather, the intent is to exercise the +right to control the distribution of derivative or collective works based on +the Library. + +In addition, mere aggregation of another work not based on the Library with +the Library (or with a work based on the Library) on a volume of a storage or +distribution medium does not bring the other work under the scope of this +License. +\end{itemize} + +{\bf 3.} You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do this, +you must alter all the notices that refer to this License, so that they refer +to the ordinary GNU General Public License, version 2, instead of to this +License. (If a newer version than version 2 of the ordinary GNU General Public +License has appeared, then you can specify that version instead if you wish.) +Do not make any other change in these notices. + +Once this change is made in a given copy, it is irreversible for that copy, so +the ordinary GNU General Public License applies to all subsequent copies and +derivative works made from that copy. + +This option is useful when you wish to copy part of the code of the Library +into a program that is not a library. 
+ +{\bf 4.} You may copy and distribute the Library (or a portion or derivative +of it, under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you accompany it with the complete +corresponding machine-readable source code, which must be distributed under +the terms of Sections 1 and 2 above on a medium customarily used for software +interchange. + +If distribution of object code is made by offering access to copy from a +designated place, then offering equivalent access to copy the source code from +the same place satisfies the requirement to distribute the source code, even +though third parties are not compelled to copy the source along with the +object code. + +{\bf 5.} A program that contains no derivative of any portion of the Library, +but is designed to work with the Library by being compiled or linked with it, +is called a "work that uses the Library". Such a work, in isolation, is not +a derivative work of the Library, and therefore falls outside the scope of +this License. + +However, linking a "work that uses the Library" with the Library creates an +executable that is a derivative of the Library (because it contains portions +of the Library), rather than a "work that uses the library". The executable +is therefore covered by this License. Section 6 states terms for distribution +of such executables. + +When a "work that uses the Library" uses material from a header file that is +part of the Library, the object code for the work may be a derivative work of +the Library even though the source code is not. Whether this is true is +especially significant if the work can be linked without the Library, or if +the work is itself a library. The threshold for this to be true is not +precisely defined by law. 
+ +If such an object file uses only numerical parameters, data structure layouts +and accessors, and small macros and small inline functions (ten lines or less +in length), then the use of the object file is unrestricted, regardless of +whether it is legally a derivative work. (Executables containing this object +code plus portions of the Library will still fall under Section 6.) + +Otherwise, if the work is a derivative of the Library, you may distribute the +object code for the work under the terms of Section 6. Any executables +containing that work also fall under Section 6, whether or not they are linked +directly with the Library itself. + +{\bf 6.} As an exception to the Sections above, you may also combine or link a +"work that uses the Library" with the Library to produce a work containing +portions of the Library, and distribute that work under terms of your choice, +provided that the terms permit modification of the work for the customer's own +use and reverse engineering for debugging such modifications. + +You must give prominent notice with each copy of the work that the Library is +used in it and that the Library and its use are covered by this License. You +must supply a copy of this License. If the work during execution displays +copyright notices, you must include the copyright notice for the Library among +them, as well as a reference directing the user to the copy of this License. +Also, you must do one of these things: + +\begin{itemize} +\item {\bf a)} Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever changes were + used in the work (which must be distributed under Sections 1 and 2 above); +and, if the work is an executable linked with the Library, with the complete +machine-readable "work that uses the Library", as object code and/or source +code, so that the user can modify the Library and then relink to produce a +modified executable containing the modified Library. 
(It is understood that +the user who changes the contents of definitions files in the Library will +not necessarily be able to recompile the application to use the modified +definitions.) +\item {\bf b)} Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a copy of the + library already present on the user's computer system, rather than copying +library functions into the executable, and (2) will operate properly with a +modified version of the library, if the user installs one, as long as the +modified version is interface-compatible with the version that the work was +made with. +\item {\bf c)} Accompany the work with a written offer, valid for at least + three years, to give the same user the materials specified in Subsection 6a, + above, for a charge no more than the cost of performing this distribution. +\item {\bf d)} If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above specified + materials from the same place. +\item {\bf e)} Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + \end{itemize} + +For an executable, the required form of the "work that uses the Library" +must include any data and utility programs needed for reproducing the +executable from it. However, as a special exception, the materials to be +distributed need not include anything that is normally distributed (in either +source or binary form) with the major components (compiler, kernel, and so on) +of the operating system on which the executable runs, unless that component +itself accompanies the executable. + +It may happen that this requirement contradicts the license restrictions of +other proprietary libraries that do not normally accompany the operating +system. 
Such a contradiction means you cannot use both them and the Library +together in an executable that you distribute. + +{\bf 7.} You may place library facilities that are a work based on the Library +side-by-side in a single library together with other library facilities not +covered by this License, and distribute such a combined library, provided that +the separate distribution of the work based on the Library and of the other +library facilities is otherwise permitted, and provided that you do these two +things: + +\begin{itemize} +\item {\bf a)} Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library facilities. This must + be distributed under the terms of the Sections above. +\item {\bf b)} Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining where to find + the accompanying uncombined form of the same work. +\end{itemize} + +{\bf 8.} You may not copy, modify, sublicense, link with, or distribute the +Library except as expressly provided under this License. Any attempt otherwise +to copy, modify, sublicense, link with, or distribute the Library is void, and +will automatically terminate your rights under this License. However, parties +who have received copies, or rights, from you under this License will not have +their licenses terminated so long as such parties remain in full compliance. + +{\bf 9.} You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or distribute +the Library or its derivative works. These actions are prohibited by law if +you do not accept this License. Therefore, by modifying or distributing the +Library (or any work based on the Library), you indicate your acceptance of +this License to do so, and all its terms and conditions for copying, +distributing or modifying the Library or works based on it. 
+ +{\bf 10.} Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the original +licensor to copy, distribute, link with or modify the Library subject to these +terms and conditions. You may not impose any further restrictions on the +recipients' exercise of the rights granted herein. You are not responsible for +enforcing compliance by third parties with this License. + +{\bf 11.} If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or otherwise) +that contradict the conditions of this License, they do not excuse you from +the conditions of this License. If you cannot distribute so as to satisfy +simultaneously your obligations under this License and any other pertinent +obligations, then as a consequence you may not distribute the Library at all. +For example, if a patent license would not permit royalty-free redistribution +of the Library by all those who receive copies directly or indirectly through +you, then the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, and +the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any patents or +other property right claims or to contest validity of any such claims; this +section has the sole purpose of protecting the integrity of the free software +distribution system which is implemented by public license practices. 
Many +people have made generous contributions to the wide range of software +distributed through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing to +distribute software through any other system and a licensee cannot impose that +choice. + +This section is intended to make thoroughly clear what is believed to be a +consequence of the rest of this License. + +{\bf 12.} If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the original +copyright holder who places the Library under this License may add an explicit +geographical distribution limitation excluding those countries, so that +distribution is permitted only in or among countries not thus excluded. In +such case, this License incorporates the limitation as if written in the body +of this License. + +{\bf 13.} The Free Software Foundation may publish revised and/or new versions +of the Lesser General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and "any later +version", you have the option of following the terms and conditions either of +that version or of any later version published by the Free Software +Foundation. If the Library does not specify a license version number, you may +choose any version ever published by the Free Software Foundation. + +{\bf 14.} If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, write to +the author to ask for permission. For software which is copyrighted by the +Free Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. 
Our decision will be guided by the two goals of +preserving the free status of all derivatives of our free software and of +promoting the sharing and reuse of software generally. + +{\bf NO WARRANTY} + +{\bf 15.} BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE +THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR +IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO +THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY +PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR +CORRECTION. + +{\bf 16.} IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO +LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR +THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + +END OF TERMS AND CONDITIONS + +\section{How to Apply These Terms to Your New Libraries} +\label{SEC45} +\index[general]{Libraries!How to Apply These Terms to Your New } +\index[general]{How to Apply These Terms to Your New Libraries } + + +If you develop a new library, and you want it to be of the greatest possible +use to the public, we recommend making it free software that everyone can +redistribute and change. 
You can do so by permitting redistribution under +these terms (or, alternatively, under the terms of the ordinary General Public +License). + +To apply these terms, attach the following notices to the library. It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + +\footnotesize +\begin{verbatim} +{\it one line to give the library's name and an idea of what it does.} +Copyright (C) {\it year} {\it name of author} +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 +USA +\end{verbatim} +\normalsize + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + +\footnotesize +\begin{verbatim} +Yoyodyne, Inc., hereby disclaims all copyright interest in +the library "Frob" (a library for tweaking knobs) written +by James Random Hacker. +{\it signature of Ty Coon}, 1 April 1990 +Ty Coon, President of Vice +\end{verbatim} +\normalsize + +That's all there is to it! +Return to +\elink{GNU's home page}{\url{http://www.gnu.org/home.html}}. 
+ +FSF \& GNU inquiries \& questions to +\elink{gnu@gnu.org}{mailto:gnu@gnu.org}. Other +\elink{ways to contact}{\url{http://www.gnu.org/home.html\#ContactInfo}} the FSF. + +Comments on these web pages to +\elink{webmasters@www.gnu.org}{mailto:webmasters@www.gnu.org}, send other +questions to +\elink{gnu@gnu.org}{mailto:gnu@gnu.org}. + +Copyright notice above. +Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +Boston, MA 02110-1301 USA +USA + +Updated: 27 Nov 2000 paulv diff --git a/docs/manuals/en/concepts/license.tex b/docs/manuals/en/concepts/license.tex new file mode 100644 index 00000000..b50269f2 --- /dev/null +++ b/docs/manuals/en/concepts/license.tex @@ -0,0 +1,115 @@ +%% +%% + +\chapter{Bacula Copyright, Trademark, and Licenses} +\label{LicenseChapter} +\index[general]{Licenses!Bacula Copyright Trademark} +\index[general]{Bacula Copyright, Trademark, and Licenses} + +There are a number of different licenses that are used in Bacula. +If you have a printed copy of this manual, the details of each of +the licenses referred to in this chapter can be found in the +online version of the manual at +\elink{http://www.bacula.org}{\url{http://www.bacula.org}}. + +\section{FDL} +\index[general]{FDL } + +The GNU Free Documentation License (FDL) is used for this manual, +which is a free and open license. This means that you may freely +reproduce it and even make changes to it. However, rather than +distribute your own version of this manual, we would much prefer +if you would send any corrections or changes to the Bacula project. + +The most recent version of the manual can always be found online +at \elink{http://www.bacula.org}{\url{http://www.bacula.org}}. + +% TODO: Point to appendix that has it + + +\section{GPL} +\index[general]{GPL } + +The vast bulk of the source code is released under the +\ilink{GNU General Public License version 2.}{GplChapter}. 
+ +Most of this code is copyrighted: Copyright \copyright 2000-2007 +Free Software Foundation Europe e.V. + +Portions may be copyrighted by other people (ATT, the Free Software +Foundation, ...). These files are released under the GPL license. + +\section{LGPL} +\index[general]{LGPL } + +Some of the Bacula library source code is released under the +\ilink{GNU Lesser General Public License.}{LesserChapter} This +permits third parties to use these parts of our code in their proprietary +programs to interface to Bacula. + +\section{Public Domain} +\index[general]{Domain!Public } +\index[general]{Public Domain } + +Some of the Bacula code, or code that Bacula references, has been released +to the public domain. E.g. md5.c, SQLite. + +\section{Trademark} +\index[general]{Trademark } + +Bacula\raisebox{.6ex}{\textsuperscript{\textregistered}} is a registered +trademark of John Walker. + +We have trademarked the Bacula name to ensure that any program using the +name Bacula will be exactly compatible with the program that we have +released. The use of the name Bacula is restricted to software systems +that agree exactly with the program presented here. + +\section{Fiduciary License Agreement} +\index[general]{Fiduciary License Agreement } +Developers who have contributed significant changes to the Bacula code +should have signed a Fiduciary License Agreement (FLA), which +guarantees them the right to use the code they have developed, and also +ensures that the Free Software Foundation Europe (and thus the Bacula +project) has the rights to the code. 
This Fiduciary License Agreement +is found on the Bacula web site at: + +\elink{http://www.bacula.org/FLA-bacula.en.pdf}{\url{http://www.bacula.org/FLA-bacula.en.pdf}} + +and should be filled out then sent to: + +\begin{quote} + Free Software Foundation Europe \\ + Freedom Task Force \\ + Sumatrastrasse 25 \\ + 8006 Z\"{u}rich \\ + Switzerland \\ +\end{quote} + +Please note that the above address is different from the officially +registered office mentioned in the document. When you send in such a +complete document, please notify me: kern at sibbald dot com. + + +\section{Disclaimer} +\index[general]{Disclaimer } + +NO WARRANTY + +BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE +PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE +STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE +PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND +PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, +YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + +IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY +COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE +PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE +OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR +DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR +A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH +HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 
diff --git a/docs/manuals/en/concepts/migration.tex b/docs/manuals/en/concepts/migration.tex new file mode 100644 index 00000000..b0d49df2 --- /dev/null +++ b/docs/manuals/en/concepts/migration.tex @@ -0,0 +1,445 @@ + +\chapter{Migration} +\label{MigrationChapter} +\index[general]{Migration} + +The term Migration, as used in the context of Bacula, means moving data from +one Volume to another. In particular it refers to a Job (similar to a backup +job) that reads data that was previously backed up to a Volume and writes +it to another Volume. As part of this process, the File catalog records +associated with the first backup job are purged. In other words, Migration +moves Bacula Job data from one Volume to another by reading the Job data +from the Volume it is stored on, writing it to a different Volume in a +different Pool, and then purging the database records for the first Job. + +The selection process for which Job or Jobs are migrated +can be based on quite a number of different criteria such as: +\begin{itemize} +\item a single previous Job +\item a Volume +\item a Client +\item a regular expression matching a Job, Volume, or Client name +\item the time a Job has been on a Volume +\item high and low water marks (usage or occupation) of a Pool +\item Volume size +\end{itemize} + +The details of these selection criteria will be defined below. + +To run a Migration job, you must first define a Job resource very similar +to a Backup Job but with {\bf Type = Migrate} instead of {\bf Type = +Backup}. One of the key points to remember is that the Pool that is +specified for the migration job is the only pool from which jobs will +be migrated, with one exception noted below. In addition, the Pool to +which the selected Job or Jobs will be migrated is defined by the {\bf +Next Pool = ...} in the Pool resource specified for the Migration Job. + +Bacula permits pools to contain Volumes with different Media Types.
+However, when doing migration, this is a very undesirable condition. For +migration to work properly, you should use pools containing only Volumes of +the same Media Type for all migration jobs. + +The migration job normally is either manually started or starts +from a Schedule much like a backup job. It searches +for a previous backup Job or Jobs that match the parameters you have +specified in the migration Job resource, primarily a {\bf Selection Type} +(detailed a bit later). Then for +each previous backup JobId found, the Migration Job will run a new Job which +copies the old Job data from the previous Volume to a new Volume in +the Migration Pool. It is possible that no prior Jobs are found for +migration, in which case, the Migration job will simply terminate having +done nothing, but normally at a minimum, three jobs are involved during a +migration: + +\begin{itemize} +\item The currently running Migration control Job. This is only + a control job for starting the migration child jobs. +\item The previous Backup Job (already run). The File records + for this Job are purged if the Migration job successfully + terminates. The original data remains on the Volume until + it is recycled and rewritten. +\item A new Migration Backup Job that moves the data from the + previous Backup job to the new Volume. If you subsequently + do a restore, the data will be read from this Job. +\end{itemize} + +If the Migration control job finds a number of JobIds to migrate (e.g. +it is asked to migrate one or more Volumes), it will start one new +migration backup job for each JobId found on the specified Volumes. +Please note that Migration doesn't scale too well since Migrations are +done on a Job by Job basis. Thus if you select a very large volume or +a number of volumes for migration, you may have a large number of +Jobs that start. Because each job must read the same Volume, they will +run consecutively (not simultaneously).
+ +\section{Migration Job Resource Directives} + +The following directives can appear in a Director's Job resource, and they +are used to define a Migration job. + +\begin{description} +\item [Pool = \lt{}Pool-name\gt{}] The Pool specified in the Migration + control Job is not a new directive for the Job resource, but it is + particularly important because it determines what Pool will be examined for + finding JobIds to migrate. The exception to this is when {\bf Selection + Type = SQLQuery}, in which case no Pool is used, unless you + specifically include it in the SQL query. Note, the Pool resource + referenced must contain a {\bf Next Pool = ...} directive to define + the Pool to which the data will be migrated. + +\item [Type = Migrate] + {\bf Migrate} is a new type that defines the job that is run as being a + Migration Job. A Migration Job is a sort of control job and does not have + any Files associated with it, and in that sense they are more or less like + an Admin job. Migration jobs simply check to see if there is anything to + Migrate then possibly start and control new Backup jobs to migrate the data + from the specified Pool to another Pool. + +\item [Selection Type = \lt{}Selection-type-keyword\gt{}] + The \lt{}Selection-type-keyword\gt{} determines how the migration job + will go about selecting what JobIds to migrate. In most cases, it is + used in conjunction with a {\bf Selection Pattern} to give you fine + control over exactly what JobIds are selected. The possible values + for \lt{}Selection-type-keyword\gt{} are: + \begin{description} + \item [SmallestVolume] This selection keyword selects the volume with the + fewest bytes from the Pool to be migrated. The Pool to be migrated + is the Pool defined in the Migration Job resource. The migration + control job will then start and run one migration backup job for + each of the Jobs found on this Volume. The Selection Pattern, if + specified, is not used. 
+ + \item [OldestVolume] This selection keyword selects the volume with the + oldest last write time in the Pool to be migrated. The Pool to be + migrated is the Pool defined in the Migration Job resource. The + migration control job will then start and run one migration backup + job for each of the Jobs found on this Volume. The Selection + Pattern, if specified, is not used. + + \item [Client] The Client selection type, first selects all the Clients + that have been backed up in the Pool specified by the Migration + Job resource, then it applies the {\bf Selection Pattern} (defined + below) as a regular expression to the list of Client names, giving + a filtered Client name list. All jobs that were backed up for those + filtered (regexed) Clients will be migrated. + The migration control job will then start and run one migration + backup job for each of the JobIds found for those filtered Clients. + + \item [Volume] The Volume selection type, first selects all the Volumes + that have been backed up in the Pool specified by the Migration + Job resource, then it applies the {\bf Selection Pattern} (defined + below) as a regular expression to the list of Volume names, giving + a filtered Volume list. All JobIds that were backed up for those + filtered (regexed) Volumes will be migrated. + The migration control job will then start and run one migration + backup job for each of the JobIds found on those filtered Volumes. + + \item [Job] The Job selection type, first selects all the Jobs (as + defined on the {\bf Name} directive in a Job resource) + that have been backed up in the Pool specified by the Migration + Job resource, then it applies the {\bf Selection Pattern} (defined + below) as a regular expression to the list of Job names, giving + a filtered Job name list. All JobIds that were run for those + filtered (regexed) Job names will be migrated. Note, for a given + Job name, there can be many jobs (JobIds) that ran.
+ The migration control job will then start and run one migration + backup job for each of the Jobs found. + + \item [SQLQuery] The SQLQuery selection type, uses the {\bf Selection + Pattern} as an SQL query to obtain the JobIds to be migrated. + The Selection Pattern must be a valid SELECT SQL statement for your + SQL engine, and it must return the JobId as the first field + of the SELECT. + + \item [PoolOccupancy] This selection type will cause the Migration job + to compute the total size of the specified pool for all Media Types + combined. If it exceeds the {\bf Migration High Bytes} defined in + the Pool, the Migration job will migrate all JobIds beginning with + the oldest Volume in the pool (determined by Last Write time) until + the Pool bytes drop below the {\bf Migration Low Bytes} defined in the + Pool. This calculation should be considered rather approximate because + it is made once by the Migration job before migration is begun, and + thus does not take into account additional data written into the Pool + during the migration. In addition, the calculation of the total Pool + byte size is based on the Volume bytes saved in the Volume (Media) +database + entries. The bytes calculated for Migration are based on the value stored + in the Job records of the Jobs to be migrated. These do not include the + Storage daemon overhead as is in the total Pool size. As a consequence, + normally, the migration will migrate more bytes than strictly necessary. + + \item [PoolTime] The PoolTime selection type will cause the Migration job to + look at the time each JobId has been in the Pool since the job ended. + All Jobs in the Pool longer than the time specified on {\bf Migration Time} + directive in the Pool resource will be migrated. + \end{description} + +\item [Selection Pattern = \lt{}Quoted-string\gt{}] + The Selection Patterns permitted for each Selection-type-keyword are + described above.
+ + For the OldestVolume and SmallestVolume, this + Selection pattern is not used (ignored). + + For the Client, Volume, and Job + keywords, this pattern must be a valid regular expression that will filter + the appropriate item names found in the Pool. + + For the SQLQuery keyword, this pattern must be a valid SELECT SQL statement + that returns JobIds. + +\end{description} + +\section{Migration Pool Resource Directives} + +The following directives can appear in a Director's Pool resource, and they +are used to define a Migration job. + +\begin{description} +\item [Migration Time = \lt{}time-specification\gt{}] + If a PoolTime migration is done, the time specified here in seconds (time + modifiers are permitted -- e.g. hours, ...) will be used. If the + previous Backup Job or Jobs selected have been in the Pool longer than + the specified PoolTime, then they will be migrated. + +\item [Migration High Bytes = \lt{}byte-specification\gt{}] + This directive specifies the number of bytes in the Pool which will + trigger a migration if a {\bf PoolOccupancy} migration selection + type has been specified. The fact that the Pool + usage goes above this level does not automatically trigger a migration + job. However, if a migration job runs and has the PoolOccupancy selection + type set, the Migration High Bytes will be applied. Bacula does not + currently restrict a pool to have only a single Media Type, so you + must keep in mind that if you mix Media Types in a Pool, the results + may not be what you want, as the Pool count of all bytes will be + for all Media Types combined. + +\item [Migration Low Bytes = \lt{}byte-specification\gt{}] + This directive specifies the number of bytes in the Pool which will + stop a migration if a {\bf PoolOccupancy} migration selection + type has been specified and triggered by more than Migration High + Bytes being in the pool. 
In other words, once a migration job + is started with {\bf PoolOccupancy} migration selection and it + determines that there are more than Migration High Bytes, the + migration job will continue to run jobs until the number of + bytes in the Pool drop to or below Migration Low Bytes. + +\item [Next Pool = \lt{}pool-specification\gt{}] + The Next Pool directive specifies the pool to which Jobs will be + migrated. This directive is required to define the Pool into which + the data will be migrated. Without this directive, the migration job + will terminate in error. + +\item [Storage = \lt{}storage-specification\gt{}] + The Storage directive specifies what Storage resource will be used + for all Jobs that use this Pool. It takes precedence over any other + Storage specifications that may have been given such as in the + Schedule Run directive, or in the Job resource. We highly recommend + that you define the Storage resource to be used in the Pool rather + than elsewhere (job, schedule run, ...). +\end{description} + +\section{Important Migration Considerations} +\index[general]{Important Migration Considerations} +\begin{itemize} +\item Each Pool into which you migrate Jobs or Volumes {\bf must} + contain Volumes of only one Media Type. + +\item Migration takes place on a JobId by JobId basis. That is + each JobId is migrated in its entirety and independently + of other JobIds. Once the Job is migrated, it will be + on the new medium in the new Pool, but for the most part, + aside from having a new JobId, it will appear with all the + same characteristics of the original job (start, end time, ...). + The column RealEndTime in the catalog Job table will contain the + time and date that the Migration terminated, and by comparing + it with the EndTime column you can tell whether or not the + job was migrated. The original job is purged of its File + records, and its Type field is changed from "B" to "M" to + indicate that the job was migrated. 
+ +\item Jobs on Volumes will be migrated only if the Volume is + marked Full, Used, or Error. Volumes that are still + marked Append will not be considered for migration. This + prevents Bacula from attempting to read the Volume at + the same time it is writing it. It also reduces other deadlock + situations, as well as avoids the problem that you migrate a + Volume and later find new files appended to that Volume. + +\item As noted above, for the Migration High Bytes, the calculation + of the bytes to migrate is somewhat approximate. + +\item If you keep Volumes of different Media Types in the same Pool, + it is not clear how well migration will work. We recommend only + one Media Type per pool. + +\item It is possible to get into a resource deadlock where Bacula does + not find enough drives to simultaneously read and write all the + Volumes needed to do Migrations. For the moment, you must take + care as all the resource deadlock algorithms are not yet implemented. + +\item Migration is done only when you run a Migration job. If you set a + Migration High Bytes and that number of bytes is exceeded in the Pool + no migration job will automatically start. You must schedule the + migration jobs, and they must run for any migration to take place. + +\item If you migrate a number of Volumes, a very large number of Migration + jobs may start. + +\item Figuring out what jobs will actually be migrated can be a bit complicated + due to the flexibility provided by the regex patterns and the number of + different options. Turning on a debug level of 100 or more will provide + a limited amount of debug information about the migration selection + process. + +\item Bacula currently does only minimal Storage conflict resolution, so you + must take care to ensure that you don't try to read and write to the + same device or Bacula may block waiting to reserve a drive that it + will never find. 
In general, ensure that all your migration + pools contain only one Media Type, and that you always + migrate to pools with different Media Types. + +\item The {\bf Next Pool = ...} directive must be defined in the Pool + referenced in the Migration Job to define the Pool into which the + data will be migrated. + +\item Pay particular attention to the fact that data is migrated on a Job + by Job basis, and for any particular Volume, only one Job can read + that Volume at a time (no simultaneous read), so migration jobs that + all reference the same Volume will run sequentially. This can be a + potential bottleneck and does not scale very well to large numbers + of jobs. + +\item Only migration of Selection Types of Job and Volume have + been carefully tested. All the other migration methods (time, + occupancy, smallest, oldest, ...) need additional testing. + +\item Migration is only implemented for a single Storage daemon. You + cannot read on one Storage daemon and write on another. +\end{itemize} + + +\section{Example Migration Jobs} +\index[general]{Example Migration Jobs} + +When you specify a Migration Job, you must specify all the standard +directives as for a Job. However, certain directives such as the Level, Client, and +FileSet, though they must be defined, are ignored by the Migration job +because the values from the original job are used instead. + +As an example, suppose you have the following Job that +you run every night. To note: there is no Storage directive in the +Job resource; there is a Storage directive in each of the Pool +resources; the Pool to be migrated (File) contains a Next Pool +directive that defines the output Pool (where the data is written +by the migration job). 
+ +\footnotesize +\begin{verbatim} +# Define the backup Job +Job { + Name = "NightlySave" + Type = Backup + Level = Incremental # default + Client=rufus-fd + FileSet="Full Set" + Schedule = "WeeklyCycle" + Messages = Standard + Pool = Default +} + +# Default pool definition +Pool { + Name = Default + Pool Type = Backup + AutoPrune = yes + Recycle = yes + Next Pool = Tape + Storage = File + LabelFormat = "File" +} + +# Tape pool definition +Pool { + Name = Tape + Pool Type = Backup + AutoPrune = yes + Recycle = yes + Storage = DLTDrive +} + +# Definition of File storage device +Storage { + Name = File + Address = rufus + Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9" + Device = "File" # same as Device in Storage daemon + Media Type = File # same as MediaType in Storage daemon +} + +# Definition of DLT tape storage device +Storage { + Name = DLTDrive + Address = rufus + Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9" + Device = "HP DLT 80" # same as Device in Storage daemon + Media Type = DLT8000 # same as MediaType in Storage daemon +} + +\end{verbatim} +\normalsize + +Where we have included only the essential information -- i.e. the +Director, FileSet, Catalog, Client, Schedule, and Messages resources are +omitted. + +As you can see, by running the NightlySave Job, the data will be backed up +to File storage using the Default pool to specify the Storage as File. + +Now, if we add the following Job resource to this conf file. 
+ +\footnotesize +\begin{verbatim} +Job { + Name = "migrate-volume" + Type = Migrate + Level = Full + Client = rufus-fd + FileSet = "Full Set" + Messages = Standard + Pool = Default + Maximum Concurrent Jobs = 4 + Selection Type = Volume + Selection Pattern = "File" +} +\end{verbatim} +\normalsize + +and then run the job named {\bf migrate-volume}, all volumes in the Pool +named Default (as specified in the migrate-volume Job that match the +regular expression pattern {\bf File} will be migrated to tape storage +DLTDrive because the {\bf Next Pool} in the Default Pool specifies that +Migrations should go to the pool named {\bf Tape}, which uses +Storage {\bf DLTDrive}. + +If instead, we use a Job resource as follows: + +\footnotesize +\begin{verbatim} +Job { + Name = "migrate" + Type = Migrate + Level = Full + Client = rufus-fd + FileSet="Full Set" + Messages = Standard + Pool = Default + Maximum Concurrent Jobs = 4 + Selection Type = Job + Selection Pattern = ".*Save" +} +\end{verbatim} +\normalsize + +All jobs ending with the name Save will be migrated from the File Default to +the Tape Pool, or from File storage to Tape storage. diff --git a/docs/manuals/en/concepts/mtx-changer.txt b/docs/manuals/en/concepts/mtx-changer.txt new file mode 100644 index 00000000..10ef6d1c --- /dev/null +++ b/docs/manuals/en/concepts/mtx-changer.txt @@ -0,0 +1,215 @@ +#!/bin/sh +# +# Bacula interface to mtx autoloader +# +# Created OCT/31/03 by Alexander Kuehn, derived from Ludwig Jaffe's script +# +# Works with the HP C1537A L708 DDS3 +# +#set -x +# these are the labels of the tapes in each virtual slot, not the slots! +labels="PSE-0001 PSE-0002 PSE-0003 PSE-0004 PSE-0005 PSE-0006 PSE-0007 PSE-0008 PSE-0009 PSE-0010 PSE-0011 PSE-0012" + +# who to send a mail to? 
+recipient=root@localhost +logfile=/var/log/mtx.log + +# Delay in seconds how often to check whether a new tape has been inserted +TAPEDELAY=10 # the default is every 10 seconds +echo `date` ":" $@ >>$logfile + +# change this if mt is not in the path (use different quotes!) +mt=`which mt` +grep=`which grep` +# +# how to run the console application? +console="/usr/local/sbin/console -c /usr/local/etc/console.conf" + +command="$1" + +#TAPEDRIVE0 holds the device/name of your 1st and only drive (Bacula supports only 1 drive currently) +#Read TAPEDRIVE from command line parameters +if [ -z "$2" ] ; then + TAPEDRIVE0=/dev/nsa0 +else + TAPEDRIVE0=$2 +fi + +#Read slot from command line parameters +if [ -z "$3" ] ; then + slot=`expr 1` +else + slot=`expr $3` +fi + +if [ -z "$command" ] ; then + echo "" + echo "The mtx-changer script for Bacula" + echo "---------------------------------" + echo "" + echo "usage: mtx-changer [slot]" + echo " mtx-changer" + echo "" + echo "Valid commands:" + echo "" + echo "unload Unloads a tape into the slot" + echo " from where it was loaded." + echo "load Loads a tape from the slot " + echo "list Lists full storage slots" + echo "loaded Gives slot from where the tape was loaded." + echo " 0 means the tape drive is empty." + echo "slots Gives Number of avialable slots." + echo "volumes List avialable slots and the label of the." + echo " tape in it (slot:volume)" + echo "Example:" + echo " mtx-changer load /dev/nst0 1 loads a tape from slot1" + echo " mtx-changer %a %o %S " + echo "" + exit 0 +fi + + +case "$command" in + unload) + # At first do mt -f /dev/st0 offline to unload the tape + # + # Check if you want to fool me + echo "unmount"|$console >/dev/null 2>/dev/null + echo "mtx-changer: Checking if drive is loaded before we unload. Request unload" >>$logfile + if $mt -f $TAPEDRIVE0 status >/dev/null 2>/dev/null ; then # mt says status ok + echo "mtx-changer: Doing mt -f $TAPEDRIVE0 rewoffl to rewind and unload the tape!" 
>>$logfile + $mt -f $TAPEDRIVE0 rewoffl + else + echo "mtx-changer: *** Don't fool me! *** The Drive $TAPEDRIVE0 is empty." >>$logfile + fi + exit 0 + ;; + + load) + #Let's check if drive is loaded before we load it + echo "mtx-changer: Checking if drive is loaded before we load. I Request loaded" >>$logfile + LOADEDVOL=`echo "status Storage"|$console|$grep $TAPEDRIVE0|grep ^Device|grep -v "not open."|grep -v "ERR="|grep -v "no Bacula volume is mounted"|sed -e s/^.*Volume\ //|cut -d\" -f2` +# if [ -z "$LOADEDVOL" ] ; then # this is wrong, becaus Bacula would try to use the tape if we mount it! +# LOADEDVOL=`echo "mount"|$console|$grep $TAPEDRIVE0|grep Device|grep -v "not open."|grep -v "ERR="|sed -e s/^.*Volume\ //|cut -d\" -f2` +# if [ -z "$LOADEDVOL" ] ; then +# echo "mtx-changer: The Drive $TAPEDRIVE0 is empty." >>$logfile +# else # restore state? +# if [ $LOADEDVOL = $3 ] ; then # requested Volume mounted -> exit +# echo "mtx-changer: *** Don't fool me! *** Tape $LOADEDVOL is already in drive $TAPEDRIVE0!" >>$logfile +# exit +# else # oops, wrong volume +# echo "unmount"|$console >/dev/null 2>/dev/null +# fi +# fi +# fi + if [ -z "$LOADEDVOL" ] ; then + echo "unmount"|$console >/dev/null 2>/dev/null + LOADEDVOL=0 + else + #Check if you want to fool me + if [ $LOADEDVOL = $3 ] ; then + echo "mtx-changer: *** Don't fool me! *** Tape $LOADEDVOL is already in drive $TAPEDRIVE0!" >>$logfile + exit + fi + echo "mtx-changer: The Drive $TAPEDRIVE0 is loaded with the tape $LOADEDVOL" >>$logfile + echo "mtx-changer: Unmounting..." >>$logfile + echo "unmount"|$console >/dev/null 2>/dev/null + fi + echo "mtx-changer: Unloading..." >>$logfile + echo "mtx-changer: Doing mt -f $TAPEDRIVE0 rewoffl to rewind and unload the tape!" 
>>$logfile + mt -f $TAPEDRIVE0 rewoffl 2>/dev/null + #Now we can load the drive as desired + echo "mtx-changer: Doing mtx -f $1 $2 $3" >>$logfile + # extract label for the mail + count=`expr 1` + for label in $labels ; do + if [ $slot -eq $count ] ; then volume=$label ; fi + count=`expr $count + 1` + done + + mail -s "Bacula needs volume $volume." $recipient </dev/null 2>/dev/null + while [ $? -ne 0 ] ; do + sleep $TAPEDELAY + $mt status >/dev/null 2>/dev/null + done + mail -s "Bacula says thank you." $recipient <>$logfile + echo "Loading finished." ; >>$logfile + echo "$slot" + exit 0 + ;; + + list) + echo "mtx-changer: Requested list" >>$logfile + LOADEDVOL=`echo "status Storage"|$console|$grep $TAPEDRIVE0|grep ^Device|grep -v "not open."|grep -v "ERR="|grep -v "no Bacula volume is mounted"|sed -e s/^.*Volume\ //|cut -d\" -f2` + if [ -z $LOADEDVOL ] ; then # try mounting + LOADEDVOL=`echo "mount"|$console|$grep $TAPEDRIVE0|grep Device|grep -v "not open."|grep -v "ERR="|sed -e s/^.*Volume\ //|cut -d\" -f2` + if [ -z $LOADEDVOL ] ; then # no luck + LOADEDVOL="_no_tape" + else # restore state + echo "unmount"|$console >/dev/null 2>/dev/null + fi + fi + count=`expr 1` + for label in $labels ; do + if [ "$label" != "$LOADEDVOL" ] ; then + printf "$count " + fi + count=`expr $count + 1` + done + printf "\n" + ;; + + loaded) + echo "mtx-changer: Request loaded, dev $TAPEDRIVE0" >>$logfile + LOADEDVOL=`echo "status Storage"|$console|$grep $TAPEDRIVE0|grep ^Device|grep -v "not open."|grep -v "ERR="|grep -v "no Bacula volume is mounted"|sed -e s/^.*Volume\ //|cut -d\" -f2` + if [ -z $LOADEDVOL ] ; then + LOADEDVOL=`echo "mount"|$console|$grep $TAPEDRIVE0|grep Device|grep -v "not open."|grep -v "ERR="|grep -v "no Bacula volume is mounted"|sed -e s/^.*Volume\ //|cut -d\" -f2` + if [ -z "$LOADEDVOL" ] ; then # no luck + echo "$TAPEDRIVE0 not mounted!" 
>>$logfile + else # restore state + echo "unmount"|$console >/dev/null 2>/dev/null + fi + fi + if [ -z "$LOADEDVOL" ] ; then + LOADEDVOL="_no_tape" >>$logfile + echo "0" + else + count=`expr 1` + for label in $labels ; do + if [ $LOADEDVOL = $label ] ; then echo $count ; fi + count=`expr $count + 1` + done + fi + exit 0 + ;; + + slots) + echo "mtx-changer: Request slots" >>$logfile + count=`expr 0` + for label in $labels ; do + count=`expr $count + 1` + done + echo $count + ;; + + volumes) + echo "mtx-changer: Request volumes" >>$logfile + count=`expr 1` + for label in $labels ; do + printf "$count:$label " + count=`expr $count + 1` + done + printf "\n" + ;; +esac diff --git a/docs/manuals/en/concepts/oldfileset.tex b/docs/manuals/en/concepts/oldfileset.tex new file mode 100644 index 00000000..43a190fa --- /dev/null +++ b/docs/manuals/en/concepts/oldfileset.tex @@ -0,0 +1,677 @@ +%% +%% + +\chapter{The Old FileSet Resource} +\label{OldFileSetChapter} +\label{FileSetResource} +\index[general]{Resource!Old FileSet } +\index[general]{Old FileSet Resource } + +Note, this form of the FileSet resource still works but has been replaced by a +new more flexible form in Bacula version 1.34.3. As a consequence, you are +encouraged to convert to the new form as this one is deprecated and will be +removed in a future version. + +The FileSet resource defines what files are to be included in a backup job. At +least one {\bf FileSet} resource is required. It consists of a list of files +or directories to be included, a list of files or directories to be excluded +and the various backup options such as compression, encryption, and signatures +that are to be applied to each file. + +Any change to the list of the included files will cause Bacula to +automatically create a new FileSet (defined by the name and an MD5 checksum of +the Include contents). Each time a new FileSet is created, Bacula will ensure +that the first backup is always a Full save. 
+ +\begin{description} + +\item {\bf FileSet} +\index[dir]{FileSet } +Start of the FileSet records. At least one {\bf FileSet} resource must be +defined. + +\item {\bf Name = \lt{}name\gt{}} +\index[dir]{Name } +The name of the FileSet resource. This record is required. + +\item {\bf Include = \lt{}processing-options\gt{} +\ \ \{ \lt{}file-list\gt{} \} +} +\index[dir]{Include } + +The Include resource specifies the list of files and/or directories to be +included in the backup job. There can be any number of {\bf Include} {\bf +file-list} specifications within the FileSet, each having its own set of {\bf +processing-options}. Normally, the {\bf file-list} consists of one file or +directory name per line. Directory names should be specified without a +trailing slash. Wild-card (or glob matching) does not work when used in an +Include list. It does work in an Exclude list though. Just the same, any +asterisk (*), question mark (?), or left-bracket ([) must be preceded by a +slash (\textbackslash{}\textbackslash{}) if you want it to represent the +literal character. + +You should {\bf always} specify a full path for every directory and file that +you list in the FileSet. In addition, on Windows machines, you should {\bf +always} prefix the directory or filename with the drive specification (e.g. +{\bf c:/xxx}) using Unix directory name separators (forward slash). However, +within an {\bf Exclude}, for some reason the exclude will not work with a +prefixed drive letter. If you want to specify a drive letter in exclusions on +Win32 systems, you can do so by specifying: + +\footnotesize +\begin{verbatim} + Exclude = { /cygdrive/d/archive/Mulberry } +\end{verbatim} +\normalsize + +where in this case, the {\bf /cygdrive/d} \&nbsp; is Cygwin's way of referring +to drives on Win32 (thanks to Mathieu Arnold for this tip). + +Bacula's default for processing directories is to recursively descend in the +directory saving all files and subdirectories. 
Bacula will not by default +cross file systems (or mount points in Unix parlance). This means that if you +specify the root partition (e.g. {\bf /}), Bacula will save only the root +partition and not any of the other mounted file systems. Similarly on Windows +systems, you must explicitly specify each of the drives you want saved (e.g. +{\bf c:/} and {\bf d:/} ...). In addition, at least for Windows systems, you +will most likely want to enclose each specification within double quotes. The +{\bf df} command on Unix systems will show you which mount points you must +specify to save everything. See below for an example. + +Take special care not to include a directory twice or Bacula will backup the +same files two times wasting a lot of space on your archive device. Including +a directory twice is very easy to do. For example: + +\footnotesize +\begin{verbatim} + Include = { / /usr } +\end{verbatim} +\normalsize + +on a Unix system where /usr is a subdirectory (rather than a mounted +filesystem) will cause /usr to be backed up twice. In this case, on Bacula +versions prior to 1.32f-5-09Mar04 due to a bug, you will not be able to +restore hard linked files that were backed up twice. + +The {\bf \lt{}processing-options\gt{}} is optional. If specified, it is a list +of {\bf keyword=value} options to be applied to the file-list. Multiple +options may be specified by separating them with spaces. These options are +used to modify the default processing behavior of the files included. Since +there can be multiple {\bf Include} sets, this permits effectively specifying +the desired options (compression, encryption, ...) on a file by file basis. +The options may be one of the following: + +\begin{description} + +\item {\bf compression=GZIP} +\index[fd]{compression } +All files saved will be software compressed using the GNU ZIP compression +format. The compression is done on a file by file basis by the File daemon. 
+If there is a problem reading the tape in a single record of a file, it will +at most affect that file and none of the other files on the tape. Normally +this option is {\bf not} needed if you have a modern tape drive as the drive +will do its own compression. However, compression is very important if you +are writing your Volumes to a file, and it can also be helpful if you have a +fast computer but a slow network. + +Specifying {\bf GZIP} uses the default compression level six (i.e. {\bf GZIP} +is identical to {\bf GZIP6}). If you want a different compression level (1 +through 9), you can specify it by appending the level number with no +intervening spaces to {\bf GZIP}. Thus {\bf compression=GZIP1} would give +minimum compression but the fastest algorithm, and {\bf compression=GZIP9} +would give the highest level of compression, but requires more computation. +According to the GZIP documentation, compression levels greater than 6 +generally give very little extra compression but are rather CPU intensive. + +\item {\bf signature=MD5} +\index[fd]{signature } +An MD5 signature will be computed for all files saved. Adding this option +generates about 5\% extra overhead for each file saved. In addition to the +additional CPU time, the MD5 signature adds 16 more bytes per file to your +catalog. We strongly recommend that this option be specified as a default +for all files. + +\item {\bf signature=SHA1} +\index[fd]{signature } +An SHA1 signature will be computed for all files saved. The SHA1 algorithm is purported to +be somewhat slower than the MD5 algorithm, but at the same time is +significantly better from a cryptographic point of view (i.e. much fewer +collisions, much lower probability of being hacked.) It adds four more bytes +than the MD5 signature. We strongly recommend that either this option or MD5 +be specified as a default for all files. Note, only one of the two options +MD5 or SHA1 can be computed for any file. 
+ +\item {\bf *encryption=\lt{}algorithm\gt{}} +\index[fd]{*encryption } +All files saved will be encrypted using one of the following algorithms (NOT +YET IMPLEMENTED): + +\begin{description} + +\item {\bf *AES} +\index[fd]{*AES } +\end{description} + +\item {\bf verify=\lt{}options\gt{}} +\index[fd]{verify } +The options letters specified are used when running a {\bf Verify +Level=Catalog} job, and may be any combination of the following: + +\begin{description} + +\item {\bf i} +compare the inodes + +\item {\bf p} +compare the permission bits + +\item {\bf n} +compare the number of links + +\item {\bf u} +compare the user id + +\item {\bf g} +compare the group id + +\item {\bf s} +compare the size + +\item {\bf a} +compare the access time + +\item {\bf m} +compare the modification time (st\_mtime) + +\item {\bf c} +compare the change time (st\_ctime) + +\item {\bf s} +report file size decreases + +\item {\bf 5} +compare the MD5 signature + +\item {\bf 1} +compare the SHA1 signature +\end{description} + +A useful set of general options on the {\bf Level=Catalog} verify is {\bf +pins5} i.e. compare permission bits, inodes, number of links, size, and MD5 +changes. + +\item {\bf onefs=yes|no} +\index[fd]{onefs } +If set to {\bf yes} (the default), {\bf Bacula} will remain on a single file +system. That is it will not backup file systems that are mounted on a +subdirectory. In this case, you must explicitly list each file system you +want saved. If you set this option to {\bf no}, Bacula will backup all +mounted file systems (i.e. traverse mount points) that are found within the +{\bf FileSet}. Thus if you have NFS or Samba file systems mounted on a +directory included in your FileSet, they will also be backed up. Normally, it +is preferable to set {\bf onefs=yes} and to explicitly name each file system +you want backed up. See the example below for more details. 
+\label{portable} + +\item {\bf portable=yes|no} +\index[fd]{portable } +If set to {\bf yes} (default is {\bf no}), the Bacula File daemon will backup +Win32 files in a portable format. By default, this option is set to {\bf +no}, which means that on Win32 systems, the data will be backed up using +Windows API calls and on WinNT/2K/XP, the security and ownership data will be +properly backed up (and restored), but the data format is not portable to +other systems -- e.g. Unix, Win95/98/Me. On Unix systems, this option is +ignored, and unless you have a specific need to have portable backups, we +recommend accepting the default ({\bf no}) so that the maximum information +concerning your files is backed up. + +\item {\bf recurse=yes|no} +\index[fd]{recurse } +If set to {\bf yes} (the default), Bacula will recurse (or descend) into all +subdirectories found unless the directory is explicitly excluded using an +{\bf exclude} definition. If you set {\bf recurse=no}, Bacula will save the +subdirectory entries, but not descend into the subdirectories, and thus will +not save the contents of the subdirectories. Normally, you will want the +default ({\bf yes}). + +\item {\bf sparse=yes|no} +\index[dir]{sparse } +Enable special code that checks for sparse files such as created by ndbm. The +default is {\bf no}, so no checks are made for sparse files. You may specify +{\bf sparse=yes} even on files that are not sparse files. No harm will be +done, but there will be a small additional overhead to check for buffers of +all zero, and a small additional amount of space on the output archive will +be used to save the seek address of each non-zero record read. + +{\bf Restrictions:} Bacula reads files in 32K buffers. If the whole buffer is +zero, it will be treated as a sparse block and not written to tape. However, +if any part of the buffer is non-zero, the whole buffer will be written to +tape, possibly including some disk sectors (generally 4098 bytes) that are +all zero. 
As a consequence, Bacula's detection of sparse blocks is in 32K +increments rather than the system block size. If anyone considers this to be +a real problem, please send in a request for change with the reason. The +sparse code was first implemented in version 1.27. + +If you are not familiar with sparse files, an example is say a file where you +wrote 512 bytes at address zero, then 512 bytes at address 1 million. The +operating system will allocate only two blocks, and the empty space or hole +will have nothing allocated. However, when you read the sparse file and read +the addresses where nothing was written, the OS will return all zeros as if +the space were allocated, and if you backup such a file, a lot of space will +be used to write zeros to the volume. Worse yet, when you restore the file, +all the previously empty space will now be allocated using much more disk +space. By turning on the {\bf sparse} option, Bacula will specifically look +for empty space in the file, and any empty space will not be written to the +Volume, nor will it be restored. The price to pay for this is that Bacula +must search each block it reads before writing it. On a slow system, this may +be important. If you suspect you have sparse files, you should benchmark the +difference or set sparse for only those files that are really sparse. +\label{readfifo} + +\item {\bf readfifo=yes|no} +\index[fd]{readfifo } +If enabled, tells the Client to read the data on a backup and write the data +on a restore to any FIFO (pipe) that is explicitly mentioned in the FileSet. +In this case, you must have a program already running that writes into the +FIFO for a backup or reads from the FIFO on a restore. This can be +accomplished with the {\bf RunBeforeJob} record. If this is not the case, +Bacula will hang indefinitely on reading/writing the FIFO. When this is not +enabled (default), the Client simply saves the directory entry for the FIFO. 
+ +\item {\bf mtimeonly=yes|no} +\index[dir]{mtimeonly } +If enabled, tells the Client that the selection of files during Incremental +and Differential backups should based only on the st\_mtime value in the +stat() packet. The default is {\bf no} which means that the selection of +files to be backed up will be based on both the st\_mtime and the st\_ctime +values. In general, it is not recommended to use this option. + +\item {\bf keepatime=yes|no} +\index[dir]{keepatime } +The default is {\bf no}. When enabled, Bacula will reset the st\_atime +(access time) field of files that it backs up to their value prior to the +backup. This option is not generally recommended as there are very few +programs that use st\_atime, and the backup overhead is increased because of +the additional system call necessary to reset the times. (I'm not sure this +works on Win32). +\end{description} + +{\bf \lt{}file-list\gt{}} is a space separated list of filenames and/or +directory names. To include names containing spaces, enclose the name between +double-quotes. The list may span multiple lines, in fact, normally it is good +practice to specify each filename on a separate line. + +There are a number of special cases when specifying files or directories in a +{\bf file-list}. They are: + +\begin{itemize} +\item Any file-list item preceded by an at-sign (@) is assumed to be a +filename containing a list of files, which is read when the configuration +file is parsed during Director startup. Note, that the file is read on the +Director's machine and not on the Client. +\item Any file-list item beginning with a vertical bar (|) is assumed to be a +program. This program will be executed on the Director's machine at the time +the Job starts (not when the Director reads the configuration file), and any +output from that program will be assumed to be a list of files or +directories, one per line, to be included. 
This allows you to have a job that +for example includes all the local partitions even if you change the +partitioning by adding a disk. In general, you will need to prefix your +command or commands with a {\bf sh -c} so that they are invoked by a shell. +This will not be the case if you are invoking a script as in the second +example below. Also, you must take care to escape wild-cards and ensure that +any spaces in your command are escaped as well. If you use single quotes +(') within a double quote ("), Bacula will treat everything between the +single quotes as one field so it will not be necessary to escape the spaces. +In general, getting all the quotes and escapes correct is a real pain as you +can see by the next example. As a consequence, it is often easier to put +everything in a file, and simply use the file name within Bacula. In that case +the {\bf sh -c} will not be necessary providing the first line of the file is + {\bf \#!/bin/sh}. + +As an example: + +\footnotesize +\begin{verbatim} + +Include = signature=SHA1 { + "|sh -c 'df -l | grep \"^/dev/hd[ab]\" | grep -v \".*/tmp\" \ + | awk \"{print \\$6}\"'" +} +\end{verbatim} +\normalsize + +will produce a list of all the local partitions on a Red Hat Linux system. +Note, the above line was split, but should normally be written on one line. +Quoting is a real problem because you must quote for Bacula which consists of +preceding every \textbackslash{} and every " with a \textbackslash{}, and +you must also quote for the shell command. 
In the end, it is probably easier +just to execute a small file with: + +\footnotesize +\begin{verbatim} +Include = signature=MD5 { + "|my_partitions" +} +\end{verbatim} +\normalsize + +where my\_partitions has: + +\footnotesize +\begin{verbatim} +#!/bin/sh +df -l | grep "^/dev/hd[ab]" | grep -v ".*/tmp" \ + | awk "{print \$6}" +\end{verbatim} +\normalsize + +If the vertical bar (|) is preceded by a backslash as in \textbackslash{}|, +the program will be executed on the Client's machine instead of on the +Director's machine -- (this is implemented but not tested, and very likely +will not work on Windows). +\item Any file-list item preceded by a less-than sign (\lt{}) will be taken +to be a file. This file will be read on the Director's machine at the time +the Job starts, and the data will be assumed to be a list of directories or +files, one per line, to be included. This feature allows you to modify the +external file and change what will be saved without stopping and restarting +Bacula as would be necessary if using the @ modifier noted above. + +If you precede the less-than sign (\lt{}) with a backslash as in +\textbackslash{}\lt{}, the file-list will be read on the Client machine +instead of on the Director's machine (implemented but not tested). +\item If you explicitly specify a block device such as {\bf /dev/hda1}, then +Bacula (starting with version 1.28) will assume that this is a raw partition +to be backed up. In this case, you are strongly urged to specify a {\bf +sparse=yes} include option, otherwise, you will save the whole partition +rather than just the actual data that the partition contains. For example: + +\footnotesize +\begin{verbatim} +Include = signature=MD5 sparse=yes { + /dev/hd6 +} +\end{verbatim} +\normalsize + +will backup the data in device /dev/hd6. + +Ludovic Strappazon has pointed out that this feature can be used to backup a +full Microsoft Windows disk. 
Simply boot into the system using a Linux Rescue +disk, then load a statically linked Bacula as described in the +\ilink{ Disaster Recovery Using Bacula}{RescueChapter} chapter of +this manual. Then simply save the whole disk partition. In the case of a +disaster, you can then restore the desired partition. +\item If you explicitly specify a FIFO device name (created with mkfifo), and +you add the option {\bf readfifo=yes} as an option, Bacula will read the FIFO +and back its data up to the Volume. For example: + +\footnotesize +\begin{verbatim} +Include = signature=SHA1 readfifo=yes { + /home/abc/fifo +} +\end{verbatim} +\normalsize + +if {\bf /home/abc/fifo} is a fifo device, Bacula will open the fifo, read it, +and store all data thus obtained on the Volume. Please note, you must have a +process on the system that is writing into the fifo, or Bacula will hang, and +after one minute of waiting, it will go on to the next file. The data read +can be anything since Bacula treats it as a stream. + +This feature can be an excellent way to do a "hot" backup of a very large +database. You can use the {\bf RunBeforeJob} to create the fifo and to start +a program that dynamically reads your database and writes it to the fifo. +Bacula will then write it to the Volume. + +During the restore operation, the inverse is true, after Bacula creates the +fifo if there was any data stored with it (no need to explicitly list it or +add any options), that data will be written back to the fifo. As a +consequence, if any such FIFOs exist in the fileset to be restored, you must +ensure that there is a reader program or Bacula will block, and after one +minute, Bacula will time out the write to the fifo and move on to the next +file. +\end{itemize} + +The Exclude Files specifies the list of files and/or directories to be +excluded from the backup job. The {\bf \lt{}file-list\gt{}} is a comma or +space separated list of filenames and/or directory names. 
To exclude names +containing spaces, enclose the name between double-quotes. Most often each +filename is on a separate line. + +For exclusions on Windows systems, do not include a leading drive letter such +as {\bf c:}. This does not work. Any filename preceded by an at-sign (@) is +assumed to be a filename on the Director's machine containing a list of files. + +\end{description} + +The following is an example of a valid FileSet resource definition: + +\footnotesize +\begin{verbatim} +FileSet { + Name = "Full Set" + Include = compression=GZIP signature=SHA1 sparse=yes { + @/etc/backup.list + } + Include = { + /root/myfile + /usr/lib/another_file + } + Exclude = { *.o } +} +\end{verbatim} +\normalsize + +Note, in the above example, all the files contained in /etc/backup.list will +be compressed with GZIP compression, an SHA1 signature will be computed on the +file's contents (its data), and sparse file handling will apply. + +The two files /root/myfile and /usr/lib/another\_file will also be saved but +without any options. In addition, all files with the extension {\bf .o} will +be excluded from the file set (i.e. from the backup). + +Suppose you want to save everything except {\bf /tmp} on your system. 
Doing a +{\bf df} command, you get the following output: + +\footnotesize +\begin{verbatim} +[kern@rufus k]$ df +Filesystem 1k-blocks Used Available Use% Mounted on +/dev/hda5 5044156 439232 4348692 10% / +/dev/hda1 62193 4935 54047 9% /boot +/dev/hda9 20161172 5524660 13612372 29% /home +/dev/hda2 62217 6843 52161 12% /rescue +/dev/hda8 5044156 42548 4745376 1% /tmp +/dev/hda6 5044156 2613132 2174792 55% /usr +none 127708 0 127708 0% /dev/shm +//minimatou/c$ 14099200 9895424 4203776 71% /mnt/mmatou +lmatou:/ 1554264 215884 1258056 15% /mnt/matou +lmatou:/home 2478140 1589952 760072 68% /mnt/matou/home +lmatou:/usr 1981000 1199960 678628 64% /mnt/matou/usr +lpmatou:/ 995116 484112 459596 52% /mnt/pmatou +lpmatou:/home 19222656 2787880 15458228 16% /mnt/pmatou/home +lpmatou:/usr 2478140 2038764 311260 87% /mnt/pmatou/usr +deuter:/ 4806936 97684 4465064 3% /mnt/deuter +deuter:/home 4806904 280100 4282620 7% /mnt/deuter/home +deuter:/files 44133352 27652876 14238608 67% /mnt/deuter/files +\end{verbatim} +\normalsize + +Now, if you specify only {\bf /} in your Include list, Bacula will only save +the Filesystem {\bf /dev/hda5}. To save all file systems except {\bf /tmp} +with out including any of the Samba or NFS mounted systems, and explicitly +excluding a /tmp, /proc, .journal, and .autofsck, which you will not want to +be saved and restored, you can use the following: + +\footnotesize +\begin{verbatim} +FileSet { + Name = Everything + Include = { + / + /boot + /home + /rescue + /usr + } + Exclude = { + /proc + /tmp + .journal + .autofsck + } +} +\end{verbatim} +\normalsize + +Since /tmp is on its own filesystem and it was not explicitly named in the +Include list, it is not really needed in the exclude list. It is better to +list it in the Exclude list for clarity, and in case the disks are changed so +that it is no longer in its own partition. + +Please be aware that allowing Bacula to traverse or change file systems can be +{\bf very} dangerous. 
For example, with the following: + +\footnotesize +\begin{verbatim} +FileSet { + Name = "Bad example" + Include = onefs=no { + /mnt/matou + } +} +\end{verbatim} +\normalsize + +you will be backing up an NFS mounted partition ({\bf /mnt/matou}), and since +{\bf onefs} is set to {\bf no}, Bacula will traverse file systems. However, if +{\bf /mnt/matou} has the current machine's file systems mounted, as is often +the case, you will get yourself into a recursive loop and the backup will +never end. + +The following FileSet definition will backup a raw partition: + +\footnotesize +\begin{verbatim} +FileSet { + Name = "RawPartition" + Include = sparse=yes { + /dev/hda2 + } +} +\end{verbatim} +\normalsize + +Note, in backing up and restoring a raw partition, you should ensure that no +other process including the system is writing to that partition. As a +precaution, you are strongly urged to ensure that the raw partition is not +mounted or is mounted read-only. If necessary, this can be done using the {\bf +RunBeforeJob} record. + +\section{Additional Considerations for Using Excludes on Windows} +\index[general]{Additional Considerations for Using Excludes on Windows } +\index[general]{Windows!Additional Considerations for Using Excludes on } +on Windows} + +For exclude lists to work correctly on Windows, you must observe the following +rules: + +\begin{itemize} +\item Filenames are case sensitive, so you must use the correct case. +\item To exclude a directory, you must not have a trailing slash on the +directory name. +\item If you have spaces in your filename, you must enclose the entire name +in double-quote characters ("). Trying to use a backslash before the space +will not work. +\item You must not precede the excluded file or directory with a drive letter +(such as {\bf c:}) otherwise it will not work. +\end{itemize} + +Thanks to Thiago Lima for summarizing the above items for us. 
If you are +having difficulties getting includes or excludes to work, you might want to +try using the {\bf estimate job=xxx listing} command documented in the +\ilink{Console chapter}{console.tex#estimate} of this manual. +\label{win32} + +\section{Windows Considerations for FileSets} +\index[general]{FileSets!Windows Considerations for } +\index[general]{Windows Considerations for FileSets } + +If you are entering Windows file names, the directory path may be preceded by +the drive and a colon (as in c:). However, the path separators must be +specified in Unix convention (i.e. forward slash (/)). If you wish to include +a quote in a file name, precede the quote with a backslash +(\textbackslash{}\textbackslash{}). For example you might use the following +for a Windows machine to backup the "My Documents" directory: + +\footnotesize +\begin{verbatim} +FileSet { + Name = "Windows Set" + Include = { + "c:/My Documents" + } + Exclude = { *.obj *.exe } +} +\end{verbatim} +\normalsize + +When using exclusion on Windows, do not use a drive prefix (i.e. {\bf c:}) as +it will prevent the exclusion from working. However, if you need to specify a +drive letter in exclusions on Win32 systems, you can do so by specifying: + +\footnotesize +\begin{verbatim} + Exclude = { /cygdrive/d/archive/Mulberry } +\end{verbatim} +\normalsize + +where in this case, the {\bf /cygdrive/d} is Cygwin's way of referring to +drive {\bf d:} (thanks to Mathieu Arnold for this tip). + +\section{A Windows Example FileSet} +\index[general]{FileSet!Windows Example } +\index[general]{Windows Example FileSet } + +The following example was contributed by Phil Stracchino: + +\footnotesize +\begin{verbatim} +This is my Windows 2000 fileset: +FileSet { + Name = "Windows 2000 Full Set" + Include = signature=MD5 { + c:/ + } +# Most of these files are excluded not because we don't want +# them, but because Win2K won't allow them to be backed up +# except via proprietary Win32 API calls. 
+ Exclude = { + "/Documents and Settings/*/Application Data/*/Profiles/*/*/ + Cache/*" + "/Documents and Settings/*/Local Settings/Application Data/ + Microsoft/Windows/[Uu][Ss][Rr][Cc][Ll][Aa][Ss][Ss].*" + "/Documents and Settings/*/[Nn][Tt][Uu][Ss][Ee][Rr].*" + "/Documents and Settings/*/Cookies/*" + "/Documents and Settings/*/Local Settings/History/*" + "/Documents and Settings/*/Local Settings/ + Temporary Internet Files/*" + "/Documents and Settings/*/Local Settings/Temp/*" + "/WINNT/CSC" + "/WINNT/security/logs/scepol.log" + "/WINNT/system32/config/*" + "/WINNT/msdownld.tmp/*" + "/WINNT/Internet Logs/*" + "/WINNT/$Nt*Uninstall*" + "/WINNT/Temp/*" + "/temp/*" + "/tmp/*" + "/pagefile.sys" + } +} +\end{verbatim} +\normalsize + +Note, the three line of the above Exclude were split to fit on the document +page, they should be written on a single line in real use. diff --git a/docs/manuals/en/concepts/pools.tex b/docs/manuals/en/concepts/pools.tex new file mode 100644 index 00000000..10217f84 --- /dev/null +++ b/docs/manuals/en/concepts/pools.tex @@ -0,0 +1,429 @@ +%% +%% + +\chapter{Automated Disk Backup} +\label{PoolsChapter} +\index[general]{Volumes!Using Pools to Manage} +\index[general]{Disk!Automated Backup} +\index[general]{Using Pools to Manage Volumes} +\index[general]{Automated Disk Backup} + +If you manage five or ten machines and have a nice tape backup, you don't need +Pools, and you may wonder what they are good for. In this chapter, you will +see that Pools can help you optimize disk storage space. The same techniques +can be applied to a shop that has multiple tape drives, or that wants to mount +various different Volumes to meet their needs. + +The rest of this chapter will give an example involving backup to disk +Volumes, but most of the information applies equally well to tape Volumes. 
+
+\label{TheProblem}
+\section{The Problem}
+\index[general]{Problem}
+
+A site that I administer (a charitable organization) had a DDS-3 tape
+drive that was failing. The exact reason for the failure is still unknown.
+Worse yet, their full backup size is about 15GB whereas the capacity of their
+broken DDS-3 was at best 8GB (rated 6/12). A new DDS-4 tape drive and the
+necessary cassettes were more expensive than their budget could handle.
+
+\label{TheSolution}
+\section{The Solution}
+\index[general]{Solution}
+
+They want to maintain six months of backup data, and be able to access the old
+files on a daily basis for a week, a weekly basis for a month, then monthly
+for six months. In addition, offsite capability was not needed (well perhaps
+it really is, but it was never used). Their daily changes amount to about
+300MB on the average, or about 2GB per week.
+
+As a consequence, the total volume of data they need to keep to meet their
+needs is about 100GB (15GB x 6 + 2GB x 5 + 0.3 x 7) = 102.1GB.
+
+The chosen solution was to buy a 120GB hard disk for next to nothing -- far
+less than 1/10th the price of a tape drive and the cassettes to handle the
+same amount of data, and to have Bacula write to disk files.
+
+The rest of this chapter will explain how to set up Bacula so that it would
+automatically manage a set of disk files with the minimum sysadmin
+intervention. The system has been running since 22 January 2004 until today
+(23 June 2007) with no intervention, with the exception of adding
+a second 120GB hard disk after a year because their needs grew
+over that time to more than the 120GB (168GB to be exact). The only other
+intervention I have made is a periodic (about once a year) Bacula upgrade.
+
+\label{OverallDesign}
+\section{Overall Design}
+\index[general]{Overall Design}
+\index[general]{Design!Overall}
+
+Getting Bacula to write to disk rather than tape in the simplest case is
+rather easy, and is documented in the previous chapter.
In addition, all the
+directives discussed here are explained in that chapter. We'll leave it to you
+to look at the details there. If you haven't read it and are not familiar with
+Pools, you probably should at least read it once quickly for the ideas before
+continuing here.
+
+One needs to consider what happens if we have only a single large Bacula
+Volume defined on our hard disk. Everything works fine until the Volume fills,
+then Bacula will ask you to mount a new Volume. This same problem applies to
+the use of tape Volumes if your tape fills. Being a hard disk and the only one
+you have, this will be a bit of a problem. It should be obvious that it is
+better to use a number of smaller Volumes and arrange for Bacula to
+automatically recycle them so that the disk storage space can be reused. The
+other problem with a single Volume is that until version 2.0.0,
+Bacula did not seek within a disk Volume, so restoring a single file can take
+more time than one would expect.
+
+As mentioned, the solution is to have multiple Volumes, or files on the disk.
+To do so, we need to limit the use and thus the size of a single Volume, by
+time, by number of jobs, or by size. Any of these would work, but we chose to
+limit the use of a single Volume by putting a single job in each Volume with
+the exception of Volumes containing Incremental backup where there will be 6
+jobs (a week's worth of data) per volume. The details of this will be
+discussed shortly. This is a single client backup, so if you have multiple
+clients you will need to multiply those numbers by the number of clients,
+or use a different system for switching volumes, such as limiting the
+volume size.
+
+The next problem to resolve is recycling of Volumes. As you noted from above,
+the requirements are to be able to restore monthly for 6 months, weekly for a
+month, and daily for a week.
So to simplify things, why not do a Full save +once a month, a Differential save once a week, and Incremental saves daily. +Now since each of these different kinds of saves needs to remain valid for +differing periods, the simplest way to do this (and possibly the only) is to +have a separate Pool for each backup type. + +The decision was to use three Pools: one for Full saves, one for Differential +saves, and one for Incremental saves, and each would have a different number +of volumes and a different Retention period to accomplish the requirements. + +\label{FullPool} +\subsection{Full Pool} +\index[general]{Pool!Full} +\index[general]{Full Pool} + +Putting a single Full backup on each Volume, will require six Full save +Volumes, and a retention period of six months. The Pool needed to do that is: + +\footnotesize +\begin{verbatim} +Pool { + Name = Full-Pool + Pool Type = Backup + Recycle = yes + AutoPrune = yes + Volume Retention = 6 months + Maximum Volume Jobs = 1 + Label Format = Full- + Maximum Volumes = 9 +} +\end{verbatim} +\normalsize + +Since these are disk Volumes, no space is lost by having separate Volumes for +each backup (done once a month in this case). The items to note are the +retention period of six months (i.e. they are recycled after six months), that +there is one job per volume (Maximum Volume Jobs = 1), the volumes will be +labeled Full-0001, ... Full-0006 automatically. One could have labeled these +manually from the start, but why not use the features of Bacula. + +Six months after the first volume is used, it will be subject to pruning +and thus recycling, so with a maximum of 9 volumes, there should always be +3 volumes available (note, they may all be marked used, but they will be +marked purged and recycled as needed). + +If you have two clients, you would want to set {\bf Maximum Volume Jobs} to +2 instead of one, or set a limit on the size of the Volumes, and possibly +increase the maximum number of Volumes. 
+
+
+\label{DiffPool}
+\subsection{Differential Pool}
+\index[general]{Pool!Differential}
+\index[general]{Differential Pool}
+
+For the Differential backup Pool, we choose a retention period of a bit longer
+than a month and ensure that there is at least one Volume for each of the
+maximum of five weeks in a month. So the following works:
+
+\footnotesize
+\begin{verbatim}
+Pool {
+  Name = Diff-Pool
+  Pool Type = Backup
+  Recycle = yes
+  AutoPrune = yes
+  Volume Retention = 40 days
+  Maximum Volume Jobs = 1
+  Label Format = Diff-
+  Maximum Volumes = 10
+}
+\end{verbatim}
+\normalsize
+
+As you can see, the Differential Pool can grow to a maximum of 10 volumes,
+and the Volumes are retained 40 days and thereafter they can be recycled. Finally
+there is one job per volume. This, of course, could be tightened up a lot, but
+the expense here is a few GB which is not too serious.
+
+If a new volume is used every week, after 40 days, one will have used 7
+volumes, and there should then always be 3 volumes that can be purged and
+recycled.
+
+See the discussion above concerning the Full pool for how to handle multiple
+clients.
+
+\label{IncPool}
+\subsection{Incremental Pool}
+\index[general]{Incremental Pool}
+\index[general]{Pool!Incremental}
+
+Finally, here is the resource for the Incremental Pool:
+
+\footnotesize
+\begin{verbatim}
+Pool {
+  Name = Inc-Pool
+  Pool Type = Backup
+  Recycle = yes
+  AutoPrune = yes
+  Volume Retention = 20 days
+  Maximum Volume Jobs = 6
+  Label Format = Inc-
+  Maximum Volumes = 7
+}
+\end{verbatim}
+\normalsize
+
+We keep the data for 20 days rather than just a week as the needs require. To
+reduce the proliferation of volume names, we keep a week's worth of data (6
+incremental backups) in each Volume. In practice, the retention period should
+be set to just a bit more than a week and keep only two or three volumes
+instead of five.
Again, the lost is very little and as the system reaches the +full steady state, we can adjust these values so that the total disk usage +doesn't exceed the disk capacity. + +If you have two clients, the simplest thing to do is to increase the +maximum volume jobs from 6 to 12. As mentioned above, it is also possible +limit the size of the volumes. However, in that case, you will need to +have a better idea of the volume or add sufficient volumes to the pool so +that you will be assured that in the next cycle (after 20 days) there is +at least one volume that is pruned and can be recycled. + + +\label{Example} +\section{The Actual Conf Files} +\index[general]{Files!Actual Conf} +\index[general]{Actual Conf Files} + +The following example shows you the actual files used, with only a few minor +modifications to simplify things. + +The Director's configuration file is as follows: + +\footnotesize +\begin{verbatim} +Director { # define myself + Name = bacula-dir + DIRport = 9101 + QueryFile = "/home/bacula/bin/query.sql" + WorkingDirectory = "/home/bacula/working" + PidDirectory = "/home/bacula/working" + Maximum Concurrent Jobs = 1 + Password = " *** CHANGE ME ***" + Messages = Standard +} +# By default, this job will back up to disk in /tmp +Job { + Name = client + Type = Backup + Client = client-fd + FileSet = "Full Set" + Schedule = "WeeklyCycle" + Storage = File + Messages = Standard + Pool = Default + Full Backup Pool = Full-Pool + Incremental Backup Pool = Inc-Pool + Differential Backup Pool = Diff-Pool + Write Bootstrap = "/home/bacula/working/client.bsr" + Priority = 10 +} + +# Backup the catalog database (after the nightly save) +Job { + Name = "BackupCatalog" + Type = Backup + Client = client-fd + FileSet="Catalog" + Schedule = "WeeklyCycleAfterBackup" + Storage = File + Messages = Standard + Pool = Default + # This creates an ASCII copy of the catalog + # WARNING!!! Passing the password via the command line is insecure. 
+ # see comments in make_catalog_backup for details. + RunBeforeJob = "/home/bacula/bin/make_catalog_backup bacula bacula" + # This deletes the copy of the catalog + RunAfterJob = "/home/bacula/bin/delete_catalog_backup" + Write Bootstrap = "/home/bacula/working/BackupCatalog.bsr" + Priority = 11 # run after main backup +} + +# Standard Restore template, to be changed by Console program +Job { + Name = "RestoreFiles" + Type = Restore + Client = havana-fd + FileSet="Full Set" + Storage = File + Messages = Standard + Pool = Default + Where = /tmp/bacula-restores +} + + + +# List of files to be backed up +FileSet { + Name = "Full Set" + Include = { Options { signature=SHA1; compression=GZIP9 } + File = / + File = /usr + File = /home + File = /boot + File = /var + File = /opt + } + Exclude = { + File = /proc + File = /tmp + File = /.journal + File = /.fsck + ... + } +} +Schedule { + Name = "WeeklyCycle" + Run = Level=Full 1st sun at 2:05 + Run = Level=Differential 2nd-5th sun at 2:05 + Run = Level=Incremental mon-sat at 2:05 +} + +# This schedule does the catalog. 
It starts after the WeeklyCycle +Schedule { + Name = "WeeklyCycleAfterBackup" + Run = Level=Full sun-sat at 2:10 +} + +# This is the backup of the catalog +FileSet { + Name = "Catalog" + Include { Options { signature=MD5 } + File = /home/bacula/working/bacula.sql + } +} + +Client { + Name = client-fd + Address = client + FDPort = 9102 + Catalog = MyCatalog + Password = " *** CHANGE ME ***" + AutoPrune = yes # Prune expired Jobs/Files + Job Retention = 6 months + File Retention = 60 days +} + +Storage { + Name = File + Address = localhost + SDPort = 9103 + Password = " *** CHANGE ME ***" + Device = FileStorage + Media Type = File +} + +Catalog { + Name = MyCatalog + dbname = bacula; user = bacula; password = "" +} + +Pool { + Name = Full-Pool + Pool Type = Backup + Recycle = yes # automatically recycle Volumes + AutoPrune = yes # Prune expired volumes + Volume Retention = 6 months + Maximum Volume Jobs = 1 + Label Format = Full- + Maximum Volumes = 9 +} + +Pool { + Name = Inc-Pool + Pool Type = Backup + Recycle = yes # automatically recycle Volumes + AutoPrune = yes # Prune expired volumes + Volume Retention = 20 days + Maximum Volume Jobs = 6 + Label Format = Inc- + Maximum Volumes = 7 +} + +Pool { + Name = Diff-Pool + Pool Type = Backup + Recycle = yes + AutoPrune = yes + Volume Retention = 40 days + Maximum Volume Jobs = 1 + Label Format = Diff- + Maximum Volumes = 10 +} + +Messages { + Name = Standard + mailcommand = "bsmtp -h mail.domain.com -f \"\(Bacula\) %r\" + -s \"Bacula: %t %e of %c %l\" %r" + operatorcommand = "bsmtp -h mail.domain.com -f \"\(Bacula\) %r\" + -s \"Bacula: Intervention needed for %j\" %r" + mail = root@domain.com = all, !skipped + operator = root@domain.com = mount + console = all, !skipped, !saved + append = "/home/bacula/bin/log" = all, !skipped +} +\end{verbatim} +\normalsize + +and the Storage daemon's configuration file is: + +\footnotesize +\begin{verbatim} +Storage { # definition of myself + Name = bacula-sd + SDPort = 9103 # 
Director's port + WorkingDirectory = "/home/bacula/working" + Pid Directory = "/home/bacula/working" +} +Director { + Name = bacula-dir + Password = " *** CHANGE ME ***" +} +Device { + Name = FileStorage + Media Type = File + Archive Device = /files/bacula + LabelMedia = yes; # lets Bacula label unlabeled media + Random Access = Yes; + AutomaticMount = yes; # when device opened, read it + RemovableMedia = no; + AlwaysOpen = no; +} +Messages { + Name = Standard + director = bacula-dir = all +} +\end{verbatim} +\normalsize diff --git a/docs/manuals/en/concepts/projects.tex b/docs/manuals/en/concepts/projects.tex new file mode 100644 index 00000000..f118e791 --- /dev/null +++ b/docs/manuals/en/concepts/projects.tex @@ -0,0 +1,28 @@ +%% +%% + +\chapter{Bacula Projects} +\label{ProjectsChapter} +\index[general]{Projects!Bacula } +\index[general]{Bacula Projects } + +Once a new major version of Bacula is released, the Bacula +users will vote on a list of new features. This vote is used +as the main element determining what new features will be +implemented for the next version. Generally, the development time +for a new release is between four to nine months. Sometimes it may be +a bit longer, but in that case, there will be a number of bug fix +updates to the currently released version. + +For the current list of project, please see the projects page in the CVS +at: \elink{http://cvs.sourceforge.net/viewcvs.py/*checkout*/bacula/bacula/projects} +{http://cvs.sourceforge.net/viewcvs.py/*checkout*/bacula/bacula/projects} +see the {\bf projects} file in the main source directory. The projects +file is updated approximately once every six months. + +Separately from the project list, Kern maintains a current list of +tasks as well as ideas, feature requests, and occasionally design +notes. This list is updated roughly weekly (sometimes more often). 
+
+For a current list of tasks you can see {\bf kernstodo} in the Source Forge
+CVS at \elink{http://cvs.sourceforge.net/viewcvs.py/*checkout*/bacula/bacula/kernstodo}
+{http://cvs.sourceforge.net/viewcvs.py/*checkout*/bacula/bacula/kernstodo}.
diff --git a/docs/manuals/en/concepts/python.tex b/docs/manuals/en/concepts/python.tex
new file mode 100644
index 00000000..40e1b2e0
--- /dev/null
+++ b/docs/manuals/en/concepts/python.tex
@@ -0,0 +1,479 @@
+%%
+%%
+
+\chapter{Python Scripting}
+\label{PythonChapter}
+\index[general]{Python Scripting}
+\index[general]{Scripting!Python}
+
+You may be asking what Python is and why a scripting language is
+needed in Bacula. The answer to the first question is that Python
+is an Object Oriented scripting language with features similar
+to those found in Perl, but the syntax of the language is much
+cleaner and simpler. The answer to why have scripting in Bacula is to
+give the user more control over the whole backup process. Probably
+the simplest example is when Bacula needs a new Volume name, with
+a scripting language such as Python, you can generate any name
+you want, based on the current state of Bacula.
+
+\section{Python Configuration}
+\index[general]{Python Configuration}
+\index[general]{Configuration!Python}
+
+Python must be enabled during the configuration process by adding
+a \verb:--:with-python, and possibly specifying an alternate
+directory if your Python is not installed in a standard system
+location. If you are using RPMs you will need the python-devel package
+installed.
+
+When Python is configured, it becomes an integral part of Bacula and
+runs in Bacula's address space, so even though it is an interpreted
+language, it is very efficient.
+
+When the Director starts, it looks to see if you have a {\bf
+Scripts Directory} Directive defined (normal default {\bf
+/etc/bacula/scripts}); if so, it looks in that directory for a file named
+{\bf DirStartUp.py}.
If it is found, Bacula will pass this file to Python +for execution. The {\bf Scripts Directory} is a new directive that you add +to the Director resource of your bacula-dir.conf file. + +Note: Bacula does not install Python scripts by default because these +scripts are for you to program. This means that with a default +installation with Python enabled, Bacula will print the following error +message: + +\begin{verbatim} +09-Jun 15:14 bacula-dir: ERROR in pythonlib.c:131 Could not import +Python script /etc/bacula/scripts/DirStartUp. Python disabled. +\end{verbatim} + +The source code directory {\bf examples/python} contains sample scripts +for DirStartUp.py, SDStartUp.py, and FDStartUp.py that you might want +to use as a starting point. Normally, your scripts directory (at least +where you store the Python scripts) should be writable by Bacula, because +Python will attempt to write a compiled version of the scripts (e.g. +DirStartUp.pyc) back to that directory. + +When starting with the sample scripts, you can delete any part that +you will not need, but you should keep all the Bacula Event and Job Event +definitions. If you do not want a particular event, simply replace the +existing code with a {\bf noop = 1}. + +\section{Bacula Events} +\index[general]{Bacula Events} +\index[general]{Events} +A Bacula event is a point in the Bacula code where Bacula +will call a subroutine (actually a method) that you have +defined in the Python StartUp script. Events correspond +to some significant event such as a Job Start, a Job End, +Bacula needs a new Volume Name, ... When your script is +called, it will have access to all the Bacula variables +specific to the Job (attributes of the Job Object), and +it can even call some of the Job methods (subroutines) +or set new values in the Job attributes, such as the +Priority. You will see below how the events are used. 
+ +\section{Python Objects} +\index[general]{Python Objects} +\index[general]{Objects!Python} + +There are four Python objects that you will need to work with: +\begin{description} +\item [The Bacula Object] + The Bacula object is created by the Bacula daemon (the Director + in the present case) when the daemon starts. It is available to + the Python startup script, {\bf DirStartup.py}, by importing the + Bacula definitions with {\bf import bacula}. The methods + available with this object are described below. + +\item [The Bacula Events Class] + You create this class in the startup script, and you pass + it to the Bacula Object's {\bf set\_events} method. The + purpose of the Bacula Events Class is to define what global + or daemon events you want to monitor. When one of those events + occurs, your Bacula Events Class will be called at the method + corresponding to the event. There are currently three events, + JobStart, JobEnd, and Exit, which are described in detail below. + +\item [The Job Object] + When a Job starts, and assuming you have defined a JobStart method + in your Bacula Events Class, Bacula will create a Job Object. This + object will be passed to the JobStart event. The Job Object has a + has good number of read-only members or attributes providing many + details of the Job, and it also has a number of writable attributes + that allow you to pass information into the Job. These attributes + are described below. + +\item [The Job Events Class] + You create this class in the JobStart method of your Bacula Events + class, and it allows you to define which of the possible Job Object + events you want to see. You must pass an instance of your Job Events + class to the Job Object set\_events() method. + Normally, you will probably only have one + Job Events Class, which will be instantiated for each Job. However, + if you wish to see different events in different Jobs, you may have + as many Job Events classes as you wish. 
+\end{description} + + +The first thing the startup script must do is to define what global Bacula +events (daemon events), it wants to see. This is done by creating a +Bacula Events class, instantiating it, then passing it to the +{\bf set\_events} method. There are three possible +events. + +\begin{description} +\item [JobStart] + \index[dir]{JobStart} + This Python method, if defined, will be called each time a Job is started. + The method is passed the class instantiation object as the first argument, + and the Bacula Job object as the second argument. The Bacula Job object + has several built-in methods, and you can define which ones you + want called. If you do not define this method, you will not be able + to interact with Bacula jobs. + +\item [JobEnd] + This Python method, if defined, will be called each time a Job terminates. + The method is passed the class instantiation object as the first argument, + and the Bacula Job object as the second argument. + +\item [Exit] + This Python method, if defined, will be called when the Director terminates. + The method is passed the class instantiation object as the first argument. +\end{description} + +Access to the Bacula variables and methods is done with: + + import bacula + +The following are the read-only attributes provided by the bacula object. +\begin{description} +\item [Name] +\item [ConfigFile] +\item [WorkingDir] +\item [Version] string consisting of "Version Build-date" +\end{description} + + +A simple definition of the Bacula Events Class might be the following: + +\footnotesize +\begin{verbatim} +import sys, bacula +class BaculaEvents: + def JobStart(self, job): + ... +\end{verbatim} +\normalsize + +Then to instantiate the class and pass it to Bacula, you +would do: + +\footnotesize +\begin{verbatim} +bacula.set_events(BaculaEvents()) # register Bacula Events wanted +\end{verbatim} +\normalsize + +And at that point, each time a Job is started, your BaculaEvents JobStart +method will be called. 
+ +Now to actually do anything with a Job, you must define which Job events +you want to see, and this is done by defining a JobEvents class containing +the methods you want called. Each method name corresponds to one of the +Job Events that Bacula will generate. + +A simple Job Events class might look like the following: + +\footnotesize +\begin{verbatim} +class JobEvents: + def NewVolume(self, job): + ... +\end{verbatim} +\normalsize + +Here, your JobEvents class method NewVolume will be called each time +the Job needs a new Volume name. To actually register the events defined +in your class with the Job, you must instantiate the JobEvents class and +set it in the Job {\bf set\_events} variable. Note, this is a bit different +from how you registered the Bacula events. The registration process must +be done in the Bacula JobStart event (your method). So, you would modify +Bacula Events (not the Job events) as follows: + +\footnotesize +\begin{verbatim} +import sys, bacula +class BaculaEvents: + def JobStart(self, job): + events = JobEvents() # create instance of Job class + job.set_events(events) # register Job events desired + ... +\end{verbatim} +\normalsize + +When a job event is triggered, the appropriate event definition is +called in the JobEvents class. This is the means by which your Python +script or code gets control. Once it has control, it may read job +attributes, or set them. See below for a list of read-only attributes, +and those that are writable. + +In addition, the Bacula {\bf job} object in the Director has +a number of methods (subroutines) that can be called. They +are: +\begin{description} +\item [set\_events] The set\_events method takes a single + argument, which is the instantiation of the Job Events class + that contains the methods that you want called. The method + names that will be called must correspond to the Bacula + defined events. You may define additional methods but Bacula + will not use them. 
+\item [run] The run method takes a single string + argument, which is the run command (same as in the Console) + that you want to submit to start a new Job. The value + returned by the run method is the JobId of the job that + started, or -1 if there was an error. +\item [write] The write method is used to be able to send + print output to the Job Report. This will be described later. +\item[cancel] The cancel method takes a single integer argument, + which is a JobId. If JobId is found, it will be canceled. +\item [DoesVolumeExist] The DoesVolumeExist method takes a single + string argument, which is the Volume name, and returns + 1 if the volume exists in the Catalog and 0 if the volume + does not exist. +\end{description} + +The following attributes are read/write within the Director +for the {\bf job} object. + +\begin{description} +\item [Priority] Read or set the Job priority. + Note, that setting a Job Priority is effective only before + the Job actually starts. +\item [Level] This attribute contains a string representing the Job + level, e.g. Full, Differential, Incremental, ... if read. + The level can also be set. +\end{description} + +The following read-only attributes are available within the Director +for the {\bf job} object. + +\begin{description} +\item [Type] This attribute contains a string representing the Job + type, e.g. Backup, Restore, Verify, ... +\item [JobId] This attribute contains an integer representing the + JobId. +\item [Client] This attribute contains a string with the name of the + Client for this job. +\item [NumVols] This attribute contains an integer with the number of + Volumes in the Pool being used by the Job. +\item [Pool] This attribute contains a string with the name of the Pool + being used by the Job. +\item [Storage] This attribute contains a string with the name of the + Storage resource being used by the Job. 
+\item [Catalog] This attribute contains a string with the name of the + Catalog resource being used by the Job. +\item [MediaType] This attribute contains a string with the name of the + Media Type associated with the Storage resource being used by the Job. +\item [Job] This attribute contains a string containing the name of the + Job resource used by this job (not unique). +\item [JobName] This attribute contains a string representing the full + unique Job name. +\item [JobStatus] This attribute contains a single character string + representing the current Job status. The status may change + during execution of the job. It may take on the following + values: + \begin{description} + \item [C] Created, not yet running + \item [R] Running + \item [B] Blocked + \item [T] Completed successfully + \item [E] Terminated with errors + \item [e] Non-fatal error + \item [f] Fatal error + \item [D] Verify found differences + \item [A] Canceled by user + \item [F] Waiting for Client + \item [S] Waiting for Storage daemon + \item [m] Waiting for new media + \item [M] Waiting for media mount + \item [s] Waiting for storage resource + \item [j] Waiting for job resource + \item [c] Waiting for client resource + \item [d] Waiting on maximum jobs + \item [t] Waiting on start time + \item [p] Waiting on higher priority jobs + \end{description} + +\item [Priority] This attribute contains an integer with the priority + assigned to the job. +\item [CatalogRes] tuple consisting of (DBName, Address, User, + Password, Socket, Port, Database Vendor) taken from the Catalog resource + for the Job with the exception of Database Vendor, which is + one of the following: MySQL, PostgreSQL, SQLite, Internal, + depending on what database you configured. +\item [VolumeName] + After a Volume has been purged, this attribute will contain the + name of that Volume. At other times, this value may have no meaning. 
+\end{description} + +The following write-only attributes are available within the +Director: + +\begin{description} +\item [JobReport] Send line to the Job Report. +\item [VolumeName] Set a new Volume name. Valid only during the + NewVolume event. +\end{description} + +\section{Python Console Command} +\index[general]{Python Console Command} +\index[general]{Console Command!Python} + +There is a new Console command named {\bf python}. It takes +a single argument {\bf restart}. Example: +\begin{verbatim} + python restart +\end{verbatim} + +This command restarts the Python interpreter in the Director. +This can be useful when you are modifying the DirStartUp script, +because normally Python will cache it, and thus the +script will be read one time. + +\section{Debugging Python Scripts} +\index[general]{Debugging Python Scripts} +In general, you debug your Python scripts by using print statements. +You can also develop your script or important parts of it as a +separate file using the Python interpreter to run it. Once you +have it working correctly, you can then call the script from +within the Bacula Python script (DirStartUp.py). + +If you are having problems loading DirStartUp.py, you will probably +not get any error messages because Bacula can only print Python +error messages after the Python interpreter is started. However, you +may be able to see the error messages by starting Bacula in +a shell window with the {\bf -d1} option on the command line. That +should cause the Python error messages to be printed in the shell +window. + +If you are getting error messages such as the following when +loading DirStartUp.py: + +\begin{verbatim} + Traceback (most recent call last): + File "/etc/bacula/scripts/DirStartUp.py", line 6, in ? + import time, sys, bacula + ImportError: /usr/lib/python2.3/lib-dynload/timemodule.so: undefined + symbol: PyInt_FromLong + bacula-dir: pythonlib.c:134 Python Import error. 
+\end{verbatim} + +It is because the DirStartUp script is calling a dynamically loaded +module (timemodule.so in the above case) that then tries to use +Python functions exported from the Python interpreter (in this case +PyInt\_FromLong). The way Bacula is currently linked with Python does +not permit this. The solution to the problem is to put such functions +(in this case the import of time) into a separate Python script, which +will do your calculations and return the values you want. Then call +(not import) this script from the Bacula DirStartUp.py script, and +it all should work as you expect. + + + + + +\section{Python Example} +\index[general]{Python Example} +\index[general]{Example!Python} + +An example script for the Director startup file is provided in +{\bf examples/python/DirStartup.py} as follows: + +\footnotesize +\begin{verbatim} +# +# Bacula Python interface script for the Director +# + +# You must import both sys and bacula +import sys, bacula + +# This is the list of Bacula daemon events that you +# can receive. +class BaculaEvents(object): + def __init__(self): + # Called here when a new Bacula Events class + # is created. Normally not used + noop = 1 + + def JobStart(self, job): + """ + Called here when a new job is started. If you want + to do anything with the Job, you must register + events you want to receive.
+ """ + events = JobEvents() # create instance of Job class + events.job = job # save Bacula's job pointer + job.set_events(events) # register events desired + sys.stderr = events # send error output to Bacula + sys.stdout = events # send stdout to Bacula + jobid = job.JobId; client = job.Client + numvols = job.NumVols + job.JobReport="Python Dir JobStart: JobId=%d Client=%s NumVols=%d\n" % (jobid,client,numvols) + + # Bacula Job is going to terminate + def JobEnd(self, job): + jobid = job.JobId + client = job.Client + job.JobReport="Python Dir JobEnd output: JobId=%d Client=%s.\n" % (jobid, client) + + # Called here when the Bacula daemon is going to exit + def Exit(self, job): + print "Daemon exiting." + +bacula.set_events(BaculaEvents()) # register daemon events desired + +""" + These are the Job events that you can receive. +""" +class JobEvents(object): + def __init__(self): + # Called here when you instantiate the Job. Not + # normally used + noop = 1 + + def JobInit(self, job): + # Called when the job is first scheduled + noop = 1 + + def JobRun(self, job): + # Called just before running the job after initializing + # This is the point to change most Job parameters. + # It is equivalent to the JobRunBefore point. + noop = 1 + + def NewVolume(self, job): + # Called when Bacula wants a new Volume name. The Volume + # name returned, if any, must be stored in job.VolumeName + jobid = job.JobId + client = job.Client + numvol = job.NumVols; + print job.CatalogRes + job.JobReport = "JobId=%d Client=%s NumVols=%d" % (jobid, client, numvol) + job.JobReport="Python before New Volume set for Job.\n" + Vol = "TestA-%d" % numvol + job.JobReport = "Exists=%d TestA-%d" % (job.DoesVolumeExist(Vol), numvol) + job.VolumeName="TestA-%d" % numvol + job.JobReport="Python after New Volume set for Job.\n" + return 1 + + def VolumePurged(self, job): + # Called when a Volume is purged. 
The Volume name can be referenced + # with job.VolumeName + noop = 1 + + + +\end{verbatim} +\normalsize diff --git a/docs/manuals/en/concepts/recycling.tex b/docs/manuals/en/concepts/recycling.tex new file mode 100644 index 00000000..c2962d51 --- /dev/null +++ b/docs/manuals/en/concepts/recycling.tex @@ -0,0 +1,717 @@ +%% +%% + +\chapter{Automatic Volume Recycling} +\label{RecyclingChapter} +\index[general]{Recycling!Automatic Volume } +\index[general]{Automatic Volume Recycling } + +By default, once Bacula starts writing a Volume, it can append to the +volume, but it will not overwrite the existing data thus destroying it. +However when Bacula {\bf recycles} a Volume, the Volume becomes available +for being reused, and Bacula can at some later time overwrite the previous +contents of that Volume. Thus all previous data will be lost. If the +Volume is a tape, the tape will be rewritten from the beginning. If the +Volume is a disk file, the file will be truncated before being rewritten. + +You may not want Bacula to automatically recycle (reuse) tapes. This would +require a large number of tapes though, and in such a case, it is possible +to manually recycle tapes. For more on manual recycling, see the section +entitled \ilink{ Manually Recycling Volumes}{manualrecycling} below in this +chapter. + +Most people prefer to have a Pool of tapes that are used for daily backups and +recycled once a week, another Pool of tapes that are used for Full backups +once a week and recycled monthly, and finally a Pool of tapes that are used +once a month and recycled after a year or two. With a scheme like this, the +number of tapes in your pool or pools remains constant. + +By properly defining your Volume Pools with appropriate Retention periods, +Bacula can manage the recycling (such as defined above) automatically. + +Automatic recycling of Volumes is controlled by four records in the {\bf +Pool} resource definition in the Director's configuration file. 
These four +records are: + +\begin{itemize} +\item AutoPrune = yes +\item VolumeRetention = \lt{}time\gt{} +\item Recycle = yes +\item RecyclePool = \lt{}APool\gt{} (\textit{This require bacula 2.1.4 or greater}) +\end{itemize} + +The above three directives are all you need assuming that you fill +each of your Volumes then wait the Volume Retention period before +reusing them. If you want Bacula to stop using a Volume and recycle +it before it is full, you will need to use one or more additional +directives such as: +\begin{itemize} +\item Use Volume Once = yes +\item Volume Use Duration = ttt +\item Maximum Volume Jobs = nnn +\item Maximum Volume Bytes = mmm +\end{itemize} +Please see below and +the \ilink{Basic Volume Management}{DiskChapter} chapter +of this manual for more complete examples. + +Automatic recycling of Volumes is performed by Bacula only when it wants a +new Volume and no appendable Volumes are available in the Pool. It will then +search the Pool for any Volumes with the {\bf Recycle} flag set and whose +Volume Status is {\bf Full}. At that point, the recycling occurs in two steps. +The first is that the Catalog for a Volume must be purged of all Jobs and +Files contained on that Volume, and the second step is the actual recycling of +the Volume. The Volume will be purged if the VolumeRetention period has +expired. When a Volume is marked as Purged, it means that no Catalog records +reference that Volume, and the Volume can be recycled. Until recycling +actually occurs, the Volume data remains intact. If no Volumes can be found +for recycling for any of the reasons stated above, Bacula will request +operator intervention (i.e. it will ask you to label a new volume). + +A key point mentioned above, that can be a source of frustration, is that Bacula +will only recycle purged Volumes if there is no other appendable Volume +available, otherwise, it will always write to an appendable Volume before +recycling even if there are Volume marked as Purged. 
This preserves your data +as long as possible. So, if you wish to "force" Bacula to use a purged +Volume, you must first ensure that no other Volume in the Pool is marked {\bf +Append}. If necessary, you can manually set a volume to {\bf Full}. The reason +for this is that Bacula wants to preserve the data on your old tapes (even +though purged from the catalog) as long as absolutely possible before +overwriting it. There are also a number of directives such as +{\bf Volume Use Duration} that will automatically mark a volume as {\bf +Used} and thus no longer appendable. + +\label{AutoPruning} +\section{Automatic Pruning} +\index[general]{Automatic Pruning} +\index[general]{Pruning!Automatic} + +As Bacula writes files to tape, it keeps a list of files, jobs, and volumes +in a database called the catalog. Among other things, the database helps +Bacula to decide which files to back up in an incremental or differential +backup, and helps you locate files on past backups when you want to restore +something. However, the catalog will grow larger and larger as time goes +on, and eventually it can become unacceptably large. + +Bacula's process for removing entries from the catalog is called Pruning. +The default is Automatic Pruning, which means that once an entry reaches a +certain age (e.g. 30 days old) it is removed from the catalog. Once a job +has been pruned, you can still restore it from the backup tape, but one +additional step is required: scanning the volume with bscan. The +alternative to Automatic Pruning is Manual Pruning, in which you explicitly +tell Bacula to erase the catalog entries for a volume. You'd usually do +this when you want to reuse a Bacula volume, because there's no point in +keeping a list of files that USED TO BE on a tape. Or, if the catalog is +starting to get too big, you could prune the oldest jobs to save space. +Manual pruning is done with the \ilink{ prune command}{ManualPruning} in +the console. 
(thanks to Bryce Denney for the above explanation). + +\section{Pruning Directives} +\index[general]{Pruning Directives } +\index[general]{Directives!Pruning } + +There are three pruning durations. All apply to catalog database records and +not to the actual data in a Volume. The pruning (or retention) durations are +for: Volumes (Media records), Jobs (Job records), and Files (File records). +The durations inter-depend a bit because if Bacula prunes a Volume, it +automatically removes all the Job records, and all the File records. Also when +a Job record is pruned, all the File records for that Job are also pruned +(deleted) from the catalog. + +Having the File records in the database means that you can examine all the +files backed up for a particular Job. They take the most space in the catalog +(probably 90-95\% of the total). When the File records are pruned, the Job +records can remain, and you can still examine what Jobs ran, but not the +details of the Files backed up. In addition, without the File records, you +cannot use the Console restore command to restore the files. + +When a Job record is pruned, the Volume (Media record) for that Job can still +remain in the database, and if you do a "list volumes", you will see the +volume information, but the Job records (and its File records) will no longer +be available. + +In each case, pruning removes information about where older files are, but it +also prevents the catalog from growing to be too large. You choose the +retention periods in function of how many files you are backing up and the +time periods you want to keep those records online, and the size of the +database. You can always re-insert the records (with 98\% of the original data) +by using "bscan" to scan in a whole Volume or any part of the volume that +you want. + +By setting {\bf AutoPrune} to {\bf yes} you will permit {\bf Bacula} to +automatically prune all Volumes in the Pool when a Job needs another Volume. 
+Volume pruning means removing records from the catalog. It does not shrink the +size of the Volume or affect the Volume data until the Volume gets +overwritten. When a Job requests another volume and there are no Volumes with +Volume Status {\bf Append} available, Bacula will begin volume pruning. This +means that all Jobs that are older than the {\bf VolumeRetention} period will +be pruned from every Volume that has Volume Status {\bf Full} or {\bf Used} +and has Recycle set to {\bf yes}. Pruning consists of deleting the +corresponding Job, File, and JobMedia records from the catalog database. No +change to the physical data on the Volume occurs during the pruning process. +When all files are pruned from a Volume (i.e. no records in the catalog), the +Volume will be marked as {\bf Purged} implying that no Jobs remain on the +volume. The Pool records that control the pruning are described below. + +\begin{description} + +\item [AutoPrune = \lt{}yes|no\gt{}] + \index[console]{AutoPrune } + If AutoPrune is set to {\bf yes} (default), Bacula + will automatically apply the Volume retention period when running a Job and + it needs a new Volume but no appendable volumes are available. At that point, + Bacula will prune all Volumes that can be pruned (i.e. AutoPrune set) in an + attempt to find a usable volume. If during the autoprune, all files are + pruned from the Volume, it will be marked with VolStatus {\bf Purged}. The + default is {\bf yes}. Note, that although the File and Job records may be + pruned from the catalog, a Volume will be marked Purged (and hence + ready for recycling) if the Volume status is Append, Full, Used, or Error. + If the Volume has another status, such as Archive, Read-Only, Disabled, + Busy, or Cleaning, the Volume status will not be changed to Purged. 
+ +\item [Volume Retention = \lt{}time-period-specification\gt{}] + \index[console]{Volume Retention} + The Volume Retention record defines the length of time that Bacula will + guarantee that the Volume is not reused counting from the time the last + job stored on the Volume terminated. A key point is that this time + period is not even considered as long as the Volume remains appendable. + The Volume Retention period countdown begins only when the Append + status has been changed to some other status (Full, Used, Purged, ...). + + When this time period expires, and if {\bf AutoPrune} is set to {\bf + yes}, and a new Volume is needed, but no appendable Volume is available, + Bacula will prune (remove) Job records that are older than the specified + Volume Retention period. + + The Volume Retention period takes precedence over any Job Retention + period you have specified in the Client resource. It should also be + noted that the Volume Retention period is obtained by reading the + Catalog Database Media record rather than the Pool resource record. + This means that if you change the VolumeRetention in the Pool resource + record, you must ensure that the corresponding change is made in the + catalog by using the {\bf update pool} command. Doing so will ensure + that any new Volumes will be created with the changed Volume Retention + period. Any existing Volumes will have their own copy of the Volume + Retention period that can only be changed on a Volume by Volume basis + using the {\bf update volume} command. + + When all file catalog entries are removed from the volume, its VolStatus is + set to {\bf Purged}. The files remain physically on the Volume until the + volume is overwritten. + + Retention periods are specified in seconds, minutes, hours, days, weeks, + months, quarters, or years on the record. See the + \ilink{Configuration chapter}{Time} of this manual for + additional details of time specification. + +The default is 1 year.
+% TODO: if that is the format, should it be in quotes? decide on a style + +\item [Recycle = \lt{}yes|no\gt{}] + \index[fd]{Recycle } + This statement tells Bacula whether or not the particular Volume can be + recycled (i.e. rewritten). If Recycle is set to {\bf no} (the + default), then even if Bacula prunes all the Jobs on the volume and it + is marked {\bf Purged}, it will not consider the tape for recycling. If + Recycle is set to {\bf yes} and all Jobs have been pruned, the volume + status will be set to {\bf Purged} and the volume may then be reused + when another volume is needed. If the volume is reused, it is relabeled + with the same Volume Name, however all previous data will be lost. + \end{description} + + It is also possible to "force" pruning of all Volumes in the Pool + associated with a Job by adding {\bf Prune Files = yes} to the Job resource. + +\label{Recycling} +\label{RecyclingAlgorithm} +\section{Recycling Algorithm} +\index[general]{Algorithm!Recycling } +\index[general]{Recycling Algorithm } + +After all Volumes of a Pool have been pruned (as mentioned above, this happens +when a Job needs a new Volume and no appendable Volumes are available), Bacula +will look for the oldest Volume that is Purged (all Jobs and Files expired), +and if the {\bf Recycle} flag is on (Recycle=yes) for that Volume, Bacula will +relabel it and write new data on it. + +As mentioned above, there are two key points for getting a Volume +to be recycled. First, the Volume must no longer be marked Append (there +are a number of directives to automatically make this change), and second +since the last write on the Volume, one or more of the Retention periods +must have expired so that there are no more catalog backup job records +that reference that Volume. Once both those conditions are satisfied, +the volume can be marked Purged and hence recycled. 
+ +The full algorithm that Bacula uses when it needs a new Volume is: +\index[general]{New Volume Algorithm} +\index[general]{Algorithm!New Volume} + +The algorithm described below assumes that AutoPrune is enabled, +that Recycling is turned on, and that you have defined +appropriate Retention periods, or used the defaults for all these +items. + +\begin{itemize} +\item If the request is for an Autochanger device, look only + for Volumes in the Autochanger (i.e. with InChanger set and that have + the correct Storage device). +\item Search the Pool for a Volume with VolStatus=Append (if there is more + than one, the Volume with the oldest date last written is chosen. If + two have the same date then the one with the lowest MediaId is chosen). +\item Search the Pool for a Volume with VolStatus=Recycle and the InChanger + flag is set true (if there is more than one, the Volume with the oldest + date last written is chosen. If two have the same date then the one + with the lowest MediaId is chosen). +\item Try recycling any purged Volumes. +\item Prune volumes applying Volume retention period (Volumes with VolStatus + Full, Used, or Append are pruned). Note, even if all the File and Job + records are pruned from a Volume, the Volume will not be marked Purged + until the Volume retention period expires. +\item Search the Pool for a Volume with VolStatus=Purged +\item If a Pool named "Scratch" exists, search for a Volume and if found + move it to the current Pool for the Job and use it. Note, when + the Scratch Volume is moved into the current Pool, the basic + Pool defaults are applied as if it is a newly labeled Volume + (equivalent to an {\bf update volume from pool} command). +\item If we were looking for Volumes in the Autochanger, go back to + step 2 above, but this time, look for any Volume whether or not + it is in the Autochanger. 
+\item Attempt to create a new Volume if automatic labeling is enabled. + If Python is enabled, a Python NewVolume event is generated before + the Label Format directive is used. If the maximum number of Volumes + specified for the pool is reached, a new Volume will not be created. +\item Prune the oldest Volume if RecycleOldestVolume=yes (the Volume with the + oldest LastWritten date and VolStatus equal to Full, Recycle, Purged, Used, + or Append is chosen). This record ensures that all retention periods are + properly respected. +\item Purge the oldest Volume if PurgeOldestVolume=yes (the Volume with the + oldest LastWritten date and VolStatus equal to Full, Recycle, Purged, Used, + or Append is chosen). We strongly recommend against the use of {\bf + PurgeOldestVolume} as it can quite easily lead to loss of current backup + data. +\item Give up and ask the operator. +\end{itemize} + +The above occurs when Bacula has finished writing a Volume or when no Volume +is present in the drive. + +On the other hand, if you have inserted a different Volume after the last job, +and Bacula recognizes the Volume as valid, it will request authorization from +the Director to use this Volume. In this case, if you have set {\bf Recycle +Current Volume = yes} and the Volume is marked as Used or Full, Bacula will +prune the volume and if all jobs were removed during the pruning (respecting +the retention periods), the Volume will be recycled and used. + +The recycling algorithm in this case is: +\begin{itemize} +\item If the VolStatus is {\bf Append} or {\bf Recycle} + is set, the volume will be used. +\item If {\bf Recycle Current Volume} is set and the volume is marked {\bf + Full} or {\bf Used}, Bacula will prune the volume (applying the retention + period). If all Jobs are pruned from the volume, it will be recycled.
+\end{itemize} + +This permits users to manually change the Volume every day and load tapes in +an order different from what is in the catalog, and if the volume does not +contain a current copy of your backup data, it will be used. + +A few points from Alan Brown to keep in mind: + +\begin{enumerate} +\item If a pool doesn't have maximum volumes defined then Bacula will prefer to + demand new volumes over forcibly purging older volumes. + +\item If volumes become free through pruning and the Volume retention period has + expired, then they get marked as "purged" and are immediately available for + recycling - these will be used in preference to creating new volumes. + +\item If the Job, File, and Volume retention periods are different, then + it's common to see a tape with no files or jobs listed in the database, + but which is still not marked as "purged". +\end{enumerate} + + +\section{Recycle Status} +\index[general]{Status!Recycle } +\index[general]{Recycle Status } + +Each Volume inherits the Recycle status (yes or no) from the Pool resource +record when the Media record is created (normally when the Volume is labeled). +This Recycle status is stored in the Media record of the Catalog. Using +the Console program, you may subsequently change the Recycle status for each +Volume. 
For example in the following output from {\bf list volumes}: + +\footnotesize +\begin{verbatim} ++----------+-------+--------+---------+------------+--------+-----+ +| VolumeNa | Media | VolSta | VolByte | LastWritte | VolRet | Rec | ++----------+-------+--------+---------+------------+--------+-----+ +| File0001 | File | Full | 4190055 | 2002-05-25 | 14400 | 1 | +| File0002 | File | Full | 1896460 | 2002-05-26 | 14400 | 1 | +| File0003 | File | Full | 1896460 | 2002-05-26 | 14400 | 1 | +| File0004 | File | Full | 1896460 | 2002-05-26 | 14400 | 1 | +| File0005 | File | Full | 1896460 | 2002-05-26 | 14400 | 1 | +| File0006 | File | Full | 1896460 | 2002-05-26 | 14400 | 1 | +| File0007 | File | Purged | 1896466 | 2002-05-26 | 14400 | 1 | ++----------+-------+--------+---------+------------+--------+-----+ +\end{verbatim} +\normalsize + +all the volumes are marked as recyclable, and the last Volume, {\bf File0007} +has been purged, so it may be immediately recycled. The other volumes are all +marked recyclable and when their Volume Retention period (14400 seconds or four +hours) expires, they will be eligible for pruning, and possibly recycling. +Even though Volume {\bf File0007} has been purged, all the data on the Volume +is still recoverable. A purged Volume simply means that there are no entries +in the Catalog. Even if the Volume Status is changed to {\bf Recycle}, the +data on the Volume will be recoverable. The data is lost only when the Volume +is re-labeled and re-written. + +To modify Volume {\bf File0001} so that it cannot be recycled, you use the +{\bf update volume pool=File} command in the console program, or simply {\bf +update} and Bacula will prompt you for the information. 
+ +\footnotesize +\begin{verbatim} ++----------+------+-------+---------+-------------+-------+-----+ +| VolumeNa | Media| VolSta| VolByte | LastWritten | VolRet| Rec | ++----------+------+-------+---------+-------------+-------+-----+ +| File0001 | File | Full | 4190055 | 2002-05-25 | 14400 | 0 | +| File0002 | File | Full | 1897236 | 2002-05-26 | 14400 | 1 | +| File0003 | File | Full | 1896460 | 2002-05-26 | 14400 | 1 | +| File0004 | File | Full | 1896460 | 2002-05-26 | 14400 | 1 | +| File0005 | File | Full | 1896460 | 2002-05-26 | 14400 | 1 | +| File0006 | File | Full | 1896460 | 2002-05-26 | 14400 | 1 | +| File0007 | File | Purged| 1896466 | 2002-05-26 | 14400 | 1 | ++----------+------+-------+---------+-------------+-------+-----+ +\end{verbatim} +\normalsize + +In this case, {\bf File0001} will never be automatically recycled. The same +effect can be achieved by setting the Volume Status to Read-Only. + +As you have noted, the Volume Status (VolStatus) column in the +catalog database contains the current status of the Volume, which +is normally maintained automatically by Bacula. To give you an +idea of some of the values it can take during the life cycle of +a Volume, here is a picture created by Arno Lehmann: + +\footnotesize +\begin{verbatim} +A typical volume life cycle is like this: + + because job count or size limit exceeded + Append ----------------------------------------> Used + ^ | + | First Job writes to Retention time passed | + | the volume and recycling takes | + | place | + | v + Recycled <-------------------------------------- Purged + Volume is selected for reuse + +\end{verbatim} +\normalsize + + +\section{Making Bacula Use a Single Tape} +\label{singletape} +\index[general]{Tape!Making Bacula Use a Single} +\index[general]{Making Bacula Use a Single Tape} + +Most people will want Bacula to fill a tape and when it is full, a new tape +will be mounted, and so on. 
However, as an extreme example, it is possible for +Bacula to write on a single tape, and every night to rewrite it. To get this +to work, you must do two things: first, set the VolumeRetention to less than +your save period (one day), and the second item is to make Bacula mark the +tape as full after using it once. This is done using {\bf UseVolumeOnce = +yes}. If this latter record is not used and the tape is not full after the +first time it is written, Bacula will simply append to the tape and eventually +request another volume. Using the tape only once, forces the tape to be marked +{\bf Full} after each use, and the next time {\bf Bacula} runs, it will +recycle the tape. + +An example Pool resource that does this is: + +\footnotesize +\begin{verbatim} +Pool { + Name = DDS-4 + Use Volume Once = yes + Pool Type = Backup + AutoPrune = yes + VolumeRetention = 12h # expire after 12 hours + Recycle = yes +} +\end{verbatim} +\normalsize + +\section{Daily, Weekly, Monthly Tape Usage Example} +\label{usageexample} +\index[general]{Daily, Weekly, Monthly Tape Usage Example } +\index[general]{Example!Daily Weekly Monthly Tape Usage } + +This example is meant to show you how one could define a fixed set of volumes +that Bacula will rotate through on a regular schedule. There are an infinite +number of such schemes, all of which have various advantages and +disadvantages. + +We start with the following assumptions: + +\begin{itemize} +\item A single tape has more than enough capacity to do a full save. +\item There are ten tapes that are used on a daily basis for incremental + backups. They are prelabeled Daily1 ... Daily10. +\item There are four tapes that are used on a weekly basis for full backups. + They are labeled Week1 ... Week4. +\item There are 12 tapes that are used on a monthly basis for full backups. + They are numbered Month1 ... Month12 +\item A full backup is done every Saturday evening (tape inserted Friday + evening before leaving work). 
+\item No backups are done over the weekend (this is easy to change). +\item The first Friday of each month, a Monthly tape is used for the Full + backup. +\item Incremental backups are done Monday - Friday (actually Tue-Fri + mornings). +% TODO: why this "actually"? does this need to be explained? + \end{itemize} + +We start the system by doing a Full save to one of the weekly volumes or one +of the monthly volumes. The next morning, we remove the tape and insert a +Daily tape. Friday evening, we remove the Daily tape and insert the next tape +in the Weekly series. Monday, we remove the Weekly tape and re-insert the +Daily tape. On the first Friday of the next month, we insert the next Monthly +tape in the series rather than a Weekly tape, then continue. When a Daily tape +finally fills up, {\bf Bacula} will request the next one in the series, and +the next day when you notice the email message, you will mount it and {\bf +Bacula} will finish the unfinished incremental backup. + +What does this give? Well, at any point, you will have the last complete +Full save plus several Incremental saves. For any given file you want to +recover (or your whole system), you will have a copy of that file every day +for at least the last 14 days. For older versions, you will have at least three +and probably four Friday full saves of that file, and going back further, you +will have a copy of that file made on the beginning of the month for at least +a year. + +So you have copies of any file (or your whole system) for at least a year, but +as you go back in time, the time between copies increases from daily to weekly +to monthly. + +What would the Bacula configuration look like to implement such a scheme? 
+ +\footnotesize +\begin{verbatim} +Schedule { + Name = "NightlySave" + Run = Level=Full Pool=Monthly 1st sat at 03:05 + Run = Level=Full Pool=Weekly 2nd-5th sat at 03:05 + Run = Level=Incremental Pool=Daily tue-fri at 03:05 +} +Job { + Name = "NightlySave" + Type = Backup + Level = Full + Client = LocalMachine + FileSet = "File Set" + Messages = Standard + Storage = DDS-4 + Pool = Daily + Schedule = "NightlySave" +} +# Definition of file storage device +Storage { + Name = DDS-4 + Address = localhost + SDPort = 9103 + Password = XXXXXXXXXXXXX + Device = FileStorage + Media Type = 8mm +} +FileSet { + Name = "File Set" + Include = signature=MD5 { + fffffffffffffffff + } + Exclude = { *.o } +} +Pool { + Name = Daily + Pool Type = Backup + AutoPrune = yes + VolumeRetention = 10d # recycle in 10 days + Maximum Volumes = 10 + Recycle = yes +} +Pool { + Name = Weekly + Use Volume Once = yes + Pool Type = Backup + AutoPrune = yes + VolumeRetention = 30d # recycle in 30 days (default) + Recycle = yes +} +Pool { + Name = Monthly + Use Volume Once = yes + Pool Type = Backup + AutoPrune = yes + VolumeRetention = 365d # recycle in 1 year + Recycle = yes +} +\end{verbatim} +\normalsize + +\section{ Automatic Pruning and Recycling Example} +\label{PruningExample} +\index[general]{Automatic Pruning and Recycling Example } +\index[general]{Example!Automatic Pruning and Recycling } + +Perhaps the best way to understand the various resource records that come into +play during automatic pruning and recycling is to run a Job that goes through +the whole cycle. 
If you add the following resources to your Director's +configuration file: + +\footnotesize +\begin{verbatim} +Schedule { + Name = "30 minute cycle" + Run = Level=Full Pool=File Messages=Standard Storage=File + hourly at 0:05 + Run = Level=Full Pool=File Messages=Standard Storage=File + hourly at 0:35 +} +Job { + Name = "Filetest" + Type = Backup + Level = Full + Client=XXXXXXXXXX + FileSet="Test Files" + Messages = Standard + Storage = File + Pool = File + Schedule = "30 minute cycle" +} +# Definition of file storage device +Storage { + Name = File + Address = XXXXXXXXXXX + SDPort = 9103 + Password = XXXXXXXXXXXXX + Device = FileStorage + Media Type = File +} +FileSet { + Name = "Test Files" + Include = signature=MD5 { + fffffffffffffffff + } + Exclude = { *.o } +} +Pool { + Name = File + Use Volume Once = yes + Pool Type = Backup + LabelFormat = "File" + AutoPrune = yes + VolumeRetention = 4h + Maximum Volumes = 12 + Recycle = yes +} +\end{verbatim} +\normalsize + +Where you will need to replace the {\bf ffffffffff}'s by the appropriate files +to be saved for your configuration. For the FileSet Include, choose a +directory that has one or two megabytes maximum since there will probably be +approximately eight copies of the directory that {\bf Bacula} will cycle through. + +In addition, you will need to add the following to your Storage daemon's +configuration file: + +\footnotesize +\begin{verbatim} +Device { + Name = FileStorage + Media Type = File + Archive Device = /tmp + LabelMedia = yes; + Random Access = Yes; + AutomaticMount = yes; + RemovableMedia = no; + AlwaysOpen = no; +} +\end{verbatim} +\normalsize + +With the above resources, Bacula will start a Job every half hour that saves a +copy of the directory you chose to /tmp/File0001 ... /tmp/File0012. After 4 +hours, Bacula will start recycling the backup Volumes (/tmp/File0001 ...). You +should see this happening in the output produced. 
Bacula will automatically +create the Volumes (Files) the first time it uses them. + +To turn it off, either delete all the resources you've added, or simply +comment out the {\bf Schedule} record in the {\bf Job} resource. + +\section{Manually Recycling Volumes} +\label{manualrecycling} +\index[general]{Volumes!Manually Recycling } +\index[general]{Manually Recycling Volumes } + +Although automatic recycling of Volumes is implemented in version 1.20 and +later (see the +\ilink{Automatic Recycling of Volumes}{RecyclingChapter} chapter of +this manual), you may want to manually force reuse (recycling) of a Volume. + +Assuming that you want to keep the Volume name, but you simply want to write +new data on the tape, the steps to take are: + +\begin{itemize} +\item Use the {\bf update volume} command in the Console to ensure that the + {\bf Recycle} field is set to {\bf 1} +\item Use the {\bf purge jobs volume} command in the Console to mark the + Volume as {\bf Purged}. Check by using {\bf list volumes}. +\end{itemize} + +Once the Volume is marked Purged, it will be recycled the next time a Volume +is needed. + +If you wish to reuse the tape by giving it a new name, follow the following +steps: + +\begin{itemize} +\item Use the {\bf purge jobs volume} command in the Console to mark the + Volume as {\bf Purged}. Check by using {\bf list volumes}. +\item In Bacula version 1.30 or greater, use the Console {\bf relabel} + command to relabel the Volume. +\end{itemize} + +Please note that the relabel command applies only to tape Volumes. + +For Bacula versions prior to 1.30 or to manually relabel the Volume, use the +instructions below: + +\begin{itemize} +\item Use the {\bf delete volume} command in the Console to delete the Volume + from the Catalog. +\item If a different tape is mounted, use the {\bf unmount} command, + remove the tape, and insert the tape to be renamed. 
+\item Write an EOF mark in the tape using the following commands: + +\footnotesize +\begin{verbatim} + mt -f /dev/nst0 rewind + mt -f /dev/nst0 weof +\end{verbatim} +\normalsize + +where you replace {\bf /dev/nst0} with the appropriate device name on your +system. +\item Use the {\bf label} command to write a new label to the tape and to + enter it in the catalog. +\end{itemize} + +Please be aware that the {\bf delete} command can be dangerous. Once it is +done, to recover the File records, you must either restore your database as it +was before the {\bf delete} command, or use the {\bf bscan} utility program to +scan the tape and recreate the database entries. diff --git a/docs/manuals/en/concepts/requirements.tex b/docs/manuals/en/concepts/requirements.tex new file mode 100644 index 00000000..9dbeed98 --- /dev/null +++ b/docs/manuals/en/concepts/requirements.tex @@ -0,0 +1,67 @@ +%% +%% + +\chapter{System Requirements} +\label{SysReqs} +\index[general]{System Requirements } +\index[general]{Requirements!System } + +\begin{itemize} +\item {\bf Bacula} has been compiled and run on OpenSuSE Linux, FreeBSD, and + Solaris systems. +\item It requires GNU C++ version 2.95 or higher to compile. You can try with + other compilers and older versions, but you are on your own. We have + successfully compiled and used Bacula using GNU C++ version 4.1.3. + Note, in general GNU C++ is a separate package (e.g. RPM) from GNU C, so you + need them both loaded. On Red Hat systems, the C++ compiler is part of the + {\bf gcc-c++} rpm package. +\item There are certain third party packages that Bacula may need. Except for + MySQL and PostgreSQL, they can all be found in the {\bf depkgs} and {\bf + depkgs1} releases. However, most current Linux and FreeBSD systems + provide these as system packages. 
+\item The minimum versions for each of the databases supported by Bacula + are: + + \begin{itemize} + \item MySQL 4.1 + \item PostgreSQL 7.4 + \item SQLite 2.8.16 or SQLite 3 + \end{itemize} + +\item If you want to build the Win32 binaries, please see the + README.mingw32 file in the src/win32 directory. We cross-compile the + Win32 release on Linux. We provide documentation on building the Win32 + version, but due to the complexity, you are pretty much on your own + if you want to build it yourself. +\item {\bf Bacula} requires a good implementation of pthreads to work. This + is not the case on some of the BSD systems. +\item The source code has been written with portability in mind and is mostly + POSIX compatible. Thus porting to any POSIX compatible operating system + should be relatively easy. +\item The GNOME Console program is developed and tested under GNOME 2.x. + GNOME 1.4 is no longer supported. +\item The wxWidgets Console program is developed and tested with the latest + stable ANSI or Unicode version of + \elink{wxWidgets}{\url{http://www.wxwidgets.org/}} (2.6.1). It works fine with the + Windows and GTK+-2.x version of wxWidgets, and should also work on other + platforms supported by wxWidgets. +\item The Tray Monitor program is developed for GTK+-2.x. It needs GNOME less + or equal to 2.2, KDE greater or equal to 3.1 or any window manager supporting + the + \elink{ FreeDesktop system tray + standard}{\url{http://www.freedesktop.org/Standards/systemtray-spec}}. +\item If you want to enable command line editing and history, you will need + to have /usr/include/termcap.h and either the termcap or the ncurses library + loaded (libtermcap-devel or ncurses-devel). 
+\item If you want to use DVD as backup medium, you will need to download the + \elink{dvd+rw-tools 5.21.4.10.8}{\url{http://fy.chalmers.se/~appro/linux/DVD+RW/}}, + apply the patch that is in the {\bf patches} directory of the main + source tree + to make these tools compatible with Bacula, then compile and install them. + There is also a patch for dvd+rw-tools version 6.1, and we hope that the + patch is integrated into a later version. + Do not use the dvd+rw-tools provided by your distribution, unless you + are sure it contains the patch. dvd+rw-tools without the patch will not + work with Bacula. DVD media is not recommended for serious or important + backups because of its low reliability. +\end{itemize} diff --git a/docs/manuals/en/concepts/restore.tex b/docs/manuals/en/concepts/restore.tex new file mode 100644 index 00000000..05e23fd6 --- /dev/null +++ b/docs/manuals/en/concepts/restore.tex @@ -0,0 +1,1438 @@ +%% +%% +\chapter{The Restore Command} +\label{RestoreChapter} +\index[general]{Command!Console Restore} +\index[general]{Console Restore Command} + +\section{General} +\index[general]{General } + +Below, we will discuss restoring files with the Console {\bf restore} command, +which is the recommended way of doing restoring files. It is not possible +to restore files by automatically starting a job as you do with Backup, +Verify, ... jobs. However, in addition to the console restore command, +there is a standalone program named {\bf bextract}, which also permits +restoring files. For more information on this program, please see the +\ilink{Bacula Utility Programs}{bextract} chapter of this manual. We +don't particularly recommend the {\bf bextract} program because it +lacks many of the features of the normal Bacula restore, such as the +ability to restore Win32 files to Unix systems, and the ability to +restore access control lists (ACL). As a consequence, we recommend, +wherever possible to use Bacula itself for restores as described below. 
+ +You may also want to look at the {\bf bls} program in the same chapter, +which allows you to list the contents of your Volumes. Finally, if you +have an old Volume that is no longer in the catalog, you can restore the +catalog entries using the program named {\bf bscan}, documented in the same +\ilink{Bacula Utility Programs}{bscan} chapter. + +In general, to restore a file or a set of files, you must run a {\bf restore} +job. That is a job with {\bf Type = Restore}. As a consequence, you will need +a predefined {\bf restore} job in your {\bf bacula-dir.conf} (Director's +config) file. The exact parameters (Client, FileSet, ...) that you define are +not important as you can either modify them manually before running the job or +if you use the {\bf restore} command, explained below, Bacula will +automatically set them for you. In fact, you can no longer simply run a restore +job. You must use the restore command. + +Since Bacula is a network backup program, you must be aware that when you +restore files, it is up to you to ensure that you or Bacula have selected the +correct Client and the correct hard disk location for restoring those files. +{\bf Bacula} will quite willingly backup client A, and restore it by sending +the files to a different directory on client B. Normally, you will want to +avoid this, but assuming the operating systems are not too different in their +file structures, this should work perfectly well, if so desired. +By default, Bacula will restore data to the same Client that was backed +up, and those data will be restored not to the original places but to +{\bf /tmp/bacula-restores}. You may modify any of these defaults when the +restore command prompts you to run the job by selecting the {\bf mod} +option. 
+ +\label{Example1} +\section{The Restore Command} +\index[general]{Command!Restore} +\index[general]{Restore Command} + +Since Bacula maintains a catalog of your files and on which Volumes (disk or +tape), they are stored, it can do most of the bookkeeping work, allowing you +simply to specify what kind of restore you want (current, before a particular +date), and what files to restore. Bacula will then do the rest. + +This is accomplished using the {\bf restore} command in the Console. First you +select the kind of restore you want, then the JobIds are selected, +the File records for those Jobs are placed in an internal Bacula directory +tree, and the restore enters a file selection mode that allows you to +interactively walk up and down the file tree selecting individual files to be +restored. This mode is somewhat similar to the standard Unix {\bf restore} +program's interactive file selection mode. + +If a Job's file records have been pruned from the catalog, the {\bf +restore} command will be unable to find any files to restore. See below +for more details on this. + +Within the Console program, after entering the {\bf restore} command, you are +presented with the following selection prompt: + +\footnotesize +\begin{verbatim} +First you select one or more JobIds that contain files +to be restored. You will be presented several methods +of specifying the JobIds. Then you will be allowed to +select which files from those JobIds are to be restored. 
+To select the JobIds, you have the following choices: + 1: List last 20 Jobs run + 2: List Jobs where a given File is saved + 3: Enter list of comma separated JobIds to select + 4: Enter SQL list command + 5: Select the most recent backup for a client + 6: Select backup for a client before a specified time + 7: Enter a list of files to restore + 8: Enter a list of files to restore before a specified time + 9: Find the JobIds of the most recent backup for a client + 10: Find the JobIds for a backup for a client before a specified time + 11: Enter a list of directories to restore for found JobIds + 12: Cancel +Select item: (1-12): +\end{verbatim} +\normalsize + +There are a lot of options, and as a point of reference, most people will +want to slect item 5 (the most recent backup for a client). The details +of the above options are: + +\begin{itemize} +\item Item 1 will list the last 20 jobs run. If you find the Job you want, + you can then select item 3 and enter its JobId(s). + +\item Item 2 will list all the Jobs where a specified file is saved. If you + find the Job you want, you can then select item 3 and enter the JobId. + +\item Item 3 allows you the enter a list of comma separated JobIds whose + files will be put into the directory tree. You may then select which + files from those JobIds to restore. Normally, you would use this option + if you have a particular version of a file that you want to restore and + you know its JobId. The most common options (5 and 6) will not select + a job that did not terminate normally, so if you know a file is + backed up by a Job that failed (possibly because of a system crash), you + can access it through this option by specifying the JobId. + +\item Item 4 allows you to enter any arbitrary SQL command. This is + probably the most primitive way of finding the desired JobIds, but at + the same time, the most flexible. Once you have found the JobId(s), you + can select item 3 and enter them. 
+ +\item Item 5 will automatically select the most recent Full backup and all + subsequent incremental and differential backups for a specified Client. + These are the Jobs and Files which, if reloaded, will restore your + system to the most current saved state. It automatically enters the + JobIds found into the directory tree in an optimal way such that only + the most recent copy of any particular file found in the set of Jobs + will be restored. This is probably the most convenient of all the above + options to use if you wish to restore a selected Client to its most + recent state. + + There are two important things to note. First, this automatic selection + will never select a job that failed (terminated with an error status). + If you have such a job and want to recover one or more files from it, + you will need to explicitly enter the JobId in item 3, then choose the + files to restore. + + If some of the Jobs that are needed to do the restore have had their + File records pruned, the restore will be incomplete. Bacula currently + does not correctly detect this condition. You can however, check for + this by looking carefully at the list of Jobs that Bacula selects and + prints. If you find Jobs with the JobFiles column set to zero, when + files should have been backed up, then you should expect problems. + + If all the File records have been pruned, Bacula will realize that there + are no file records in any of the JobIds chosen and will inform you. It + will then propose doing a full restore (non-selective) of those JobIds. + This is possible because Bacula still knows where the beginning of the + Job data is on the Volumes, even if it does not know where particular + files are located or what their names are. + +\item Item 6 allows you to specify a date and time, after which Bacula will + automatically select the most recent Full backup and all subsequent + incremental and differential backups that started before the specified date + and time. 
+ +\item Item 7 allows you to specify one or more filenames (complete path + required) to be restored. Each filename is entered one at a time or if you + prefix a filename with the less-than symbol (\lt{}) Bacula will read that + file and assume it is a list of filenames to be restored. If you + prefix the filename with a question mark (?), then the filename will + be interpreted as an SQL table name, and Bacula will include the rows + of that table in the list to be restored. The table must contain the + JobId in the first column and the FileIndex in the second column. + This table feature is intended for external programs that want to build + their own list of files to be restored. + The filename entry mode is terminated by entering a blank line. + +\item Item 8 allows you to specify a date and time before entering the + filenames. See Item 7 above for more details. + +\item Item 9 allows you find the JobIds of the most recent backup for + a client. This is much like option 5 (it uses the same code), but + those JobIds are retained internally as if you had entered them + manually. You may then select item 11 (see below) to restore one + or more directories. + +\item Item 10 is the same as item 9, except that it allows you to enter + a before date (as with item 6). These JobIds will then be retained + internally. + +\index[general]{Restore Directories} +\item Item 11 allows you to enter a list of JobIds from which you can + select directories to be restored. The list of JobIds can have been + previously created by using either item 9 or 10 on the menu. You + may then enter a full path to a directory name or a filename preceded + by a less than sign (\lt{}). The filename should contain a list + of directories to be restored. All files in those directories will + be restored, but if the directory contains subdirectories, nothing + will be restored in the subdirectory unless you explicitly enter its + name. + +\item Item 12 allows you to cancel the restore command. 
+\end{itemize} + +As an example, suppose that we select item 5 (restore to most recent state). +If you have not specified a client=xxx on the command line, it +it will then ask for the desired Client, which on my system, will print all +the Clients found in the database as follows: + +\footnotesize +\begin{verbatim} +Defined clients: + 1: Rufus + 2: Matou + 3: Polymatou + 4: Minimatou + 5: Minou + 6: MatouVerify + 7: PmatouVerify + 8: RufusVerify + 9: Watchdog +Select Client (File daemon) resource (1-9): +\end{verbatim} +\normalsize + +You will probably have far fewer Clients than this example, and if you have +only one Client, it will be automatically selected. In this case, I enter +{\bf Rufus} to select the Client. Then Bacula needs to know what FileSet is +to be restored, so it prompts with: + +\footnotesize +\begin{verbatim} +The defined FileSet resources are: + 1: Full Set + 2: Other Files +Select FileSet resource (1-2): +\end{verbatim} +\normalsize + +If you have only one FileSet defined for the Client, it will be selected +automatically. I choose item 1, which is my full backup. Normally, you +will only have a single FileSet for each Job, and if your machines are +similar (all Linux) you may only have one FileSet for all your Clients. + +At this point, {\bf Bacula} has all the information it needs to find the most +recent set of backups. It will then query the database, which may take a bit +of time, and it will come up with something like the following. 
Note, some of +the columns are truncated here for presentation: + +\footnotesize +\begin{verbatim} ++-------+------+----------+-------------+-------------+------+-------+---------- +--+ +| JobId | Levl | JobFiles | StartTime | VolumeName | File | SesId | +VolSesTime | ++-------+------+----------+-------------+-------------+------+-------+---------- +--+ +| 1,792 | F | 128,374 | 08-03 01:58 | DLT-19Jul02 | 67 | 18 | +1028042998 | +| 1,792 | F | 128,374 | 08-03 01:58 | DLT-04Aug02 | 0 | 18 | +1028042998 | +| 1,797 | I | 254 | 08-04 13:53 | DLT-04Aug02 | 5 | 23 | +1028042998 | +| 1,798 | I | 15 | 08-05 01:05 | DLT-04Aug02 | 6 | 24 | +1028042998 | ++-------+------+----------+-------------+-------------+------+-------+---------- +--+ +You have selected the following JobId: 1792,1792,1797 +Building directory tree for JobId 1792 ... +Building directory tree for JobId 1797 ... +Building directory tree for JobId 1798 ... +cwd is: / +$ +\end{verbatim} +\normalsize + +Depending on the number of {\bf JobFiles} for each JobId, the {\bf Building +directory tree ..."} can take a bit of time. If you notice ath all the +JobFiles are zero, your Files have probably been pruned and you will not be +able to select any individual files -- it will be restore everything or +nothing. + +In our example, Bacula found four Jobs that comprise the most recent backup of +the specified Client and FileSet. Two of the Jobs have the same JobId because +that Job wrote on two different Volumes. The third Job was an incremental +backup to the previous Full backup, and it only saved 254 Files compared to +128,374 for the Full backup. The fourth Job was also an incremental backup +that saved 15 files. + +Next Bacula entered those Jobs into the directory tree, with no files marked +to be restored as a default, tells you how many files are in the tree, and +tells you that the current working directory ({\bf cwd}) is /. 
Finally, Bacula +prompts with the dollar sign (\$) to indicate that you may enter commands to +move around the directory tree and to select files. + +If you want all the files to automatically be marked when the directory +tree is built, you could have entered the command {\bf restore all}, or +at the \$ prompt, you can simply enter {\bf mark *}. + +Instead of choosing item 5 on the first menu (Select the most recent backup +for a client), if we had chosen item 3 (Enter list of JobIds to select) and we +had entered the JobIds {\bf 1792,1797,1798} we would have arrived at the same +point. + +One point to note, if you are manually entering JobIds, is that you must enter +them in the order they were run (generally in increasing JobId order). If you +enter them out of order and the same file was saved in two or more of the +Jobs, you may end up with an old version of that file (i.e. not the most +recent). + +Directly entering the JobIds can also permit you to recover data from +a Job that wrote files to tape but that terminated with an error status. + +While in file selection mode, you can enter {\bf help} or a question mark (?) +to produce a summary of the available commands: + +\footnotesize +\begin{verbatim} + Command Description + ======= =========== + cd change current directory + count count marked files in and below the cd + dir long list current directory, wildcards allowed + done leave file selection mode + estimate estimate restore size + exit same as done command + find find files, wildcards allowed + help print help + ls list current directory, wildcards allowed + lsmark list the marked files in and below the cd + mark mark dir/file to be restored recursively in dirs + markdir mark directory name to be restored (no files) + pwd print current working directory + unmark unmark dir/file to be restored recursively in dir + unmarkdir unmark directory name only no recursion + quit quit and do not do restore + ? 
print help +\end{verbatim} +\normalsize + +As a default no files have been selected for restore (unless you +added {\bf all} to the command line. If you want to restore +everything, at this point, you should enter {\bf mark *}, and then {\bf done} +and {\bf Bacula} will write the bootstrap records to a file and request your +approval to start a restore job. + +If you do not enter the above mentioned {\bf mark *} command, you will start +with an empty slate. Now you can simply start looking at the tree and {\bf +mark} particular files or directories you want restored. It is easy to make +a mistake in specifying a file to mark or unmark, and Bacula's error handling +is not perfect, so please check your work by using the {\bf ls} or {\bf dir} +commands to see what files are actually selected. Any selected file has its +name preceded by an asterisk. + +To check what is marked or not marked, enter the {\bf count} command, which +displays: + +\footnotesize +\begin{verbatim} +128401 total files. 128401 marked to be restored. + +\end{verbatim} +\normalsize + +Each of the above commands will be described in more detail in the next +section. We continue with the above example, having accepted to restore all +files as Bacula set by default. On entering the {\bf done} command, Bacula +prints: + +\footnotesize +\begin{verbatim} +Bootstrap records written to /home/kern/bacula/working/restore.bsr +The job will require the following + Volume(s) Storage(s) SD Device(s) +=========================================================================== + + DLT-19Jul02 Tape DLT8000 + DLT-04Aug02 Tape DLT8000 + +128401 files selected to restore. +Run Restore job +JobName: kernsrestore +Bootstrap: /home/kern/bacula/working/restore.bsr +Where: /tmp/bacula-restores +Replace: always +FileSet: Other Files +Client: Rufus +Storage: Tape +When: 2006-12-11 18:20:33 +Catalog: MyCatalog +Priority: 10 +OK to run? 
(yes/mod/no): + +\end{verbatim} +\normalsize + +Please examine each of the items very carefully to make sure that they are +correct. In particular, look at {\bf Where}, which tells you where in the +directory structure the files will be restored, and {\bf Client}, which +tells you which client will receive the files. Note that by default the +Client which will receive the files is the Client that was backed up. +These items will not always be completed with the correct values depending +on which of the restore options you chose. You can change any of these +default items by entering {\bf mod} and responding to the prompts. + +The above assumes that you have defined a {\bf Restore} Job resource in your +Director's configuration file. Normally, you will only need one Restore Job +resource definition because by its nature, restoring is a manual operation, +and using the Console interface, you will be able to modify the Restore Job to +do what you want. + +An example Restore Job resource definition is given below. + +Returning to the above example, you should verify that the Client name is +correct before running the Job. However, you may want to modify some of the +parameters of the restore job. For example, in addition to checking the Client +it is wise to check that the Storage device chosen by Bacula is indeed +correct. Although the {\bf FileSet} is shown, it will be ignored in restore. +The restore will choose the files to be restored either by reading the {\bf +Bootstrap} file, or if not specified, it will restore all files associated +with the specified backup {\bf JobId} (i.e. the JobId of the Job that +originally backed up the files). + +Finally before running the job, please note that the default location for +restoring files is {\bf not} their original locations, but rather the directory +{\bf /tmp/bacula-restores}. You can change this default by modifying your {\bf +bacula-dir.conf} file, or you can modify it using the {\bf mod} option. 
If you +want to restore the files to their original location, you must have {\bf +Where} set to nothing or to the root, i.e. {\bf /}. + +If you now enter {\bf yes}, Bacula will run the restore Job. The Storage +daemon will first request Volume {\bf DLT-19Jul02} and after the appropriate +files have been restored from that volume, it will request Volume {\bf +DLT-04Aug02}. + +\section{Selecting Files by Filename} +\index[general]{Selecting Files by Filename } +\index[general]{Filename!Selecting Files by } + +If you have a small number of files to restore, and you know the filenames, +you can either put the list of filenames in a file to be read by Bacula, or +you can enter the names one at a time. The filenames must include the full +path and filename. No wild cards are used. + +To enter the files, after the {\bf restore}, you select item number 7 from the +prompt list: + +\footnotesize +\begin{verbatim} +To select the JobIds, you have the following choices: + 1: List last 20 Jobs run + 2: List Jobs where a given File is saved + 3: Enter list of comma separated JobIds to select + 4: Enter SQL list command + 5: Select the most recent backup for a client + 6: Select backup for a client before a specified time + 7: Enter a list of files to restore + 8: Enter a list of files to restore before a specified time + 9: Find the JobIds of the most recent backup for a client + 10: Find the JobIds for a backup for a client before a specified time + 11: Enter a list of directories to restore for found JobIds + 12: Cancel +Select item: (1-12): +\end{verbatim} +\normalsize + +which then prompts you for the client name: + +\footnotesize +\begin{verbatim} +Defined Clients: + 1: Timmy + 2: Tibs + 3: Rufus +Select the Client (1-3): 3 +\end{verbatim} +\normalsize + +Of course, your client list will be different, and if you have only one +client, it will be automatically selected. 
And finally, Bacula requests you to +enter a filename: + +\footnotesize +\begin{verbatim} +Enter filename: +\end{verbatim} +\normalsize + +At this point, you can enter the full path and filename + +\footnotesize +\begin{verbatim} +Enter filename: /home/kern/bacula/k/Makefile.in +Enter filename: +\end{verbatim} +\normalsize + +as you can see, it took the filename. If Bacula cannot find a copy of the +file, it prints the following: + +\footnotesize +\begin{verbatim} +Enter filename: junk filename +No database record found for: junk filename +Enter filename: +\end{verbatim} +\normalsize + +If you want Bacula to read the filenames from a file, you simply precede the +filename with a less-than symbol (\lt{}). When you have entered all the +filenames, you enter a blank line, and Bacula will write the bootstrap file, +tells you what tapes will be used, and proposes a Restore job to be run: + +\footnotesize +\begin{verbatim} +Enter filename: +Automatically selected Storage: DDS-4 +Bootstrap records written to /home/kern/bacula/working/restore.bsr +The restore job will require the following Volumes: + + test1 +1 file selected to restore. +Run Restore job +JobName: kernsrestore +Bootstrap: /home/kern/bacula/working/restore.bsr +Where: /tmp/bacula-restores +Replace: always +FileSet: Other Files +Client: Rufus +Storage: DDS-4 +When: 2003-09-11 10:20:53 +Priority: 10 +OK to run? (yes/mod/no): +\end{verbatim} +\normalsize + +It is possible to automate the selection by file by putting your list of files +in say {\bf /tmp/file-list}, then using the following command: + +\footnotesize +\begin{verbatim} +restore client=Rufus file= = / ! ; % : , ~ # = & +\end{verbatim} + +You can use several expressions separated by a commas. 
+ +\subsection*{Examples} + +\begin{tabular}{|c|c|c|l} +\hline +Original filename & Computed filename & RegexWhere & Comments \\ +\hline +\hline +\texttt{c:/system.ini} & \texttt{c:/system.old.ini} & \texttt{/.ini\$/.old.ini/} & use \$ as end of filename\\ +\hline +\texttt{/prod/u01/pdata/} & \texttt{/rect/u01/rdata} & \texttt{/prod/rect/,/pdata/rdata/} & using two regexp\\ +\hline +\texttt{/prod/u01/pdata/} & \texttt{/rect/u01/rdata} & \texttt{!/prod/!/rect/!,/pdata/rdata/} & using \texttt{!} instead of \texttt{/}\\ +\hline +\texttt{C:/WINNT} & \texttt{d:/WINNT} & \texttt{/c:/d:/i} & using case-insensitive pattern matching \\ +\hline + +\end{tabular} + +%\subsubsection{Using group} +% +%Like with Perl or Sed, you can make submatch with \texttt{()}, +% +%\subsubsection*{Examples} + + +%\subsubsection{Options} +% +% i Do case-insensitive pattern matching. + +\section{Restoring Directory Attributes} +\index[general]{Attributes!Restoring Directory } +\index[general]{Restoring Directory Attributes } + +Depending on how you do the restore, you may or may not get the directory entries +back to their original state. Here are a few of the problems you can +encounter, and for same machine restores, how to avoid them. + +\begin{itemize} +\item You backed up on one machine and are restoring to another that is + either a different OS or doesn't have the same users/groups defined. Bacula + does the best it can in these situations. Note, Bacula has saved the + user/groups in numeric form, which means on a different machine, they + may map to different user/group names. + +\item You are restoring into a directory that is already created and has + file creation restrictions. Bacula tries to reset everything but + without walking up the full chain of directories and modifying them all + during the restore, which Bacula does not and will not do, getting + permissions back correctly in this situation depends to a large extent + on your OS. 
+ +\item You are doing a recursive restore of a directory tree. In this case + Bacula will restore a file before restoring the file's parent directory + entry. In the process of restoring the file Bacula will create the + parent directory with open permissions and ownership of the file being + restored. Then when Bacula tries to restore the parent directory Bacula + sees that it already exists (Similar to the previous situation). If you + had set the Restore job's "Replace" property to "never" then Bacula will + not change the directory's permissions and ownerships to match what it + backed up, you should also notice that the actual number of files + restored is less then the expected number. If you had set the Restore + job's "Replace" property to "always" then Bacula will change the + Directory's ownership and permissions to match what it backed up, also + the actual number of files restored should be equal to the expected + number. + +\item You selected one or more files in a directory, but did not select the + directory entry to be restored. In that case, if the directory is not + on disk Bacula simply creates the directory with some default attributes + which may not be the same as the original. If you do not select a + directory and all its contents to be restored, you can still select + items within the directory to be restored by individually marking those + files, but in that case, you should individually use the "markdir" + command to select all higher level directory entries (one at a time) to + be restored if you want the directory entries properly restored. + +\item The {\bf bextract} program does not restore access control lists + (ACLs), nor will it restore non-portable Win32 data (default) to Unix + machines. 
+\end{itemize} + +\label{Windows} +\section{Restoring on Windows} +\index[general]{Restoring on Windows } +\index[general]{Windows!Restoring on } + +If you are restoring on WinNT/2K/XP systems, Bacula will restore the files +with the original ownerships and permissions as would be expected. This is +also true if you are restoring those files to an alternate directory (using +the Where option in restore). However, if the alternate directory does not +already exist, the Bacula File daemon (Client) will try to create it. In +some cases, it may not create the directories, and if it does since the +File daemon runs under the SYSTEM account, the directory will be created +with SYSTEM ownership and permissions. In this case, you may have problems +accessing the newly restored files. + +To avoid this problem, you should create any alternate directory before +doing the restore. Bacula will not change the ownership and permissions of +the directory if it is already created as long as it is not one of the +directories being restored (i.e. written to tape). + +The default restore location is {\bf /tmp/bacula-restores/} and if you are +restoring from drive {\bf E:}, the default will be +{\bf /tmp/bacula-restores/e/}, so you should ensure that this directory +exists before doing the restore, or use the {\bf mod} option to +select a different {\bf where} directory that does exist. + +Some users have experienced problems restoring files that participate in +the Active Directory. They also report that changing the userid under which +Bacula (bacula-fd.exe) runs, from SYSTEM to a Domain Admin userid, resolves +the problem. + + +\section{Restoring Files Can Be Slow} +\index[general]{Slow!Restoring Files Can Be } +\index[general]{Restoring Files Can Be Slow } + +Restoring files is generally {\bf much} slower than backing them up for several +reasons. The first is that during a backup the tape is normally already +positioned and Bacula only needs to write. 
On the other hand, because restoring +files is done so rarely, Bacula keeps only the start file and block on the +tape for the whole job rather than on a file by file basis which would use +quite a lot of space in the catalog. + +Bacula will forward space to the correct file mark on the tape for the Job, +then forward space to the correct block, and finally sequentially read each +record until it gets to the correct one(s) for the file or files you want to +restore. Once the desired files are restored, Bacula will stop reading the +tape. + +Finally, instead of just reading a file for backup, during the restore, Bacula +must create the file, and the operating system must allocate disk space for +the file as Bacula is restoring it. + +For all the above reasons the restore process is generally much slower than +backing up (sometimes it takes three times as long). + +\section{Problems Restoring Files} +\index[general]{Files!Problems Restoring } +\index[general]{Problems Restoring Files } + +The most frequent problems users have restoring files are error messages such +as: + +\footnotesize +\begin{verbatim} +04-Jan 00:33 z217-sd: RestoreFiles.2005-01-04_00.31.04 Error: +block.c:868 Volume data error at 20:0! Short block of 512 bytes on +device /dev/tape discarded. +\end{verbatim} +\normalsize + +or + +\footnotesize +\begin{verbatim} +04-Jan 00:33 z217-sd: RestoreFiles.2005-01-04_00.31.04 Error: +block.c:264 Volume data error at 20:0! Wanted ID: "BB02", got ".". +Buffer discarded. +\end{verbatim} +\normalsize + +Both these kinds of messages indicate that you were probably running your tape +drive in fixed block mode rather than variable block mode. Fixed block mode +will work with any program that reads tapes sequentially such as tar, but +Bacula repositions the tape on a block basis when restoring files because this +will speed up the restore by orders of magnitude when only a few files are being +restored. 
There are several ways that you can attempt to recover from this +unfortunate situation. + +Try the following things, each separately, and reset your Device resource to +what it is now after each individual test: + +\begin{enumerate} +\item Set "Block Positioning = no" in your Device resource and try the + restore. This is a new directive and untested. + +\item Set "Minimum Block Size = 512" and "Maximum Block Size = 512" and + try the restore. If you are able to determine the block size your drive + was previously using, you should try that size if 512 does not work. + This is a really horrible solution, and it is not at all recommended + to continue backing up your data without correcting this condition. + Please see the Tape Testing chapter for more on this. + +\item Try editing the restore.bsr file at the Run xxx yes/mod/no prompt + before starting the restore job and remove all the VolBlock statements. + These are what causes Bacula to reposition the tape, and where problems + occur if you have a fixed block size set for your drive. The VolFile + commands also cause repositioning, but this will work regardless of the + block size. + +\item Use bextract to extract the files you want -- it reads the Volume + sequentially if you use the include list feature, or if you use a .bsr + file, but remove all the VolBlock statements after the .bsr file is + created (at the Run yes/mod/no) prompt but before you start the restore. +\end{enumerate} + +\section{Restore Errors} +\index[general]{Errors!Restore} +\index[general]{Restore Errors} + +There are a number of reasons why there may be restore errors or +warning messages. Some of the more common ones are: + +\begin{description} + +\item [file count mismatch] + This can occur for the following reasons: + \begin{itemize} + \item You requested Bacula not to overwrite existing or newer + files. + \item A Bacula miscount of files/directories. 
This is an + on-going problem due to the complications of directories, + soft/hard link, and such. Simply check that all the files you + wanted were actually restored. + \end{itemize} + +\item [file size error] + When Bacula restores files, it checks that the size of the + restored file is the same as the file status data it saved + when starting the backup of the file. If the sizes do not + agree, Bacula will print an error message. This size mismatch + most often occurs because the file was being written as Bacula + backed up the file. In this case, the size that Bacula + restored will be greater than the status size. This often + happens with log files. + + If the restored size is smaller, then you should be concerned + about a possible tape error and check the Bacula output as + well as your system logs. +\end{description} + + + +\section{Example Restore Job Resource} +\index[general]{Example Restore Job Resource } +\index[general]{Resource!Example Restore Job } + +\footnotesize +\begin{verbatim} +Job { + Name = "RestoreFiles" + Type = Restore + Client = Any-client + FileSet = "Any-FileSet" + Storage = Any-storage + Where = /tmp/bacula-restores + Messages = Standard + Pool = Default +} +\end{verbatim} +\normalsize + +If {\bf Where} is not specified, the default location for restoring files will +be their original locations. +\label{Selection} + +\section{File Selection Commands} +\index[general]{Commands!File Selection } +\index[general]{File Selection Commands } + +After you have selected the Jobs to be restored and Bacula has created the +in-memory directory tree, you will enter file selection mode as indicated by +the dollar sign ({\bf \$}) prompt. While in this mode, you may use the +commands listed above. The basic idea is to move up and down the in memory +directory structure with the {\bf cd} command much as you normally do on the +system. Once you are in a directory, you may select the files that you want +restored. 
As a default no files are marked to be restored. If you wish to +start with all files, simply enter: {\bf cd /} and {\bf mark *}. Otherwise +proceed to select the files you wish to restore by marking them with the {\bf +mark} command. The available commands are: + +\begin{description} + +\item [cd] + The {\bf cd} command changes the current directory to the argument + specified. + It operates much like the Unix {\bf cd} command. Wildcard specifications are + not permitted. + + Note, on Windows systems, the various drives (c:, d:, ...) are treated like + a + directory within the file tree while in the file selection mode. As a + consequence, you must do a {\bf cd c:} or possibly in some cases a {\bf cd + C:} (note upper case) to get down to the first directory. + +\item [dir] + \index[dir]{dir } + The {\bf dir} command is similar to the {\bf ls} command, except that it + prints it in long format (all details). This command can be a bit slower + than + the {\bf ls} command because it must access the catalog database for the + detailed information for each file. + +\item [estimate] + \index[dir]{estimate } + The {\bf estimate} command prints a summary of the total files in the tree, + how many are marked to be restored, and an estimate of the number of bytes + to + be restored. This can be useful if you are short on disk space on the + machine + where the files will be restored. + +\item [find] + \index[dir]{find} + The {\bf find} command accepts one or more arguments and displays all files + in the tree that match that argument. The argument may have wildcards. It is + somewhat similar to the Unix command {\bf find / -name arg}. + +\item [ls] + The {\bf ls} command produces a listing of all the files contained in the + current directory much like the Unix {\bf ls} command. You may specify an + argument containing wildcards, in which case only those files will be + listed. 
+ + Any file that is marked to be restored will have its name preceded by an + asterisk ({\bf *}). Directory names will be terminated with a forward slash + ({\bf /}) to distinguish them from filenames. + +\item [lsmark] + \index[fd]{lsmark} + The {\bf lsmark} command is the same as the {\bf ls} except that it will + print only those files marked for extraction. The other distinction is that + it will recursively descend into any directory selected. + +\item [mark] + \index[dir]{mark} + The {\bf mark} command allows you to mark files to be restored. It takes a + single argument which is the filename or directory name in the current + directory to be marked for extraction. The argument may be a wildcard + specification, in which case all files that match in the current directory + are marked to be restored. If the argument matches a directory rather than a + file, then the directory and all the files contained in that directory + (recursively) are marked to be restored. Any marked file will have its name + preceded with an asterisk ({\bf *}) in the output produced by the {\bf ls} +or + {\bf dir} commands. Note, supplying a full path on the mark command does not + work as expected to select a file or directory in the current directory. + Also, the {\bf mark} command works on the current and lower directories but + does not touch higher level directories. + + After executing the {\bf mark} command, it will print a brief summary: + +\footnotesize +\begin{verbatim} + No files marked. + +\end{verbatim} +\normalsize + + If no files were marked, or: + +\footnotesize +\begin{verbatim} + nn files marked. + +\end{verbatim} +\normalsize + + if some files are marked. + +\item [unmark] + \index[dir]{unmark } + The {\bf unmark} is identical to the {\bf mark} command, except that it + unmarks the specified file or files so that they will not be restored. Note: + the {\bf unmark} command works from the current directory, so it does not + unmark any files at a higher level. 
First do a {\bf cd /} before the {\bf + unmark *} command if you want to unmark everything. + +\item [pwd] + \index[dir]{pwd } + The {\bf pwd} command prints the current working directory. It accepts no + arguments. + +\item [count] + \index[dir]{count } + The {\bf count} command prints the total files in the directory tree and the + number of files marked to be restored. + +\item [done] + \index[dir]{done } + This command terminates file selection mode. + +\item [exit] + \index[fd]{exit } + This command terminates file selection mode (the same as done). + +\item [quit] + \index[fd]{quit } + This command terminates the file selection and does not run the restore +job. + + +\item [help] + \index[fd]{help } + This command prints a summary of the commands available. + +\item [?] + This command is the same as the {\bf help} command. +\end{description} + +\label{database_restore} +\section{Restoring When Things Go Wrong} +\index[general]{Restoring When Things Go Wrong } +\index[general]{Restoring Your Database} +\index[general]{Database!Restoring} + +This and the following sections will try to present a few of the kinds of +problems that can come up making restoring more difficult. We will try to +provide a few ideas how to get out of these problem situations. +In addition to what is presented here, there is more specific information +on restoring a \ilink{Client}{restore_client} and your +\ilink{Server}{restore_server} in the \ilink{Disaster Recovery Using +Bacula}{RescueChapter} chapter of this manual. + +\begin{description} +\item[Problem] + My database is broken. +\item[Solution] + For SQLite, use the vacuum command to try to fix the database. For either + MySQL or PostgreSQL, see the vendor's documentation. They have specific tools + that check and repair databases, see the \ilink{database + repair}{DatabaseRepair} sections of this manual for links to vendor + information. 
+ + Assuming the above does not resolve the problem, you will need to restore + or rebuild your catalog. Note, if it is a matter of some + inconsistencies in the Bacula tables rather than a broken database, then + running \ilink{dbcheck}{dbcheck} might help, but you will need to ensure + that your database indexes are properly set up. Please see + the \ilink{Database Performance Issues}{DatabasePerformance} sections + of this manual for more details. + +\item[Problem] + How do I restore my catalog? +\item[Solution with a Catalog backup] + If you have backed up your database nightly (as you should) and you + have made a bootstrap file, you can immediately load back your + database (or the ASCII SQL output). Make a copy of your current + database, then re-initialize it, by running the following scripts: +\begin{verbatim} + ./drop_bacula_tables + ./make_bacula_tables +\end{verbatim} + After re-initializing the database, you should be able to run + Bacula. If you now try to use the restore command, it will not + work because the database will be empty. However, you can manually + run a restore job and specify your bootstrap file. You do so + by entering the {\bf run} command in the console and selecting the + restore job. If you are using the default bacula-dir.conf, this + Job will be named {\bf RestoreFiles}. Most likely it will prompt + you with something such as: + +\footnotesize +\begin{verbatim} +Run Restore job +JobName: RestoreFiles +Bootstrap: /home/kern/bacula/working/restore.bsr +Where: /tmp/bacula-restores +Replace: always +FileSet: Full Set +Client: rufus-fd +Storage: File +When: 2005-07-10 17:33:40 +Catalog: MyCatalog +Priority: 10 +OK to run? (yes/mod/no): +\end{verbatim} +\normalsize + + A number of the items will be different in your case. What you want to + do is: to use the mod option to change the Bootstrap to point to your + saved bootstrap file; and to make sure all the other items such as + Client, Storage, Catalog, and Where are correct. 
The FileSet is not + used when you specify a bootstrap file. Once you have set all the + correct values, run the Job and it will restore the backup of your + database, which is most likely an ASCII dump. + + You will then need to follow the instructions for your + database type to recreate the database from the ASCII backup file. + See the \ilink {Catalog Maintenance}{CatMaintenanceChapter} chapter of + this manual for examples of the command needed to restore a + database from an ASCII dump (they are shown in the Compacting Your + XXX Database sections). + + Also, please note that after you restore your database from an ASCII + backup, you do NOT want to do a {\bf make\_bacula\_tables} command, or + you will probably erase your newly restored database tables. + + +\item[Solution with a Job listing] + If you did save your database but did not make a bootstrap file, then + recovering the database is more difficult. You will probably need to + use bextract to extract the backup copy. First you should locate the + listing of the job report from the last catalog backup. It has + important information that will allow you to quickly find your database + file. For example, in the job report for the CatalogBackup shown below, + the critical items are the Volume name(s), the Volume Session Id and the + Volume Session Time. If you know those, you can easily restore your + Catalog. 
+ +\footnotesize +\begin{verbatim} +22-Apr 10:22 HeadMan: Start Backup JobId 7510, +Job=CatalogBackup.2005-04-22_01.10.0 +22-Apr 10:23 HeadMan: Bacula 1.37.14 (21Apr05): 22-Apr-2005 10:23:06 + JobId: 7510 + Job: CatalogBackup.2005-04-22_01.10.00 + Backup Level: Full + Client: Polymatou + FileSet: "CatalogFile" 2003-04-10 01:24:01 + Pool: "Default" + Storage: "DLTDrive" + Start time: 22-Apr-2005 10:21:00 + End time: 22-Apr-2005 10:23:06 + FD Files Written: 1 + SD Files Written: 1 + FD Bytes Written: 210,739,395 + SD Bytes Written: 210,739,521 + Rate: 1672.5 KB/s + Software Compression: None + Volume name(s): DLT-22Apr05 + Volume Session Id: 11 + Volume Session Time: 1114075126 + Last Volume Bytes: 1,428,240,465 + Non-fatal FD errors: 0 + SD Errors: 0 + FD termination status: OK + SD termination status: OK + Termination: Backup OK +\end{verbatim} +\normalsize + + From the above information, you can manually create a bootstrap file, + and then follow the instructions given above for restoring your database. + A reconstructed bootstrap file for the above backup Job would look + like the following: + +\footnotesize +\begin{verbatim} +Volume="DLT-22Apr05" +VolSessionId=11 +VolSessionTime=1114075126 +FileIndex=1-1 +\end{verbatim} +\normalsize + + Where we have inserted the Volume name, Volume Session Id, and Volume + Session Time that correspond to the values in the job report. We've also + used a FileIndex of one, which will always be the case providing that + there was only one file backed up in the job. + + The disadvantage of this bootstrap file compared to what is created when + you ask for one to be written, is that there is no File and Block + specified, so the restore code must search all data in the Volume to find + the requested file. 
A fully specified bootstrap file would have the File + and Blocks specified as follows: + +\footnotesize +\begin{verbatim} +Volume="DLT-22Apr05" +VolSessionId=11 +VolSessionTime=1114075126 +VolFile=118-118 +VolBlock=0-4053 +FileIndex=1-1 +\end{verbatim} +\normalsize + + Once you have restored the ASCII dump of the database, + you will then need to follow the instructions for your + database type to recreate the database from the ASCII backup file. + See the \ilink {Catalog Maintenance}{CatMaintenanceChapter} chapter of + this manual for examples of the command needed to restore a + database from an ASCII dump (they are shown in the Compacting Your + XXX Database sections). + + Also, please note that after you restore your database from an ASCII + backup, you do NOT want to do a {\bf make\_bacula\_tables} command, or + you will probably erase your newly restored database tables. + +\item [Solution without a Job Listing] + If you do not have a job listing, then it is a bit more difficult. + Either you use the \ilink{bscan}{bscan} program to scan the contents + of your tape into a database, which can be very time consuming + depending on the size of the tape, or you can use the \ilink{bls}{bls} + program to list everything on the tape, and reconstruct a bootstrap + file from the bls listing for the file or files you want following + the instructions given above. + + There is a specific example of how to use {\bf bls} below. + +\item [Problem] + I try to restore the last known good full backup by specifying + item 3 on the restore menu then the JobId to restore. Bacula + then reports: + +\footnotesize +\begin{verbatim} + 1 Job 0 Files +\end{verbatim} +\normalsize + and restores nothing. + +\item[Solution] + Most likely the File records were pruned from the database either due + to the File Retention period expiring or by explicitly purging the + Job. 
By using the "llist jobid=nn" command, you can obtain all the + important information about the job: + +\footnotesize +\begin{verbatim} +llist jobid=120 + JobId: 120 + Job: save.2005-12-05_18.27.33 + Job.Name: save + PurgedFiles: 0 + Type: B + Level: F + Job.ClientId: 1 + Client.Name: Rufus + JobStatus: T + SchedTime: 2005-12-05 18:27:32 + StartTime: 2005-12-05 18:27:35 + EndTime: 2005-12-05 18:27:37 + JobTDate: 1133803657 + VolSessionId: 1 + VolSessionTime: 1133803624 + JobFiles: 236 + JobErrors: 0 + JobMissingFiles: 0 + Job.PoolId: 4 + Pool.Name: Full + Job.FileSetId: 1 + FileSet.FileSet: BackupSet +\end{verbatim} +\normalsize + + Then you can find the Volume(s) used by doing: + +\footnotesize +\begin{verbatim} +sql +select VolumeName from JobMedia,Media where JobId=1 and JobMedia.MediaId=Media.MediaId; +\end{verbatim} +\normalsize + + Finally, you can create a bootstrap file as described in the previous + problem above using this information. + + If you are using Bacula version 1.38.0 or greater, when you select + item 3 from the menu and enter the JobId, it will ask you if + you would like to restore all the files in the job, and it will + collect the above information and write the bootstrap file for + you. + +\item [Problem] + You don't have a bootstrap file, and you don't have the Job report for + the backup of your database, but you did backup the database, and you + know the Volume to which it was backed up. + +\item [Solution] + Either bscan the tape (see below for bscanning), or better use {\bf bls} + to find where it is on the tape, then use {\bf bextract} to + restore the database. For example, + + +\footnotesize +\begin{verbatim} +./bls -j -V DLT-22Apr05 /dev/nst0 +\end{verbatim} +\normalsize + Might produce the following output: +\footnotesize +\begin{verbatim} +bls: butil.c:258 Using device: "/dev/nst0" for reading. +21-Jul 18:34 bls: Ready to read from volume "DLT-22Apr05" on device "DLTDrive" +(/dev/nst0). 
+Volume Record: File:blk=0:0 SessId=11 SessTime=1114075126 JobId=0 DataLen=164 +... +Begin Job Session Record: File:blk=118:0 SessId=11 SessTime=1114075126 +JobId=7510 + Job=CatalogBackup.2005-04-22_01.10.0 Date=22-Apr-2005 10:21:00 Level=F Type=B +End Job Session Record: File:blk=118:4053 SessId=11 SessTime=1114075126 +JobId=7510 + Date=22-Apr-2005 10:23:06 Level=F Type=B Files=1 Bytes=210,739,395 Errors=0 +Status=T +... +21-Jul 18:34 bls: End of Volume at file 201 on device "DLTDrive" (/dev/nst0), +Volume "DLT-22Apr05" +21-Jul 18:34 bls: End of all volumes. +\end{verbatim} +\normalsize + Of course, there will be many more records printed, but we have indicated + the essential lines of output. From the information on the Begin Job and End + Job Session Records, you can reconstruct a bootstrap file such as the one + shown above. + +\item[Problem] + How can I find where a file is stored. +\item[Solution] + Normally, it is not necessary, you just use the {\bf restore} command to + restore the most recently saved version (menu option 5), or a version + saved before a given date (menu option 8). If you know the JobId of the + job in which it was saved, you can use menu option 3 to enter that JobId. + + If you would like to know the JobId where a file was saved, select + restore menu option 2. 
+ + You can also use the {\bf query} command to find information such as: +\footnotesize +\begin{verbatim} +*query +Available queries: + 1: List up to 20 places where a File is saved regardless of the +directory + 2: List where the most recent copies of a file are saved + 3: List last 20 Full Backups for a Client + 4: List all backups for a Client after a specified time + 5: List all backups for a Client + 6: List Volume Attributes for a selected Volume + 7: List Volumes used by selected JobId + 8: List Volumes to Restore All Files + 9: List Pool Attributes for a selected Pool + 10: List total files/bytes by Job + 11: List total files/bytes by Volume + 12: List Files for a selected JobId + 13: List Jobs stored on a selected MediaId + 14: List Jobs stored for a given Volume name + 15: List Volumes Bacula thinks are in changer + 16: List Volumes likely to need replacement from age or errors +Choose a query (1-16): +\end{verbatim} +\normalsize + +\item[Problem] + I didn't back up my database. What do I do now? +\item[Solution] + This is probably the worst of all cases, and you will probably have + to re-create your database from scratch and then bscan in all your + Volumes, which is a very long, painful, and inexact process. + +There are basically three steps to take: + +\begin{enumerate} +\item Ensure that your SQL server is running (MySQL or PostgreSQL) + and that the Bacula database (normally bacula) exists. See the + \ilink{Installation}{CreateDatabase} chapter of the manual. +\item Ensure that the Bacula databases are created. This is also + described at the above link. +\item Start and stop the Bacula Director using the appropriate + bacula-dir.conf file so that it can create the Client and + Storage records which are not stored on the Volumes. Without these + records, scanning is unable to connect the Job records to the proper + client. +\end{enumerate} + +When the above is complete, you can begin bscanning your Volumes. 
Please +see the \ilink{bscan}{bscan} section of the Volume Utility Tools of this +chapter for more details. + +\end{description} diff --git a/docs/manuals/en/concepts/setup.sm b/docs/manuals/en/concepts/setup.sm new file mode 100644 index 00000000..7c88dc61 --- /dev/null +++ b/docs/manuals/en/concepts/setup.sm @@ -0,0 +1,23 @@ +/* + * html2latex + */ + +available { + sun4_sunos.4 + sun4_solaris.2 + rs_aix.3 + rs_aix.4 + sgi_irix +} + +description { + From Jeffrey Schaefer, Geometry Center. Translates HTML document to LaTeX +} + +install { + bin/html2latex /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex + bin/html2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.tag + bin/html2latex-local.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex-local.tag + bin/webtex2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/webtex2latex.tag + man/man1/html2latex.1 /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.1 +} diff --git a/docs/manuals/en/concepts/spooling.tex b/docs/manuals/en/concepts/spooling.tex new file mode 100644 index 00000000..9d1e4a9a --- /dev/null +++ b/docs/manuals/en/concepts/spooling.tex @@ -0,0 +1,138 @@ +%% +%% + +\chapter{Data Spooling} +\label{SpoolingChapter} +\index[general]{Data Spooling } +\index[general]{Spooling!Data } + +Bacula allows you to specify that you want the Storage daemon to initially +write your data to disk and then subsequently to tape. This serves several +important purposes. + +\begin{itemize} +\item It takes a long time for data to come in from the File daemon during + an Incremental backup. If it is directly written to tape, the tape will + start and stop or shoe-shine as it is often called causing tape wear. + By first writing the data to disk, then writing it to tape, the tape can + be kept in continual motion. +\item While the spooled data is being written to the tape, the despooling + process has exclusive use of the tape. 
This means that you can spool + multiple simultaneous jobs to disk, then have them very efficiently + despooled one at a time without having the data blocks from several jobs + intermingled, thus substantially improving the time needed to restore + files. While despooling, all jobs spooling continue running. +\item Writing to a tape can be slow. By first spooling your data to disk, + you can often reduce the time the File daemon is running on a system, + thus reducing downtime, and/or interference with users. Of course, if + your spool device is not large enough to hold all the data from your + File daemon, you may actually slow down the overall backup. +\end{itemize} + +Data spooling is exactly that "spooling". It is not a way to first write a +"backup" to a disk file and then to a tape. When the backup has only been +spooled to disk, it is not complete yet and cannot be restored until it is +written to tape. + +Bacula version 1.39.x and later supports writing a backup +to disk then later {\bf Migrating} or moving it to a tape (or any +other medium). For +details on this, please see the \ilink{Migration}{MigrationChapter} chapter +of this manual for more details. + +The remainder of this chapter explains the various directives that you can use +in the spooling process. + +\label{directives} +\section{Data Spooling Directives} +\index[general]{Directives!Data Spooling } +\index[general]{Data Spooling Directives } + +The following directives can be used to control data spooling. + +\begin{itemize} +\item To turn data spooling on/off at the Job level in the Job resource in + the Director's conf file (default {\bf no}). + +{\bf SpoolData = yes|no} + +\item To override the Job specification in a Schedule Run directive in the + Director's conf file. + +{\bf SpoolData = yes|no} + +\item To limit the maximum total size of the spooled data for a particular + device. Specified in the Device resource of the Storage daemon's conf file + (default unlimited). 
+ +{\bf Maximum Spool Size = size} + Where size is the maximum spool size for all jobs specified in bytes. + +\item To limit the maximum total size of the spooled data for a particular + device for a single job. Specified in the Device Resource of the Storage + daemon's conf file (default unlimited). + +{\bf Maximum Job Spool Size = size} + Where size is the maximum spool file size for a single job specified in + bytes. + +\item To specify the spool directory for a particular device. Specified in + the Device Resource of the Storage daemon's conf file (default, the working + directory). + +{\bf Spool Directory = directory} +\end{itemize} + +\label{warning} + +% TODO: fix this section name +\section{!!! MAJOR WARNING !!!} +\index[general]{WARNING! MAJOR } +\index[general]{ MAJOR WARNING } + +Please be very careful to exclude the spool directory from any backup, +otherwise, your job will write enormous amounts of data to the Volume, and +most probably terminate in error. This is because in attempting to back up the +spool file, the backup data will be written a second time to the spool file, +and so on ad infinitum. + +Another piece of advice is to always specify the maximum spool size so that your disk +doesn't completely fill up. In principle, data spooling will properly detect a +full disk, and despool data allowing the job to continue. However, attribute +spooling is not so kind to the user. If the disk on which attributes are being +spooled fills, the job will be canceled. In addition, if your working +directory is on the same partition as the spool directory, then Bacula jobs +will fail possibly in bizarre ways when the spool fills. + +\label{points} +\section{Other Points} +\index[general]{Points!Other } +\index[general]{Other Points } + +\begin{itemize} +\item When data spooling is enabled, Bacula automatically turns on attribute + spooling. In other words, it also spools the catalog entries to disk. 
This is + done so that in case the job fails, there will be no catalog entries + pointing to non-existent tape backups. +\item Attribute despooling occurs near the end of a job. The Storage daemon + accumulates file attributes during the backup and sends them to the + Director at the end of the job. The Director then inserts the file + attributes into the catalog. During this insertion, the tape drive may + be inactive. When the file attribute insertion is completed, the job + terminates. +\item Attribute spool files are always placed in the working directory of + the Storage daemon. +\item When Bacula begins despooling data spooled to disk, it takes exclusive + use of the tape. This has the major advantage that in running multiple + simultaneous jobs at the same time, the blocks of several jobs will not be + intermingled. +\item It probably does not make a lot of sense to enable data spooling if you + are writing to disk files. +\item It is probably best to provide as large a spool file as possible to + avoid repeatedly spooling/despooling. Also, while a job is despooling to + tape, the File daemon must wait (i.e. spooling stops for the job while it is + despooling). +\item If you are running multiple simultaneous jobs, Bacula will continue + spooling other jobs while one is despooling to tape, provided there is + sufficient spool file space. +\end{itemize} diff --git a/docs/manuals/en/concepts/state.tex b/docs/manuals/en/concepts/state.tex new file mode 100644 index 00000000..066d8f2f --- /dev/null +++ b/docs/manuals/en/concepts/state.tex @@ -0,0 +1,259 @@ +%% +%% + +\chapter{The Current State of Bacula} +\label{StateChapter} +\index[general]{Current State of Bacula } + +In other words, what is and what is not currently implemented and functional. + +\section{What is Implemented} +\index[general]{Implemented!What} +\index[general]{What is Implemented} + +\begin{itemize} +\item Job Control + \begin{itemize} + \item Network backup/restore with centralized Director. 
+ \item Internal scheduler for automatic + \ilink{Job}{JobDef} execution. + \item Scheduling of multiple Jobs at the same time. + \item You may run one Job at a time or multiple simultaneous Jobs + (sometimes called multiplexing). + \item Job sequencing using priorities. + \item \ilink{Console}{UADef} interface to the Director allowing complete + control. A shell, Qt4 GUI, GNOME GUI and wxWidgets GUI versions of + the Console program are available. Note, the Qt4 GUI program called + the Bacula Administration tool or bat, offers many additional + features over the shell program. + \end{itemize} + +\item Security + \begin{itemize} + \item Verification of files previously cataloged, permitting a Tripwire like + capability (system break-in detection). + \item CRAM-MD5 password authentication between each component (daemon). + \item Configurable + \ilink{TLS (SSL) communications encryption}{CommEncryption} between each + component. + \item Configurable + \ilink{Data (on Volume) encryption}{DataEncryption} + on a Client by Client basis. + \item Computation of MD5 or SHA1 signatures of the file data if requested. + \end{itemize} + + +\item Restore Features + \begin{itemize} + \item Restore of one or more files selected interactively either for the + current backup or a backup prior to a specified time and date. + \item Restore of a complete system starting from bare metal. This is mostly + automated for Linux systems and partially automated for Solaris. See + \ilink{Disaster Recovery Using Bacula}{RescueChapter}. This is also + reported to work on Win2K/XP systems. + \item Listing and Restoration of files using stand-alone {\bf bls} and {\bf + bextract} tool programs. Among other things, this permits extraction of files + when Bacula and/or the catalog are not available. Note, the recommended way + to restore files is using the restore command in the Console. These programs + are designed for use as a last resort. 
+ \item Ability to restore the catalog database rapidly by using bootstrap + files (previously saved). + \item Ability to recreate the catalog database by scanning backup Volumes + using the {\bf bscan} program. + \end{itemize} + +\item SQL Catalog + \begin{itemize} + \item Catalog database facility for remembering Volumes, Pools, Jobs, and + Files backed up. + \item Support for MySQL, PostgreSQL, and SQLite Catalog databases. + \item User extensible queries to the MySQL, PostgreSQL and SQLite databases. + \end{itemize} + +\item Advanced Volume and Pool Management + \begin{itemize} + \item Labeled Volumes, preventing accidental overwriting (at least by + Bacula). + \item Any number of Jobs and Clients can be backed up to a single Volume. + That is, you can backup and restore Linux, Unix, Sun, and Windows machines to + the same Volume. + \item Multi-volume saves. When a Volume is full, {\bf Bacula} automatically + requests the next Volume and continues the backup. + \item + \ilink{Pool and Volume}{PoolResource} library management + providing Volume flexibility (e.g. monthly, weekly, daily Volume sets, Volume + sets segregated by Client, ...). + \item Machine independent Volume data format. Linux, Solaris, and Windows + clients can all be backed up to the same Volume if desired. + \item The Volume data format is upwards compatible so that old Volumes + can always be read. + \item A flexible + \ilink{message}{MessagesChapter} handler including routing + of messages from any daemon back to the Director and automatic email + reporting. + \item Data spooling to disk during backup with subsequent write to tape from + the spooled disk files. This prevents tape "shoe shine" during + Incremental/Differential backups. + \end{itemize} + +\item Advanced Support for most Storage Devices + \begin{itemize} + \item Autochanger support using a simple shell interface that can interface + to virtually any autoloader program. A script for {\bf mtx} is provided. 
+ \item Support for autochanger barcodes -- automatic tape labeling from + barcodes. + \item Automatic support for multiple autochanger magazines either using + barcodes or by reading the tapes. + \item Support for multiple drive autochangers. + \item Raw device backup/restore. Restore must be to the same device. + \item All Volume blocks (approximately 64K bytes) contain a data checksum. + \item Migration support -- move data from one Pool to another or + one Volume to another. + \item Supports writing to DVD. + \end{itemize} + +\item Multi-Operating System Support + \begin{itemize} + \item Programmed to handle arbitrarily long filenames and messages. + \item GZIP compression on a file by file basis done by the Client program if + requested before network transit. + \item Saves and restores POSIX ACLs on most OSes if enabled. + \item Access control lists for Consoles that permit restricting user access + to only their data. + \item Support for save/restore of files larger than 2GB. + \item Support for 64 bit machines, e.g. amd64, Sparc. + \item Support ANSI and IBM tape labels. + \item Support for Unicode filenames (e.g. Chinese) on Win32 machines on + version 1.37.28 and greater. + \item Consistent backup of open files on Win32 systems (WinXP, Win2003, + and Vista) + but not Win2000, using Volume Shadow Copy (VSS). + \item Support for path/filename lengths of up to 64K on Win32 machines + (unlimited on Unix/Linux machines). + \end{itemize} + +\item Miscellaneous + \begin{itemize} + \item Multi-threaded implementation. + \item A comprehensive and extensible + \ilink{configuration file}{DirectorChapter} for each daemon. 
+ \end{itemize} +\end{itemize} + +\section{Advantages Over Other Backup Programs} +\index[general]{Advantages of Bacula Over Other Backup Programs } +\index[general]{Programs!Advantages of Bacula Over Other Backup } + +\begin{itemize} +\item Since there is a client for each machine, you can backup + and restore clients of any type ensuring that all attributes + of files are properly saved and restored. +\item It is also possible to backup clients without any client + software by using NFS or Samba. However, if possible, we + recommend running a Client File daemon on each machine to be + backed up. +\item Bacula handles multi-volume backups. +\item A full comprehensive SQL standard database of all files backed up. This + permits online viewing of files saved on any particular Volume. +\item Automatic pruning of the database (removal of old records) thus + simplifying database administration. +\item Any SQL database engine can be used making Bacula very flexible. + Drivers currently exist for MySQL, PostgreSQL, and SQLite. +\item The modular but integrated design makes Bacula very scalable. +\item Since Bacula uses client file servers, any database or + other application can be properly shutdown by Bacula using the + native tools of the system, backed up, then restarted (all + within a Bacula Job). +\item Bacula has a built-in Job scheduler. +\item The Volume format is documented and there are simple C programs to + read/write it. +\item Bacula uses well defined (IANA registered) TCP/IP ports -- no rpcs, no + shared memory. +\item Bacula installation and configuration is relatively simple compared to + other comparable products. +\item According to one user Bacula is as fast as the big major commercial + applications. +\item According to another user Bacula is four times as fast as another + commercial application, probably because that application stores its catalog + information in a large number of individual files rather than an SQL database + as Bacula does. 
+\item Aside from several GUI administrative interfaces, Bacula has a + comprehensive shell administrative interface, which allows the + administrator to use tools such as ssh to administrate any part of + Bacula from anywhere (even from home). +\item Bacula has a Rescue CD for Linux systems with the following features: + \begin{itemize} + \item You build it on your own system from scratch with one simple command: + make -- well, then make burn. + \item It uses your kernel + \item It captures your current disk parameters and builds scripts that allow + you to automatically repartition a disk and format it to put it back to what + you had before. + \item It has a script that will restart your networking (with the right IP + address) + \item It has a script to automatically mount your hard disks. + \item It has a full Bacula FD statically linked + \item You can easily add additional data/programs, ... to the disk. + \end{itemize} + +\end{itemize} + +\section{Current Implementation Restrictions} +\index[general]{Current Implementation Restrictions } +\index[general]{Restrictions!Current Implementation } + +\begin{itemize} +\item If you have over 4 billion file entries stored in your database, the + database FileId is likely to overflow. This is a monster database, but still + possible. Bacula's FileId fields have been modified so that they can be + upgraded from 32 to 64 bits in version 1.39 or later, but you must + manually do so. +\item Files deleted after a Full save will be included in a restoration. This + is typical for most similar backup programs (we have a project to + correct this). +\item Bacula's Differential and Incremental backups are based on + time stamps. Consequently, if you move files into an existing + directory or move a whole directory into the backup fileset + after a Full backup, those files will probably not be backed + up by an Incremental save because they will have old dates. 
+ You must explicitly update the date/time stamp on all moved + files (we have a project to correct this). +\item File System Modules (configurable routines for + saving/restoring special files) are not yet implemented. However, + this feature is easily implemented using RunScripts. +\item Bacula supports doing backups and restores to multiple + devices of different media type and multiple Storage daemons. + However, if you have backed up a job to multiple storage + devices, Bacula can do a restore from only one device, which + means that you will need to manually edit the bootstrap file + to split it into two restores if you split the backup across + storage devices. This restriction has been removed in version + 2.2.0 and later, but it is not yet fully tested. +\item Bacula cannot restore two different jobs in the same + restore if those jobs were run simultaneously, unless you had + data spooling turned on and the spool file held the full + contents of both jobs. In other terms, Bacula cannot restore + two jobs in the same restore if the jobs' data blocks were + intermixed on the backup medium. This poses no restrictions + for normal backup jobs even if they are run simultaneously. +\item Bacula can generally restore any backup made from a client + to any other client. However, if the architecture is significantly + different (i.e. 32 bit architecture to 64 bit or Win32 to Unix), + some restrictions may apply (e.g. Solaris door files do not exist + on other Unix/Linux machines; there are reports that Zlib compression + written with 64 bit machines does not always read correctly on a 32 bit + machine). +\end{itemize} + +\section{Design Limitations or Restrictions} +\index[general]{Restrictions!Design Limitations or } +\index[general]{Design Limitations or Restrictions } + +\begin{itemize} +\item Names (resource names, Volume names, and such) defined in Bacula + configuration files are limited to a fixed number of + characters. 
Currently the limit is defined as 127 characters. Note, + this does not apply to filenames, which may be arbitrarily long. +\item Command line input to some of the stand alone tools -- e.g. btape, + bconsole is restricted to several hundred characters maximum. +\end{itemize} diff --git a/docs/manuals/en/concepts/strategies.tex b/docs/manuals/en/concepts/strategies.tex new file mode 100644 index 00000000..b0bcfebc --- /dev/null +++ b/docs/manuals/en/concepts/strategies.tex @@ -0,0 +1,439 @@ +%% +%% + +\chapter{Backup Strategies} +\label{StrategiesChapter} +\index[general]{Strategies!Backup } +\index[general]{Backup Strategies } + +Although Recycling and Backing Up to Disk Volume have been discussed in +previous chapters, this chapter is meant to give you an overall view of +possible backup strategies and to explain their advantages and disadvantages. +\label{Simple} + +\section{Simple One Tape Backup} +\index[general]{Backup!Simple One Tape } +\index[general]{Simple One Tape Backup } + +Probably the simplest strategy is to back everything up to a single tape and +insert a new (or recycled) tape when it fills and Bacula requests a new one. + +\subsection{Advantages} +\index[general]{Advantages } + +\begin{itemize} +\item The operator intervenes only when a tape change is needed. (once a + month at my site). +\item There is little chance of operator error because the tape is not + changed daily. +\item A minimum number of tapes will be needed for a full restore. Typically + the best case will be one tape and worst two. +\item You can easily arrange for the Full backup to occur a different night + of the month for each system, thus load balancing and shortening the backup + time. +\end{itemize} + +\subsection{Disadvantages} +\index[general]{Disadvantages } + +\begin{itemize} +\item If your site burns down, you will lose your current backups, and in my + case about a month of data. 
+\item After a tape fills and you have put in a blank tape, the backup will + continue, and this will generally happen during working hours. + \end{itemize} + +\subsection{Practical Details} +\index[general]{Details!Practical } +\index[general]{Practical Details } + +This system is very simple. When the tape fills and Bacula requests a new +tape, you {\bf unmount} the tape from the Console program, insert a new tape +and {\bf label} it. In most cases after the label, Bacula will automatically +mount the tape and resume the backup. Otherwise, you simply {\bf mount} the +tape. + +Using this strategy, one typically does a Full backup once a week followed by +daily Incremental backups. To minimize the amount of data written to the tape, +one can do a Full backup once a month on the first Sunday of the +month, a Differential backup on the 2nd-5th Sunday of the month, and +incremental backups the rest of the week. +\label{Manual} + +\section{Manually Changing Tapes} +\index[general]{Tapes!Manually Changing } +\index[general]{Manually Changing Tapes } + +If you use the strategy presented above, Bacula will ask you to change the +tape, and you will {\bf unmount} it and then remount it when you have inserted +the new tape. + +If you do not wish to interact with Bacula to change each tape, there are +several ways to get Bacula to release the tape: + +\begin{itemize} +\item In your Storage daemon's Device resource, set + {\bf AlwaysOpen = no} + In this case, Bacula will release the tape after every job. If you run + several jobs, the tape will be rewound and repositioned to the end at the + beginning of every job. This is not very efficient, but does let you change + the tape whenever you want. +\item Use a {\bf RunAfterJob} statement to run a script after your last job. + This could also be an {\bf Admin} job that runs after all your backup jobs. 
+ The script could be something like: + +\footnotesize +\begin{verbatim} + #!/bin/sh + /full-path/bconsole -c /full-path/bconsole.conf <----| Stunnel 1 |-----> Port 9102 + |===========| + stunnel-fd2.conf + |===========| + Port 9103 >----| Stunnel 2 |-----> server:29103 + |===========| + Director (server): + stunnel-dir.conf + |===========| + Port 29102 >----| Stunnel 3 |-----> client:29102 + |===========| + stunnel-sd.conf + |===========| + Port 29103 >----| Stunnel 4 |-----> 9103 + |===========| +\end{verbatim} +\normalsize + +\section{Certificates} +\index[general]{Certificates } + +In order for stunnel to function as a server, which it does in our diagram for +Stunnel 1 and Stunnel 4, you must have a certificate and the key. It is +possible to keep the two in separate files, but normally, you keep them in one +single .pem file. You may create this certificate yourself in which case, it +will be self-signed, or you may have it signed by a CA. + +If you want your clients to verify that the server is in fact valid (Stunnel 2 +and Stunnel 3), you will need to have the server certificates signed by a CA +(Certificate Authority), and you will need to have the CA's public certificate +(contains the CA's public key). + +Having a CA signed certificate is {\bf highly} recommended if you are using +your client across the Internet, otherwise you are exposed to the man in the +middle attack and hence loss of your data. + +See below for how to create a self-signed certificate. + +\section{Securing the Data Channel} +\index[general]{Channel!Securing the Data } +\index[general]{Securing the Data Channel } + +To simplify things a bit, let's for the moment consider only the data channel. +That is the connection between the File daemon and the Storage daemon, which +takes place on port 9103. In fact, in a minimalist solution, this is the only +connection that needs to be encrypted, because it is the one that transports your +data. 
The connection between the Director and the File daemon is simply a +control channel used to start the job and get the job status. + +Normally the File daemon will contact the Storage daemon on port 9103 +(supplied by the Director), so we need an stunnel that listens on port 9103 on +the File daemon's machine, encrypts the data and sends it to the Storage +daemon. This is depicted by Stunnel 2 above. Note that this stunnel is +listening on port 9103 and sending to server:29103. We use port 29103 on the +server because if we would send the data to port 9103, it would go directly to the +Storage daemon, which doesn't understand encrypted data. On the server +machine, we run Stunnel 4, which listens on port 29103, decrypts the data and +sends it to the Storage daemon, which is listening on port 9103. + +\section{Data Channel Configuration} +\index[general]{Modification of bacula-dir.conf for the Data Channel } +\index[general]{baculoa-dir.conf!Modification for the Data Channel } + +The Storage resource of the bacula-dir.conf normally looks something like the +following: + +\footnotesize +\begin{verbatim} +Storage { + Name = File + Address = server + SDPort = 9103 + Password = storage_password + Device = File + Media Type = File +} +\end{verbatim} +\normalsize + +Notice that this is running on the server machine, and it points the File +daemon back to server:9103, which is where our Storage daemon is listening. We +modify this to be: + +\footnotesize +\begin{verbatim} +Storage { + Name = File + Address = localhost + SDPort = 9103 + Password = storage_password + Device = File + Media Type = File +} +\end{verbatim} +\normalsize + +This causes the File daemon to send the data to the stunnel running on +localhost (the client machine). We could have used client as the address as +well. 
+ +\section{Stunnel Configuration for the Data Channel} +\index[general]{Stunnel Configuration for the Data Channel } + +In the diagram above, we see above Stunnel 2 that we use stunnel-fd2.conf on the +client. A pretty much minimal config file would look like the following: + +\footnotesize +\begin{verbatim} +client = yes +[29103] +accept = localhost:9103 +connect = server:29103 +\end{verbatim} +\normalsize + +The above config file does encrypt the data but it does not require a +certificate, so it is subject to the man in the middle attack. The file I +actually used, stunnel-fd2.conf, looked like this: + +\footnotesize +\begin{verbatim} +# +# Stunnel conf for Bacula client -> SD +# +pid = /home/kern/bacula/bin/working/stunnel.pid +# +# A cert is not mandatory here. If verify=2, a +# cert signed by a CA must be specified, and +# either CAfile or CApath must point to the CA's +# cert +# +cert = /home/kern/stunnel/stunnel.pem +CAfile = /home/kern/ssl/cacert.pem +verify = 2 +client = yes +# debug = 7 +# foreground = yes +[29103] +accept = localhost:9103 +connect = server:29103 +\end{verbatim} +\normalsize + +You will notice that I specified a pid file location because I ran stunnel +under my own userid so I could not use the default, which requires root +permission. I also specified a certificate that I have as well as verify level +2 so that the certificate is required and verified, and I must supply the +location of the CA (Certificate Authority) certificate so that the stunnel +certificate can be verified. Finally, you will see that there are two lines +commented out, which when enabled, produce a lot of nice debug info in the +command window. + +If you do not have a signed certificate (stunnel.pem), you need to delete the +cert, CAfile, and verify lines. + +Note that the stunnel.pem, is actually a private key and a certificate in a +single file. These two can be kept and specified individually, but keeping +them in one file is more convenient. 
+ +The config file, stunnel-sd.conf, needed for Stunnel 4 on the server machine +is: + +\footnotesize +\begin{verbatim} +# +# Bacula stunnel conf for Storage daemon +# +pid = /home/kern/bacula/bin/working/stunnel.pid +# +# A cert is mandatory here, it may be self signed +# If it is self signed, the client may not use +# verify +# +cert = /home/kern/stunnel/stunnel.pem +client = no +# debug = 7 +# foreground = yes +[29103] +accept = 29103 +connect = 9103 +\end{verbatim} +\normalsize + +\section{Starting and Testing the Data Encryption} +\index[general]{Starting and Testing the Data Encryption } +\index[general]{Encryption!Starting and Testing the Data } + +It will most likely be the simplest to implement the Data Channel encryption +in the following order: + +\begin{itemize} +\item Setup and run Bacula backing up some data on your client machine + without encryption. +\item Stop Bacula. +\item Modify the Storage resource in the Director's conf file. +\item Start Bacula +\item Start stunnel on the server with: + + \footnotesize +\begin{verbatim} + stunnel stunnel-sd.conf + +\end{verbatim} +\normalsize + +\item Start stunnel on the client with: + + \footnotesize +\begin{verbatim} + stunnel stunnel-fd2.conf + +\end{verbatim} +\normalsize + +\item Run a job. +\item If it doesn't work, turn debug on in both stunnel conf files, restart + the stunnels, rerun the job, repeat until it works. + \end{itemize} + +\section{Encrypting the Control Channel} +\index[general]{Channel!Encrypting the Control } +\index[general]{Encrypting the Control Channel } + +The Job control channel is between the Director and the File daemon, and as +mentioned above, it is not really necessary to encrypt, but it is good +practice to encrypt it as well. The two stunnels that are used in this case +will be Stunnel 1 and Stunnel 3 in the diagram above. 
Stunnel 3 on the server +might normally listen on port 9102, but if you have a local File daemon, this +will not work, so we make it listen on port 29102. It then sends the data to +client:29102. Again we use port 29102 so that the stunnel on the client +machine can decrypt the data before passing it on to port 9102 where the File +daemon is listening. + +\section{Control Channel Configuration} +\index[general]{Control Channel Configuration } + +We need to modify the standard Client resource, which would normally look +something like: + +\footnotesize +\begin{verbatim} +Client { + Name = client-fd + Address = client + FDPort = 9102 + Catalog = BackupDB + Password = "xxx" +} +\end{verbatim} +\normalsize + +to be: + +\footnotesize +\begin{verbatim} +Client { + Name = client-fd + Address = localhost + FDPort = 29102 + Catalog = BackupDB + Password = "xxx" +} +\end{verbatim} +\normalsize + +This will cause the Director to send the control information to +localhost:29102 instead of directly to the client. + +\section{Stunnel Configuration for the Control Channel} +\index[general]{Config Files for stunnel to Encrypt the Control Channel } + +The stunnel config file, stunnel-dir.conf, for the Director's machine would +look like the following: + +\footnotesize +\begin{verbatim} +# +# Bacula stunnel conf for the Directory to contact a client +# +pid = /home/kern/bacula/bin/working/stunnel.pid +# +# A cert is not mandatory here. 
If verify=2, a +# cert signed by a CA must be specified, and +# either CAfile or CApath must point to the CA's +# cert +# +cert = /home/kern/stunnel/stunnel.pem +CAfile = /home/kern/ssl/cacert.pem +verify = 2 +client = yes +# debug = 7 +# foreground = yes +[29102] +accept = localhost:29102 +connect = client:29102 +\end{verbatim} +\normalsize + +and the config file, stunnel-fd1.conf, needed to run stunnel on the Client +would be: + +\footnotesize +\begin{verbatim} +# +# Bacula stunnel conf for the Directory to contact a client +# +pid = /home/kern/bacula/bin/working/stunnel.pid +# +# A cert is not mandatory here. If verify=2, a +# cert signed by a CA must be specified, and +# either CAfile or CApath must point to the CA's +# cert +# +cert = /home/kern/stunnel/stunnel.pem +CAfile = /home/kern/ssl/cacert.pem +verify = 2 +client = yes +# debug = 7 +# foreground = yes +[29102] +accept = localhost:29102 +connect = client:29102 +\end{verbatim} +\normalsize + +\section{Starting and Testing the Control Channel} +\index[general]{Starting and Testing the Control Channel } +\index[general]{Channel!Starting and Testing the Control } + +It will most likely be the simplest to implement the Control Channel +encryption in the following order: + +\begin{itemize} +\item Stop Bacula. +\item Modify the Client resource in the Director's conf file. +\item Start Bacula +\item Start stunnel on the server with: + + \footnotesize +\begin{verbatim} + stunnel stunnel-dir.conf + +\end{verbatim} +\normalsize + +\item Start stunnel on the client with: + + \footnotesize +\begin{verbatim} + stunnel stunnel-fd1.conf + +\end{verbatim} +\normalsize + +\item Run a job. +\item If it doesn't work, turn debug on in both stunnel conf files, restart + the stunnels, rerun the job, repeat until it works. 
+ \end{itemize} + +\section{Using stunnel to Encrypt to a Second Client} +\index[general]{Using stunnel to Encrypt to a Second Client } +\index[general]{Client!Using stunnel to Encrypt to a Second } + +On the client machine, you can just duplicate, file for file, the setup that +you have on the first client and it should work fine. + +In the bacula-dir.conf file, you will want to create a second client pretty +much identical to how you did for the first one, but the port number must be +unique. We previously used: + +\footnotesize +\begin{verbatim} +Client { + Name = client-fd + Address = localhost + FDPort = 29102 + Catalog = BackupDB + Password = "xxx" +} +\end{verbatim} +\normalsize + +so for the second client, we will, of course, have a different name, and we +will also need a different port. Remember that we used port 29103 for the +Storage daemon, so for the second client, we can use port 29104, and the +Client resource would look like: + +\footnotesize +\begin{verbatim} +Client { + Name = client2-fd + Address = localhost + FDPort = 29104 + Catalog = BackupDB + Password = "yyy" +} +\end{verbatim} +\normalsize + +Now, fortunately, we do not need a third stunnel on the Director's machine, +we can just add the new port to the config file, stunnel-dir.conf, to make: + +\footnotesize +\begin{verbatim} +# +# Bacula stunnel conf for the Directory to contact a client +# +pid = /home/kern/bacula/bin/working/stunnel.pid +# +# A cert is not mandatory here. 
If verify=2, a +# cert signed by a CA must be specified, and +# either CAfile or CApath must point to the CA's +# cert +# +cert = /home/kern/stunnel/stunnel.pem +CAfile = /home/kern/ssl/cacert.pem +verify = 2 +client = yes +# debug = 7 +# foreground = yes +[29102] +accept = localhost:29102 +connect = client:29102 +[29104] +accept = localhost:29104 +connect = client2:29102 +\end{verbatim} +\normalsize + +There are no changes necessary to the Storage daemon or the other stunnel so +that this new client can talk to our Storage daemon. + +\section{Creating a Self-signed Certificate} +\index[general]{Creating a Self-signed Certificate } +\index[general]{Certificate!Creating a Self-signed } + +You may create a self-signed certificate for use with stunnel that will permit +you to make it function, but will not allow certificate validation. The .pem +file containing both the certificate and the key can be made with the +following, which I put in a file named {\bf makepem}: + +\footnotesize +\begin{verbatim} +#!/bin/sh +# +# Simple shell script to make a .pem file that can be used +# with stunnel and Bacula +# +OPENSSL=openssl + umask 77 + PEM1="/bin/mktemp openssl.XXXXXX" + PEM2="/bin/mktemp openssl.XXXXXX" + ${OPENSSL} req -newkey rsa:1024 -keyout $PEM1 -nodes \ + -x509 -days 365 -out $PEM2 + cat $PEM1 > stunnel.pem + echo "" >>stunnel.pem + cat $PEM2 >>stunnel.pem + rm $PEM1 $PEM2 +\end{verbatim} +\normalsize + +The above script will ask you a number of questions. You may simply answer +each of them by entering a return, or if you wish you may enter your own data. + + +\section{Getting a CA Signed Certificate} +\index[general]{Certificate!Getting a CA Signed } +\index[general]{Getting a CA Signed Certificate } + +The process of getting a certificate that is signed by a CA is quite a bit +more complicated. You can purchase one from quite a number of PKI vendors, but +that is not at all necessary for use with Bacula. 
+ +To get a CA signed +certificate, you will either need to find a friend that has setup his own CA +or to become a CA yourself, and thus you can sign all your own certificates. +The book OpenSSL by John Viega, Matt Mesier \& Pravir Chandra from O'Reilly +explains how to do it, or you can read the documentation provided in the +Open-source PKI Book project at Source Forge: +\elink{ +http://ospkibook.sourceforge.net/docs/OSPKI-2.4.7/OSPKI-html/ospki-book.htm} +{http://ospkibook.sourceforge.net/docs/OSPKI-2.4.7/OSPKI-html/ospki-book.htm}. +Note, this link may change. + +\section{Using ssh to Secure the Communications} +\index[general]{Communications!Using ssh to Secure the } +\index[general]{Using ssh to Secure the Communications } + +Please see the script {\bf ssh-tunnel.sh} in the {\bf examples} directory. It +was contributed by Stephan Holl. diff --git a/docs/manuals/en/concepts/supportedchangers.tex b/docs/manuals/en/concepts/supportedchangers.tex new file mode 100644 index 00000000..ebf876bc --- /dev/null +++ b/docs/manuals/en/concepts/supportedchangers.tex @@ -0,0 +1,76 @@ +%% +%% + +\chapter{Supported Autochangers} +\label{Models} +\index[general]{Supported Autochanger Models} +\index[general]{Autochangers!Supported} + +I hesitate to call these "supported" autochangers because the only +autochangers that I have in my possession and am able to test are the HP +SureStore DAT40X6 and the Overland PowerLoader LTO-2. All the other +autochangers have been reported to work by Bacula users. Note, in the +Capacity/Slot column below, I quote the Compressed capacity per tape (or +Slot). + +Since on most systems (other than FreeBSD), Bacula uses {\bf mtx} +through the {\bf mtx-changer} script, in principle, if {\bf mtx} +will operate your changer correctly, then it is just a question +of adapting the {\bf mtx-changer} script (or selecting one +already adapted) for proper interfacing. 
You can find a list of +autochangers supported by {\bf mtx} at the following link: +\elink{http://mtx.opensource-sw.net/compatibility.php} +{\url{http://mtx.opensource-sw.net/compatibility.php}}. +The home page for the {\bf mtx} project can be found at: +\elink{http://mtx.opensource-sw.net/}{\url{http://mtx.opensource-sw.net/}}. + + +\addcontentsline{lot}{table}{Autochangers Known to Work with Bacula} +\begin{longtable}{|p{0.6in}|p{0.8in}|p{1.9in}|p{0.8in}|p{0.5in}|p{0.75in}|} + \hline +\multicolumn{1}{|c| }{\bf OS } & \multicolumn{1}{c| }{\bf Man. } & +\multicolumn{1}{c| }{\bf Media } & \multicolumn{1}{c| }{\bf Model } & +\multicolumn{1}{c| }{\bf Slots } & \multicolumn{1}{c| }{\bf Cap/Slot } \\ + \hline {Linux } & {Adic } & {DDS-3} & {Adic 1200G } & {12} & {-} \\ + \hline {Linux } & {Adic } & {DLT} & {FastStore 4000 } & {7} & {20GB} \\ + \hline {Linux } & {Adic } & {LTO-1/2, SDLT 320 } & {Adic Scalar 24 } & {24} & {100GB } \\ + \hline {Linux } & {Adic } & {LTO-2 } & {Adic FastStor 2, Sun Storedge L8 } & {8} & {200GB } \\ + \hline {Linux } & {BDT } & {AIT } & {BDT ThinStor } & {?} & {200GB } \\ + \hline {- } & {CA-VM } & {?? } & {Tape } & {??} & {?? } \\ + \hline {Linux } & {Dell} & {DLT VI,LTO-2,LTO3} & {PowerVault 122T/132T/136T } & {-} & {100GB } \\ + \hline {Linux } & {Dell} & {LTO-2} & {PowerVault 124T } & {-} & {200GB } \\ + \hline {- } & {DFSMS } & {?? } & {VM RMM} & {-} & {?? 
} \\ + \hline {Linux } & {Exabyte } & {VXA2 } & {VXA PacketLoader 1x10 2U } & {10} & {80/160GB } \\ + \hline {- } & {Exabyte } & {LTO } & {Magnum 1x7 LTO Tape Auotloader } & {7} & {200/400GB } \\ + \hline {Linux } & {Exabyte } & {AIT-2 } & {215A } & {15 (2 drives)} & {50GB } \\ + \hline {Linux } & {HP } & {DDS-4 } & {SureStore DAT-40X6 } & {6 } & {40GB } \\ + \hline {Linux } & {HP } & {Ultrium-2/LTO } & {MSL 6000/ 60030/ 5052 } & {28 } & {200/400GB } \\ + \hline {- } & {HP } & {DLT } & {A4853 DLT } & {30} & {40/70GB } \\ + \hline {Linux } & {HP (Compaq) } & {DLT VI } & {Compaq TL-895 } & {96+4 import export} & {35/70GB } \\ + \hline {z/VM } & {IBM } & {?? } & {IBM Tape Manager } & {-} & {?? } \\ + \hline {z/VM } & {IBM } & {?? } & {native tape } & {-} & {?? } \\ + \hline {Linux } & {IBM } & {LTO } & {IBM 3581 Ultrium Tape Loader } & {7} & {200/400GB } \\ + \hline {FreeBSD 5.4} & {IBM } & {DLT} & {IBM 3502-R14 -- rebranded ATL L-500} & {14} & {35/70GB } \\ + \hline {Linux} & {IBM } & {???} & {IBM TotalStorage 3582L23} & {??} & {?? } \\ + \hline {Debian} & {Overland } & {LTO } & {Overland LoaderXpress LTO/DLT8000 } & {10-19} & {40-100GB } \\ + \hline {Fedora} & {Overland } & {LTO } & {Overland PowerLoader LTO-2 } & {10-19} & {200/400GB } \\ + \hline {FreeBSD 5.4-Stable} & {Overland} & {LTO-2} & {Overland Powerloader tape} & {17} & {100GB } \\ + \hline {- } & {Overland} & {LTO } & {Overland Neo2000 LTO } & {26-30} & {100GB } \\ + \hline {Linux} & {Quantum } & {DLT-S4} & {Superloader 3} & {16} & {800/1600GB } \\ + \hline {Linux} & {Quantum } & {LTO-2} & {Superloader 3} & {16} & {200/400GB } \\ + \hline {Linux} & {Quantum } & {LTO-3 } & {PX502 } & {??} & {?? 
} \\ + \hline {FreeBSD 4.9 } & {QUALSTAR TLS-4210 (Qualstar) } & {AIT1: 36GB, AIT2: 50GB all +uncomp } & {QUALSTAR TLS-4210 } & {12} & {AIT1: 36GB, AIT2: 50GB all uncomp }\\ + \hline {Linux } & {Skydata } & {DLT } & {ATL-L200 } & {8} & {40/80 } \\ + \hline {- } & {Sony } & {DDS-4 } & {TSL-11000 } & {8} & {40GB } \\ + \hline {Linux } & {Sony } & {AIT-2} & {LIB-304(SDX-500C) } & {?} & {200GB } \\ + \hline {Linux } & {Sony } & {AIT-3} & {LIB-D81) } & {?} & {200GB } \\ + \hline {FreeBSD 4.9-STABLE } & {Sony } & {AIT-1 } & {TSL-SA300C } & {4} & {45/70GB }\\ + \hline {- } & {Storagetek } & {DLT } & {Timberwolf DLT } & {6} & {40/70 } \\ + \hline {- } & {Storagetek } & {?? } & {ACSLS } & {??} & {?? } \\ + \hline {Solaris } & {Sun } & {4mm DLT } & {Sun Desktop Archive Python 29279 } & {4} & {20GB } \\ + \hline {Linux } & {Tandberg } & {DLT VI } & {VS 640 } & {8?} & {35/70GB } \\ + \hline {Linux 2.6.x } & {Tandberg Data } & {SLR100 } & {SLR100 Autoloader } & {8} & {50/100GB }\\ +\hline + +\end{longtable} diff --git a/docs/manuals/en/concepts/supporteddrives.tex b/docs/manuals/en/concepts/supporteddrives.tex new file mode 100644 index 00000000..005ba405 --- /dev/null +++ b/docs/manuals/en/concepts/supporteddrives.tex @@ -0,0 +1,158 @@ +%% +%% + +\chapter{Supported Tape Drives} +\label{SupportedDrives} +\index[general]{Drives!Supported Tape } +\index[general]{Supported Tape Drives } + +Bacula uses standard operating system calls (read, write, ioctl) to +interface to tape drives. As a consequence, it relies on having a +correctly written OS tape driver. Bacula is known to work perfectly well +with SCSI tape drivers on FreeBSD, Linux, Solaris, and Windows machines, +and it may work on other *nix machines, but we have not tested it. +Recently there are many new drives that use IDE, ATAPI, or +SATA interfaces rather than SCSI. On Linux the OnStream drive, which uses +the OSST driver is one such +example, and it is known to work with Bacula. 
In addition a number of such +tape drives (i.e. OS drivers) seem to work on Windows systems. However, +non-SCSI tape drives (other than the OnStream) that use ide-scsi, ide-tape, +or other non-scsi drivers do not function correctly with Bacula (or any +other demanding tape application) as of today (April 2007). If you +have purchased a non-SCSI tape drive for use with Bacula on Linux, there +is a good chance that it will not work. We are working with the kernel +developers to rectify this situation, but it will not be resolved in the +near future. + +Even if your drive is on the list below, please check the +\ilink{Tape Testing Chapter}{btape1} of this manual for +procedures that you can use to verify if your tape drive will work with +Bacula. If your drive is in fixed block mode, it may appear to work with +Bacula until you attempt to do a restore and Bacula wants to position the +tape. You can be sure only by following the procedures suggested above and +testing. + +It is very difficult to supply a list of supported tape drives, or drives that +are known to work with Bacula because of limited feedback (so if you use +Bacula on a different drive, please let us know). Based on user feedback, the +following drives are known to work with Bacula. A dash in a column means +unknown: + +\addcontentsline{lot}{table}{Supported Tape Drives} +\begin{longtable}{|p{2.0in}|l|l|p{2.5in}|l|} + \hline +\multicolumn{1}{|c| }{\bf OS } & \multicolumn{1}{c| }{\bf Man. 
} & +\multicolumn{1}{c| }{\bf Media } & \multicolumn{1}{c| }{\bf Model } & +\multicolumn{1}{c| }{\bf Capacity } \\ + \hline {- } & {ADIC } & {DLT } & {Adic Scalar 100 DLT } & {100GB } \\ + \hline {- } & {ADIC } & {DLT } & {Adic Fastor 22 DLT } & {- } \\ + \hline {FreeBSD 5.4-RELEASE-p1 amd64 } & {Certance} & {LTO } & {AdicCertance CL400 LTO Ultrium 2 } & {200GB } \\ + \hline {- } & {- } & {DDS } & {Compaq DDS 2,3,4 } & {- } \\ + \hline {SuSE 8.1 Pro} & {Compaq} & {AIT } & {Compaq AIT 35 LVD } & {35/70GB } \\ + \hline {- } & {Exabyte } & {- } & {Exabyte drives less than 10 years old } & {- } \\ + \hline {- } & {Exabyte } & {- } & {Exabyte VXA drives } & {- } \\ + \hline {- } & {HP } & {Travan 4 } & {Colorado T4000S } & {- } \\ + \hline {- } & {HP } & {DLT } & {HP DLT drives } & {- } \\ + \hline {- } & {HP } & {LTO } & {HP LTO Ultrium drives } & {- } \\ + \hline {- } & {IBM} & {??} & {3480, 3480XL, 3490, 3490E, 3580 and 3590 drives} & {- } \\ + \hline {FreeBSD 4.10 RELEASE } & {HP } & {DAT } & {HP StorageWorks DAT72i } & {- } \\ + \hline {- } & {Overland } & {LTO } & {LoaderXpress LTO } & {- } \\ + \hline {- } & {Overland } & {- } & {Neo2000 } & {- } \\ + \hline {- } & {OnStream } & {- } & {OnStream drives (see below) } & {- } \\ + \hline {FreeBSD 4.11-Release} & {Quantum } & {SDLT } & {SDLT320 } & {160/320GB } \\ + \hline {- } & {Quantum } & {DLT } & {DLT-8000 } & {40/80GB } \\ + \hline {Linux } & {Seagate } & {DDS-4 } & {Scorpio 40 } & {20/40GB } \\ + \hline {FreeBSD 4.9 STABLE } & {Seagate } & {DDS-4 } & {STA2401LW } & {20/40GB } \\ + \hline {FreeBSD 5.2.1 pthreads patched RELEASE } & {Seagate } & {AIT-1 } & {STA1701W} & {35/70GB } \\ + \hline {Linux } & {Sony } & {DDS-2,3,4 } & {- } & {4-40GB } \\ + \hline {Linux } & {Tandberg } & {- } & {Tandbert MLR3 } & {- } \\ + \hline {FreeBSD } & {Tandberg } & {- } & {Tandberg SLR6 } & {- } \\ + \hline {Solaris } & {Tandberg } & {- } & {Tandberg SLR75 } & {- } \\ + \hline + +\end{longtable} + +There is a list of 
\ilink{supported autochangers}{Models} in the Supported +Autochangers chapter of this document, where you will find other tape drives +that work with Bacula. + +\section{Unsupported Tape Drives} +\label{UnSupportedDrives} +\index[general]{Unsupported Tape Drives } +\index[general]{Drives!Unsupported Tape } + +Previously OnStream IDE-SCSI tape drives did not work with Bacula. As of +Bacula version 1.33 and the osst kernel driver version 0.9.14 or later, they +now work. Please see the testing chapter as you must set a fixed block size. + +QIC tapes are known to have a number of particularities (fixed block size, and +one EOF rather than two to terminate the tape). As a consequence, you will +need to take a lot of care in configuring them to make them work correctly +with Bacula. + +\section{FreeBSD Users Be Aware!!!} +\index[general]{FreeBSD Users Be Aware } +\index[general]{Aware!FreeBSD Users Be } + +Unless you have patched the pthreads library on FreeBSD 4.11 systems, you will +lose data when Bacula spans tapes. This is because the unpatched pthreads +library fails to return a warning status to Bacula that the end of the tape is +near. This problem is fixed in FreeBSD systems released after 4.11. Please see the +\ilink{Tape Testing Chapter}{FreeBSDTapes} of this manual for +{\bf important} information on how to configure your tape drive for +compatibility with Bacula. + +\section{Supported Autochangers} +\index[general]{Autochangers!Supported } +\index[general]{Supported Autochangers } + +For information on supported autochangers, please see the +\ilink{Autochangers Known to Work with Bacula}{Models} +section of the Supported Autochangers chapter of this manual. + +\section{Tape Specifications} +\index[general]{Specifications!Tape} +\index[general]{Tape Specifications} +If you want to know what tape drive to buy that will work with Bacula, +we really cannot tell you. However, we can say that if you are going +to buy a drive, you should try to avoid DDS drives. 
The technology is +rather old and DDS tape drives need frequent cleaning. DLT drives are +generally much better (newer technology) and do not need frequent +cleaning. + +Below, you will find a table of DLT and LTO tape specifications that will +give you some idea of the capacity and speed of modern tapes. The +capacities that are listed are the native tape capacity without compression. +All modern drives have hardware compression, and manufacturers often list +compressed capacity using a compression ration of 2:1. The actual compression +ratio will depend mostly on the data you have to backup, but I find that +1.5:1 is a much more reasonable number (i.e. multiply the value shown in +the table by 1.5 to get a rough average of what you will probably see). +The transfer rates are rounded to the nearest GB/hr. All values are provided +by various manufacturers. + +The Media Type is what is designated by the manufacturers and you are not +required to use (but you may) the same name in your Bacula conf resources. + + +\begin{tabular}{|c|c|c|c} +Media Type & Drive Type & Media Capacity & Transfer Rate \\ \hline +DDS-1 & DAT & 2 GB & ?? GB/hr \\ \hline +DDS-2 & DAT & 4 GB & ?? GB/hr \\ \hline +DDS-3 & DAT & 12 GB & 5.4 GB/hr \\ \hline +Travan 40 & Travan & 20 GB & ?? 
GB/hr \\ \hline +DDS-4 & DAT & 20 GB & 11 GB/hr \\ \hline +VXA-1 & Exabyte & 33 GB & 11 GB/hr \\ \hline +DAT-72 & DAT & 36 GB & 13 GB/hr \\ \hline +DLT IV & DLT8000 & 40 GB & 22 GB/hr \\ \hline +VXA-2 & Exabyte & 80 GB & 22 GB/hr \\ \hline +Half-high Ultrium 1 & LTO 1 & 100 GB & 27 GB/hr \\ \hline +Ultrium 1 & LTO 1 & 100 GB & 54 GB/hr \\ \hline +Super DLT 1 & SDLT 220 & 110 GB & 40 GB/hr \\ \hline +VXA-3 & Exabyte & 160 GB & 43 GB/hr \\ \hline +Super DLT I & SDLT 320 & 160 GB & 58 GB/hr \\ \hline +Ultrium 2 & LTO 2 & 200 GB & 108 GB/hr \\ \hline +Super DLT II & SDLT 600 & 300 GB & 127 GB/hr \\ \hline +VXA-4 & Exabyte & 320 GB & 86 GB/hr \\ \hline +Ultrium 3 & LTO 3 & 400 GB & 216 GB/hr \\ \hline +\end{tabular} diff --git a/docs/manuals/en/concepts/supportedoses.tex b/docs/manuals/en/concepts/supportedoses.tex new file mode 100644 index 00000000..797b9eb2 --- /dev/null +++ b/docs/manuals/en/concepts/supportedoses.tex @@ -0,0 +1,48 @@ +%% +%% + +\chapter{Supported Operating Systems} +\label{SupportedOSes} +\index[general]{Systems!Supported Operating } +\index[general]{Supported Operating Systems } + +\begin{itemize} +\item Linux systems (built and tested on CentOS 5). +\item Most flavors of Linux (Gentoo, Red Hat, Fedora, Mandriva, + Debian, OpenSuSE, Ubuntu, Kubuntu, ...). +\item Solaris various versions. +\item FreeBSD (tape driver supported in 1.30 -- for FreeBSD older than + version 5.0, please see some {\bf important} considerations in the + \ilink{ Tape Modes on FreeBSD}{FreeBSDTapes} section of the + Tape Testing chapter of this manual.) +\item Windows (Win98/Me, WinNT/2K/XP, Vista) Client (File daemon) binaries. +\item The Windows servers (Director and Storage daemon) are available + in the binary Client installer. They are reported to work in + many cases. However they are NOT supported. +\item MacOS X/Darwin (see \elink{ http://fink.sourceforge.net/}{http://fink.sourceforge.net/} for + obtaining the packages) +\item OpenBSD Client (File daemon). 
+\item Irix Client (File daemon). +\item Tru64 +\item Bacula is said to work on other systems (AIX, BSDI, HPUX, NetBSD, ...) but we + do not have first hand knowledge of these systems. +\item RHat 7.2 AS2, AS3, AS4, RHEL5, Fedora Core 2,3,4,5,6,7 SuSE SLES + 7,8,9,10,10.1,10.2,10.3 + and Debian Woody and Sarge Linux on + S/390 and Linux on zSeries. +\item See the Porting chapter of the Bacula Developer's Guide for information + on porting to other systems. + +\item If you have a older Red Hat Linux system running the 2.4.x kernel and + you have the directory {\bf /lib/tls} installed on your system (normally by + default), bacula will {\bf NOT} run. This is the new pthreads library and it + is defective. You must remove this directory prior to running Bacula, or you + can simply change the name to {\bf /lib/tls-broken}) then you must reboot + your machine (one of the few times Linux must be rebooted). If you are not + able to remove/rename /lib/tls, an alternative is to set the environment + variable "LD\_ASSUME\_KERNEL=2.4.19" prior to executing Bacula. For this + option, you do not need to reboot, and all programs other than Bacula will + continue to use /lib/tls. +\item The above mentioned {\bf /lib/tls} problem does not occur with Linux 2.6 kernels. + +\end{itemize} diff --git a/docs/manuals/en/concepts/thanks.tex b/docs/manuals/en/concepts/thanks.tex new file mode 100644 index 00000000..aa324925 --- /dev/null +++ b/docs/manuals/en/concepts/thanks.tex @@ -0,0 +1,102 @@ +%% +%% + +\chapter{Thanks} +\label{ThanksChapter} +\index[general]{Thanks } +I thank everyone who has helped this project. Unfortunately, I cannot +thank everyone (bad memory). However, the AUTHORS file in the main source +code directory should include the names of all persons who have contributed +to the Bacula project. Just the same, I would like to include thanks below +to special contributors as well as to the major contributors to the current +release. 
+ +Thanks to Richard Stallman for starting the Free Software movement and for +bringing us gcc and all the other GNU tools as well as the GPL license. + +Thanks to Linus Torvalds for bringing us Linux. + +Thanks to all the Free Software programmers. Without being able to peek at +your code, and in some cases, take parts of it, this project would have been +much more difficult. + +Thanks to John Walker for suggesting this project, giving it a name, +contributing software he has written, and for his programming efforts on +Bacula as well as having acted as a constant sounding board and source of +ideas. + +Thanks to the apcupsd project where I started my Free Software efforts, and +from which I was able to borrow some ideas and code that I had written. + +Special thanks to D. Scott Barninger for writing the bacula RPM spec file, +building all the RPM files and loading them onto Source Forge. This has been a +tremendous help. + +Many thanks to Karl Cunningham for converting the manual from html format to +LaTeX. It was a major effort flawlessly done that will benefit the Bacula +users for many years to come. Thanks Karl. + +Thanks to Dan Langille for the {\bf incredible} amount of testing he did on +FreeBSD. His perseverance is truly remarkable. Thanks also for the many +contributions he has made to improve Bacula (pthreads patch for FreeBSD, +improved start/stop script and addition of Bacula userid and group, stunnel, +...), his continuing support of Bacula users. He also wrote the PostgreSQL +driver for Bacula and has been a big help in correcting the SQL. + +Thanks to multiple other Bacula Packagers who make and release packages for +different platforms for Bacula. + +Thanks to Christopher Hull for developing the native Win32 Bacula emulation +code and for contributing it to the Bacula project. + +Thanks to Robert Nelson for bringing our Win32 implementation up to par +with all the same features that exist in the Unix/Linux versions. 
In +addition, he has ported the Director and Storage daemon to Win32! + +Thanks to Thorsten Engel for his excellent knowledge of Win32 systems, and +for making the Win32 File daemon Unicode compatible, as well as making +the Win32 File daemon interface to Microsoft's Volume Shadow Copy (VSS). +These two are big pluses for Bacula! + +Thanks to Landon Fuller for writing both the communications and the +data encryption code for Bacula. + +Thanks to Arno Lehmann for his excellent and indefatigable help and advice +to users. + +Thanks to all the Bacula users, especially those of you who have contributed +ideas, bug reports, patches, and new features. + +Bacula can be enabled with data encryption and/or communications +encryption. If this is the case, you will be including OpenSSL code +that contains cryptographic software written by Eric Young +(eay@cryptsoft.com) and also software written by Tim Hudson +(tjh@cryptsoft.com). + +The Bat (Bacula Administration Tool) graphs are based in part on the work +of the Qwt project (http://qwt.sf.net). + +The original variable expansion code used in the LabelFormat comes from the +Open Source Software Project (www.ossp.org). It has been adapted and extended +for use in Bacula. This code is now deprecated. + +There have been numerous people over the years who have contributed ideas, +code, and help to the Bacula project. The file AUTHORS in the main source +release file contains a list of contributors. For all those who I have +left out, please send me a reminder, and in any case, thanks for your +contribution. + +Thanks to the Free Software Foundation Europe e.V. for assuming the +responsibilities of protecting the Bacula copyright. + +% TODO: remove this from the book? +\section*{Copyrights and Trademarks} +\index[general]{Trademarks!Copyrights and } +\index[general]{Copyrights and Trademarks } + +Certain words and/or products are Copyrighted or Trademarked such as Windows +(by Microsoft). 
Since they are numerous, and we are not necessarily aware of +the details of each, we don't try to list them here. However, we acknowledge +all such Copyrights and Trademarks, and if any copyright or trademark holder +wishes a specific acknowledgment, notify us, and we will be happy to add it +where appropriate. diff --git a/docs/manuals/en/concepts/tls.tex b/docs/manuals/en/concepts/tls.tex new file mode 100644 index 00000000..6c90e110 --- /dev/null +++ b/docs/manuals/en/concepts/tls.tex @@ -0,0 +1,315 @@ + +\chapter{Bacula TLS -- Communications Encryption} +\label{CommEncryption} +\index[general]{TLS -- Communications Encryption} +\index[general]{Communications Encryption} +\index[general]{Encryption!Communications} +\index[general]{Encryption!Transport} +\index[general]{Transport Encryption} +\index[general]{TLS} + +Bacula TLS (Transport Layer Security) is built-in network +encryption code to provide secure network transport similar to +that offered by {\bf stunnel} or {\bf ssh}. The data written to +Volumes by the Storage daemon is not encrypted by this code. +For data encryption, please see the \ilink{Data Encryption +Chapter}{DataEncryption} of this manual. + +The Bacula encryption implementations were written by Landon Fuller. + +Supported features of this code include: +\begin{itemize} +\item Client/Server TLS Requirement Negotiation +\item TLSv1 Connections with Server and Client Certificate +Validation +\item Forward Secrecy Support via Diffie-Hellman Ephemeral Keying +\end{itemize} + +This document will refer to both "server" and "client" contexts. These +terms refer to the accepting and initiating peer, respectively. + +Diffie-Hellman anonymous ciphers are not supported by this code. The +use of DH anonymous ciphers increases the code complexity and places +explicit trust upon the two-way CRAM-MD5 implementation. 
CRAM-MD5 is +subject to known plaintext attacks, and it should be considered +considerably less secure than PKI certificate-based authentication. + +Appropriate autoconf macros have been added to detect and use OpenSSL +if enabled on the {\bf ./configure} line with {\bf \verb?--?with-openssl} + +\section{TLS Configuration Directives} +Additional configuration directives have been added to all the daemons +(Director, File daemon, and Storage daemon) as well as the various +different Console programs. +These new directives are defined as follows: + +\begin{description} +\item [TLS Enable = \lt{}yes|no\gt{}] +Enable TLS support. If TLS is not enabled, none of the other TLS directives +have any effect. In other words, even if you set {\bf TLS Require = yes} +you need to have TLS enabled or TLS will not be used. + +\item [TLS Require = \lt{}yes|no\gt{}] +Require TLS connections. This directive is ignored unless {\bf TLS Enable} +is set to {\bf yes}. If TLS is not required, and TLS is enabled, then +Bacula will connect with other daemons either with or without TLS depending +on what the other daemon requests. If TLS is enabled and TLS is required, +then Bacula will refuse any connection that does not use TLS. + +\item [TLS Certificate = \lt{}Filename\gt{}] +The full path and filename of a PEM encoded TLS certificate. It can be +used as either a client or server certificate. PEM stands for Privacy +Enhanced Mail, but in this context refers to how the certificates are +encoded. It is used because PEM files are base64 encoded and hence ASCII +text based rather than binary. They may also contain encrypted +information. + +\item [TLS Key = \lt{}Filename\gt{}] +The full path and filename of a PEM encoded TLS private key. It must +correspond to the TLS certificate. + +\item [TLS Verify Peer = \lt{}yes|no\gt{}] +Verify peer certificate. Instructs server to request and verify the +client's x509 certificate. 
Any client certificate signed by a known-CA +will be accepted unless the TLS Allowed CN configuration directive is used, +in which case the client certificate must correspond to the Allowed +Common Name specified. This directive is valid only for a server +and not in a client context. + +\item [TLS Allowed CN = \lt{}string list\gt{}] +Common name attribute of allowed peer certificates. If this directive is +specified, all server certificates will be verified against this list. This +can be used to ensure that only the CA-approved Director may connect. +This directive may be specified more than once. + +\item [TLS CA Certificate File = \lt{}Filename\gt{}] +The full path and filename specifying a +PEM encoded TLS CA certificate(s). Multiple certificates are +permitted in the file. One of \emph{TLS CA Certificate File} or \emph{TLS +CA Certificate Dir} are required in a server context if \emph{TLS +Verify Peer} (see above) is also specified, and are always required in a client +context. + +\item [TLS CA Certificate Dir = \lt{}Directory\gt{}] +Full path to TLS CA certificate directory. In the current implementation, +certificates must be stored PEM encoded with OpenSSL-compatible hashes, +which is the subject name's hash and an extension of {\bf .0}. +One of \emph{TLS CA Certificate File} or \emph{TLS CA Certificate Dir} are +required in a server context if \emph{TLS Verify Peer} is also specified, +and are always required in a client context. + +\item [TLS DH File = \lt{}Directory\gt{}] +Path to PEM encoded Diffie-Hellman parameter file. If this directive is +specified, DH key exchange will be used for the ephemeral keying, allowing +for forward secrecy of communications. DH key exchange adds an additional +level of security because the key used for encryption/decryption by the +server and the client is computed on each end and thus is never passed over +the network if Diffie-Hellman key exchange is used. 
Even if DH key +exchange is not used, the encryption/decryption key is always passed +encrypted. This directive is only valid within a server context. + +To generate the parameter file, you +may use openssl: + +\begin{verbatim} + openssl dhparam -out dh1024.pem -5 1024 +\end{verbatim} + +\end{description} + +\section{Creating a Self-signed Certificate} +\index[general]{Creating a Self-signed Certificate } +\index[general]{Certificate!Creating a Self-signed } + +You may create a self-signed certificate for use with the Bacula TLS that +will permit you to make it function, but will not allow certificate +validation. The .pem file containing both the certificate and the key +valid for ten years can be made with the following: + +\footnotesize +\begin{verbatim} + openssl req -new -x509 -nodes -out bacula.pem -keyout bacula.pem -days 3650 +\end{verbatim} +\normalsize + +The above script will ask you a number of questions. You may simply answer +each of them by entering a return, or if you wish you may enter your own data. + +Note, however, that self-signed certificates will only work for the +outgoing end of connections. For example, in the case of the Director +making a connection to a File Daemon, the File Daemon may be configured to +allow self-signed certificates, but the certificate used by the +Director must be signed by a certificate that is explicitly trusted on the +File Daemon end. + +This is necessary to prevent ``man in the middle'' attacks from tools such +as \elink{ettercap}{http://ettercap.sourceforge.net/}. Essentially, if the +Director does not verify that it is talking to a trusted remote endpoint, +it can be tricked into talking to a malicious 3rd party who is relaying and +capturing all traffic by presenting its own certificates to the Director +and File Daemons. The only way to prevent this is by using trusted +certificates, so that the man in the middle is incapable of spoofing the +connection using his own. 
+ +To get a trusted certificate (CA or Certificate Authority signed +certificate), you will either need to purchase certificates signed by a +commercial CA or find a friend that has setup his own CA or become a CA +yourself, and thus you can sign all your own certificates. The book +OpenSSL by John Viega, Matt Mesier \& Pravir Chandra from O'Reilly explains +how to do it, or you can read the documentation provided in the Open-source +PKI Book project at Source Forge: \elink{ +http://ospkibook.sourceforge.net/docs/OSPKI-2.4.7/OSPKI-html/ospki-book.htm} +{http://ospkibook.sourceforge.net/docs/OSPKI-2.4.7/OSPKI-html/ospki-book.htm}. +Note, this link may change. + +The program TinyCA has a very nice Graphical User Interface +that allows you to easily setup and maintain your own CA. +TinyCA can be found at +\elink{http://tinyca.sm-zone.net/}{http://tinyca.sm-zone.net/}. + + +\section{Getting a CA Signed Certificate} +\index[general]{Certificate!Getting a CA Signed } +\index[general]{Getting a CA Signed Certificate } + +The process of getting a certificate that is signed by a CA is quite a bit +more complicated. You can purchase one from quite a number of PKI vendors, but +that is not at all necessary for use with Bacula. To get a CA signed +certificate, you will either need to find a friend that has setup his own CA +or to become a CA yourself, and thus you can sign all your own certificates. +The book OpenSSL by John Viega, Matt Mesier \& Pravir Chandra from O'Reilly +explains how to do it, or you can read the documentation provided in the +Open-source PKI Book project at Source Forge: +\elink{ +http://ospkibook.sourceforge.net/docs/OSPKI-2.4.7/OSPKI-html/ospki-book.htm} +{http://ospkibook.sourceforge.net/docs/OSPKI-2.4.7/OSPKI-html/ospki-book.htm}. +Note, this link may change. 
+ +\section{Example TLS Configuration Files} +\index[general]{Example!TLS Configuration Files} +\index[general]{TLS Configuration Files} + +Landon has supplied us with the TLS portions of his configuration +files, which should help you setting up your own. Note, this example +shows the directives necessary for a Director to Storage daemon session. +The technique is the same between the Director and the Client and +for bconsole to the Director. + +{\bf bacula-dir.conf} +\footnotesize +\begin{verbatim} + Director { # define myself + Name = backup1-dir + ... + TLS Enable = yes + TLS Require = yes + TLS Verify Peer = yes + TLS Allowed CN = "bacula@backup1.example.com" + TLS Allowed CN = "administrator@example.com" + TLS CA Certificate File = /usr/local/etc/ssl/ca.pem + # This is a server certificate, used for incoming + # console connections. + TLS Certificate = /usr/local/etc/ssl/backup1/cert.pem + TLS Key = /usr/local/etc/ssl/backup1/key.pem + } + + Storage { + Name = File + Address = backup1.example.com + ... + TLS Require = yes + TLS CA Certificate File = /usr/local/etc/ssl/ca.pem + # This is a client certificate, used by the director to + # connect to the storage daemon + TLS Certificate = /usr/local/etc/ssl/bacula@backup1/cert.pem + TLS Key = /usr/local/etc/ssl/bacula@backup1/key.pem + } + + Client { + Name = backup1-fd + Address = server1.example.com + ... + + TLS Enable = yes + TLS Require = yes + TLS CA Certificate File = /usr/local/etc/ssl/ca.pem + } + +\end{verbatim} +\normalsize + +{\bf bacula-fd.conf} +\footnotesize +\begin{verbatim} + Director { + Name = backup1-dir + ... + TLS Enable = yes + TLS Require = yes + TLS Verify Peer = yes + # Allow only the Director to connect + TLS Allowed CN = "bacula@backup1.example.com" + TLS CA Certificate File = /usr/local/etc/ssl/ca.pem + # This is a server certificate. 
It is used by connecting + # directors to verify the authenticity of this file daemon + TLS Certificate = /usr/local/etc/ssl/server1/cert.pem + TLS Key = /usr/local/etc/ssl/server1/key.pem + } + + FileDaemon { + Name = backup1-fd + ... + # you need these TLS entries so the SD and FD can + # communicate + TLS Enable = yes + TLS Require = yes + + TLS CA Certificate File = /usr/local/etc/ssl/ca.pem + TLS Certificate = /usr/local/etc/ssl/server1/cert.pem + TLS Key = /usr/local/etc/ssl/server1/key.pem +} +\end{verbatim} +\normalsize + +{\bf bacula-sd.conf} +\footnotesize +\begin{verbatim} + Storage { # definition of myself + Name = backup1-sd + ... + # These TLS configuration options are used for incoming + # file daemon connections. Director TLS settings are handled + # below. + TLS Enable = yes + TLS Require = yes + # Peer certificate is not required/requested -- peer validity + # is verified by the storage connection cookie provided to the + # File Daemon by the director. + TLS Verify Peer = no + TLS CA Certificate File = /usr/local/etc/ssl/ca.pem + # This is a server certificate. It is used by connecting + # file daemons to verify the authenticity of this storage daemon + TLS Certificate = /usr/local/etc/ssl/backup1/cert.pem + TLS Key = /usr/local/etc/ssl/backup1/key.pem + } + + # + # List Directors who are permitted to contact Storage daemon + # + Director { + Name = backup1-dir + ... + TLS Enable = yes + TLS Require = yes + # Require the connecting director to provide a certificate + # with the matching CN. + TLS Verify Peer = yes + TLS Allowed CN = "bacula@backup1.example.com" + TLS CA Certificate File = /usr/local/etc/ssl/ca.pem + # This is a server certificate. 
It is used by the connecting + # director to verify the authenticity of this storage daemon + TLS Certificate = /usr/local/etc/ssl/backup1/cert.pem + TLS Key = /usr/local/etc/ssl/backup1/key.pem + } +\end{verbatim} +\normalsize diff --git a/docs/manuals/en/concepts/translate_images.pl b/docs/manuals/en/concepts/translate_images.pl new file mode 100755 index 00000000..c7225118 --- /dev/null +++ b/docs/manuals/en/concepts/translate_images.pl @@ -0,0 +1,185 @@ +#!/usr/bin/perl -w +# +use strict; + +# Used to change the names of the image files generated by latex2html from imgxx.png +# to meaningful names. Provision is made to go either from or to the meaningful names. +# The meaningful names are obtained from a file called imagename_translations, which +# is generated by extensions to latex2html in the make_image_file subroutine in +# bacula.perl. + +# Opens the file imagename_translations and reads the contents into a hash. +# The hash is creaed with the imgxx.png files as the key if processing TO +# meaningful filenames, and with the meaningful filenames as the key if +# processing FROM meaningful filenames. +# Then opens the html file(s) indicated in the command-line arguments and +# changes all image references according to the translations described in the +# above file. Finally, it renames the image files. +# +# Original creation: 3-27-05 by Karl Cunningham. +# Modified 5-21-05 to go FROM and TO meaningful filenames. +# +my $TRANSFILE = "imagename_translations"; +my $path; + +# Loads the contents of $TRANSFILE file into the hash referenced in the first +# argument. The hash is loaded to translate old to new if $direction is 0, +# otherwise it is loaded to translate new to old. In this context, the +# 'old' filename is the meaningful name, and the 'new' filename is the +# imgxx.png filename. It is assumed that the old image is the one that +# latex2html has used as the source to create the imgxx.png filename. 
+# The filename extension is taken from the file +sub read_transfile { + my ($trans,$direction) = @_; + + if (!open IN,"<$path$TRANSFILE") { + print "WARNING: Cannot open image translation file $path$TRANSFILE for reading\n"; + print " Image filename translation aborted\n\n"; + exit 0; + } + + while () { + chomp; + my ($new,$old) = split(/\001/); + + # Old filenames will usually have a leading ./ which we don't need. + $old =~ s/^\.\///; + + # The filename extension of the old filename must be made to match + # the new filename because it indicates the encoding format of the image. + my ($ext) = $new =~ /(\.[^\.]*)$/; + $old =~ s/\.[^\.]*$/$ext/; + if ($direction == 0) { + $trans->{$new} = $old; + } else { + $trans->{$old} = $new; + } + } + close IN; +} + +# Translates the image names in the file given as the first argument, according to +# the translations in the hash that is given as the second argument. +# The file contents are read in entirely into a string, the string is processed, and +# the file contents are then written. No particular care is taken to ensure that the +# file is not lost if a system failure occurs at an inopportune time. It is assumed +# that the html files being processed here can be recreated on demand. +# +# Links to other files are added to the %filelist for processing. That way, +# all linked files will be processed (assuming they are local). +sub translate_html { + my ($filename,$trans,$filelist) = @_; + my ($contents,$out,$this,$img,$dest); + my $cnt = 0; + + # If the filename is an external link ignore it. And drop any file:// from + # the filename. + $filename =~ /^(http|ftp|mailto)\:/ and return 0; + $filename =~ s/^file\:\/\///; + # Load the contents of the html file. + if (!open IF,"<$path$filename") { + print "WARNING: Cannot open $path$filename for reading\n"; + print " Image Filename Translation aborted\n\n"; + exit 0; + } + + while () { + $contents .= $_; + } + close IF; + + # Now do the translation... 
+ # First, search for an image filename. + while ($contents =~ /\<\s*IMG[^\>]*SRC=\"/si) { + $contents = $'; + $out .= $` . $&; + + # The next thing is an image name. Get it and translate it. + $contents =~ /^(.*?)\"/s; + $contents = $'; + $this = $&; + $img = $1; + # If the image is in our list of ones to be translated, do it + # and feed the result to the output. + $cnt += $this =~ s/$img/$trans->{$img}/ if (defined($trans->{$img})); + $out .= $this; + } + $out .= $contents; + + # Now send the translated text to the html file, overwriting what's there. + open OF,">$path$filename" or die "Cannot open $path$filename for writing\n"; + print OF $out; + close OF; + + # Now look for any links to other files and add them to the list of files to do. + while ($out =~ /\<\s*A[^\>]*HREF=\"(.*?)\"/si) { + $out = $'; + $dest = $1; + # Drop an # and anything after it. + $dest =~ s/\#.*//; + $filelist->{$dest} = '' if $dest; + } + return $cnt; +} + +# REnames the image files spefified in the %translate hash. +sub rename_images { + my $translate = shift; + my ($response); + + foreach (keys(%$translate)) { + if (! $translate->{$_}) { + print " WARNING: No destination Filename for $_\n"; + } else { + $response = `mv -f $path$_ $path$translate->{$_} 2>&1`; + $response and print "ERROR from system $response\n"; + } + } +} + +################################################# +############# MAIN ############################# +################################################ + +# %filelist starts out with keys from the @ARGV list. As files are processed, +# any links to other files are added to the %filelist. A hash of processed +# files is kept so we don't do any twice. 
+ +# The first argument must be either --to_meaningful_names or --from_meaningful_names + +my (%translate,$search_regex,%filelist,%completed,$thisfile); +my ($cnt,$direction); + +my $arg0 = shift(@ARGV); +$arg0 =~ /^(--to_meaningful_names|--from_meaningful_names)$/ or + die "ERROR: First argument must be either \'--to_meaningful_names\' or \'--from_meaningful_names\'\n"; + +$direction = ($arg0 eq '--to_meaningful_names') ? 0 : 1; + +(@ARGV) or die "ERROR: Filename(s) to process must be given as arguments\n"; + +# Use the first argument to get the path to the file of translations. +my $tmp = $ARGV[0]; +($path) = $tmp =~ /(.*\/)/; +$path = '' unless $path; + +read_transfile(\%translate,$direction); + +foreach (@ARGV) { + # Strip the path from the filename, and use it later on. + if (s/(.*\/)//) { + $path = $1; + } else { + $path = ''; + } + $filelist{$_} = ''; + + while ($thisfile = (keys(%filelist))[0]) { + $cnt += translate_html($thisfile,\%translate,\%filelist) if (!exists($completed{$thisfile})); + delete($filelist{$thisfile}); + $completed{$thisfile} = ''; + } + print "translate_images.pl: $cnt image filenames translated ",($direction)?"from":"to"," meaningful names\n"; +} + +rename_images(\%translate); diff --git a/docs/manuals/en/concepts/tutorial.tex b/docs/manuals/en/concepts/tutorial.tex new file mode 100644 index 00000000..7062a978 --- /dev/null +++ b/docs/manuals/en/concepts/tutorial.tex @@ -0,0 +1,1373 @@ +%% +%% + +\chapter{A Brief Tutorial} +\label{TutorialChapter} +\index[general]{Brief Tutorial } +\index[general]{Tutorial!Brief } + +This chapter will guide you through running Bacula. To do so, we assume you +have installed Bacula, possibly in a single file as shown in the previous +chapter, in which case, you can run Bacula as non-root for these tests. +However, we assume that you have not changed the .conf files. If you have +modified the .conf files, please go back and uninstall Bacula, then reinstall +it, but do not make any changes. 
The examples in this chapter use the default +configuration files, and will write the volumes to disk in your {\bf /tmp} +directory, in addition, the data backed up will be the source directory where +you built Bacula. As a consequence, you can run all the Bacula daemons for +these tests as non-root. Please note, in production, your File daemon(s) must +run as root. See the Security chapter for more information on this subject. + +% TODO: use crossreferences above +% TODO: add a section here + +The general flow of running Bacula is: + +\begin{enumerate} +\item cd \lt{}install-directory\gt{} +\item Start the Database (if using MySQL or PostgreSQL) +\item Start the Daemons with {\bf ./bacula start} +\item Start the Console program to interact with the Director +\item Run a job +\item When the Volume fills, unmount the Volume, if it is a tape, label a new + one, and continue running. In this chapter, we will write only to disk files + so you won't need to worry about tapes for the moment. +\item Test recovering some files from the Volume just written to ensure the + backup is good and that you know how to recover. Better test before disaster + strikes +\item Add a second client. + \end{enumerate} + +Each of these steps is described in more detail below. + +\section{Before Running Bacula} +\index[general]{Bacula!Before Running } +\index[general]{Before Running Bacula } + +% TODO: some of this content is already covered once or twice critical +% TODO: or quickstart. Consolidate! + +Before running Bacula for the first time in production, we recommend that you +run the {\bf test} command in the {\bf btape} program as described in the +\ilink{Utility Program Chapter}{btape} of this manual. This will +help ensure that Bacula functions correctly with your tape drive. If you have +a modern HP, Sony, or Quantum DDS or DLT tape drive running on Linux or +Solaris, you can probably skip this test as Bacula is well tested with these +drives and systems. 
For all other cases, you are {\bf strongly} encouraged to +run the test before continuing. {\bf btape} also has a {\bf fill} command that +attempts to duplicate what Bacula does when filling a tape and writing on the +next tape. You should consider trying this command as well, but be forewarned, +it can take hours (about four hours on my drive) to fill a large capacity tape. + +\section{Starting the Database} +\label{StartDB} +\index[general]{Starting the Database } +\index[general]{Database!Starting the } + +If you are using MySQL or PostgreSQL as the Bacula database, you should start +it before you attempt to run a job to avoid getting error messages from Bacula +when it starts. The scripts {\bf startmysql} and {\bf stopmysql} are what I +(Kern) use to start and stop my local MySQL. Note, if you are using SQLite, +you will not want to use {\bf startmysql} or {\bf stopmysql}. If you are +running this in production, you will probably want to find some way to +automatically start MySQL or PostgreSQL after each system reboot. + +If you are using SQLite (i.e. you specified the {\bf \verb:--:with-sqlite=xxx} option +on the {\bf ./configure} command), you need do nothing. SQLite is automatically +started by {\bf Bacula}. + +\section{Starting the Daemons} +\label{StartDaemon} +\index[general]{Starting the Daemons } +\index[general]{Daemons!Starting the } + +Assuming you have built from source or have installed the rpms, +to start the three daemons, from your installation directory, simply enter: + +./bacula start + +The {\bf bacula} script starts the Storage daemon, the File daemon, and the +Director daemon, which all normally run as daemons in the background. 
If you +are using the autostart feature of Bacula, your daemons will either be +automatically started on reboot, or you can control them individually with the +files {\bf bacula-dir}, {\bf bacula-fd}, and {\bf bacula-sd}, which are +usually located in {\bf /etc/init.d}, though the actual location is system +dependent. +Some distributions may do this differently. + +Note, on Windows, currently only the File daemon is ported, and it must be +started differently. Please see the +\ilink{Windows Version of Bacula}{Win32Chapter} Chapter of this +manual. + +The rpm packages configure the daemons to run as user=root and group=bacula. +The rpm installation also creates the group bacula if it does not exist on the +system. Any users that you add to the group bacula will have access to files +created by the daemons. To disable or alter this behavior edit the daemon +startup scripts: + +\begin{itemize} +\item /etc/bacula/bacula +\item /etc/init.d/bacula-dir +\item /etc/init.d/bacula-sd +\item /etc/init.d/bacula-fd + \end{itemize} + +and then restart as noted above. + +The +\ilink{installation chapter}{InstallChapter} of this manual +explains how you can install scripts that will automatically restart the +daemons when the system starts. + +\section{Using the Director to Query and Start Jobs} +\index[general]{Jobs!Querying or Starting Jobs} +\index[general]{Querying or starting Jobs} +% TODO: section name is too long; maybe use "Using the Console Program" ?? + +To communicate with the director and to query the state of Bacula or run jobs, +from the top level directory, simply enter: + +./bconsole + +Alternatively to running the command line console, if you have +Qt4 installed and used the {\bf \verb:--:enable-bat} on the configure command, +you may use the Bacula Administration Tool ({\bf bat}): + +./bat + +Which has a graphical interface, and many more features than bconsole. 
+ +Two other possibilities are to run the GNOME console +{\bf bgnome-console} or the wxWidgets program {\bf bwx-console}. + +For simplicity, here we will describe only the {\bf ./bconsole} program. Most +of what is described here applies equally well to {\bf ./bat}, +{\bf ./bgnome-console}, and to {\bf bwx-console}. + +The {\bf ./bconsole} runs the Bacula Console program, which connects to the +Director daemon. Since Bacula is a network program, you can run the Console +program anywhere on your network. Most frequently, however, one runs it on the +same machine as the Director. Normally, the Console program will print +something similar to the following: + +\footnotesize +\begin{verbatim} +[kern@polymatou bin]$ ./bconsole +Connecting to Director lpmatou:9101 +1000 OK: HeadMan Version: 2.1.8 (14 May 2007) +* +\end{verbatim} +\normalsize + +the asterisk is the console command prompt. + +Type {\bf help} to see a list of available commands: + +\footnotesize +\begin{verbatim} +*help + Command Description + ======= =========== + add add media to a pool + autodisplay autodisplay [on|off] -- console messages + automount automount [on|off] -- after label + cancel cancel [ | ] -- cancel a job + create create DB Pool from resource + delete delete [pool= | media volume=] + disable disable -- disable a job + enable enable -- enable a job + estimate performs FileSet estimate, listing gives full listing + exit exit = quit + gui gui [on|off] -- non-interactive gui mode + help print this command + list list [pools | jobs | jobtotals | media | +files ]; from catalog + label label a tape + llist full or long list like list command + memory print current memory usage + messages messages + mount mount + prune prune expired records from catalog + purge purge records from catalog + python python control commands + quit quit + query query catalog + restore restore files + relabel relabel a tape + release release + reload reload conf file + run run + status status [storage | client]= + 
setdebug sets debug level + setip sets new client address -- if authorized + show show (resource records) [jobs | pools | ... | all] + sqlquery use SQL to query catalog + time print current time + trace turn on/off trace to file + unmount unmount + umount umount for old-time Unix guys + update update Volume, Pool or slots + use use catalog xxx + var does variable expansion + version print Director version + wait wait until no jobs are running [ | | ] +* +\end{verbatim} +\normalsize + +Details of the console program's commands are explained in the +\ilink{Console Chapter}{_ConsoleChapter} of this manual. + +\section{Running a Job} +\label{Running} +\index[general]{Job!Running a } +\index[general]{Running a Job } + +At this point, we assume you have done the following: + +\begin{itemize} +\item Configured Bacula with {\bf ./configure \verb:--:your-options} +\item Built Bacula using {\bf make} +\item Installed Bacula using {\bf make install} +\item Have created your database with, for example, {\bf + ./create\_sqlite\_database} +\item Have created the Bacula database tables with, {\bf + ./make\_bacula\_tables} +\item Have possibly edited your {\bf bacula-dir.conf} file to personalize it + a bit. BE CAREFUL! if you change the Director's name or password, you will + need to make similar modifications in the other .conf files. For the moment + it is probably better to make no changes. +\item You have started Bacula with {\bf ./bacula start} +\item You have invoked the Console program with {\bf ./bconsole} +\end{itemize} + +Furthermore, we assume for the moment you are using the default configuration +files. 
+ +At this point, enter the following command: + +\footnotesize +\begin{verbatim} +show filesets +\end{verbatim} +\normalsize + +and you should get something similar to: + +\footnotesize +\begin{verbatim} +FileSet: name=Full Set + O M + N + I /home/kern/bacula/regress/build + N + E /proc + E /tmp + E /.journal + E /.fsck + N +FileSet: name=Catalog + O M + N + I /home/kern/bacula/regress/working/bacula.sql + N +\end{verbatim} +\normalsize + +This is a pre-defined {\bf FileSet} that will backup the Bacula source +directory. The actual directory names printed should correspond to your system +configuration. For testing purposes, we have chosen a directory of moderate +size (about 40 Megabytes) and complexity without being too big. The FileSet +{\bf Catalog} is used for backing up Bacula's catalog and is not of interest +to us for the moment. The {\bf I} entries are the files or directories that +will be included in the backup and the {\bf E} are those that will be +excluded, and the {\bf O} entries are the options specified for +the FileSet. You can change what is backed up by editing {\bf bacula-dir.conf} +and changing the {\bf File =} line in the {\bf FileSet} resource. + +Now is the time to run your first backup job. We are going to backup your +Bacula source directory to a File Volume in your {\bf /tmp} directory just to +show you how easy it is. Now enter: + +\footnotesize +\begin{verbatim} +status dir +\end{verbatim} +\normalsize + +and you should get the following output: + +\footnotesize +\begin{verbatim} +rufus-dir Version: 1.30 (28 April 2003) +Daemon started 28-Apr-2003 14:03, 0 Jobs run. +Console connected at 28-Apr-2003 14:03 +No jobs are running. +Level Type Scheduled Name +================================================================= +Incremental Backup 29-Apr-2003 01:05 Client1 +Full Backup 29-Apr-2003 01:10 BackupCatalog +==== +\end{verbatim} +\normalsize + +where the times and the Director's name will be different according to your +setup. 
This shows that an Incremental job is scheduled to run for the Job {\bf +Client1} at 1:05am and that at 1:10, a {\bf BackupCatalog} is scheduled to +run. Note, you should probably change the name {\bf Client1} to be the name of +your machine, if not, when you add additional clients, it will be very +confusing. For my real machine, I use {\bf Rufus} rather than {\bf Client1} as +in this example. + +Now enter: + +\footnotesize +\begin{verbatim} +status client +\end{verbatim} +\normalsize + +and you should get something like: + +\footnotesize +\begin{verbatim} +The defined Client resources are: + 1: rufus-fd +Item 1 selected automatically. +Connecting to Client rufus-fd at rufus:8102 +rufus-fd Version: 1.30 (28 April 2003) +Daemon started 28-Apr-2003 14:03, 0 Jobs run. +Director connected at: 28-Apr-2003 14:14 +No jobs running. +==== +\end{verbatim} +\normalsize + +In this case, the client is named {\bf rufus-fd} your name will be different, +but the line beginning with {\bf rufus-fd Version ...} is printed by your File +daemon, so we are now sure it is up and running. + +Finally do the same for your Storage daemon with: + +\footnotesize +\begin{verbatim} +status storage +\end{verbatim} +\normalsize + +and you should get: + +\footnotesize +\begin{verbatim} +The defined Storage resources are: + 1: File +Item 1 selected automatically. +Connecting to Storage daemon File at rufus:8103 +rufus-sd Version: 1.30 (28 April 2003) +Daemon started 28-Apr-2003 14:03, 0 Jobs run. +Device /tmp is not open. +No jobs running. +==== +\end{verbatim} +\normalsize + +You will notice that the default Storage daemon device is named {\bf File} and +that it will use device {\bf /tmp}, which is not currently open. + +Now, let's actually run a job with: + +\footnotesize +\begin{verbatim} +run +\end{verbatim} +\normalsize + +you should get the following output: + +\footnotesize +\begin{verbatim} +Using default Catalog name=MyCatalog DB=bacula +A job name must be specified. 
+The defined Job resources are: + 1: Client1 + 2: BackupCatalog + 3: RestoreFiles +Select Job resource (1-3): +\end{verbatim} +\normalsize + +Here, Bacula has listed the three different Jobs that you can run, and you +should choose number {\bf 1} and type enter, at which point you will get: + +\footnotesize +\begin{verbatim} +Run Backup job +JobName: Client1 +FileSet: Full Set +Level: Incremental +Client: rufus-fd +Storage: File +Pool: Default +When: 2003-04-28 14:18:57 +OK to run? (yes/mod/no): +\end{verbatim} +\normalsize + +At this point, take some time to look carefully at what is printed and +understand it. It is asking you if it is OK to run a job named {\bf Client1} +with FileSet {\bf Full Set} (we listed above) as an Incremental job on your +Client (your client name will be different), and to use Storage {\bf File} and +Pool {\bf Default}, and finally, it wants to run it now (the current time +should be displayed by your console). + +Here we have the choice to run ({\bf yes}), to modify one or more of the above +parameters ({\bf mod}), or to not run the job ({\bf no}). Please enter {\bf +yes}, at which point you should immediately get the command prompt (an +asterisk). If you wait a few seconds, then enter the command {\bf messages} +you will get back something like: + +\footnotesize +\begin{verbatim} +28-Apr-2003 14:22 rufus-dir: Last FULL backup time not found. Doing + FULL backup. +28-Apr-2003 14:22 rufus-dir: Start Backup JobId 1, + Job=Client1.2003-04-28_14.22.33 +28-Apr-2003 14:22 rufus-sd: Job Client1.2003-04-28_14.22.33 waiting. + Cannot find any appendable volumes. +Please use the "label" command to create a new Volume for: + Storage: FileStorage + Media type: File + Pool: Default +\end{verbatim} +\normalsize + +The first message, indicates that no previous Full backup was done, so Bacula +is upgrading our Incremental job to a Full backup (this is normal). 
The second +message indicates that the job started with JobId 1, and the third message +tells us that Bacula cannot find any Volumes in the Pool for writing the +output. This is normal because we have not yet created (labeled) any Volumes. +Bacula indicates to you all the details of the volume it needs. + +At this point, the job is BLOCKED waiting for a Volume. You can check this if +you want by doing a {\bf status dir}. In order to continue, we must create a +Volume that Bacula can write on. We do so with: + +\footnotesize +\begin{verbatim} +label +\end{verbatim} +\normalsize + +and Bacula will print: + +\footnotesize +\begin{verbatim} +The defined Storage resources are: + 1: File +Item 1 selected automatically. +Enter new Volume name: +\end{verbatim} +\normalsize + +at which point, you should enter some name beginning with a letter and +containing only letters and numbers (periods, hyphens, and underscores are also +permitted). For example, enter {\bf TestVolume001}, and you should get back: + +\footnotesize +\begin{verbatim} +Defined Pools: + 1: Default +Item 1 selected automatically. +Connecting to Storage daemon File at rufus:8103 ... +Sending label command for Volume "TestVolume001" Slot 0 ... +3000 OK label. Volume=TestVolume001 Device=/tmp +Catalog record for Volume "TestVolume001", Slot 0 successfully created. +Requesting mount FileStorage ... +3001 OK mount. 
Device=/tmp +\end{verbatim} +\normalsize + +Finally, enter {\bf messages} and you should get something like: + +\footnotesize +\begin{verbatim} +28-Apr-2003 14:30 rufus-sd: Wrote label to prelabeled Volume + "TestVolume001" on device /tmp +28-Apr-2003 14:30 rufus-dir: Bacula 1.30 (28Apr03): 28-Apr-2003 14:30 +JobId: 1 +Job: Client1.2003-04-28_14.22.33 +FileSet: Full Set +Backup Level: Full +Client: rufus-fd +Start time: 28-Apr-2003 14:22 +End time: 28-Apr-2003 14:30 +Files Written: 1,444 +Bytes Written: 38,988,877 +Rate: 81.2 KB/s +Software Compression: None +Volume names(s): TestVolume001 +Volume Session Id: 1 +Volume Session Time: 1051531381 +Last Volume Bytes: 39,072,359 +FD termination status: OK +SD termination status: OK +Termination: Backup OK +28-Apr-2003 14:30 rufus-dir: Begin pruning Jobs. +28-Apr-2003 14:30 rufus-dir: No Jobs found to prune. +28-Apr-2003 14:30 rufus-dir: Begin pruning Files. +28-Apr-2003 14:30 rufus-dir: No Files found to prune. +28-Apr-2003 14:30 rufus-dir: End auto prune. +\end{verbatim} +\normalsize + +If you don't see the output immediately, you can keep entering {\bf messages} +until the job terminates, or you can enter, {\bf autodisplay on} and your +messages will automatically be displayed as soon as they are ready. + +If you do an {\bf ls -l} of your {\bf /tmp} directory, you will see that you +have the following item: + +\footnotesize +\begin{verbatim} +-rw-r----- 1 kern kern 39072153 Apr 28 14:30 TestVolume001 +\end{verbatim} +\normalsize + +This is the file Volume that you just wrote and it contains all the data of +the job just run. If you run additional jobs, they will be appended to this +Volume unless you specify otherwise. + +You might ask yourself if you have to label all the Volumes that Bacula is +going to use. The answer for disk Volumes, like the one we used, is no. It is +possible to have Bacula automatically label volumes. For tape Volumes, you +will most likely have to label each of the Volumes you want to use. 
+ +If you would like to stop here, you can simply enter {\bf quit} in the Console +program, and you can stop Bacula with {\bf ./bacula stop}. To clean up, simply +delete the file {\bf /tmp/TestVolume001}, and you should also re-initialize +your database using: + +\footnotesize +\begin{verbatim} +./drop_bacula_tables +./make_bacula_tables +\end{verbatim} +\normalsize + +Please note that this will erase all information about the previous jobs that +have run, and that you might want to do it now while testing but that normally +you will not want to re-initialize your database. + +If you would like to try restoring the files that you just backed up, read the +following section. +\label{restoring} + +\section{Restoring Your Files} +\index[general]{Files!Restoring Your } +\index[general]{Restoring Your Files } + +If you have run the default configuration and the save of the Bacula source +code as demonstrated above, you can restore the backed up files in the Console +program by entering: + +\footnotesize +\begin{verbatim} +restore all +\end{verbatim} +\normalsize + +where you will get: + +\footnotesize +\begin{verbatim} +First you select one or more JobIds that contain files +to be restored. You will be presented several methods +of specifying the JobIds. Then you will be allowed to +select which files from those JobIds are to be restored. 
+ +To select the JobIds, you have the following choices: + 1: List last 20 Jobs run + 2: List Jobs where a given File is saved + 3: Enter list of comma separated JobIds to select + 4: Enter SQL list command + 5: Select the most recent backup for a client + 6: Select backup for a client before a specified time + 7: Enter a list of files to restore + 8: Enter a list of files to restore before a specified time + 9: Find the JobIds of the most recent backup for a client + 10: Find the JobIds for a backup for a client before a specified time + 11: Enter a list of directories to restore for found JobIds + 12: Cancel +Select item: (1-12): +\end{verbatim} +\normalsize + +As you can see, there are a number of options, but for the current +demonstration, please enter {\bf 5} to do a restore of the last backup you +did, and you will get the following output: + +\footnotesize +\begin{verbatim} +Defined Clients: + 1: rufus-fd +Item 1 selected automatically. +The defined FileSet resources are: + 1: 1 Full Set 2003-04-28 14:22:33 +Item 1 selected automatically. ++-------+-------+----------+---------------------+---------------+ +| JobId | Level | JobFiles | StartTime | VolumeName | ++-------+-------+----------+---------------------+---------------+ +| 1 | F | 1444 | 2003-04-28 14:22:33 | TestVolume002 | ++-------+-------+----------+---------------------+---------------+ +You have selected the following JobId: 1 +Building directory tree for JobId 1 ... +1 Job inserted into the tree and marked for extraction. +The defined Storage resources are: + 1: File +Item 1 selected automatically. +You are now entering file selection mode where you add and +remove files to be restored. All files are initially added. +Enter "done" to leave this mode. +cwd is: / +$ +\end{verbatim} +\normalsize + +where I have truncated the listing on the right side to make it more readable. 
+As you can see by starting at the top of the listing, Bacula knows what client +you have, and since there was only one, it selected it automatically, likewise +for the FileSet. Then Bacula produced a listing containing all the jobs that +form the current backup, in this case, there is only one, and the Storage +daemon was also automatically chosen. Bacula then took all the files that were +in Job number 1 and entered them into a {\bf directory tree} (a sort of in +memory representation of your filesystem). At this point, you can use the {\bf +cd} and {\bf ls} or {\bf dir} commands to walk up and down the directory tree +and view what files will be restored. For example, if I enter {\bf cd +/home/kern/bacula/bacula-1.30} and then enter {\bf dir} I will get a listing +of all the files in the Bacula source directory. On your system, the path will +be somewhat different. For more information on this, please refer to the +\ilink{Restore Command Chapter}{RestoreChapter} of this manual for +more details. + +To exit this mode, simply enter: + +\footnotesize +\begin{verbatim} +done +\end{verbatim} +\normalsize + +and you will get the following output: + +\footnotesize +\begin{verbatim} +Bootstrap records written to + /home/kern/bacula/testbin/working/restore.bsr +The restore job will require the following Volumes: + + TestVolume001 +1444 files selected to restore. +Run Restore job +JobName: RestoreFiles +Bootstrap: /home/kern/bacula/testbin/working/restore.bsr +Where: /tmp/bacula-restores +Replace: always +FileSet: Full Set +Backup Client: rufus-fd +Restore Client: rufus-fd +Storage: File +JobId: *None* +When: 2005-04-28 14:53:54 +OK to run? (yes/mod/no): +\end{verbatim} +\normalsize + +If you answer {\bf yes} your files will be restored to {\bf +/tmp/bacula-restores}. If you want to restore the files to their original +locations, you must use the {\bf mod} option and explicitly set {\bf Where:} +to nothing (or to /). 
We recommend you go ahead and answer {\bf yes} and after +a brief moment, enter {\bf messages}, at which point you should get a listing +of all the files that were restored as well as a summary of the job that looks +similar to this: + +\footnotesize +\begin{verbatim} +28-Apr-2005 14:56 rufus-dir: Bacula 2.1.8 (08May07): 08-May-2007 14:56:06 +Build OS: i686-pc-linux-gnu suse 10.2 +JobId: 2 +Job: RestoreFiles.2007-05-08_14.56.06 +Restore Client: rufus-fd +Start time: 08-May-2007 14:56 +End time: 08-May-2007 14:56 +Files Restored: 1,444 +Bytes Restored: 38,816,381 +Rate: 9704.1 KB/s +FD Errors: 0 +FD termination status: OK +SD termination status: OK +Termination: Restore OK +08-May-2007 14:56 rufus-dir: Begin pruning Jobs. +08-May-2007 14:56 rufus-dir: No Jobs found to prune. +08-May-2007 14:56 rufus-dir: Begin pruning Files. +08-May-2007 14:56 rufus-dir: No Files found to prune. +08-May-2007 14:56 rufus-dir: End auto prune. +\end{verbatim} +\normalsize + +After exiting the Console program, you can examine the files in {\bf +/tmp/bacula-restores}, which will contain a small directory tree with all the +files. Be sure to clean up at the end with: + +\footnotesize +\begin{verbatim} +rm -rf /tmp/bacula-restores +\end{verbatim} +\normalsize + +\section{Quitting the Console Program} +\index[general]{Program!Quitting the Console } +\index[general]{Quitting the Console Program } + +Simply enter the command {\bf quit}. +\label{SecondClient} + +\section{Adding a Second Client} +\index[general]{Client!Adding a Second } +\index[general]{Adding a Second Client } + +If you have gotten the example shown above to work on your system, you may be +ready to add a second Client (File daemon). That is you have a second machine +that you would like backed up. The only part you need installed on the other +machine is the binary {\bf bacula-fd} (or {\bf bacula-fd.exe} for Windows) and +its configuration file {\bf bacula-fd.conf}. 
You can start with the same {\bf +bacula-fd.conf} file that you are currently using and make one minor +modification to it to create the conf file for your second client. Change the +File daemon name from whatever was configured, {\bf rufus-fd} in the example +above, but your system will have a different name. The best is to change it to +the name of your second machine. For example: + +\footnotesize +\begin{verbatim} +... +# +# "Global" File daemon configuration specifications +# +FileDaemon { # this is me + Name = rufus-fd + FDport = 9102 # where we listen for the director + WorkingDirectory = /home/kern/bacula/working + Pid Directory = /var/run +} +... +\end{verbatim} +\normalsize + +would become: + +\footnotesize +\begin{verbatim} +... +# +# "Global" File daemon configuration specifications +# +FileDaemon { # this is me + Name = matou-fd + FDport = 9102 # where we listen for the director + WorkingDirectory = /home/kern/bacula/working + Pid Directory = /var/run +} +... +\end{verbatim} +\normalsize + +where I show just a portion of the file and have changed {\bf rufus-fd} to +{\bf matou-fd}. The names you use are your choice. For the moment, I recommend +you change nothing else. Later, you will want to change the password. + +Now you should install that change on your second machine. Then you need to +make some additions to your Director's configuration file to define the new +File daemon or Client. Starting from our original example which should be +installed on your system, you should add the following lines (essentially +copies of the existing data but with the names changed) to your Director's +configuration file {\bf bacula-dir.conf}. 
+ +\footnotesize +\begin{verbatim} +# +# Define the main nightly save backup job +# By default, this job will back up to disk in /tmp +Job { + Name = "Matou" + Type = Backup + Client = matou-fd + FileSet = "Full Set" + Schedule = "WeeklyCycle" + Storage = File + Messages = Standard + Pool = Default + Write Bootstrap = "/home/kern/bacula/working/matou.bsr" +} +# Client (File Services) to backup +Client { + Name = matou-fd + Address = matou + FDPort = 9102 + Catalog = MyCatalog + Password = "xxxxx" # password for + File Retention = 30d # 30 days + Job Retention = 180d # six months + AutoPrune = yes # Prune expired Jobs/Files +} +\end{verbatim} +\normalsize + +Then make sure that the Address parameter in the Storage resource is set to +the fully qualified domain name and not to something like "localhost". The +address specified is sent to the File daemon (client) and it must be a fully +qualified domain name. If you pass something like "localhost" it will not +resolve correctly and will result in a time out when the File daemon fails to +connect to the Storage daemon. + +That is all that is necessary. I copied the existing resource to create a +second Job (Matou) to backup the second client (matou-fd). It has the name +{\bf Matou}, the Client is named {\bf matou-fd}, and the bootstrap file name +is changed, but everything else is the same. This means that Matou will be +backed up on the same schedule using the same set of tapes. You may want to +change that later, but for now, let's keep it simple. + +The second change was to add a new Client resource that defines {\bf matou-fd} +and has the correct address {\bf matou}, but in real life, you may need a +fully qualified domain name or an IP address. I also kept the password the +same (shown as xxxxx for the example). + +At this point, if you stop Bacula and restart it, and start the Client on the +other machine, everything will be ready, and the prompts that you saw above +will now include the second machine. 
+ +To make this a real production installation, you will possibly want to use +different Pool, or a different schedule. It is up to you to customize. In any +case, you should change the password in both the Director's file and the +Client's file for additional security. + +For some important tips on changing names and passwords, and a diagram of what +names and passwords must match, please see +\ilink{Authorization Errors}{AuthorizationErrors} in the FAQ chapter +of this manual. + +\section{When The Tape Fills} +\label{FullTape} +\index[general]{Fills!When The Tape } +\index[general]{When The Tape Fills } + +If you have scheduled your job, typically nightly, there will come a time when +the tape fills up and {\bf Bacula} cannot continue. In this case, Bacula will +send you a message similar to the following: + +\footnotesize +\begin{verbatim} +rufus-sd: block.c:337 === Write error errno=28: ERR=No space left + on device +\end{verbatim} +\normalsize + +This indicates that Bacula got a write error because the tape is full. Bacula +will then search the Pool specified for your Job looking for an appendable +volume. In the best of all cases, you will have properly set your Retention +Periods and you will have all your tapes marked to be Recycled, and {\bf +Bacula} will automatically recycle the tapes in your pool requesting and +overwriting old Volumes. For more information on recycling, please see the +\ilink{Recycling chapter}{RecyclingChapter} of this manual. If you +find that your Volumes were not properly recycled (usually because of a +configuration error), please see the +\ilink{Manually Recycling Volumes}{manualrecycling} section of +the Recycling chapter. 
+ +If like me, you have a very large set of Volumes and you label them with the +date the Volume was first written, or you have not set up your Retention +periods, Bacula will not find a tape in the pool, and it will send you a +message similar to the following: + +\footnotesize +\begin{verbatim} +rufus-sd: Job kernsave.2002-09-19.10:50:48 waiting. Cannot find any + appendable volumes. +Please use the "label" command to create a new Volume for: + Storage: SDT-10000 + Media type: DDS-4 + Pool: Default +\end{verbatim} +\normalsize + +Until you create a new Volume, this message will be repeated an hour later, +then two hours later, and so on doubling the interval each time up to a +maximum interval of one day. + +The obvious question at this point is: What do I do now? + +The answer is simple: first, using the Console program, close the tape drive +using the {\bf unmount} command. If you only have a single drive, it will be +automatically selected, otherwise, make sure you release the one specified on +the message (in this case {\bf SDT-10000}). + +Next, you remove the tape from the drive and insert a new blank tape. Note, on +some older tape drives, you may need to write an end of file mark ({\bf mt \ +-f \ /dev/nst0 \ weof}) to prevent the drive from running away when Bacula +attempts to read the label. + +Finally, you use the {\bf label} command in the Console to write a label to +the new Volume. The {\bf label} command will contact the Storage daemon to +write the software label, if it is successful, it will add the new Volume to +the Pool, then issue a {\bf mount} command to the Storage daemon. See the +previous sections of this chapter for more details on labeling tapes. + +The result is that Bacula will continue the previous Job writing the backup to +the new Volume. + +If you have a Pool of volumes and Bacula is cycling through them, instead of +the above message "Cannot find any appendable volumes.", Bacula may ask you +to mount a specific volume. 
In that case, you should attempt to do just that. +If you do not have the volume any more (for any of a number of reasons), you +can simply mount another volume from the same Pool, providing it is +appendable, and Bacula will use it. You can use the {\bf list volumes} command +in the console program to determine which volumes are appendable and which are +not. + +If like me, you have your Volume retention periods set correctly, but you have +no more free Volumes, you can relabel and reuse a Volume as follows: + +\begin{itemize} +\item Do a {\bf list volumes} in the Console and select the oldest Volume for + relabeling. +\item If you have setup your Retention periods correctly, the Volume should + have VolStatus {\bf Purged}. +\item If the VolStatus is not set to Purged, you will need to purge the + database of Jobs that are written on that Volume. Do so by using the command + {\bf purge jobs volume} in the Console. If you have multiple Pools, you will +be prompted for the Pool then enter the VolumeName (or MediaId) when +requested. +\item Then simply use the {\bf relabel} command to relabel the Volume. + \end{itemize} + +To manually relabel the Volume use the following additional steps: + +\begin{itemize} +\item To delete the Volume from the catalog use the {\bf delete volume} + command in the Console and select the VolumeName (or MediaId) to be deleted. + +\item Use the {\bf unmount} command in the Console to unmount the old tape. +\item Physically relabel the old Volume that you deleted so that it can be + reused. +\item Insert the old Volume in the tape drive. +\item From a command line do: {\bf mt \ -f \ /dev/st0 \ rewind} and {\bf mt \ + -f \ /dev/st0 \ weof}, where you need to use the proper tape drive name for + your system in place of {\bf /dev/st0}. +\item Use the {\bf label} command in the Console to write a new Bacula label + on your tape. 
+\item Use the {\bf mount} command in the Console if it is not automatically + done, so that Bacula starts using your newly labeled tape. + \end{itemize} + +\section{Other Useful Console Commands} +\index[general]{Commands!Other Useful Console } +\index[general]{Other Useful Console Commands } + +\begin{description} + +\item [status dir] + \index[console]{status dir } + Print a status of all running jobs and jobs scheduled in the next 24 hours. + +\item [status] + \index[console]{status } + The console program will prompt you to select a daemon type, then will +request the daemon's status. + +\item [status jobid=nn] + \index[console]{status jobid } + Print a status of JobId nn if it is running. The Storage daemon is contacted +and requested to print a current status of the job as well. + +\item [list pools] + \index[console]{list pools } + List the pools defined in the Catalog (normally only Default is used). + +\item [list media] + \index[console]{list media } + Lists all the media defined in the Catalog. + +\item [list jobs] + \index[console]{list jobs } + Lists all jobs in the Catalog that have run. + +\item [list jobid=nn] + \index[console]{list jobid } + Lists JobId nn from the Catalog. + +\item [list jobtotals] + \index[console]{list jobtotals } + Lists totals for all jobs in the Catalog. + +\item [list files jobid=nn] + \index[console]{list files jobid } + List the files that were saved for JobId nn. + +\item [list jobmedia] + \index[console]{list jobmedia } + List the media information for each Job run. + +\item [messages] + \index[console]{messages } + Prints any messages that have been directed to the console. + +\item [unmount storage=storage-name] + \index[console]{unmount storage } + Unmounts the drive associated with the storage device with the name {\bf +storage-name} if the drive is not currently being used. This command is used +if you wish Bacula to free the drive so that you can use it to label a tape. 
+ + +\item [mount storage=storage-name] + \index[sd]{mount storage } + Causes the drive associated with the storage device to be mounted again. When +Bacula reaches the end of a volume and requests you to mount a new volume, +you must issue this command after you have placed the new volume in the +drive. In effect, it is the signal needed by Bacula to know to start reading +or writing the new volume. + +\item [quit] + \index[sd]{quit } + Exit or quit the console program. +\end{description} + +Most of the commands given above, with the exception of {\bf list}, will +prompt you for the necessary arguments if you simply enter the command name. + +\section{Debug Daemon Output} +\index[general]{Debug Daemon Output } +\index[general]{Output!Debug Daemon } + +If you want debug output from the daemons as they are running, start the +daemons from the install directory as follows: + +\footnotesize +\begin{verbatim} +./bacula start -d100 +\end{verbatim} +\normalsize + +This can be particularly helpful if your daemons do not start correctly, +because direct daemon output to the console is normally directed to the +NULL device, but with the debug level greater than zero, the output +will be sent to the starting terminal. + +To stop the three daemons, enter the following from the install directory: + +\footnotesize +\begin{verbatim} +./bacula stop +\end{verbatim} +\normalsize + +The execution of {\bf bacula stop} may complain about pids not found. This is +OK, especially if one of the daemons has died, which is very rare. + +To do a full system save, each File daemon must be running as root so that it +will have permission to access all the files. None of the other daemons +require root privileges. However, the Storage daemon must be able to open the +tape drives. On many systems, only root can access the tape drives. Either run +the Storage daemon as root, or change the permissions on the tape devices to +permit non-root access. 
MySQL and PostgreSQL can be installed and run with any +userid; root privilege is not necessary. + +\section{Patience When Starting Daemons or Mounting Blank Tapes} + +When you start the Bacula daemons, the Storage daemon attempts to open all +defined storage devices and verify the currently mounted Volume (if +configured). Until all the storage devices are verified, the Storage daemon +will not accept connections from the Console program. If a tape was previously +used, it will be rewound, and on some devices this can take several minutes. +As a consequence, you may need to have a bit of patience when first contacting +the Storage daemon after starting the daemons. If you can see your tape drive, +once the lights stop flashing, the drive will be ready to be used. + +The same considerations apply if you have just mounted a blank tape in a drive +such as an HP DLT. It can take a minute or two before the drive properly +recognizes that the tape is blank. If you attempt to {\bf mount} the tape with +the Console program during this recognition period, it is quite possible that +you will hang your SCSI driver (at least on my Red Hat Linux system). As a +consequence, you are again urged to have patience when inserting blank tapes. +Let the device settle down before attempting to access it. + +\section{Difficulties Connecting from the FD to the SD} +\index[general]{Difficulties Connecting from the FD to the SD} +\index[general]{SD!Difficulties Connecting from the FD to the SD} + +If you are having difficulties getting one or more of your File daemons to +connect to the Storage daemon, it is most likely because you have not used a +fully qualified domain name on the {\bf Address} directive in the +Director's Storage resource. That is the resolver on the File daemon's machine +(not on the Director's) must be able to resolve the name you supply into an IP +address. An example of an address that is guaranteed not to work: {\bf +localhost}. 
An example that may work: {\bf megalon}. An example that is more +likely to work: {\bf megalon.mydomain.com}. On Win32 if you don't have a good +resolver (often true on older Win98 systems), you might try using an IP +address in place of a name. + +If your address is correct, then make sure that no other program is using the +port 9103 on the Storage daemon's machine. The Bacula port numbers are +authorized by IANA, and should not be used by other programs, but apparently +some HP printers do use these port numbers. A {\bf netstat -a} on the Storage +daemon's machine can determine who is using the 9103 port (used for FD to SD +communications in Bacula). + +\section{Daemon Command Line Options} +\index[general]{Daemon Command Line Options } +\index[general]{Options!Daemon Command Line } + +Each of the three daemons (Director, File, Storage) accepts a small set of +options on the command line. In general, each of the daemons as well as the +Console program accepts the following options: + +\begin{description} + +\item [-c \lt{}file\gt{}] + \index[sd]{-c \lt{}file\gt{} } + Define the file to use as a configuration file. The default is the daemon + name followed by {\bf .conf} i.e. {\bf bacula-dir.conf} for the Director, + {\bf bacula-fd.conf} for the File daemon, and {\bf bacula-sd.conf} for the Storage + daemon. + +\item [-d nn] + \index[sd]{-d nn } + Set the debug level to {\bf nn}. Higher levels of debug cause more + information to be displayed on STDOUT concerning what the daemon is doing. + +\item [-f] + Run the daemon in the foreground. This option is needed to run the daemon + under the debugger. + +\item [-s] + Do not trap signals. This option is needed to run the daemon under the + debugger. + +\item [-t] + Read the configuration file and print any error messages, then immediately + exit. Useful for syntax testing of new configuration files. + +\item [-v] + Be more verbose or more complete in printing error and informational + messages. Recommended. + +\item [-?] 
+ Print the version and list of options. + \end{description} + +The Director has the following additional Director specific option: + +\begin{description} + +\item [-r \lt{}job\gt{}] + \index[fd]{-r \lt{}job\gt{} } + Run the named job immediately. This is for debugging and should not be used. + +\end{description} + +The File daemon has the following File daemon specific option: + +\begin{description} + +\item [-i] + Assume that the daemon is called from {\bf inetd} or {\bf xinetd}. In this + case, the daemon assumes that a connection has already been made and that it +is passed as STDIN. After the connection terminates the daemon will exit. +\end{description} + +The Storage daemon has no Storage daemon specific options. + +The Console program has no console specific options. + +\section{Creating a Pool} +\label{Pool} +\index[general]{Pool!Creating a } +\index[general]{Creating a Pool } + +Creating the Pool is automatically done when {\bf Bacula} starts, so if you +understand Pools, you can skip to the next section. + +When you run a job, one of the things that Bacula must know is what Volumes to +use to backup the FileSet. Instead of specifying a Volume (tape) directly, you +specify which Pool of Volumes you want Bacula to consult when it wants a tape +for writing backups. Bacula will select the first available Volume from the +Pool that is appropriate for the Storage device you have specified for the Job +being run. When a volume has filled up with data, {\bf Bacula} will change its +VolStatus from {\bf Append} to {\bf Full}, and then {\bf Bacula} will use the +next volume and so on. If no appendable Volume exists in the Pool, the +Director will attempt to recycle an old Volume, if there are still no +appendable Volumes available, {\bf Bacula} will send a message requesting the +operator to create an appropriate Volume. + +{\bf Bacula} keeps track of the Pool name, the volumes contained in the Pool, +and a number of attributes of each of those Volumes. 
+ +When Bacula starts, it ensures that all Pool resource definitions have been +recorded in the catalog. You can verify this by entering: + +\footnotesize +\begin{verbatim} +list pools +\end{verbatim} +\normalsize + +to the console program, which should print something like the following: + +\footnotesize +\begin{verbatim} +*list pools +Using default Catalog name=MySQL DB=bacula ++--------+---------+---------+---------+----------+-------------+ +| PoolId | Name | NumVols | MaxVols | PoolType | LabelFormat | ++--------+---------+---------+---------+----------+-------------+ +| 1 | Default | 3 | 0 | Backup | * | +| 2 | File | 12 | 12 | Backup | File | ++--------+---------+---------+---------+----------+-------------+ +* +\end{verbatim} +\normalsize + +If you attempt to create the same Pool name a second time, {\bf Bacula} will +print: + +\footnotesize +\begin{verbatim} +Error: Pool Default already exists. +Once created, you may use the {\bf update} command to +modify many of the values in the Pool record. +\end{verbatim} +\normalsize + +\label{Labeling} + +\section{Labeling Your Volumes} +\index[general]{Volumes!Labeling Your } +\index[general]{Labeling Your Volumes } + +Bacula requires that each Volume contains a software label. There are several +strategies for labeling volumes. The one I use is to label them as they are +needed by {\bf Bacula} using the console program. That is when Bacula needs a +new Volume, and it does not find one in the catalog, it will send me an email +message requesting that I add Volumes to the Pool. I then use the {\bf label} +command in the Console program to label a new Volume and to define it in the +Pool database, after which Bacula will begin writing on the new Volume. +Alternatively, I can use the Console {\bf relabel} command to relabel a Volume +that is no longer used providing it has VolStatus {\bf Purged}. + +Another strategy is to label a set of volumes at the start, then use them as +{\bf Bacula} requests them. 
This is most often done if you are cycling through +a set of tapes, for example using an autochanger. For more details on +recycling, please see the +\ilink{Automatic Volume Recycling}{RecyclingChapter} chapter of +this manual. + +If you run a Bacula job, and you have no labeled tapes in the Pool, Bacula +will inform you, and you can create them "on-the-fly" so to speak. In my +case, I label my tapes with the date, for example: {\bf DLT-18April02}. See +below for the details of using the {\bf label} command. + +\section{Labeling Volumes with the Console Program} +\index[general]{Labeling Volumes with the Console Program } +\index[general]{Program!Labeling Volumes with the Console } + +Labeling volumes is normally done by using the console program. + +\begin{enumerate} +\item ./bconsole +\item label + \end{enumerate} + +If Bacula complains that you cannot label the tape because it is already +labeled, simply {\bf unmount} the tape using the {\bf unmount} command in the +console, then physically mount a blank tape and re-issue the {\bf label} +command. + +Since the physical storage media is different for each device, the {\bf label} +command will provide you with a list of the defined Storage resources such as +the following: + +\footnotesize +\begin{verbatim} +The defined Storage resources are: + 1: File + 2: 8mmDrive + 3: DLTDrive + 4: SDT-10000 +Select Storage resource (1-4): +\end{verbatim} +\normalsize + +At this point, you should have a blank tape in the drive corresponding to the +Storage resource that you select. + +It will then ask you for the Volume name. + +\footnotesize +\begin{verbatim} +Enter new Volume name: +\end{verbatim} +\normalsize + +If Bacula complains: + +\footnotesize +\begin{verbatim} +Media record for Volume xxxx already exists. +\end{verbatim} +\normalsize + +It means that the volume name {\bf xxxx} that you entered already exists in +the Media database. You can list all the defined Media (Volumes) with the {\bf +list media} command. 
Note, the LastWritten column has been truncated for +proper printing. + +\footnotesize +\begin{verbatim} ++---------------+---------+--------+----------------+-----/~/-+------------+-----+ +| VolumeName | MediaTyp| VolStat| VolBytes | LastWri | VolReten | Recy| ++---------------+---------+--------+----------------+---------+------------+-----+ +| DLTVol0002 | DLT8000 | Purged | 56,128,042,217 | 2001-10 | 31,536,000 | 0 | +| DLT-07Oct2001 | DLT8000 | Full | 56,172,030,586 | 2001-11 | 31,536,000 | 0 | +| DLT-08Nov2001 | DLT8000 | Full | 55,691,684,216 | 2001-12 | 31,536,000 | 0 | +| DLT-01Dec2001 | DLT8000 | Full | 55,162,215,866 | 2001-12 | 31,536,000 | 0 | +| DLT-28Dec2001 | DLT8000 | Full | 57,888,007,042 | 2002-01 | 31,536,000 | 0 | +| DLT-20Jan2002 | DLT8000 | Full | 57,003,507,308 | 2002-02 | 31,536,000 | 0 | +| DLT-16Feb2002 | DLT8000 | Full | 55,772,630,824 | 2002-03 | 31,536,000 | 0 | +| DLT-12Mar2002 | DLT8000 | Full | 50,666,320,453 | 1970-01 | 31,536,000 | 0 | +| DLT-27Mar2002 | DLT8000 | Full | 57,592,952,309 | 2002-04 | 31,536,000 | 0 | +| DLT-15Apr2002 | DLT8000 | Full | 57,190,864,185 | 2002-05 | 31,536,000 | 0 | +| DLT-04May2002 | DLT8000 | Full | 60,486,677,724 | 2002-05 | 31,536,000 | 0 | +| DLT-26May02 | DLT8000 | Append | 1,336,699,620 | 2002-05 | 31,536,000 | 1 | ++---------------+---------+--------+----------------+-----/~/-+------------+-----+ +\end{verbatim} +\normalsize + +Once Bacula has verified that the volume does not already exist, it will +prompt you for the name of the Pool in which the Volume (tape) is to be +created. If there is only one Pool (Default), it will be automatically +selected. + +If the tape is successfully labeled, a Volume record will also be created in +the Pool. That is the Volume name and all its other attributes will appear +when you list the Pool. In addition, that Volume will be available for backup +if the MediaType matches what is requested by the Storage daemon. 
+ +When you labeled the tape, you answered very few questions about it -- +principally the Volume name, and perhaps the Slot. However, a Volume record in +the catalog database (internally known as a Media record) contains quite a few +attributes. Most of these attributes will be filled in from the default values +that were defined in the Pool (i.e. the Pool holds most of the default +attributes used when creating a Volume). + +It is also possible to add media to the pool without physically labeling the +Volumes. This can be done with the {\bf add} command. For more information, +please see the +\ilink{Console Chapter}{_ConsoleChapter} of this manual. diff --git a/docs/manuals/en/concepts/update_version b/docs/manuals/en/concepts/update_version new file mode 100755 index 00000000..5c2e0092 --- /dev/null +++ b/docs/manuals/en/concepts/update_version @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +. ./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/en/concepts/update_version.in b/docs/manuals/en/concepts/update_version.in new file mode 100644 index 00000000..2766245f --- /dev/null +++ b/docs/manuals/en/concepts/update_version.in @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +. 
./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/en/concepts/uploaddoc b/docs/manuals/en/concepts/uploaddoc new file mode 100755 index 00000000..02668a12 --- /dev/null +++ b/docs/manuals/en/concepts/uploaddoc @@ -0,0 +1,11 @@ +#!/bin/sh + +ftp -i ftp.sectoor.de <out + type out +\end{verbatim} +\normalsize + +The precise path to bacula-fd depends on where it is installed. The +example above is the default used in 1.39.22 and later. +The {\bf -t} option will cause Bacula to read the configuration file, print +any error messages and then exit. the {\bf \gt{}} redirects the output to the +file named {\bf out}, which you can list with the {\bf type} command. + +If something is going wrong later, or you want to run {\bf Bacula} with a +debug option, you might try starting it as: + +\footnotesize +\begin{verbatim} + c:\Program Files\bacula\bin\bacula-fd -d 100 >out +\end{verbatim} +\normalsize + +In this case, Bacula will run until you explicitly stop it, which will give +you a chance to connect to it from your Unix/Linux server. In later versions +of Bacula (1.34 on, I think), when you start the File daemon in debug mode it +can write the output to a trace file {\bf bacula.trace} in the current +directory. To enable this, before running a job, use the console, and enter: + +\footnotesize +\begin{verbatim} + trace on +\end{verbatim} +\normalsize + +then run the job, and once you have terminated the File daemon, you will find +the debug output in the {\bf bacula.trace} file, which will probably be +located in the same directory as bacula-fd.exe. + +In addition, you should look in the System Applications log on the Control +Panel to find any Windows errors that Bacula got during the startup process. 
+ +Finally, due to the above problems, when you turn on debugging, and specify +trace=1 on a setdebug command in the Console, Bacula will write the debug +information to the file {\bf bacula.trace} in the directory from which Bacula +is executing. + +If you are having problems with ClientRunBeforeJob scripts randomly dying, +it is possible that you have run into an Oracle bug. See bug number 622 in +the bugs.bacula.org database. The following information has been +provided by a user on this issue: + +\footnotesize +\begin{verbatim} +The information in this document applies to: + Oracle HTTP Server - Version: 9.0.4 + Microsoft Windows Server 2003 + Symptoms + When starting an OC4J instance, the System Clock runs faster, about 7 +seconds per minute. + + Cause + + + This is caused by the Sun JVM bug 4500388, which states that "Calling +Thread.sleep() with a small argument affects the system clock". Although +this is reported as fixed in JDK 1.4.0_02, several reports contradict this +(see the bug in +http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4500388). + + + Also reported by Microsoft as "The system clock may run fast when you +use the ACPI power management timer as a high-resolution counter on Windows +2000-based computers" (See http://support.microsoft.com/?id=821893) +\end{verbatim} +\normalsize + +You may wish to start the daemon with debug mode on rather than doing it +using bconsole. To do so, edit the following registry key: + +\footnotesize +\begin{verbatim} +HKEY_LOCAL_MACHINE\HARDWARE\SYSTEM\CurrentControlSet\Services\Bacula-dir +\end{verbatim} +\normalsize + +using regedit, then add -dnn after the /service option, where nn represents +the debug level you want. 
+ +\label{Compatibility} +\section{Windows Compatibility Considerations} +\index[general]{Windows Compatibility Considerations} +\index[general]{Considerations!Windows Compatibility} + +If you are not using the VSS (Volume Shadow Copy) option described in the +next section of this chapter, and if any applications are running during +the backup and they have files opened exclusively, Bacula will not be able +to backup those files, so be sure you close your applications (or tell your +users to close their applications) before the backup. Fortunately, most +Microsoft applications do not open files exclusively so that they can be +backed up. However, you will need to experiment. In any case, if Bacula +cannot open the file, it will print an error message, so you will always +know which files were not backed up. For version 1.37.25 and greater, see +the section below on Volume Shadow Copy Service that permits backing up any +file. + +During backup, Bacula doesn't know about the system registry, so you will +either need to write it out to an ASCII file using {\bf regedit~~/e} or use a +program specifically designed to make a copy or backup the registry. + +In Bacula version 1.31 and later, we use Windows backup API calls by +default. Typical of Windows, programming these special BackupRead and +BackupWrite calls is a real nightmare of complications. The end result +gives some distinct advantages and some disadvantages. + +First, the advantages are that on WinNT/2K/XP systems, the security and +ownership information is now backed up. In addition, with the exception of +files in exclusive use by another program, Bacula can now access all system +files. This means that when you restore files, the security and ownership +information will be restored on WinNT/2K/XP along with the data. + +The disadvantage of the Windows backup API calls is that it produces +non-portable backups. 
That is files and their data that are backed up on +WinNT using the native API calls (BackupRead/BackupWrite) cannot be +restored on Win95/98/Me or Unix systems. In principle, a file backed up on +WinNT can be restored on WinXP, but this remains to be seen in practice +(not yet tested). In addition, the stand-alone tools such as {\bf bls} and +{\bf bextract} cannot be used to retrieve the data for those files because +those tools are not available on Windows. All restores must use the Bacula +{\bf restore} command. As of Bacula 1.39.x, thanks to Thorsten Engel, this +restriction is removed, and Bacula should be able to read non-portable +backups on any system and restore the data appropriately. However, +on a system that does not have the BackupRead/BackupWrite calls (older +Windows versions and all Unix/Linux machines), though the file data +can be restored, the Windows security and access control data will not be restored. +This means that a standard set of access permissions will be set for +such restored files. + + +As a default, Bacula backs up Windows systems using the Windows API calls. +If you want to backup data on a WinNT/2K/XP system and restore it on a +Unix/Win95/98/Me system, we have provided a special {\bf portable} option +that backs up the data in a portable fashion by using portable API calls. +See the \ilink{portable option}{portable} on the Include statement in a +FileSet resource in the Director's configuration chapter for the details on +setting this option. However, using the portable option means you may have +permissions problems accessing files, and none of the security and +ownership information will be backed up or restored. The file data can, +however, be restored on any system. + +You should always be able to restore any file backed up on Unix or Win95/98/Me +to any other system. On some systems, such as WinNT/2K/XP, you may have to +reset the ownership of such restored files. 
Any file backed up on WinNT/2K/XP +should in principle be able to be restored to a similar system (i.e. +WinNT/2K/XP), however, I am unsure of the consequences if the owner +information and accounts are not identical on both systems. Bacula will not +let you restore files backed up on WinNT/2K/XP to any other system (i.e. Unix +Win95/98/Me) if you have used the defaults. + +Finally, if you specify the {\bf portable=yes} option on the files you back +up. Bacula will be able to restore them on any other system. However, any +WinNT/2K/XP specific security and ownership information will be lost. + +The following matrix will give you an idea of what you can expect. Thanks to +Marc Brueckner for doing the tests: + +\addcontentsline{lot}{table}{WinNT/2K/XP Restore Portability Status} +\begin{longtable}{|l|l|p{2.8in}|} + \hline +\multicolumn{1}{|c|}{\bf Backup OS} & \multicolumn{1}{c|}{\bf Restore OS} +& \multicolumn{1}{c|}{\bf Results } \\ + \hline {WinMe} & {WinMe} & {Works } \\ + \hline {WinMe} & {WinNT} & {Works (SYSTEM permissions) } \\ + \hline {WinMe} & {WinXP} & {Works (SYSTEM permissions) } \\ + \hline {WinMe} & {Linux} & {Works (SYSTEM permissions) } \\ + \hline {\ } & {\ } & {\ } \\ + \hline {WinXP} & {WinXP} & {Works } \\ + \hline {WinXP} & {WinNT} & {Works (all files OK, but got "The data is invalid" +message) } \\ + \hline {WinXP} & {WinMe} & {Error: Win32 data stream not supported. } \\ + \hline {WinXP} & {WinMe} & {Works if {\bf Portable=yes} specified during backup.} \\ + \hline {WinXP} & {Linux} & {Error: Win32 data stream not supported. } \\ + \hline {WinXP} & {Linux} & {Works if {\bf Portable=yes} specified during backup.}\\ + \hline {\ } & {\ } & {\ } \\ + \hline {WinNT} & {WinNT} & {Works } \\ + \hline {WinNT} & {WinXP} & {Works } \\ + \hline {WinNT} & {WinMe} & {Error: Win32 data stream not supported. 
} \\ + \hline {WinNT} & {WinMe} & {Works if {\bf Portable=yes} specified during backup.}\\ + \hline {WinNT} & {Linux} & {Error: Win32 data stream not supported. } \\ + \hline {WinNT} & {Linux} & {Works if {\bf Portable=yes} specified during backup. }\\ + \hline {\ } & {\ } & {\ } \\ + \hline {Linux} & {Linux} & {Works } \\ + \hline {Linux} & {WinNT} & {Works (SYSTEM permissions) } \\ + \hline {Linux} & {WinMe} & {Works } \\ + \hline {Linux} & {WinXP} & {Works (SYSTEM permissions)} +\\ \hline +\end{longtable} + +Note: with Bacula versions 1.39.x and later, non-portable Windows data can +be restored to any machine. + + +\label{VSS} +\section{Volume Shadow Copy Service} +\index[general]{Volume Shadow Copy Service} +\index[general]{VSS} +In version 1.37.30 and greater, you can turn on Microsoft's Volume +Shadow Copy Service (VSS). + +Microsoft added VSS to Windows XP and Windows 2003. From the perspective of +a backup-solution for Windows, this is an extremely important step. VSS +allows Bacula to backup open files and even to interact with applications like +RDBMS to produce consistent file copies. VSS aware applications are called +VSS Writers, they register with the OS so that when Bacula wants to do a +Snapshot, the OS will notify the registered Writer programs, which may then +create a consistent state in their application, which will be backed up. +Examples for these writers are "MSDE" (Microsoft database +engine), "Event Log Writer", "Registry Writer" plus 3rd +party-writers. If you have a non-vss aware application (e.g. +SQL Anywhere or probably MySQL), a shadow copy is still generated +and the open files can be backed up, but there is no guarantee +that the file is consistent. + +Bacula produces a message from each of the registered writer programs +when it is doing a VSS backup so you know which ones are correctly backed +up. + +Bacula supports VSS on both Windows 2003 and Windows XP. 
+Technically Bacula creates a shadow copy as soon as the backup process +starts. It does then backup all files from the shadow copy and destroys the +shadow copy after the backup process. Please have in mind, that VSS +creates a snapshot and thus backs up the system at the state it had +when starting the backup. It will disregard file changes which occur during +the backup process. + +VSS can be turned on by placing an + +\index[dir]{Enable VSS} +\index[general]{Enable VSS} +\begin{verbatim} +Enable VSS = yes +\end{verbatim} + +in your FileSet resource. + +The VSS aware File daemon has the letters VSS on the signon line that +it produces when contacted by the console. For example: +\begin{verbatim} +Tibs-fd Version: 1.37.32 (22 July 2005) VSS Windows XP MVS NT 5.1.2600 +\end{verbatim} +the VSS is shown in the line above. This only means that the File daemon +is capable of doing VSS not that VSS is turned on for a particular backup. +There are two ways of telling if VSS is actually turned on during a backup. +The first is to look at the status output for a job, e.g.: +\footnotesize +\begin{verbatim} +Running Jobs: +JobId 1 Job NightlySave.2005-07-23_13.25.45 is running. + VSS Backup Job started: 23-Jul-05 13:25 + Files=70,113 Bytes=3,987,180,650 Bytes/sec=3,244,247 + Files Examined=75,021 + Processing file: c:/Documents and Settings/kern/My Documents/My Pictures/Misc1/Sans titre - 39.pdd + SDReadSeqNo=5 fd=352 +\end{verbatim} +\normalsize +Here, you see under Running Jobs that JobId 1 is "VSS Backup Job started ..." +This means that VSS is enabled for that job. If VSS is not enabled, it will +simply show "Backup Job started ..." without the letters VSS. 
+ +The second way to know that the job was backed up with VSS is to look at the +Job Report, which will look something like the following: +\footnotesize +\begin{verbatim} +23-Jul 13:25 rufus-dir: Start Backup JobId 1, Job=NightlySave.2005-07-23_13.25.45 +23-Jul 13:26 rufus-sd: Wrote label to prelabeled Volume "TestVolume001" on device "DDS-4" (/dev/nst0) +23-Jul 13:26 rufus-sd: Spooling data ... +23-Jul 13:26 Tibs: Generate VSS snapshots. Driver="VSS WinXP", Drive(s)="C" +23-Jul 13:26 Tibs: VSS Writer: "MSDEWriter", State: 1 (VSS_WS_STABLE) +23-Jul 13:26 Tibs: VSS Writer: "Microsoft Writer (Bootable State)", State: 1 (VSS_WS_STABLE) +23-Jul 13:26 Tibs: VSS Writer: "WMI Writer", State: 1 (VSS_WS_STABLE) +23-Jul 13:26 Tibs: VSS Writer: "Microsoft Writer (Service State)", State: 1 (VSS_WS_STABLE) +\end{verbatim} +\normalsize +In the above Job Report listing, you see that the VSS snapshot was generated for drive C (if +other drives are backed up, they will be listed on the {\bf Drive(s)="C"} You also see the +reports from each of the writer program. Here they all report VSS\_WS\_STABLE, which means +that you will get a consistent snapshot of the data handled by that writer. + +\section{VSS Problems} +\index[general]{Problems!VSS} +\index[fd] {Problems!VSS} +\index[general]{VSS Problems} +\index[fd]{VSS Problems} + +If you are experiencing problems such as VSS hanging on MSDE, first try +running {\bf vssadmin} to check for problems, then try running {\bf +ntbackup} which also uses VSS to see if it has similar problems. If so, you +know that the problem is in your Windows machine and not with Bacula. + +The FD hang problems were reported with {\bf MSDEwriter} when: +\begin{itemize} +\item a local firewall locked local access to the MSDE TCP port (MSDEwriter +seems to use TCP/IP and not Named Pipes). 
+\item msdtcs was installed to run under "localsystem": try running msdtcs +under networking account (instead of local system) (com+ seems to work +better with this configuration). +\end{itemize} + + +\section{Windows Firewalls} +\index[general]{Firewalls!Windows} +\index[general]{Windows Firewalls} + +If you turn on the firewalling feature on Windows (default in WinXP SP2), you +are likely to find that the Bacula ports are blocked and you cannot +communicate to the other daemons. This can be deactivated through the {\bf +Security Notification} dialog, which is apparently somewhere in the {\bf +Security Center}. I don't have this on my computer, so I cannot give the exact +details. + +The command: + +\footnotesize +\begin{verbatim} +netsh firewall set opmode disable +\end{verbatim} +\normalsize + +is purported to disable the firewall, but this command is not accepted on my +WinXP Home machine. + +\section{Windows Port Usage} +\index[general]{Windows Port Usage} +\index[general]{Usage!Windows Port} + +If you want to see if the File daemon has properly opened the port and is +listening, you can enter the following command in a shell window: + +\footnotesize +\begin{verbatim} + netstat -an | findstr 910[123] +\end{verbatim} +\normalsize + +TopView is another program that has been recommend, but it is not a +standard Win32 program, so you must find and download it from the Internet. + +\section{Windows Disaster Recovery} +\index[general]{Recovery!Windows Disaster} +\index[general]{Windows Disaster Recovery} + +We don't currently have a good solution for disaster recovery on Windows as we +do on Linux. The main piece lacking is a Windows boot floppy or a Windows boot +CD. Microsoft releases a Windows Pre-installation Environment ({\bf WinPE}) +that could possibly work, but we have not investigated it. 
This means that +until someone figures out the correct procedure, you must restore the OS from +the installation disks, then you can load a Bacula client and restore files. +Please don't count on using {\bf bextract} to extract files from your backup +tapes during a disaster recovery unless you have backed up those files using +the {\bf portable} option. {\bf bextract} does not run on Windows, and the +normal way Bacula saves files using the Windows API prevents the files from +being restored on a Unix machine. Once you have an operational Windows OS +loaded, you can run the File daemon and restore your user files. + +Please see +\ilink{ Disaster Recovery of Win32 Systems}{Win3233} for the latest +suggestion, which looks very promising. + +It looks like Bart PE Builder, which creates a Windows PE (Pre-installation +Environment) Boot-CD, may be just what is needed to build a complete disaster +recovery system for Win32. This distribution can be found at +\elink{http://www.nu2.nu/pebuilder/}{\url{http://www.nu2.nu/pebuilder/}}. + +\section{Windows Restore Problems} +\index[general]{Problems!Windows Restore} +\index[general]{Windows Restore Problems} +Please see the +\ilink{Restore Chapter}{Windows} of this manual for problems +that you might encounter doing a restore. + +\section{Windows Backup Problems} +\index[general]{Problems!Windows Backup} +\index[general]{Windows Backup Problems} +If during a Backup, you get the message: +{\bf ERR=Access is denied} and you are using the portable option, +you should try adding both the non-portable (backup API) and +the Volume Shadow Copy options to your Director's conf file. + +In the Options resource: +\footnotesize +\begin{verbatim} +portable = no +\end{verbatim} +\normalsize + +In the FileSet resource: +\footnotesize +\begin{verbatim} +enablevss = yes +\end{verbatim} +\normalsize + +In general, specifying these two options should allow you to backup +any file on a Windows system. 
However, in some cases, if users +have been allowed full control of their folders, even system programs +such as Bacula can be locked out. In this case, you must identify +which folders or files are creating the problem and do the following: + +\begin{enumerate} +\item Grant ownership of the file/folder to the Administrators group, +with the option to replace the owner on all child objects. +\item Grant full control permissions to the Administrators group, +and change the user's group to only have Modify permission to +the file/folder and all child objects. +\end{enumerate} + +Thanks to Georger Araujo for the above information. + +\section{Windows Ownership and Permissions Problems} +\index[general]{Problems!Windows Ownership and Permissions} +\index[general]{Windows Ownership and Permissions Problems} + +If you restore files backed up from WinNT/XP/2K to an alternate directory, +Bacula may need to create some higher level directories that were not saved +(or restored). In this case, the File daemon will create them under the SYSTEM +account because that is the account that Bacula runs under as a service. As of +version 1.32f-3, Bacula creates these files with full access permission. +However, there may be cases where you have problems accessing those files even +if you run as administrator. In principle, Microsoft supplies you with the way +to seize the ownership of those files and thus change the permissions. +However, a much better solution to working with and changing Win32 permissions +is the program {\bf SetACL}, which can be found at +\elink{http://setacl.sourceforge.net/}{\url{http://setacl.sourceforge.net/}}. + +If you have not installed Bacula while running as Administrator +and if Bacula is not running as a Process with the userid (User Name) SYSTEM, +then it is very unlikely that it will have sufficient permission to +access all your files. + +Some users have experienced problems restoring files that participate in +the Active Directory. 
They also report that changing the userid under which +Bacula (bacula-fd.exe) runs, from SYSTEM to a Domain Admin userid, resolves +the problem. + + +\section{Manually resetting the Permissions} +\index[general]{Manually resetting the Permissions} +\index[general]{Permissions!Manually resetting the} + +The following solution was provided by Dan Langille \lt{}dan at langille in +the dot org domain\gt{}. The steps are performed using Windows 2000 Server but +they should apply to most Win32 platforms. The procedure outlines how to deal +with a problem which arises when a restore creates a top-level new directory. +In this example, "top-level" means something like {\bf +c:\textbackslash{}src}, not {\bf c:\textbackslash{}tmp\textbackslash{}src} +where {\bf c:\textbackslash{}tmp} already exists. If a restore job specifies / +as the {\bf Where:} value, this problem will arise. + +The problem appears as a directory which cannot be browsed with Windows +Explorer. The symptoms include the following message when you try to click on +that directory: + +\includegraphics{./access-is-denied.eps} + +If you encounter this message, the following steps will change the permissions +to allow full access. + +\begin{enumerate} +\item right click on the top level directory (in this example, {\bf c:/src}) + and select {\bf Properties}. +\item click on the Security tab. +\item If the following message appears, you can ignore it, and click on {\bf + OK}. + +\includegraphics{./view-only.eps} + +You should see something like this: + +\includegraphics{./properties-security.eps} +\item click on Advanced +\item click on the Owner tab +\item Change the owner to something other than the current owner (which is + {\bf SYSTEM} in this example as shown below). 
+ +\includegraphics{./properties-security-advanced-owner.eps} +\item ensure the "Replace owner on subcontainers and objects" box is + checked +\item click on OK +\item When the message "You do not have permission to read the contents of + directory c:\textbackslash{}src\textbackslash{}basis. Do you wish to replace + the directory permissions with permissions granting you Full Control?", click +on Yes. + +\includegraphics{./confirm.eps} +\item Click on OK to close the Properties tab + \end{enumerate} + +With the above procedure, you should now have full control over your restored +directory. + +In addition to the above methods of changing permissions, there is a Microsoft +program named {\bf cacls} that can perform similar functions. + +\section{Backing Up the WinNT/XP/2K System State} +\index[general]{State!Backing Up the WinNT/XP/2K System} +\index[general]{Backing Up the WinNT/XP/2K System State} + +A suggestion by Damian Coutts using Microsoft's NTBackup utility in +conjunction with Bacula should permit a full restore of any damaged system +files on Win2K/XP. His suggestion is to do an NTBackup of the critical system +state prior to running a Bacula backup with the following command: + +\footnotesize +\begin{verbatim} +ntbackup backup systemstate /F c:\systemstate.bkf +\end{verbatim} +\normalsize + +The {\bf backup} is the command, the {\bf systemstate} says to backup only the +system state and not all the user files, and the {\bf /F +c:\textbackslash{}systemstate.bkf} specifies where to write the state file. +this file must then be saved and restored by Bacula. + +To restore the system state, you first reload a base operating system if the +OS is damaged, otherwise, this is not necessary, then you would use Bacula to +restore all the damaged or lost user's files and to recover the {\bf +c:\textbackslash{}systemstate.bkf} file. 
Finally if there are any damaged or +missing system files or registry problems, you run {\bf NTBackup} and {\bf +catalogue} the system statefile, and then select it for restore. The +documentation says you can't run a command line restore of the systemstate. + +To the best of my knowledge, this has not yet been tested. If you test it, +please report your results to the Bacula email list. + +\section{Considerations for Filename Specifications} +\index[general]{Windows!Considerations for Filename Specifications} + +Please see the +\ilink{Director's Configuration chapter}{win32} of this manual +for important considerations on how to specify Windows paths in Bacula FileSet +Include and Exclude directives. + +\index[general]{Unicode} +Bacula versions prior to 1.37.28 do not support Windows Unicode filenames. +As of that version, both {\bf bconsole} and {\bf bwx-console} support Windows +Unicode filenames. There may still be some problems with multiple byte +characters (e.g. Chinese, ...) where it is a two byte character but the +displayed character is not two characters wide. + +\index[general]{Win32 Path Length Restriction} +Path/filenames longer than 260 characters (up to 32,000) are supported +beginning with Bacula version 1.39.20. Older Bacula versions support +only 260 character path/filenames. + +\section{Win32 Specific File daemon Command Line} +\index[general]{Client!Win32 Specific File daemon Command Line Options} +\index[general]{Win32 Specific File daemon Command Line Options} + +These options are not normally seen or used by the user, and are documented +here only for information purposes. At the current time, to change the default +options, you must either manually run {\bf Bacula} or you must manually edit +the system registry and modify the appropriate entries. 
+ +In order to avoid option clashes between the options necessary for {\bf +Bacula} to run on Windows and the standard Bacula options, all Windows +specific options are signaled with a forward slash character (/), while as +usual, the standard Bacula options are signaled with a minus (-), or a minus +minus (\verb:--:). All the standard Bacula options can be used on the Windows +version. In addition, the following Windows only options are implemented: + +\begin{description} + +\item [/service ] + \index[fd]{/service} + Start Bacula as a service + +\item [/run ] + \index[fd]{/run} + Run the Bacula application + +\item [/install ] + \index[fd]{/install} + Install Bacula as a service in the system registry + +\item [/remove ] + \index[fd]{/remove} + Uninstall Bacula from the system registry + +\item [/about ] + \index[fd]{/about} + Show the Bacula about dialogue box + +\item [/status ] + \index[fd]{/status} + Show the Bacula status dialogue box + +\item [/events ] + \index[fd]{/events} + Show the Bacula events dialogue box (not yet implemented) + +\item [/kill ] + \index[fd]{/kill} + Stop any running {\bf Bacula} + +\item [/help ] + \index[fd]{/help} + Show the Bacula help dialogue box +\end{description} + +It is important to note that under normal circumstances the user should never +need to use these options as they are normally handled by the system +automatically once Bacula is installed. However, you may note these options in +some of the .bat files that have been created for your use. + +\section{Shutting down Windows Systems} +\index[general]{Shutting down Windows Systems} +\index[general]{Systems!Shutting down Windows} + +Some users like to shutdown their Windows machines after a backup using a +Client Run After Job directive. 
If you want to do something similar, you might +take the shutdown program from the +\elink{apcupsd project}{\url{http://www.apcupsd.com}} or one from the +\elink{Sysinternals project} +{\url{http://www.sysinternals.com/ntw2k/freeware/psshutdown.shtml}}. diff --git a/docs/manuals/en/console/Makefile.in b/docs/manuals/en/console/Makefile.in new file mode 100644 index 00000000..9af2083b --- /dev/null +++ b/docs/manuals/en/console/Makefile.in @@ -0,0 +1,135 @@ +# +# +# Makefile for LaTeX +# +# To build everything do +# make tex +# make web +# make html +# make dvipdf +# +# or simply +# +# make +# +# for rapid development do: +# make tex +# make show +# +# +# If you are having problems getting "make" to work, debugging it is +# easier if can see the output from latex, which is normally redirected +# to /dev/null. To see it, do the following: +# +# cd docs/manual +# make tex +# latex bacula.tex +# +# typically the latex command will stop indicating the error (e.g. a +# missing \ in front of a _ or a missing { or ] ... +# +# The following characters must be preceded by a backslash +# to be entered as printable characters: +# +# # $ % & ~ _ ^ \ { } +# + +IMAGES=../../../images + +DOC=console + +first_rule: all + +all: tex web dvipdf mini-clean + +.SUFFIXES: .tex .html +.PHONY: +.DONTCARE: + + +tex: + @./update_version + @echo "Making version `cat version.tex`" + @cp -fp ${IMAGES}/hires/*.eps . + @touch ${DOC}i-dir.tex ${DOC}i-fd.tex ${DOC}i-sd.tex \ + ${DOC}i-console.tex ${DOC}i-general.tex + latex -interaction=batchmode ${DOC}.tex + makeindex ${DOC}.idx -o ${DOC}.ind 2>/dev/null + latex -interaction=batchmode ${DOC}.tex + +pdf: + @echo "Making pdfm" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdfm -p a4 ${DOC}.dvi + +dvipdf: + @echo "Making dvi to pdf" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdf ${DOC}.dvi ${DOC}.pdf + +html: + @echo " " + @echo "Making html" + @cp -fp ${IMAGES}/*.eps . 
+ @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @(if [ -f imagename_translations ] ; then \ + ./translate_images.pl --from_meaningful_names ${DOC}.html; \ + fi) + latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \ + -init_file latex2html-init.pl ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}.html + @echo "Done making html" + +web: + @echo "Making web" + @mkdir -p ${DOC} + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @cp -fp ${IMAGES}/*.eps ${DOC}/ + @cp -fp ${IMAGES}/*.eps ${IMAGES}/*.png ${DOC}/ + @rm -f ${DOC}/xp-*.png + @rm -f ${DOC}/next.eps ${DOC}/next.png ${DOC}/prev.eps ${DOC}/prev.png ${DOC}/up.eps ${DOC}/up.png + @rm -rf ${DOC}/*.html + latex2html -split 3 -local_icons -t "Bacula Console and Operators Guide" -long_titles 4 \ + -toc_stars -contents_in_nav -init_file latex2html-init.pl -white -notransparent ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}/Bacula_Consol*.html + @echo "Done making web" +show: + xdvi ${DOC} + +texcheck: + ./check_tex.pl ${DOC}.tex + +main_configs: + pic2graph -density 100 main_configs.png + +mini-clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.gif *.jpg *.eps + @rm -f *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.backup *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd *.old *.out + @rm -f ${DOC}/*.gif ${DOC}/*.jpg ${DOC}/*.eps + @rm -f ${DOC}/*.aux ${DOC}/*.cp ${DOC}/*.fn ${DOC}/*.ky ${DOC}/*.log ${DOC}/*.pg + @rm -f ${DOC}/*.backup ${DOC}/*.ilg ${DOC}/*.lof ${DOC}/*.lot + @rm -f ${DOC}/*.cdx ${DOC}/*.cnd ${DOC}/*.ddx ${DOC}/*.ddn ${DOC}/*.fdx ${DOC}/*.fnd ${DOC}/*.ind ${DOC}/*.sdx ${DOC}/*.snd + @rm -f ${DOC}/*.dnd ${DOC}/*.old ${DOC}/*.out + @rm -f ${DOC}/WARNINGS + + +clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.png *.gif *.jpg *.eps + @rm -f *.pdf *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.html *.backup *.ps *.dvi *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn 
*.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd imagename_translations + @rm -f *.old WARNINGS *.out *.toc *.idx + @rm -f ${DOC}i-*.tex + @rm -rf ${DOC} + + +distclean: clean + @rm -f images.pl labels.pl internals.pl + @rm -f Makefile version.tex diff --git a/docs/manuals/en/console/bconsole.tex b/docs/manuals/en/console/bconsole.tex new file mode 100644 index 00000000..af5b61f7 --- /dev/null +++ b/docs/manuals/en/console/bconsole.tex @@ -0,0 +1,1583 @@ +%% +%% + +\chapter{Bacula Console} +\label{_ConsoleChapter} +\index[general]{Console!Bacula} +\index[general]{Bacula Console} +\index[general]{Console!Bacula} +\index[general]{Bacula Console} + +The {\bf Bacula Console} (sometimes called the User Agent) is a program that +allows the user or the System Administrator, to interact with the Bacula +Director daemon while the daemon is running. + +The current Bacula Console comes in two versions: a shell interface (TTY +style), and a GNOME GUI interface. Both permit the administrator or authorized +users to interact with Bacula. You can determine the status of a particular +job, examine the contents of the Catalog as well as perform certain tape +manipulations with the Console program. + +In addition, there is a bwx-console built with wxWidgets that allows a graphic +restore of files. As of version 1.34.1 it is in an early stage of development, +but it already is quite useful. Unfortunately, it has not been enhanced for +some time now. + +Since the Console program interacts with the Director through the network, your +Console and Director programs do not necessarily need to run on the same +machine. + +In fact, a certain minimal knowledge of the Console program is needed in order +for Bacula to be able to write on more than one tape, because when Bacula +requests a new tape, it waits until the user, via the Console program, +indicates that the new tape is mounted. 
+ +\section{Console Configuration} +\index[general]{Console Configuration} +\index[general]{Configuration!Console} +\index[general]{Console Configuration} +\index[general]{Configuration!Console} + +When the Console starts, it reads a standard Bacula configuration file named +{\bf bconsole.conf} or {\bf bgnome-console.conf} in the case of the GNOME +Console version. This file allows default configuration of the Console, and at +the current time, the only Resource Record defined is the Director resource, +which gives the Console the name and address of the Director. For more +information on configuration of the Console program, please see the +\ilink{Console Configuration File}{ConsoleConfChapter} Chapter of +this document. + +\section{Running the Console Program} +\index[general]{Running the Console Program} +\index[general]{Program!Running the Console} +\index[general]{Running the Console Program} +\index[general]{Program!Running the Console} + +The console program can be run with the following options: +\footnotesize +\begin{verbatim} +Usage: bconsole [-s] [-c config_file] [-d debug_level] + -c set configuration file to file + -dnn set debug level to nn + -n no conio + -s no signals + -t test - read configuration and exit + -? print this message. +\end{verbatim} +\normalsize + + +After launching the Console program (bconsole), it will prompt you for the +next command with an asterisk (*). (Note, in the GNOME version, the prompt is +not present; you simply enter the commands you want in the command text box at +the bottom of the screen.) Generally, for all commands, you can simply enter +the command name and the Console program will prompt you for the necessary +arguments. Alternatively, in most cases, you may enter the command followed by +arguments. The general format is: + +\footnotesize +\begin{verbatim} + [=] [=] ... 
+\end{verbatim} +\normalsize + +where {\bf command} is one of the commands listed below; {\bf keyword} is one +of the keywords listed below (usually followed by an argument); and {\bf +argument} is the value. The command may be abbreviated to the shortest unique +form. If two commands have the same starting letters, the one that will be +selected is the one that appears first in the {\bf help} listing. If you want +the second command, simply spell out the full command. None of the keywords +following the command may be abbreviated. + +For example: + +\footnotesize +\begin{verbatim} +list files jobid=23 +\end{verbatim} +\normalsize + +will list all files saved for JobId 23. Or: + +\footnotesize +\begin{verbatim} +show pools +\end{verbatim} +\normalsize + +will display all the Pool resource records. + +The maximum command line length is limited to 511 characters, so if you +are scripting the console, you may need to take some care to limit the +line length. + +\section{Stopping the Console Program} +\index[general]{Program!Stopping the Console} +\index[general]{Stopping the Console Program} +\index[general]{Program!Stopping the Console} +\index[general]{Stopping the Console Program} + +Normally, you simply enter {\bf quit} or {\bf exit} and the Console program +will terminate. However, it waits until the Director acknowledges the command. +If the Director is already doing a lengthy command (e.g. prune), it may take +some time. If you want to immediately terminate the Console program, enter the +{\bf .quit} command. + +There is currently no way to interrupt a Console command once issued (i.e. +Ctrl-C does not work). However, if you are at a prompt that is asking you to +select one of several possibilities and you would like to abort the command, +you can enter a period ({\bf .}), and in most cases, you will either be +returned to the main command prompt or if appropriate the previous prompt (in +the case of nested prompts). 
In a few places such as where it is asking for a +Volume name, the period will be taken to be the Volume name. In that case, you +will most likely be able to cancel at the next prompt. + +\label{keywords} +\section{Alphabetic List of Console Keywords} +\index[general]{Keywords!Alphabetic List of Console} +\index[general]{Alphabetic List of Console Keywords} +\index[general]{Keywords!Alphabetic List of Console} +\index[general]{Alphabetic List of Console Keywords} +Unless otherwise specified, each of the following keywords +takes an argument, which is specified after the keyword following +an equal sign. For example: + +\begin{verbatim} +jobid=536 +\end{verbatim} + +Please note, this list is incomplete as it is currently in +the process of being created and is not currently totally in +alphabetic +order ... + +\begin{description} +\item [restart] + Permitted on the python command, and causes the Python + interpreter to be restarted. Takes no argument. +\item [all] + Permitted on the status and show commands to specify all components or + resources respectively. +\item [allfrompool] + Permitted on the update command to specify that all Volumes in the + pool (specified on the command line) should be updated. +\item [allfrompools] + Permitted on the update command to specify that all Volumes in all + pools should be updated. +\item [before] + Used in the restore command. +\item [bootstrap] + Used in the restore command. +\item [catalog] + Allowed in the use command to specify the catalog name + to be used. +\item [catalogs] + Used in the show command. Takes no arguments. +\item [client | fd] +\item [clients] + Used in the show, list, and llist commands. Takes no arguments. +\item [counters] + Used in the show command. Takes no arguments. +\item [current] + Used in the restore command. Takes no argument. +\item [days] + Used to define the number of days the "list nextvol" command + should consider when looking for jobs to be run. 
The days keyword + can also be used on the "status dir" command so that it will display + jobs scheduled for the number of days you want. +\item [devices] + Used in the show command. Takes no arguments. +\item [dir | director] +\item [directors] + Used in the show command. Takes no arguments. +\item [directory] + Used in the restore command. Its argument specifies the directory + to be restored. +\item [enabled] + This keyword can appear on the {\bf update volume} as well + as the {\bf update slots} commands, and + allows one of the following arguments: yes, true, no, false, archived, + 0, 1, 2. Where 0 corresponds to no or false, 1 corresponds to yes or true, and + 2 corresponds to archived. Archived volumes will not be used, nor will + the Media record in the catalog be pruned. Volumes that are not enabled, + will not be used for backup or restore. +\item [done] + Used in the restore command. Takes no argument. +\item [file] + Used in the restore command. +\item [files] + Used in the list and llist commands. Takes no arguments. +\item [fileset] +\item [filesets] + Used in the show command. Takes no arguments. +\item [help] + Used in the show command. Takes no arguments. +\item [jobs] + Used in the show, list and llist commands. Takes no arguments. +\item [jobmedia] + Used in the list and llist commands. Takes no arguments. +\item [jobtotals] + Used in the list and llist commands. Takes no arguments. +\item [jobid] + The JobId is the numeric jobid that is printed in the Job + Report output. It is the index of the database record for the + given job. While it is unique for all the existing Job records + in the catalog database, the same JobId can be reused once a + Job is removed from the catalog. Probably you will refer to + specific Jobs that ran using their numeric JobId. +\item [job | jobname] + The Job or Jobname keyword refers to the name you specified + in the Job resource, and hence it refers to any number of + Jobs that ran. 
It is typically useful if you want to list + all jobs of a particular name. +\item [level] +\item [listing] + Permitted on the estimate command. Takes no argument. +\item [limit] +\item [messages] + Used in the show command. Takes no arguments. +\item [media] + Used in the list and llist commands. Takes no arguments. +\item [nextvol | nextvolume] + Used in the list and llist commands. Takes no arguments. +\item [on] + Takes no keyword. +\item [off] + Takes no keyword. +\item [pool] +\item [pools] + Used in the show, list, and llist commands. Takes no arguments. +\item [select] + Used in the restore command. Takes no argument. +\item [storages] + Used in the show command. Takes no arguments. +\item [schedules] + Used in the show command. Takes no arguments. +\item [sd | store | storage] +\item [ujobid] + The ujobid is a unique job identification that is printed + in the Job Report output. At the current time, it consists + of the Job name (from the Name directive for the job) appended + with the date and time the job was run. This keyword is useful + if you want to completely identify the Job instance run. +\item [volume] +\item [volumes] + Used in the list and llist commands. Takes no arguments. +\item [where] + Used in the restore command. +\item [yes] + Used in the restore command. Takes no argument. +\end{description} + +\label{list} +\section{Alphabetic List of Console Commands} +\index[general]{Commands!Alphabetic List of Console} +\index[general]{Alphabetic List of Console Commands} +\index[general]{Commands!Alphabetic List of Console} +\index[general]{Alphabetic List of Console Commands} + +The following commands are currently implemented: + +\begin{description} +\item [{add [pool=\lt{}pool-name\gt{} storage=\lt{}storage\gt{} + jobid=\lt{}JobId\gt{}]} ] + \index[general]{add} + This command is used to add Volumes to an existing Pool. 
That is, + it creates the Volume name in the catalog and inserts into the Pool + in the catalog, but does not attempt to access the physical Volume. + Once + added, Bacula expects that Volume to exist and to be labeled. + This command is not normally used since Bacula will + automatically do the equivalent when Volumes are labeled. However, + there may be times when you have removed a Volume from the catalog + and want to later add it back. + + Normally, the {\bf label} command is used rather than this command + because the {\bf label} command labels the physical media (tape, disk, + DVD, ...) and does the equivalent of the {\bf add} command. The {\bf + add} command affects only the Catalog and not the physical media (data + on Volumes). The physical media must exist and be labeled before use + (usually with the {\bf label} command). This command can, however, be + useful if you wish to add a number of Volumes to the Pool that will be + physically labeled at a later time. It can also be useful if you are + importing a tape from another site. Please see the {\bf label} command + below for the list of legal characters in a Volume name. + +\item [autodisplay on/off] + \index[general]{autodisplay on/off} + This command accepts {\bf on} or {\bf off} as an argument, and turns + auto-display of messages on or off respectively. The default for the + console program is {\bf off}, which means that you will be notified when + there are console messages pending, but they will not automatically be + displayed. The default for the bgnome-console program is {\bf on}, which + means that messages will be displayed when they are received (usually + within five seconds of them being generated). + + When autodisplay is turned off, you must explicitly retrieve the + messages with the {\bf messages} command. When autodisplay is turned + on, the messages will be displayed on the console as they are received. 
+ +\item [automount on/off] + \index[general]{automount on/off} + This command accepts {\bf on} or {\bf off} as the argument, and turns + auto-mounting of the Volume after a {\bf label} command on or off + respectively. The default is {\bf on}. If {\bf automount} is turned + off, you must explicitly {\bf mount} tape Volumes after a label command to + use it. + +\item [{cancel [jobid=\lt{}number\gt{} job=\lt{}job-name\gt{} ujobid=\lt{}unique-jobid\gt{}]}] + \index[general]{cancel jobid} + This command is used to cancel a job and accepts {\bf jobid=nnn} or {\bf + job=xxx} as an argument where nnn is replaced by the JobId and xxx is + replaced by the job name. If you do not specify a keyword, the Console + program will prompt you with the names of all the active jobs allowing + you to choose one. + + Once a Job is marked to be canceled, it may take a bit of time + (generally within a minute) before it actually terminates, depending on + what operations it is doing. + +\item [{create [pool=\lt{}pool-name\gt{}]}] + \index[general]{create pool} + This command is not normally used as the Pool records are automatically + created by the Director when it starts based on what it finds in + the conf file. If needed, this command can be used + to create a Pool record in the database using the + Pool resource record defined in the Director's configuration file. So + in a sense, this command simply transfers the information from the Pool + resource in the configuration file into the Catalog. Normally this + command is done automatically for you when the Director starts providing + the Pool is referenced within a Job resource. If you use this command + on an existing Pool, it will automatically update the Catalog to have + the same information as the Pool resource. After creating a Pool, you + will most likely use the {\bf label} command to label one or more + volumes and add their names to the Media database. 
+ + When starting a Job, if Bacula determines that there is no Pool record + in the database, but there is a Pool resource of the appropriate name, + it will create it for you. If you want the Pool record to appear in the + database immediately, simply use this command to force it to be created. + +\item [{delete [volume=\lt{}vol-name\gt{} pool=\lt{}pool-name\gt{} job + jobid=\lt{}id\gt{}]}] + \index[general]{delete} + The delete command is used to delete a Volume, Pool or Job record from + the Catalog as well as all associated catalog Volume records that were + created. This command operates only on the Catalog database and has no + effect on the actual data written to a Volume. This command can be + dangerous and we strongly recommend that you do not use it unless you + know what you are doing. + + If the keyword {\bf Volume} appears on the command line, the named + Volume will be deleted from the catalog, if the keyword {\bf Pool} + appears on the command line, a Pool will be deleted, and if the keyword + {\bf Job} appears on the command line, a Job and all its associated + records (File and JobMedia) will be deleted from the catalog. The full + form of this command is: + +\begin{verbatim} +delete pool=<pool-name> +\end{verbatim} + + or + +\begin{verbatim} +delete volume=<volume-name> pool=<pool-name> or +\end{verbatim} + +\begin{verbatim} +delete JobId=<job-id> JobId=<job-id2> ... or +\end{verbatim} + +\begin{verbatim} +delete Job JobId=n,m,o-r,t ... +\end{verbatim} + + The first form deletes a Pool record from the catalog database. The + second form deletes a Volume record from the specified pool in the + catalog database. The third form deletes the specified Job record from + the catalog database. The last form deletes JobId records for JobIds + n, m, o, p, q, r, and t. Where each one of the n,m,... is, of course, a + number. That is a "delete jobid" accepts lists and ranges of + jobids. 
+ +\item [disable job\lt{}job-name\gt{}] + \index[general]{disable} + This command permits you to disable a Job for automatic scheduling. + The job may have been previously enabled with the Job resource + {\bf Enabled} directive or using the console {\bf enable} command. + The next time the Director is restarted or the conf file is reloaded, + the Enable/Disable state will be set to the value in the Job resource + (default enabled) as defined in the bacula-dir.conf file. + +\item [enable job\lt{}job-name\gt{}] + \index[general]{enable} + This command permits you to enable a Job for automatic scheduling. + The job may have been previously disabled with the Job resource + {\bf Enabled} directive or using the console {\bf disable} command. + The next time the Director is restarted or the conf file is reloaded, + the Enable/Disable state will be set to the value in the Job resource + (default enabled) as defined in the bacula-dir.conf file. + +\label{estimate} +\item [estimate] + \index[general]{estimate} + Using this command, you can get an idea how many files will be backed + up, or if you are unsure about your Include statements in your FileSet, + you can test them without doing an actual backup. The default is to + assume a Full backup. However, you can override this by specifying a + {\bf level=Incremental} or {\bf level=Differential} on the command line. + A Job name must be specified or you will be prompted for one, and + optionally a Client and FileSet may be specified on the command line. + It then contacts the client which computes the number of files and bytes + that would be backed up. Please note that this is an estimate + calculated from the number of blocks in the file rather than by reading + the actual bytes. As such, the estimated backup size will generally be + larger than an actual backup. + + Optionally you may specify the keyword {\bf listing} in which case, all the + files to be backed up will be listed. 
Note, it could take quite some time to + display them if the backup is large. The full form is: + + +\begin{verbatim} +estimate job= listing client= + fileset= level= +\end{verbatim} + + Specification of the {\bf job} is sufficient, but you can also override + the client, fileset and/or level by specifying them on the estimate + command line. + + +As an example, you might do: + +\footnotesize +\begin{verbatim} + @output /tmp/listing + estimate job=NightlySave listing level=Incremental + @output +\end{verbatim} +\normalsize + + which will do a full listing of all files to be backed up for the Job {\bf + NightlySave} during an Incremental save and put it in the file {\bf + /tmp/listing}. Note, the byte estimate provided by this command is + based on the file size contained in the directory item. This can give + wildly incorrect estimates of the actual storage used if there are + sparse files on your systems. Sparse files are often found on 64 bit + systems for certain system files. The size that is returned is the size + Bacula will backup if the sparse option is not specified in the FileSet. + There is currently no way to get an estimate of the real file size that + would be found should the sparse option be enabled. + + +\item [help] + \index[general]{help} + This command displays the list of commands available. + +\item [label] + \index[general]{label} + \index[general]{relabel} + \index[general]{label} + \index[general]{relabel} + This command is used to label physical volumes. The full form of this command + is: + +\begin{verbatim} +label storage=>storage-name> volume=>volume-name> + slot=>slot> +\end{verbatim} + + If you leave out any part, you will be prompted for it. The media type + is automatically taken from the Storage resource definition that you + supply. Once the necessary information is obtained, the Console program + contacts the specified Storage daemon and requests that the Volume be + labeled. 
If the Volume labeling is successful, the Console program will + create a Volume record in the appropriate Pool. + + The Volume name is restricted to letters, numbers, and the special + characters hyphen ({\bf -}), underscore ({\bf \_}), colon ({\bf :}), and + period ({\bf .}). All other characters including a space are invalid. + This restriction is to ensure good readability of Volume names to reduce + operator errors. + + Please note, when labeling a blank tape, Bacula will get {\bf read I/O + error} when it attempts to ensure that the tape is not already labeled. If + you wish to avoid getting these messages, please write an EOF mark on + your tape before attempting to label it: + +\footnotesize +\begin{verbatim} + mt rewind + mt weof + +\end{verbatim} +\normalsize + +The label command can fail for a number of reasons: + +\begin{enumerate} +\item The Volume name you specify is already in the Volume database. + +\item The Storage daemon has a tape or other Volume already mounted on the + device, in which case you must {\bf unmount} the device, insert a blank + tape, then do the {\bf label} command. + +\item The Volume in the device is already a Bacula labeled Volume. (Bacula will + never relabel a Bacula labeled Volume unless it is recycled and you use the + {\bf relabel} command). + +\item There is no Volume in the drive. +\end{enumerate} + +There are two ways to relabel a volume that already has a Bacula label. The +brute force method is to write an end of file mark on the tape using the +system {\bf mt} program, something like the following: + +\footnotesize +\begin{verbatim} + mt -f /dev/st0 rewind + mt -f /dev/st0 weof +\end{verbatim} +\normalsize + +For a disk volume, you would manually delete the Volume. + +Then you use the {\bf label} command to add a new label. However, this could +leave traces of the old volume in the catalog. 
+ +The preferable method to relabel a Volume is to first {\bf purge} the volume, +either automatically, or explicitly with the {\bf purge} command, then use +the {\bf relabel} command described below. + +If your autochanger has barcode labels, you can label all the Volumes in +your autochanger one after another by using the {\bf label barcodes} +command. For each tape in the changer containing a barcode, Bacula will +mount the tape and then label it with the same name as the barcode. An +appropriate Media record will also be created in the catalog. Any barcode +that begins with the same characters as specified on the +"CleaningPrefix=xxx" directive in the Director's Pool resource, will be +treated as a cleaning tape, and will not be labeled. However, an entry for +the cleaning tape will be created in the catalog. For example with: + +\footnotesize +\begin{verbatim} + Pool { + Name ... + Cleaning Prefix = "CLN" + } + +\end{verbatim} +\normalsize + +Any slot containing a barcode of CLNxxxx will be treated as a cleaning tape +and will not be mounted. Note, the full form of the command is: + +\footnotesize +\begin{verbatim} +label storage=xxx pool=yyy slots=1-5,10 barcodes +\end{verbatim} +\normalsize + +\item [list] + \index[general]{list} + The list command lists the requested contents of the Catalog. The + various fields of each record are listed on a single line. The various + forms of the list command are: +\footnotesize +\begin{verbatim} + list jobs + + list jobid= (list jobid id) + + list ujobid (list job with unique name) + + list job= (list all jobs with "job-name") + + list jobname= (same as above) + + In the above, you can add "limit=nn" to limit the output to + nn jobs. 
+ + list jobmedia + + list jobmedia jobid= + + list jobmedia job= + + list files jobid= + + list files job= + + list pools + + list clients + + list jobtotals + + list volumes + + list volumes jobid= + + list volumes pool= + + list volumes job= + + list volume= + + list nextvolume job= + + list nextvol job= + + list nextvol job= days=nnn + +\end{verbatim} +\normalsize + + What most of the above commands do should be more or less obvious. In + general if you do not specify all the command line arguments, the + command will prompt you for what is needed. + + The {\bf list nextvol} command will print the Volume name to be used by + the specified job. You should be aware that exactly what Volume will be + used depends on a lot of factors including the time and what a prior job + will do. It may fill a tape that is not full when you issue this + command. As a consequence, this command will give you a good estimate + of what Volume will be used but not a definitive answer. In addition, + this command may have certain side effect because it runs through the + same algorithm as a job, which means it may automatically purge or + recycle a Volume. By default, the job specified must run within the + next two days or no volume will be found. You can, however, use the + {\bf days=nnn} specification to specify up to 50 days. For example, + if on Friday, you want to see what Volume will be needed on Monday, + for job MyJob, you would use {\bf list nextvol job=MyJob days=3}. + + If you wish to add specialized commands that list the contents of the + catalog, you can do so by adding them to the {\bf query.sql} file. + However, this takes some knowledge of programming SQL. Please see the + {\bf query} command below for additional information. See below for + listing the full contents of a catalog record with the {\bf llist} + command. 
+ + As an example, the command {\bf list pools} might produce the following + output: + +\footnotesize +\begin{verbatim} ++------+---------+---------+---------+----------+-------------+ +| PoId | Name | NumVols | MaxVols | PoolType | LabelFormat | ++------+---------+---------+---------+----------+-------------+ +| 1 | Default | 0 | 0 | Backup | * | +| 2 | Recycle | 0 | 8 | Backup | File | ++------+---------+---------+---------+----------+-------------+ +\end{verbatim} +\normalsize + + As mentioned above, the {\bf list} command lists what is in the + database. Some things are put into the database immediately when Bacula + starts up, but in general, most things are put in only when they are + first used, which is the case for a Client as with Job records, etc. + + Bacula should create a client record in the database the first time you + run a job for that client. Doing a {\bf status} will not cause a + database record to be created. The client database record will be + created whether or not the job fails, but it must at least start. When + the Client is actually contacted, additional info from the client will + be added to the client record (a "uname -a" output). + + If you want to see what Client resources you have available in your conf + file, you use the Console command {\bf show clients}. + +\item [llist] + \index[general]{llist} + The llist or "long list" command takes all the same arguments that the + list command described above does. The difference is that the llist + command list the full contents of each database record selected. It + does so by listing the various fields of the record vertically, with one + field per line. It is possible to produce a very large number of output + lines with this command. 
+ + If instead of the {\bf list pools} as in the example above, you enter + {\bf llist pools} you might get the following output: + +\footnotesize +\begin{verbatim} + PoolId: 1 + Name: Default + NumVols: 0 + MaxVols: 0 + UseOnce: 0 + UseCatalog: 1 + AcceptAnyVolume: 1 + VolRetention: 1,296,000 + VolUseDuration: 86,400 + MaxVolJobs: 0 + MaxVolBytes: 0 + AutoPrune: 0 + Recycle: 1 + PoolType: Backup + LabelFormat: * + + PoolId: 2 + Name: Recycle + NumVols: 0 + MaxVols: 8 + UseOnce: 0 + UseCatalog: 1 + AcceptAnyVolume: 1 + VolRetention: 3,600 + VolUseDuration: 3,600 + MaxVolJobs: 1 + MaxVolBytes: 0 + AutoPrune: 0 + Recycle: 1 + PoolType: Backup + LabelFormat: File + +\end{verbatim} +\normalsize + +\item [messages] + \index[general]{messages} + This command causes any pending console messages to be immediately displayed. + + +\item [mount] + \index[general]{mount} + The mount command is used to get Bacula to read a volume on a physical + device. It is a way to tell Bacula that you have mounted a tape and + that Bacula should examine the tape. This command is normally + used only after there was no Volume in a drive and Bacula requests you to mount a new + Volume or when you have specifically unmounted a Volume with the {\bf + unmount} console command, which causes Bacula to close the drive. If + you have an autoloader, the mount command will not cause Bacula to + operate the autoloader unless you specify a {\bf slot} and possibly a + {\bf drive}. The various forms of the mount command are: + +mount storage=\lt{}storage-name\gt{} [ slot=\lt{}num\gt{} ] [ + drive=\lt{}num\gt{} ] + +mount [ jobid=\lt{}id\gt{} | job=\lt{}job-name\gt{} ] + + If you have specified {\bf Automatic Mount = yes} in the Storage daemon's + Device resource, under most circumstances, Bacula will automatically access + the Volume unless you have explicitly {\bf unmount}ed it in the Console + program. 
+ +\item[python] + \index[general]{python} + The python command takes a single argument {\bf restart}: + +python restart + + This causes the Python interpreter in the Director to be reinitialized. + This can be helpful for testing because once the Director starts and the + Python interpreter is initialized, there is no other way to make it + accept any changes to the startup script {\bf DirStartUp.py}. For more + details on Python scripting, please see the \ilink{Python + Scripting}{PythonChapter} chapter of this manual. + +\label{ManualPruning} +\item [prune] + \index[general]{prune} + The Prune command allows you to safely remove expired database records + from Jobs and Volumes. This command works only on the Catalog database + and does not affect data written to Volumes. In all cases, the Prune + command applies a retention period to the specified records. You can + Prune expired File entries from Job records; you can Prune expired Job + records from the database, and you can Prune both expired Job and File + records from specified Volumes. + +prune files|jobs|volume client=\lt{}client-name\gt{} +volume=\lt{}volume-name\gt{} + + For a Volume to be pruned, the {\bf VolStatus} must be Full, Used, or + Append, otherwise the pruning will not take place. + +\item [purge] + \index[general]{purge} + The Purge command will delete associated Catalog database records from + Jobs and Volumes without considering the retention period. {\bf Purge} + works only on the Catalog database and does not affect data written to + Volumes. This command can be dangerous because you can delete catalog + records associated with current backups of files, and we recommend that + you do not use it unless you know what you are doing. 
The permitted + forms of {\bf purge} are: + +purge files jobid=\lt{}jobid\gt{}|job=\lt{}job-name\gt{}|client=\lt{}client-name\gt{} + +purge jobs client=\lt{}client-name\gt{} (of all jobs) + +purge volume|volume=\lt{}vol-name\gt{} (of all jobs) + +For the {\bf purge} command to work on Volume Catalog database records the +{\bf VolStatus} must be Append, Full, Used, or Error. + +The actual data written to the Volume will be unaffected by this command. + +\item [relabel] + \index[general]{relabel} + \index[general]{relabel} + This command is used to label physical volumes. The full form of this + command is: + +relabel storage=\lt{}storage-name\gt{} oldvolume=\lt{}old-volume-name\gt{} + volume=\lt{}newvolume-name\gt{} + + If you leave out any part, you will be prompted for it. In order for + the Volume (old-volume-name) to be relabeled, it must be in the catalog, + and the volume status must be marked {\bf Purged} or {\bf Recycle}. + This happens automatically as a result of applying retention periods, or + you may explicitly purge the volume using the {\bf purge} command. + + Once the volume is physically relabeled, the old data previously written + on the Volume is lost and cannot be recovered. + +\item [release] + \index[general]{release} + This command is used to cause the Storage daemon to rewind (release) the + current tape in the drive, and to re-read the Volume label the next time + the tape is used. + +release storage=\lt{}storage-name\gt{} + + After a release command, the device is still kept open by Bacula (unless + Always Open is set to No in the Storage Daemon's configuration) so it + cannot be used by another program. However, with some tape drives, the + operator can remove the current tape and to insert a different one, and + when the next Job starts, Bacula will know to re-read the tape label to + find out what tape is mounted. If you want to be able to use the drive + with another program (e.g. 
{\bf mt}), you must use the {\bf unmount} + command to cause Bacula to completely release (close) the device. + +\item [reload] + \index[general]{reload} + The reload command causes the Director to re-read its configuration + file and apply the new values. The new values will take effect + immediately for all new jobs. However, if you change schedules, + be aware that the scheduler pre-schedules jobs up to two hours in + advance, so any changes that are to take place during the next two + hours may be delayed. Jobs that have already been scheduled to run + (i.e. surpassed their requested start time) will continue with the + old values. New jobs will use the new values. Each time you issue + a reload command while jobs are running, the prior config values + will be queued until all jobs that were running before issuing + the reload terminate, at which time the old config values will + be released from memory. The Director permits keeping up to + ten prior sets of configurations before it will refuse a reload + command. Once at least one old set of config values has been + released it will again accept new reload commands. + + While it is possible to reload the Director's configuration on the fly, + even while jobs are executing, this is a complex operation and not + without side effects. Accordingly, if you have to reload the Director's + configuration while Bacula is running, it is advisable to restart the + Director at the next convenient opportunity. + +\label{restore_command} +\item [restore] + \index[general]{restore} + The restore command allows you to select one or more Jobs (JobIds) to be + restored using various methods. Once the JobIds are selected, the File + records for those Jobs are placed in an internal Bacula directory tree, + and the restore enters a file selection mode that allows you to + interactively walk up and down the file tree selecting individual files + to be restored. 
This mode is somewhat similar to the standard Unix {\bf + restore} program's interactive file selection mode. + +restore storage=\lt{}storage-name\gt{} client=\lt{}backup-client-name\gt{} + where=\lt{}path\gt{} pool=\lt{}pool-name\gt{} fileset=\lt{}fileset-name\gt{} + restoreclient=\lt{}restore-client-name\gt{} + select current all done + + Where {\bf current}, if specified, tells the restore command to + automatically select a restore to the most current backup. If not + specified, you will be prompted. The {\bf all} specification tells the + restore command to restore all files. If it is not specified, you will + be prompted for the files to restore. For details of the {\bf restore} + command, please see the \ilink{Restore Chapter}{RestoreChapter} of this + manual. + + The client keyword initially specifies the client from which the backup + was made and the client to which the restore will be make. However, + if the restoreclient keyword is specified, then the restore is written + to that client. + +\item [run] + \index[general]{run} + This command allows you to schedule jobs to be run immediately. The full form + of the command is: + +run job=\lt{}job-name\gt{} client=\lt{}client-name\gt{} + fileset=\lt{}FileSet-name\gt{} level=\lt{}level-keyword\gt{} + storage=\lt{}storage-name\gt{} where=\lt{}directory-prefix\gt{} + when=\lt{}universal-time-specification\gt{} yes + + Any information that is needed but not specified will be listed for + selection, and before starting the job, you will be prompted to accept, + reject, or modify the parameters of the job to be run, unless you have + specified {\bf yes}, in which case the job will be immediately sent to + the scheduler. + + On my system, when I enter a run command, I get the following prompt: + +\footnotesize +\begin{verbatim} +A job name must be specified. 
+The defined Job resources are: + 1: Matou + 2: Polymatou + 3: Rufus + 4: Minimatou + 5: Minou + 6: PmatouVerify + 7: MatouVerify + 8: RufusVerify + 9: Watchdog +Select Job resource (1-9): + +\end{verbatim} +\normalsize + +If I then select number 5, I am prompted with: + +\footnotesize +\begin{verbatim} +Run Backup job +JobName: Minou +FileSet: Minou Full Set +Level: Incremental +Client: Minou +Storage: DLTDrive +Pool: Default +When: 2003-04-23 17:08:18 +OK to run? (yes/mod/no): + +\end{verbatim} +\normalsize + +If I now enter {\bf yes}, the Job will be run. If I enter {\bf mod}, I will +be presented with the following prompt. + +\footnotesize +\begin{verbatim} +Parameters to modify: + 1: Level + 2: Storage + 3: Job + 4: FileSet + 5: Client + 6: When + 7: Pool +Select parameter to modify (1-7): + +\end{verbatim} +\normalsize + +If you wish to start a job at a later time, you can do so by setting the When +time. Use the {\bf mod} option and select {\bf When} (no. 6). Then enter the +desired start time in YYYY-MM-DD HH:MM:SS format. + +\item [setdebug] + \index[general]{setdebug} + \index[general]{setdebug} + \index[general]{debugging} + \index[general]{debugging Win32} + \index[general]{Windows!debugging} + This command is used to set the debug level in each daemon. The form of this + command is: + +setdebug level=nn [trace=0/1 client=\lt{}client-name\gt{} | dir | director | + storage=\lt{}storage-name\gt{} | all] + + If trace=1 is set, then tracing will be enabled, and the daemon will be + placed in trace mode, which means that all debug output as set by the + debug level will be directed to the file {\bf bacula.trace} in the + current directory of the daemon. Normally, tracing is needed only for + Win32 clients where the debug output cannot be written to a terminal or + redirected to a file. When tracing, each debug output message is + appended to the trace file. You must explicitly delete the file when + you are done. 
+ +\item [show] + \index[general]{show} + \index[general]{show} + The show command will list the Director's resource records as defined in + the Director's configuration file (normally {\bf bacula-dir.conf}). + This command is used mainly for debugging purposes by developers. + The following keywords are accepted on the + show command line: catalogs, clients, counters, devices, directors, + filesets, jobs, messages, pools, schedules, storages, all, help. + Please don't confuse this command + with the {\bf list}, which displays the contents of the catalog. + +\item [sqlquery] + \index[general]{sqlquery} + The sqlquery command puts the Console program into SQL query mode where + each line you enter is concatenated to the previous line until a + semicolon (;) is seen. The semicolon terminates the command, which is + then passed directly to the SQL database engine. When the output from + the SQL engine is displayed, the formation of a new SQL command begins. + To terminate SQL query mode and return to the Console command prompt, + you enter a period (.) in column 1. + + Using this command, you can query the SQL catalog database directly. + Note you should really know what you are doing otherwise you could + damage the catalog database. See the {\bf query} command below for + simpler and safer way of entering SQL queries. + + Depending on what database engine you are using (MySQL, PostgreSQL or + SQLite), you will have somewhat different SQL commands available. For + more detailed information, please refer to the MySQL, PostgreSQL or + SQLite documentation. + +\item [status] + \index[general]{status} + This command will display the status of the next jobs that are scheduled + during the next 24 hours as well as the status of currently + running jobs. 
The full form of this command is: + +status [all | dir=\lt{}dir-name\gt{} | director | + client=\lt{}client-name\gt{} | storage=\lt{}storage-name\gt{} | + days=nnn] + + If you do a {\bf status dir}, the console will list any currently + running jobs, a summary of all jobs scheduled to be run in the next 24 + hours, and a listing of the last ten terminated jobs with their statuses. + The scheduled jobs summary will include the Volume name to be used. You + should be aware of two things: 1. to obtain the volume name, the code + goes through the same code that will be used when the job runs, but it + does not do pruning nor recycling of Volumes; 2. The Volume listed is + at best a guess. The Volume actually used may be different because of + the time difference (more durations may expire when the job runs) and + another job could completely fill the Volume requiring a new one. + + In the Running Jobs listing, you may find the following types of + information: + + +\footnotesize +\begin{verbatim} +2507 Catalog MatouVerify.2004-03-13_05.05.02 is waiting execution +5349 Full CatalogBackup.2004-03-13_01.10.00 is waiting for higher + priority jobs to finish +5348 Differe Minou.2004-03-13_01.05.09 is waiting on max Storage jobs +5343 Full Rufus.2004-03-13_01.05.04 is running +\end{verbatim} +\normalsize + + Looking at the above listing from bottom to top, obviously JobId 5343 + (Rufus) is running. JobId 5348 (Minou) is waiting for JobId 5343 to + finish because it is using the Storage resource, hence the "waiting on + max Storage jobs". JobId 5349 has a lower priority than all the other + jobs so it is waiting for higher priority jobs to finish, and finally, + JobId 2508 (MatouVerify) is waiting because only one job can run at a + time, hence it is simply "waiting execution" + + If you do a {\bf status dir}, it will by default list the first + occurrence of all jobs that are scheduled today and tomorrow. 
If you + wish to see the jobs that are scheduled in the next three days (e.g. on + Friday you want to see the first occurrence of what tapes are scheduled + to be used on Friday, the weekend, and Monday), you can add the {\bf + days=3} option. Note, a {\bf days=0} shows the first occurrence of jobs + scheduled today only. If you have multiple run statements, the first + occurrence of each run statement for the job will be displayed for the + period specified. + + If your job seems to be blocked, you can get a general idea of the + problem by doing a {\bf status dir}, but you can most often get a + much more specific indication of the problem by doing a + {\bf status storage=xxx}. For example, on an idle test system, when + I do {\bf status storage=File}, I get: +\footnotesize +\begin{verbatim} +status storage=File +Connecting to Storage daemon File at 192.168.68.112:8103 + +rufus-sd Version: 1.39.6 (24 March 2006) i686-pc-linux-gnu redhat (Stentz) +Daemon started 26-Mar-06 11:06, 0 Jobs run since started. + +Running Jobs: +No Jobs running. +==== + +Jobs waiting to reserve a drive: +==== + +Terminated Jobs: + JobId Level Files Bytes Status Finished Name +====================================================================== + 59 Full 234 4,417,599 OK 15-Jan-06 11:54 kernsave +==== + +Device status: +utochanger "DDS-4-changer" with devices: + "DDS-4" (/dev/nst0) +Device "DDS-4" (/dev/nst0) is mounted with Volume="TestVolume002" +Pool="*unknown*" + Slot 2 is loaded in drive 0. + Total Bytes Read=0 Blocks Read=0 Bytes/block=0 + Positioned at File=0 Block=0 +Device "Dummy" is not open or does not exist. +No DEVICE structure. + +Device "DVD-Writer" (/dev/hdc) is not open. +Device "File" (/tmp) is not open. +==== + +In Use Volume status: +==== +\end{verbatim} +\normalsize + +Now, what this tells me is that no jobs are running and that none of +the devices are in use. 
Now, if I {\bf unmount} the autochanger, which +will not be used in this example, and then start a Job that uses the +File device, the job will block. When I re-issue the status storage +command, I get for the Device status: + +\footnotesize +\begin{verbatim} +status storage=File +... +Device status: +Autochanger "DDS-4-changer" with devices: + "DDS-4" (/dev/nst0) +Device "DDS-4" (/dev/nst0) is not open. + Device is BLOCKED. User unmounted. + Drive 0 is not loaded. +Device "Dummy" is not open or does not exist. +No DEVICE structure. + +Device "DVD-Writer" (/dev/hdc) is not open. +Device "File" (/tmp) is not open. + Device is BLOCKED waiting for media. +==== +... +\end{verbatim} +\normalsize + +Now, here it should be clear that if a job were running that wanted +to use the Autochanger (with two devices), it would block because +the user unmounted the device. The real problem for the Job I started +using the "File" device is that the device is blocked waiting for +media -- that is Bacula needs you to label a Volume. + +\item [unmount] + \index[general]{unmount} + This command causes the indicated Bacula Storage daemon to unmount the + specified device. The forms of the command are the same as the mount command: +\footnotesize +\begin{verbatim} +unmount storage= [ drive= ] + +unmount [ jobid= | job= ] +\end{verbatim} +\normalsize + + Once you unmount a storage device, Bacula will no longer be able to use + it until you issue a mount command for that device. If Bacula needs to + access that device, it will block and issue mount requests periodically + to the operator. + + If the device you are unmounting is an autochanger, it will unload + the drive you have specified on the command line. If no drive is + specified, it will assume drive 1. + +\label{UpdateCommand} +\item [update] + \index[general]{update} + This command will update the catalog for either a specific Pool record, a Volume + record, or the Slots in an autochanger with barcode capability. 
In the case + of updating a Pool record, the new information will be automatically taken + from the corresponding Director's configuration resource record. It can be + used to increase the maximum number of volumes permitted or to set a maximum + number of volumes. The following main keywords may be specified: +\footnotesize +\begin{verbatim} + media, volume, pool, slots +\end{verbatim} +\normalsize + +In the case of updating a Volume, you will be prompted for which value you +wish to change. The following Volume parameters may be changed: + +\footnotesize +\begin{verbatim} + + Volume Status + Volume Retention Period + Volume Use Duration + Maximum Volume Jobs + Maximum Volume Files + Maximum Volume Bytes + Recycle Flag + Recycle Pool + Slot + InChanger Flag + Pool + Volume Files + Volume from Pool + All Volumes from Pool + All Volumes from all Pools + +\end{verbatim} +\normalsize + + For slots {\bf update slots}, Bacula will obtain a list of slots and + their barcodes from the Storage daemon, and for each barcode found, it + will automatically update the slot in the catalog Media record to + correspond to the new value. This is very useful if you have moved + cassettes in the magazine, or if you have removed the magazine and + inserted a different one. As the slot of each Volume is updated, the + InChanger flag for that Volume will also be set, and any other Volumes + in the Pool that were last mounted on the same Storage device + will have their InChanger flag turned off. This permits + Bacula to know what magazine (tape holder) is currently in the + autochanger. + + If you do not have barcodes, you can accomplish the same thing in + version 1.33 and later by using the {\bf update slots scan} command. + The {\bf scan} keyword tells Bacula to physically mount each tape and to + read its VolumeName. + + For Pool {\bf update pool}, Bacula will move the Volume record from its + existing pool to the pool specified. 
+ + For {\bf Volume from Pool}, {\bf All Volumes from Pool} and {\bf All Volumes + from all Pools}, the following values are updated from the Pool record: + Recycle, RecyclePool, VolRetention, VolUseDuration, MaxVolJobs, MaxVolFiles, + and MaxVolBytes. (RecyclePool feature is available with bacula 2.1.4 or + higher.) + + The full form of the update command with all command line arguments is: + +\footnotesize +\begin{verbatim} + update volume=xxx pool=yyy slots volstatus=xxx VolRetention=ddd + VolUse=ddd MaxVolJobs=nnn MaxVolBytes=nnn Recycle=yes|no + slot=nnn enabled=n recyclepool=zzz + +\end{verbatim} +\normalsize + +\item [use] + \index[general]{use} + This command allows you to specify which Catalog database to use. Normally, +you will be using only one database so this will be done automatically. In +the case that you are using more than one database, you can use this command +to switch from one to another. + +use \lt{}database-name\gt{} + +\item [var] + \label{var} + \index[general]{var name} + This command takes a string or quoted string and does variable expansion on + it the same way variable expansion is done on the {\bf LabelFormat} string. + Thus, for the most part, you can test your LabelFormat strings. The + difference between the {\bf var} command and the actual LabelFormat process + is that during the var command, no job is running so "dummy" values are + used in place of Job specific variables. Generally, however, you will get a + good idea of what is going to happen in the real case. + +\item [version] + \index[general]{version} + The command prints the Director's version. + +\item [quit] + \index[general]{quit} + This command terminates the console program. The console program sends the + {\bf quit} request to the Director and waits for acknowledgment. If the + Director is busy doing a previous command for you that has not terminated, it + may take some time. You may quit immediately by issuing the {\bf .quit} + command (i.e. 
quit preceded by a period). + +\item [query] + \index[general]{query} + This command reads a predefined SQL query from the query file (the name and + location of the query file is defined with the QueryFile resource record in + the Director's configuration file). You are prompted to select a query from + the file, and possibly enter one or more parameters, then the command is + submitted to the Catalog database SQL engine. + +The following queries are currently available (version 1.24): + +\footnotesize +\begin{verbatim} +Available queries: + 1: List Job totals: + 2: List where a file is saved: + 3: List where the most recent copies of a file are saved: + 4: List total files/bytes by Job: + 5: List total files/bytes by Volume: + 6: List last 20 Full Backups for a Client: + 7: List Volumes used by selected JobId: + 8: List Volumes to Restore All Files: + 9: List where a File is saved: +Choose a query (1-9): + +\end{verbatim} +\normalsize + +\item [exit] + \index[general]{exit} + This command terminates the console program. + +\item [wait] + \index[general]{wait} + The wait command causes the Director to pause until there are no jobs + running. This command is useful in a batch situation such as regression + testing where you wish to start a job and wait until that job completes + before continuing. This command now has the following options: +\footnotesize +\begin{verbatim} + wait [jobid=nn] [jobuid=unique id] [job=job name] +\end{verbatim} +\normalsize + If specified with a specific JobId, ... the wait command will wait + for that particular job to terminate before continuing. + +\end{description} + +\label{dotcommands} +\section{Special dot Commands} +\index[general]{Commands!Special dot} +\index[general]{Special dot Commands} + +There is a list of commands that are prefixed with a period (.). These +commands are intended to be used either by batch programs or graphical user +interface front-ends. They are not normally used by interactive users. 
Once +GUI development begins, this list will be considerably expanded. The following +is the list of dot commands: + +\footnotesize +\begin{verbatim} +.backups job=xxx list backups for specified job +.clients list all client names +.defaults client=xxx fileset=yyy list defaults for specified client +.die cause the Director to segment fault (for debugging) +.dir when in tree mode prints the equivalent to the dir command, + but with fields separated by commas rather than spaces. +.exit quit +.filesets list all fileset names +.help help command output +.jobs list all job names +.levels list all levels +.messages get quick messages +.msgs return any queued messages +.pools list all pool names +.quit quit +.status get status output +.storage return storage resource names +.types list job types +\end{verbatim} +\normalsize + +\label{atcommands} + +\section{Special At (@) Commands} +\index[general]{Commands!Special At @} +\index[general]{Special At (@) Commands} + +Normally, all commands entered to the Console program are immediately +forwarded to the Director, which may be on another machine, to be executed. +However, there is a small list of {\bf at} commands, all beginning with an at +character (@), that will not be sent to the Director, but rather interpreted +by the Console program directly. Note, these commands are implemented only in +the tty console program and not in the GNOME Console. These commands are: + +\begin{description} + +\item [@input \lt{}filename\gt{}] + \index[general]{@input \lt{}filename\gt{}} + Read and execute the commands contained in the file specified. + +\item [@output \lt{}filename\gt{} w/a] + \index[general]{@output \lt{}filename\gt{} w/a} + Send all following output to the filename specified either overwriting the +file (w) or appending to the file (a). To redirect the output to the +terminal, simply enter {\bf @output} without a filename specification. +WARNING: be careful not to overwrite a valid file. 
A typical example during a +regression test might be: + +\footnotesize +\begin{verbatim} + @output /dev/null + commands ... + @output + +\end{verbatim} +\normalsize + +\item [@tee \lt{}filename\gt{} w/a] + \index[general]{@tee \lt{}filename\gt{} w/a} + Send all subsequent output to both the specified file and the terminal. It is + turned off by specifying {\bf @tee} or {\bf @output} without a filename. + +\item [@sleep \lt{}seconds\gt{}] + \index[general]{@sleep \lt{}seconds\gt{}} + Sleep the specified number of seconds. + +\item [@time] + \index[general]{@time} + Print the current time and date. + +\item [@version] + \index[general]{@version} + Print the console's version. + +\item [@quit] + \index[general]{@quit} + quit + +\item [@exit] + \index[general]{@exit} + quit + +\item [@\# anything] + \index[general]{anything} + Comment +\end{description} + +\label{scripting} + +\section{Running the Console from a Shell Script} +\index[general]{Script!Running the Console Program from a Shell} +\index[general]{Running the Console Program from a Shell Script} + +You can automate many Console tasks by running the console program from a +shell script. For example, if you have created a file containing the following +commands: + +\footnotesize +\begin{verbatim} + ./bconsole -c ./bconsole.conf <) { + chomp; + $fileline++; + # If a file is found in an include, process it. + if (($includefile) = /\\include\s*\{(.*?)\}/) { + $includes++; + # Append .tex to the filename + $includefile .= '.tex'; + + # If the include file has already been processed, issue a warning + # and don't do it again. + my $found = 0; + foreach (@$files) { + if ($_ eq $includefile) { + $found = 1; + last; + } + } + if ($found) { + print "$includefile found at line $fileline in $filename was previously included\n"; + } else { + # The file has not been previously found. Save it and + # recursively process it. 
+ push (@$files,$includefile); + get_includes($files,$includefile); + } + } + } + close IF; + } +} + + +sub check_hyphens { + my (@files) = @_; + my ($filedata,$this,$linecnt,$before); + + # Build the test string to check for the various environments. + # We only do the conversion if the multiple hyphens are outside of a + # verbatim environment (either \begin{verbatim}...\end{verbatim} or + # \verb{--}). Capture those environments and pass them to the output + # unchanged. + + foreach my $file (@files) { + # Open the file and load the whole thing into $filedata. A bit wasteful but + # easier to deal with, and we don't have a problem with speed here. + $filedata = ""; + open IF,"<$file" or die "Cannot open input file $file"; + while () { + $filedata .= $_; + } + close IF; + + # Set up to process the file data. + $linecnt = 1; + + # Go through the file data from beginning to end. For each match, save what + # came before it and what matched. $filedata now becomes only what came + # after the match. + # Chech the match to see if it starts with a multiple-hyphen. If so + # warn the user. Keep track of line numbers so they can be output + # with the warning message. + while ($filedata =~ /$teststr/os) { + $this = $&; + $before = $`; + $filedata = $'; + $linecnt += $before =~ tr/\n/\n/; + + # Check if the multiple hyphen is present outside of one of the + # acceptable constructs. + if ($this =~ /^\-+/) { + print "Possible unwanted multiple hyphen found in line ", + "$linecnt of file $file\n"; + } + $linecnt += $this =~ tr/\n/\n/; + } + } +} +################################################################## +# MAIN #### +################################################################## + +my (@includes,$cnt); + +# Examine the file pointed to by the first argument to get a list of +# includes to test. 
+get_includes(\@includes,@ARGV); + +check_hyphens(@includes); diff --git a/docs/manuals/en/console/console.css b/docs/manuals/en/console/console.css new file mode 100644 index 00000000..d1824aff --- /dev/null +++ b/docs/manuals/en/console/console.css @@ -0,0 +1,30 @@ +/* Century Schoolbook font is very similar to Computer Modern Math: cmmi */ +.MATH { font-family: "Century Schoolbook", serif; } +.MATH I { font-family: "Century Schoolbook", serif; font-style: italic } +.BOLDMATH { font-family: "Century Schoolbook", serif; font-weight: bold } + +/* implement both fixed-size and relative sizes */ +SMALL.XTINY { font-size : xx-small } +SMALL.TINY { font-size : x-small } +SMALL.SCRIPTSIZE { font-size : smaller } +SMALL.FOOTNOTESIZE { font-size : small } +SMALL.SMALL { } +BIG.LARGE { } +BIG.XLARGE { font-size : large } +BIG.XXLARGE { font-size : x-large } +BIG.HUGE { font-size : larger } +BIG.XHUGE { font-size : xx-large } + +/* heading styles */ +H1 { } +H2 { } +H3 { } +H4 { } +H5 { } + +/* mathematics styles */ +DIV.displaymath { } /* math displays */ +TD.eqno { } /* equation-number cells */ + + +/* document-specific styles come next */ diff --git a/docs/manuals/en/console/console.tex b/docs/manuals/en/console/console.tex new file mode 100644 index 00000000..69ce36a3 --- /dev/null +++ b/docs/manuals/en/console/console.tex @@ -0,0 +1,78 @@ +%% +%% +%% The following characters must be preceded by a backslash +%% to be entered as printable characters: +%% +%% # $ % & ~ _ ^ \ { } +%% + +\documentclass[11pt,a4paper]{book} +\usepackage{html} +\usepackage{float} +\usepackage{graphicx} +\usepackage{bacula} +\usepackage{longtable} +\usepackage{makeidx} +\usepackage{index} +\usepackage{setspace} +\usepackage{hyperref} +\usepackage{url} + + +\makeindex +\newindex{general}{idx}{ind}{General Index} + +\sloppy + +\begin{document} +\sloppy + +\newfont{\bighead}{cmr17 at 36pt} +\parskip 10pt +\parindent 0pt + +\title{\includegraphics{./bacula-logo.eps} \\ \bigskip + \Huge{Bacula 
Console and Operators Guide} + \begin{center} + \large{It comes in the night and sucks + the essence from your computers. } + \end{center} +} + + +\author{Kern Sibbald} +\date{\vspace{1.0in}\today \\ + This manual documents Bacula version \input{version} \\ + \vspace{0.2in} + Copyright \copyright 1999-2007, Free Software Foundation Europe + e.V. \\ + \vspace{0.2in} + Permission is granted to copy, distribute and/or modify this document under the terms of the + GNU Free Documentation License, Version 1.2 published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU Free Documentation License". +} + +\maketitle + +\clearpage +\tableofcontents +\clearpage +\listoffigures +\clearpage +\listoftables +\clearpage + +\include{bconsole} +\include{gui} +\include{fdl} + + +% The following line tells link_resolver.pl to not include these files: +% nolinks developersi baculai-dir baculai-fd baculai-sd baculai-console baculai-main + +% pull in the index +\clearpage +\printindex[general] + +\end{document} diff --git a/docs/manuals/en/console/do_echo b/docs/manuals/en/console/do_echo new file mode 100644 index 00000000..04b9f79a --- /dev/null +++ b/docs/manuals/en/console/do_echo @@ -0,0 +1,6 @@ +# +# Avoid that @VERSION@ and @DATE@ are changed by configure +# This file is sourced by update_version +# +echo "s%@VERSION@%${VERSION}%g" >${out} +echo "s%@DATE@%${DATE}%g" >>${out} diff --git a/docs/manuals/en/console/fdl.tex b/docs/manuals/en/console/fdl.tex new file mode 100644 index 00000000..b46cd990 --- /dev/null +++ b/docs/manuals/en/console/fdl.tex @@ -0,0 +1,485 @@ +% TODO: maybe get rid of centering + +\chapter{GNU Free Documentation License} +\index[general]{GNU Free Documentation License} +\index[general]{License!GNU Free Documentation} + +\label{label_fdl} + + \begin{center} + + Version 1.2, November 2002 + + + Copyright \copyright 2000,2001,2002 Free 
Software Foundation, Inc. + + \bigskip + + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + \bigskip + + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. +\end{center} + + +\begin{center} +{\bf\large Preamble} +\end{center} + +The purpose of this License is to make a manual, textbook, or other +functional and useful document "free" in the sense of freedom: to +assure everyone the effective freedom to copy and redistribute it, +with or without modifying it, either commercially or noncommercially. +Secondarily, this License preserves for the author and publisher a way +to get credit for their work, while not being considered responsible +for modifications made by others. + +This License is a kind of "copyleft", which means that derivative +works of the document must themselves be free in the same sense. It +complements the GNU General Public License, which is a copyleft +license designed for free software. + +We have designed this License in order to use it for manuals for free +software, because free software needs free documentation: a free +program should come with manuals providing the same freedoms that the +software does. But this License is not limited to software manuals; +it can be used for any textual work, regardless of subject matter or +whether it is published as a printed book. We recommend this License +principally for works whose purpose is instruction or reference. + + +\begin{center} +{\Large\bf 1. APPLICABILITY AND DEFINITIONS} +\end{center} + +This License applies to any manual or other work, in any medium, that +contains a notice placed by the copyright holder saying it can be +distributed under the terms of this License. Such a notice grants a +world-wide, royalty-free license, unlimited in duration, to use that +work under the conditions stated herein. The \textbf{"Document"}, below, +refers to any such manual or work. 
Any member of the public is a +licensee, and is addressed as \textbf{"you"}. You accept the license if you +copy, modify or distribute the work in a way requiring permission +under copyright law. + +A \textbf{"Modified Version"} of the Document means any work containing the +Document or a portion of it, either copied verbatim, or with +modifications and/or translated into another language. + +A \textbf{"Secondary Section"} is a named appendix or a front-matter section of +the Document that deals exclusively with the relationship of the +publishers or authors of the Document to the Document's overall subject +(or to related matters) and contains nothing that could fall directly +within that overall subject. (Thus, if the Document is in part a +textbook of mathematics, a Secondary Section may not explain any +mathematics.) The relationship could be a matter of historical +connection with the subject or with related matters, or of legal, +commercial, philosophical, ethical or political position regarding +them. + +The \textbf{"Invariant Sections"} are certain Secondary Sections whose titles +are designated, as being those of Invariant Sections, in the notice +that says that the Document is released under this License. If a +section does not fit the above definition of Secondary then it is not +allowed to be designated as Invariant. The Document may contain zero +Invariant Sections. If the Document does not identify any Invariant +Sections then there are none. + +The \textbf{"Cover Texts"} are certain short passages of text that are listed, +as Front-Cover Texts or Back-Cover Texts, in the notice that says that +the Document is released under this License. A Front-Cover Text may +be at most 5 words, and a Back-Cover Text may be at most 25 words. 
+ +A \textbf{"Transparent"} copy of the Document means a machine-readable copy, +represented in a format whose specification is available to the +general public, that is suitable for revising the document +straightforwardly with generic text editors or (for images composed of +pixels) generic paint programs or (for drawings) some widely available +drawing editor, and that is suitable for input to text formatters or +for automatic translation to a variety of formats suitable for input +to text formatters. A copy made in an otherwise Transparent file +format whose markup, or absence of markup, has been arranged to thwart +or discourage subsequent modification by readers is not Transparent. +An image format is not Transparent if used for any substantial amount +of text. A copy that is not "Transparent" is called \textbf{"Opaque"}. + +Examples of suitable formats for Transparent copies include plain +ASCII without markup, Texinfo input format, LaTeX input format, SGML +or XML using a publicly available DTD, and standard-conforming simple +HTML, PostScript or PDF designed for human modification. Examples of +transparent image formats include PNG, XCF and JPG. Opaque formats +include proprietary formats that can be read and edited only by +proprietary word processors, SGML or XML for which the DTD and/or +processing tools are not generally available, and the +machine-generated HTML, PostScript or PDF produced by some word +processors for output purposes only. + +The \textbf{"Title Page"} means, for a printed book, the title page itself, +plus such following pages as are needed to hold, legibly, the material +this License requires to appear in the title page. For works in +formats which do not have any title page as such, "Title Page" means +the text near the most prominent appearance of the work's title, +preceding the beginning of the body of the text. 
+ +A section \textbf{"Entitled XYZ"} means a named subunit of the Document whose +title either is precisely XYZ or contains XYZ in parentheses following +text that translates XYZ in another language. (Here XYZ stands for a +specific section name mentioned below, such as \textbf{"Acknowledgements"}, +\textbf{"Dedications"}, \textbf{"Endorsements"}, or \textbf{"History"}.) +To \textbf{"Preserve the Title"} +of such a section when you modify the Document means that it remains a +section "Entitled XYZ" according to this definition. + +The Document may include Warranty Disclaimers next to the notice which +states that this License applies to the Document. These Warranty +Disclaimers are considered to be included by reference in this +License, but only as regards disclaiming warranties: any other +implication that these Warranty Disclaimers may have is void and has +no effect on the meaning of this License. + + +\begin{center} +{\Large\bf 2. VERBATIM COPYING} +\end{center} + +You may copy and distribute the Document in any medium, either +commercially or noncommercially, provided that this License, the +copyright notices, and the license notice saying this License applies +to the Document are reproduced in all copies, and that you add no other +conditions whatsoever to those of this License. You may not use +technical measures to obstruct or control the reading or further +copying of the copies you make or distribute. However, you may accept +compensation in exchange for copies. If you distribute a large enough +number of copies you must also follow the conditions in section 3. + +You may also lend copies, under the same conditions stated above, and +you may publicly display copies. + + +\begin{center} +{\Large\bf 3. 
COPYING IN QUANTITY} +\end{center} + + +If you publish printed copies (or copies in media that commonly have +printed covers) of the Document, numbering more than 100, and the +Document's license notice requires Cover Texts, you must enclose the +copies in covers that carry, clearly and legibly, all these Cover +Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on +the back cover. Both covers must also clearly and legibly identify +you as the publisher of these copies. The front cover must present +the full title with all words of the title equally prominent and +visible. You may add other material on the covers in addition. +Copying with changes limited to the covers, as long as they preserve +the title of the Document and satisfy these conditions, can be treated +as verbatim copying in other respects. + +If the required texts for either cover are too voluminous to fit +legibly, you should put the first ones listed (as many as fit +reasonably) on the actual cover, and continue the rest onto adjacent +pages. + +If you publish or distribute Opaque copies of the Document numbering +more than 100, you must either include a machine-readable Transparent +copy along with each Opaque copy, or state in or with each Opaque copy +a computer-network location from which the general network-using +public has access to download using public-standard network protocols +a complete Transparent copy of the Document, free of added material. +If you use the latter option, you must take reasonably prudent steps, +when you begin distribution of Opaque copies in quantity, to ensure +that this Transparent copy will remain thus accessible at the stated +location until at least one year after the last time you distribute an +Opaque copy (directly or through your agents or retailers) of that +edition to the public. 
+ +It is requested, but not required, that you contact the authors of the +Document well before redistributing any large number of copies, to give +them a chance to provide you with an updated version of the Document. + + +\begin{center} +{\Large\bf 4. MODIFICATIONS} +\end{center} + +You may copy and distribute a Modified Version of the Document under +the conditions of sections 2 and 3 above, provided that you release +the Modified Version under precisely this License, with the Modified +Version filling the role of the Document, thus licensing distribution +and modification of the Modified Version to whoever possesses a copy +of it. In addition, you must do these things in the Modified Version: + +\begin{itemize} +\item[A.] + Use in the Title Page (and on the covers, if any) a title distinct + from that of the Document, and from those of previous versions + (which should, if there were any, be listed in the History section + of the Document). You may use the same title as a previous version + if the original publisher of that version gives permission. + +\item[B.] + List on the Title Page, as authors, one or more persons or entities + responsible for authorship of the modifications in the Modified + Version, together with at least five of the principal authors of the + Document (all of its principal authors, if it has fewer than five), + unless they release you from this requirement. + +\item[C.] + State on the Title page the name of the publisher of the + Modified Version, as the publisher. + +\item[D.] + Preserve all the copyright notices of the Document. + +\item[E.] + Add an appropriate copyright notice for your modifications + adjacent to the other copyright notices. + +\item[F.] + Include, immediately after the copyright notices, a license notice + giving the public permission to use the Modified Version under the + terms of this License, in the form shown in the Addendum below. + +\item[G.] 
+ Preserve in that license notice the full lists of Invariant Sections + and required Cover Texts given in the Document's license notice. + +\item[H.] + Include an unaltered copy of this License. + +\item[I.] + Preserve the section Entitled "History", Preserve its Title, and add + to it an item stating at least the title, year, new authors, and + publisher of the Modified Version as given on the Title Page. If + there is no section Entitled "History" in the Document, create one + stating the title, year, authors, and publisher of the Document as + given on its Title Page, then add an item describing the Modified + Version as stated in the previous sentence. + +\item[J.] + Preserve the network location, if any, given in the Document for + public access to a Transparent copy of the Document, and likewise + the network locations given in the Document for previous versions + it was based on. These may be placed in the "History" section. + You may omit a network location for a work that was published at + least four years before the Document itself, or if the original + publisher of the version it refers to gives permission. + +\item[K.] + For any section Entitled "Acknowledgements" or "Dedications", + Preserve the Title of the section, and preserve in the section all + the substance and tone of each of the contributor acknowledgements + and/or dedications given therein. + +\item[L.] + Preserve all the Invariant Sections of the Document, + unaltered in their text and in their titles. Section numbers + or the equivalent are not considered part of the section titles. + +\item[M.] + Delete any section Entitled "Endorsements". Such a section + may not be included in the Modified Version. + +\item[N.] + Do not retitle any existing section to be Entitled "Endorsements" + or to conflict in title with any Invariant Section. + +\item[O.] + Preserve any Warranty Disclaimers. 
+\end{itemize} + +If the Modified Version includes new front-matter sections or +appendices that qualify as Secondary Sections and contain no material +copied from the Document, you may at your option designate some or all +of these sections as invariant. To do this, add their titles to the +list of Invariant Sections in the Modified Version's license notice. +These titles must be distinct from any other section titles. + +You may add a section Entitled "Endorsements", provided it contains +nothing but endorsements of your Modified Version by various +parties--for example, statements of peer review or that the text has +been approved by an organization as the authoritative definition of a +standard. + +You may add a passage of up to five words as a Front-Cover Text, and a +passage of up to 25 words as a Back-Cover Text, to the end of the list +of Cover Texts in the Modified Version. Only one passage of +Front-Cover Text and one of Back-Cover Text may be added by (or +through arrangements made by) any one entity. If the Document already +includes a cover text for the same cover, previously added by you or +by arrangement made by the same entity you are acting on behalf of, +you may not add another; but you may replace the old one, on explicit +permission from the previous publisher that added the old one. + +The author(s) and publisher(s) of the Document do not by this License +give permission to use their names for publicity for or to assert or +imply endorsement of any Modified Version. + + +\begin{center} +{\Large\bf 5. COMBINING DOCUMENTS} +\end{center} + + +You may combine the Document with other documents released under this +License, under the terms defined in section 4 above for modified +versions, provided that you include in the combination all of the +Invariant Sections of all of the original documents, unmodified, and +list them all as Invariant Sections of your combined work in its +license notice, and that you preserve all their Warranty Disclaimers. 
+ +The combined work need only contain one copy of this License, and +multiple identical Invariant Sections may be replaced with a single +copy. If there are multiple Invariant Sections with the same name but +different contents, make the title of each such section unique by +adding at the end of it, in parentheses, the name of the original +author or publisher of that section if known, or else a unique number. +Make the same adjustment to the section titles in the list of +Invariant Sections in the license notice of the combined work. + +In the combination, you must combine any sections Entitled "History" +in the various original documents, forming one section Entitled +"History"; likewise combine any sections Entitled "Acknowledgements", +and any sections Entitled "Dedications". You must delete all sections +Entitled "Endorsements". + +\begin{center} +{\Large\bf 6. COLLECTIONS OF DOCUMENTS} +\end{center} + +You may make a collection consisting of the Document and other documents +released under this License, and replace the individual copies of this +License in the various documents with a single copy that is included in +the collection, provided that you follow the rules of this License for +verbatim copying of each of the documents in all other respects. + +You may extract a single document from such a collection, and distribute +it individually under this License, provided you insert a copy of this +License into the extracted document, and follow this License in all +other respects regarding verbatim copying of that document. + + +\begin{center} +{\Large\bf 7. AGGREGATION WITH INDEPENDENT WORKS} +\end{center} + + +A compilation of the Document or its derivatives with other separate +and independent documents or works, in or on a volume of a storage or +distribution medium, is called an "aggregate" if the copyright +resulting from the compilation is not used to limit the legal rights +of the compilation's users beyond what the individual works permit. 
+When the Document is included in an aggregate, this License does not +apply to the other works in the aggregate which are not themselves +derivative works of the Document. + +If the Cover Text requirement of section 3 is applicable to these +copies of the Document, then if the Document is less than one half of +the entire aggregate, the Document's Cover Texts may be placed on +covers that bracket the Document within the aggregate, or the +electronic equivalent of covers if the Document is in electronic form. +Otherwise they must appear on printed covers that bracket the whole +aggregate. + + +\begin{center} +{\Large\bf 8. TRANSLATION} +\end{center} + + +Translation is considered a kind of modification, so you may +distribute translations of the Document under the terms of section 4. +Replacing Invariant Sections with translations requires special +permission from their copyright holders, but you may include +translations of some or all Invariant Sections in addition to the +original versions of these Invariant Sections. You may include a +translation of this License, and all the license notices in the +Document, and any Warranty Disclaimers, provided that you also include +the original English version of this License and the original versions +of those notices and disclaimers. In case of a disagreement between +the translation and the original version of this License or a notice +or disclaimer, the original version will prevail. + +If a section in the Document is Entitled "Acknowledgements", +"Dedications", or "History", the requirement (section 4) to Preserve +its Title (section 1) will typically require changing the actual +title. + + +\begin{center} +{\Large\bf 9. TERMINATION} +\end{center} + + +You may not copy, modify, sublicense, or distribute the Document except +as expressly provided for under this License. Any other attempt to +copy, modify, sublicense or distribute the Document is void, and will +automatically terminate your rights under this License. 
However, +parties who have received copies, or rights, from you under this +License will not have their licenses terminated so long as such +parties remain in full compliance. + + +\begin{center} +{\Large\bf 10. FUTURE REVISIONS OF THIS LICENSE} +\end{center} + + +The Free Software Foundation may publish new, revised versions +of the GNU Free Documentation License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. See +http://www.gnu.org/copyleft/. + +Each version of the License is given a distinguishing version number. +If the Document specifies that a particular numbered version of this +License "or any later version" applies to it, you have the option of +following the terms and conditions either of that specified version or +of any later version that has been published (not as a draft) by the +Free Software Foundation. If the Document does not specify a version +number of this License, you may choose any version ever published (not +as a draft) by the Free Software Foundation. + + +\begin{center} +{\Large\bf ADDENDUM: How to use this License for your documents} +% TODO: this is too long for table of contents +\end{center} + +To use this License in a document you have written, include a copy of +the License in the document and put the following copyright and +license notices just after the title page: + +\bigskip +\begin{quote} + Copyright \copyright YEAR YOUR NAME. + Permission is granted to copy, distribute and/or modify this document + under the terms of the GNU Free Documentation License, Version 1.2 + or any later version published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU + Free Documentation License". +\end{quote} +\bigskip + +If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts, +replace the "with...Texts." 
line with this: + +\bigskip +\begin{quote} + with the Invariant Sections being LIST THEIR TITLES, with the + Front-Cover Texts being LIST, and with the Back-Cover Texts being LIST. +\end{quote} +\bigskip + +If you have Invariant Sections without Cover Texts, or some other +combination of the three, merge those two alternatives to suit the +situation. + +If your document contains nontrivial examples of program code, we +recommend releasing these examples in parallel under your choice of +free software license, such as the GNU General Public License, +to permit their use in free software. + +%--------------------------------------------------------------------- diff --git a/docs/manuals/en/console/fix_tex.pl b/docs/manuals/en/console/fix_tex.pl new file mode 100755 index 00000000..98657576 --- /dev/null +++ b/docs/manuals/en/console/fix_tex.pl @@ -0,0 +1,184 @@ +#!/usr/bin/perl -w +# Fixes various things within tex files. + +use strict; + +my %args; + + +sub get_includes { + # Get a list of include files from the top-level tex file. + my (@list,$file); + + foreach my $filename (@_) { + $filename or next; + # Start with the top-level latex file so it gets checked too. + push (@list,$filename); + + # Get a list of all the html files in the directory. + open IF,"<$filename" or die "Cannot open input file $filename"; + while () { + chomp; + push @list,"$1.tex" if (/\\include\{(.*?)\}/); + } + + close IF; + } + return @list; +} + +sub convert_files { + my (@files) = @_; + my ($linecnt,$filedata,$output,$itemcnt,$indentcnt,$cnt); + + $cnt = 0; + foreach my $file (@files) { + # Open the file and load the whole thing into $filedata. A bit wasteful but + # easier to deal with, and we don't have a problem with speed here. + $filedata = ""; + open IF,"<$file" or die "Cannot open input file $file"; + while () { + $filedata .= $_; + } + close IF; + + # We look for a line that starts with \item, and indent the two next lines (if not blank) + # by three spaces. 
+ my $linecnt = 3; + $indentcnt = 0; + $output = ""; + # Process a line at a time. + foreach (split(/\n/,$filedata)) { + $_ .= "\n"; # Put back the return. + # If this line is less than the third line past the \item command, + # and the line isn't blank and doesn't start with whitespace + # add three spaces to the start of the line. Keep track of the number + # of lines changed. + if ($linecnt < 3 and !/^\\item/) { + if (/^[^\n\s]/) { + $output .= " " . $_; + $indentcnt++; + } else { + $output .= $_; + } + $linecnt++; + } else { + $linecnt = 3; + $output .= $_; + } + /^\\item / and $linecnt = 1; + } + + + # This is an item line. We need to process it too. If inside a \begin{description} environment, convert + # \item {\bf xxx} to \item [xxx] or \item [{xxx}] (if xxx contains '[' or ']'. + $itemcnt = 0; + $filedata = $output; + $output = ""; + my ($before,$descrip,$this,$between); + + # Find any \begin{description} environment + while ($filedata =~ /(\\begin[\s\n]*\{[\s\n]*description[\s\n]*\})(.*?)(\\end[\s\n]*\{[\s\n]*description[\s\n]*\})/s) { + $output .= $` . $1; + $filedata = $3 . $'; + $descrip = $2; + + # Search for \item {\bf xxx} + while ($descrip =~ /\\item[\s\n]*\{[\s\n]*\\bf[\s\n]*/s) { + $descrip = $'; + $output .= $`; + ($between,$descrip) = find_matching_brace($descrip); + if (!$descrip) { + $linecnt = $output =~ tr/\n/\n/; + print STDERR "Missing matching curly brace at line $linecnt in $file\n" if (!$descrip); + } + + # Now do the replacement. + $between = '{' . $between . '}' if ($between =~ /\[|\]/); + $output .= "\\item \[$between\]"; + $itemcnt++; + } + $output .= $descrip; + } + $output .= $filedata; + + # If any hyphens or \item commnads were converted, save the file. + if ($indentcnt or $itemcnt) { + open OF,">$file" or die "Cannot open output file $file"; + print OF $output; + close OF; + print "$indentcnt indent", ($indentcnt == 1) ? "" : "s"," added in $file\n"; + print "$itemcnt item", ($itemcnt == 1) ? 
"" : "s"," Changed in $file\n"; + } + + $cnt += $indentcnt + $itemcnt; + } + return $cnt; +} + +sub find_matching_brace { + # Finds text up to the next matching brace. Assumes that the input text doesn't contain + # the opening brace, but we want to find text up to a matching closing one. + # Returns the text between the matching braces, followed by the rest of the text following + # (which does not include the matching brace). + # + my $str = shift; + my ($this,$temp); + my $cnt = 1; + + while ($cnt) { + # Ignore verbatim constructs involving curly braces, or if the character preceding + # the curly brace is a backslash. + if ($str =~ /\\verb\*?\{.*?\{|\\verb\*?\}.*?\}|\{|\}/s) { + $this .= $`; + $str = $'; + $temp = $&; + + if ((substr($this,-1,1) eq '\\') or + $temp =~ /^\\verb/) { + $this .= $temp; + next; + } + + $cnt += ($temp eq '{') ? 1 : -1; + # If this isn't the matching curly brace ($cnt > 0), include the brace. + $this .= $temp if ($cnt); + } else { + # No matching curly brace found. + return ($this . $str,''); + } + } + return ($this,$str); +} + +sub check_arguments { + # Checks command-line arguments for ones starting with -- puts them into + # a hash called %args and removes them from @ARGV. + my $args = shift; + my $i; + + for ($i = 0; $i < $#ARGV; $i++) { + $ARGV[$i] =~ /^\-+/ or next; + $ARGV[$i] =~ s/^\-+//; + $args{$ARGV[$i]} = ""; + delete ($ARGV[$i]); + + } +} + +################################################################## +# MAIN #### +################################################################## + +my @includes; +my $cnt; + +check_arguments(\%args); +die "No Files given to Check\n" if ($#ARGV < 0); + +# Examine the file pointed to by the first argument to get a list of +# includes to test. 
+@includes = get_includes(@ARGV); + +$cnt = convert_files(@includes); +print "No lines changed\n" unless $cnt; diff --git a/docs/manuals/en/console/index.perl b/docs/manuals/en/console/index.perl new file mode 100644 index 00000000..bc4e1b60 --- /dev/null +++ b/docs/manuals/en/console/index.perl @@ -0,0 +1,564 @@ +# This module does multiple indices, supporting the style of the LaTex 'index' +# package. + +# Version Information: +# 16-Feb-2005 -- Original Creation. Karl E. Cunningham +# 14-Mar-2005 -- Clarified and Consolodated some of the code. +# Changed to smoothly handle single and multiple indices. + +# Two LaTeX index formats are supported... +# --- SINGLE INDEX --- +# \usepackage{makeidx} +# \makeindex +# \index{entry1} +# \index{entry2} +# \index{entry3} +# ... +# \printindex +# +# --- MULTIPLE INDICES --- +# +# \usepackage{makeidx} +# \usepackage{index} +# \makeindex -- latex2html doesn't care but LaTeX does. +# \newindex{ref1}{ext1}{ext2}{title1} +# \newindex{ref2}{ext1}{ext2}{title2} +# \newindex{ref3}{ext1}{ext2}{title3} +# \index[ref1]{entry1} +# \index[ref1]{entry2} +# \index[ref3]{entry3} +# \index[ref2]{entry4} +# \index{entry5} +# \index[ref3]{entry6} +# ... +# \printindex[ref1] +# \printindex[ref2] +# \printindex[ref3] +# \printindex +# ___________________ +# +# For the multiple-index style, each index is identified by the ref argument to \newindex, \index, +# and \printindex. A default index is allowed, which is indicated by omitting the optional +# argument. The default index does not require a \newindex command. As \index commands +# are encountered, their entries are stored according +# to the ref argument. When the \printindex command is encountered, the stored index +# entries for that argument are retrieved and printed. The title for each index is taken +# from the last argument in the \newindex command. +# While processing \index and \printindex commands, if no argument is given the index entries +# are built into a default index. 
The title of the default index is simply "Index". +# This makes the difference between single- and multiple-index processing trivial. +# +# Another method can be used by omitting the \printindex command and just using \include to +# pull in index files created by the makeindex program. These files will start with +# \begin{theindex}. This command is used to determine where to print the index. Using this +# approach, the indices will be output in the same order as the newindex commands were +# originally found (see below). Using a combination of \printindex and \include{indexfile} has not +# been tested and may produce undesireable results. +# +# The index data are stored in a hash for later sorting and output. As \printindex +# commands are handled, the order in which they were found in the tex filea is saved, +# associated with the ref argument to \printindex. +# +# We use the original %index hash to store the index data into. We append a \002 followed by the +# name of the index to isolate the entries in different indices from each other. This is necessary +# so that different indices can have entries with the same name. For the default index, the \002 is +# appended without the name. +# +# Since the index order in the output cannot be determined if the \include{indexfile} +# command is used, the order will be assumed from the order in which the \newindex +# commands were originally seen in the TeX files. This order is saved as well as the +# order determined from any printindex{ref} commands. If \printindex commnads are used +# to specify the index output, that order will be used. If the \include{idxfile} command +# is used, the order of the original newindex commands will be used. In this case the +# default index will be printed last since it doesn't have a corresponding \newindex +# command and its order cannot be determined. Mixing \printindex and \include{idxfile} +# commands in the same file is likely to produce less than satisfactory results. 
+# +# +# The hash containing index data is named %indices. It contains the following data: +#{ +# 'title' => { +# $ref1 => $indextitle , +# $ref2 => $indextitle , +# ... +# }, +# 'newcmdorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index. +# 'printindorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index. +#} + + +# Globals to handle multiple indices. +my %indices; + +# This tells the system to use up to 7 words in index entries. +$WORDS_IN_INDEX = 10; + +# KEC 2-18-05 +# Handles the \newindex command. This is called if the \newindex command is +# encountered in the LaTex source. Gets the index ref and title from the arguments. +# Saves the index ref and title. +# Note that we are called once to handle multiple \newindex commands that are +# newline-separated. +sub do_cmd_newindex { + my $data = shift; + # The data is sent to us as fields delimited by their ID #'s. We extract the + # fields. + foreach my $line (split("\n",$data)) { + my @fields = split (/(?:\<\#\d+?\#\>)+/,$line); + + # The index name and title are the second and fourth fields in the data. 
+ if ($line =~ /^ \001 + # @ -> \002 + # | -> \003 + $* = 1; $str =~ s/\n\s*/ /g; $* = 0; # remove any newlines + # protect \001 occurring with images + $str =~ s/\001/\016/g; # 0x1 to 0xF + $str =~ s/\\\\/\011/g; # Double backslash -> 0xB + $str =~ s/\\;SPMquot;/\012/g; # \;SPMquot; -> 0xC + $str =~ s/;SPMquot;!/\013/g; # ;SPMquot; -> 0xD + $str =~ s/!/\001/g; # Exclamation point -> 0x1 + $str =~ s/\013/!/g; # 0xD -> Exclaimation point + $str =~ s/;SPMquot;@/\015/g; # ;SPMquot;@ to 0xF + $str =~ s/@/\002/g; # At sign -> 0x2 + $str =~ s/\015/@/g; # 0xF to At sign + $str =~ s/;SPMquot;\|/\017/g; # ;SMPquot;| to 0x11 + $str =~ s/\|/\003/g; # Vertical line to 0x3 + $str =~ s/\017/|/g; # 0x11 to vertical line + $str =~ s/;SPMquot;(.)/\1/g; # ;SPMquot; -> whatever the next character is + $str =~ s/\012/;SPMquot;/g; # 0x12 to ;SPMquot; + $str =~ s/\011/\\\\/g; # 0x11 to double backslash + local($key_part, $pageref) = split("\003", $str, 2); + + # For any keys of the form: blablabla!blablabla, which want to be split at the + # exclamation point, replace the ! with a comma and a space. We don't do it + # that way for this index. + $key_part =~ s/\001/, /g; + local(@keys) = split("\001", $key_part); + # If TITLE is not yet available use $before. + $TITLE = $saved_title if (($saved_title)&&(!($TITLE)||($TITLE eq $default_title))); + $TITLE = $before unless $TITLE; + # Save the reference + local($words) = ''; + if ($SHOW_SECTION_NUMBERS) { $words = &make_idxnum; } + elsif ($SHORT_INDEX) { $words = &make_shortidxname; } + else { $words = &make_idxname; } + local($super_key) = ''; + local($sort_key, $printable_key, $cur_key); + foreach $key (@keys) { + $key =~ s/\016/\001/g; # revert protected \001s + ($sort_key, $printable_key) = split("\002", $key); + # + # RRM: 16 May 1996 + # any \label in the printable-key will have already + # created a label where the \index occurred. + # This has to be removed, so that the desired label + # will be found on the Index page instead. 
+ # + if ($printable_key =~ /tex2html_anchor_mark/ ) { + $printable_key =~ s/><\/A>$cross_ref_mark/ + $printable_key =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/ + do { ($label,$id) = ($1,$2); + $ref_label = $external_labels{$label} unless + ($ref_label = $ref_files{$label}); + '"' . "$ref_label#$label" . '">' . + &get_ref_mark($label,$id)} + /geo; + } + $printable_key =~ s/<\#[^\#>]*\#>//go; + #RRM + # recognise \char combinations, for a \backslash + # + $printable_key =~ s/\&\#;\'134/\\/g; # restore \\s + $printable_key =~ s/\&\#;\`
/\\/g; # ditto + $printable_key =~ s/\&\#;*SPMquot;92/\\/g; # ditto + # + # $sort_key .= "@$printable_key" if !($printable_key); # RRM + $sort_key .= "@$printable_key" if !($sort_key); # RRM + $sort_key =~ tr/A-Z/a-z/; + if ($super_key) { + $cur_key = $super_key . "\001" . $sort_key; + $sub_index{$super_key} .= $cur_key . "\004"; + } else { + $cur_key = $sort_key; + } + + # Append the $index_name to the current key with a \002 delimiter. This will + # allow the same index entry to appear in more than one index. + $index_key = $cur_key . "\002$index_name"; + + $index{$index_key} .= ""; + + # + # RRM, 15 June 1996 + # if there is no printable key, but one is known from + # a previous index-entry, then use it. + # + if (!($printable_key) && ($printable_key{$index_key})) + { $printable_key = $printable_key{$index_key}; } +# if (!($printable_key) && ($printable_key{$cur_key})) +# { $printable_key = $printable_key{$cur_key}; } + # + # do not overwrite the printable_key if it contains an anchor + # + if (!($printable_key{$index_key} =~ /tex2html_anchor_mark/ )) + { $printable_key{$index_key} = $printable_key || $key; } +# if (!($printable_key{$cur_key} =~ /tex2html_anchor_mark/ )) +# { $printable_key{$cur_key} = $printable_key || $key; } + + $super_key = $cur_key; + } + # + # RRM + # page-ranges, from |( and |) and |see + # + if ($pageref) { + if ($pageref eq "\(" ) { + $pageref = ''; + $next .= " from "; + } elsif ($pageref eq "\)" ) { + $pageref = ''; + local($next) = $index{$index_key}; +# local($next) = $index{$cur_key}; + # $next =~ s/[\|] *$//; + $next =~ s/(\n )?\| $//; + $index{$index_key} = "$next to "; +# $index{$cur_key} = "$next to "; + } + } + + if ($pageref) { + $pageref =~ s/\s*$//g; # remove trailing spaces + if (!$pageref) { $pageref = ' ' } + $pageref =~ s/see/see <\/i> /g; + # + # RRM: 27 Dec 1996 + # check if $pageref corresponds to a style command. + # If so, apply it to the $words. 
+ # + local($tmp) = "do_cmd_$pageref"; + if (defined &$tmp) { + $words = &$tmp("<#0#>$words<#0#>"); + $words =~ s/<\#[^\#]*\#>//go; + $pageref = ''; + } + } + # + # RRM: 25 May 1996 + # any \label in the pageref section will have already + # created a label where the \index occurred. + # This has to be removed, so that the desired label + # will be found on the Index page instead. + # + if ($pageref) { + if ($pageref =~ /tex2html_anchor_mark/ ) { + $pageref =~ s/><\/A>
$cross_ref_mark/ + $pageref =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/ + do { ($label,$id) = ($1,$2); + $ref_files{$label} = ''; # ???? RRM + if ($index_labels{$label}) { $ref_label = ''; } + else { $ref_label = $external_labels{$label} + unless ($ref_label = $ref_files{$label}); + } + '"' . "$ref_label#$label" . '">' . &get_ref_mark($label,$id)}/geo; + } + $pageref =~ s/<\#[^\#>]*\#>//go; + + if ($pageref eq ' ') { $index{$index_key}='@'; } + else { $index{$index_key} .= $pageref . "\n | "; } + } else { + local($thisref) = &make_named_href('',"$CURRENT_FILE#$br_id",$words); + $thisref =~ s/\n//g; + $index{$index_key} .= $thisref."\n | "; + } + #print "\nREF: $sort_key : $index_key :$index{$index_key}"; + + #join('',"$anchor_invisible_mark<\/A>",$_); + + "$anchor_invisible_mark<\/A>"; +} + + +# KEC. -- Copied from makeidx.perl, then modified to do multiple indices. +# Feeds the index entries to the output. This is called for each index to be built. +# +# Generates a list of lookup keys for index entries, from both %printable_keys +# and %index keys. +# Sorts the keys according to index-sorting rules. +# Removes keys with a 0x01 token. (duplicates?) +# Builds a string to go to the index file. +# Adds the index entries to the string if they belong in this index. +# Keeps track of which index is being worked on, so only the proper entries +# are included. +# Places the index just built in to the output at the proper place. +{ my $index_number = 0; +sub add_real_idx { + print "\nDoing the index ... Index Number $index_number\n"; + local($key, @keys, $next, $index, $old_key, $old_html); + my ($idx_ref,$keyref); + # RRM, 15.6.96: index constructed from %printable_key, not %index + @keys = keys %printable_key; + + while (/$idx_mark/) { + # Get the index reference from what follows the $idx_mark and + # remove it from the string. 
+ s/$idxmark\002(.*?)\002/$idxmark/; + $idx_ref = $1; + $index = ''; + # include non- makeidx index-entries + foreach $key (keys %index) { + next if $printable_key{$key}; + $old_key = $key; + if ($key =~ s/###(.*)$//) { + next if $printable_key{$key}; + push (@keys, $key); + $printable_key{$key} = $key; + if ($index{$old_key} =~ /HREF="([^"]*)"/i) { + $old_html = $1; + $old_html =~ /$dd?([^#\Q$dd\E]*)#/; + $old_html = $1; + } else { $old_html = '' } + $index{$key} = $index{$old_key} . $old_html."\n | "; + }; + } + @keys = sort makeidx_keysort @keys; + @keys = grep(!/\001/, @keys); + my $cnt = 0; + foreach $key (@keys) { + my ($keyref) = $key =~ /.*\002(.*)/; + next unless ($idx_ref eq $keyref); # KEC. + $index .= &add_idx_key($key); + $cnt++; + } + print "$cnt Index Entries Added\n"; + $index = '
'.$index unless ($index =~ /^\s*/); + $index_number++; # KEC. + if ($SHORT_INDEX) { + print "(compact version with Legend)"; + local($num) = ( $index =~ s/\ 50 ) { + s/$idx_mark/$preindex
\n$index\n<\/DL>$preindex/o; + } else { + s/$idx_mark/$preindex
\n$index\n<\/DL>/o; + } + } else { + s/$idx_mark/
\n$index\n<\/DL>/o; } + } +} +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# The bibliography and the index should be treated as separate sections +# in their own HTML files. The \bibliography{} command acts as a sectioning command +# that has the desired effect. But when the bibliography is constructed +# manually using the thebibliography environment, or when using the +# theindex environment it is not possible to use the normal sectioning +# mechanism. This subroutine inserts a \bibliography{} or a dummy +# \textohtmlindex command just before the appropriate environments +# to force sectioning. +sub add_bbl_and_idx_dummy_commands { + local($id) = $global{'max_id'}; + + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$bbl_cnt++; $1/eg; + ## if ($bbl_cnt == 1) { + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$id++; "\\bibliography$O$id$C$O$id$C $1"/geo; + #} + $global{'max_id'} = $id; + # KEC. Modified to global substitution to place multiple index tokens. + s/[\\]begin\s*($O\d+$C)\s*theindex/\\textohtmlindex$1/go; + # KEC. Modified to pick up the optional argument to \printindex + s/[\\]printindex\s*(\[.*?\])?/ + do { (defined $1) ? "\\textohtmlindex $1" : "\\textohtmlindex []"; } /ego; + &lib_add_bbl_and_idx_dummy_commands() if defined(&lib_add_bbl_and_idx_dummy_commands); +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# For each textohtmlindex mark found, determine the index titles and headers. +# We place the index ref in the header so the proper index can be generated later. +# For the default index, the index ref is blank. +# +# One problem is that this routine is called twice.. Once for processing the +# command as originally seen, and once for processing the command when +# doing the name for the index file. We can detect that by looking at the +# id numbers (or ref) surrounding the \theindex command, and not incrementing +# index_number unless a new id (or ref) is seen. 
This has the side effect of +# having to unconventionally start the index_number at -1. But it works. +# +# Gets the title from the list of indices. +# If this is the first index, save the title in $first_idx_file. This is what's referenced +# in the navigation buttons. +# Increment the index_number for next time. +# If the indexname command is defined or a newcommand defined for indexname, do it. +# Save the index TITLE in the toc +# Save the first_idx_file into the idxfile. This goes into the nav buttons. +# Build index_labels if needed. +# Create the index headings and put them in the output stream. + +{ my $index_number = 0; # Will be incremented before use. + my $first_idx_file; # Static + my $no_increment = 0; + +sub do_cmd_textohtmlindex { + local($_) = @_; + my ($idxref,$idxnum,$index_name); + + # We get called from make_name with the first argument = "\001noincrement". This is a sign + # to not increment $index_number the next time we are called. We get called twice, once + # my make_name and once by process_command. Unfortunately, make_name calls us just to set the name + # but doesn't use the result so we get called a second time by process_command. This works fine + # except for cases where there are multiple indices except if they aren't named, which is the case + # when the index is inserted by an include command in latex. In these cases we are only able to use + # the index number to decide which index to draw from, and we don't know how to increment that index + # number if we get called a variable number of times for the same index, as is the case between + # making html (one output file) and web (multiple output files) output formats. + if (/\001noincrement/) { + $no_increment = 1; + return; + } + + # Remove (but save) the index reference + s/^\s*\[(.*?)\]/{$idxref = $1; "";}/e; + + # If we have an $idxref, the index name was specified. In this case, we have all the + # information we need to carry on. 
Otherwise, we need to get the idxref + # from the $index_number and set the name to "Index". + if ($idxref) { + $index_name = $indices{'title'}{$idxref}; + } else { + if (defined ($idxref = $indices{'newcmdorder'}->[$index_number])) { + $index_name = $indices{'title'}{$idxref}; + } else { + $idxref = ''; + $index_name = "Index"; + } + } + + $idx_title = "Index"; # The name displayed in the nav bar text. + + # Only set $idxfile if we are at the first index. This will point the + # navigation panel to the first index file rather than the last. + $first_idx_file = $CURRENT_FILE if ($index_number == 0); + $idxfile = $first_idx_file; # Pointer for the Index button in the nav bar. + $toc_sec_title = $index_name; # Index link text in the toc. + $TITLE = $toc_sec_title; # Title for this index, from which its filename is built. + if (%index_labels) { &make_index_labels(); } + if (($SHORT_INDEX) && (%index_segment)) { &make_preindex(); } + else { $preindex = ''; } + local $idx_head = $section_headings{'textohtmlindex'}; + local($heading) = join('' + , &make_section_heading($TITLE, $idx_head) + , $idx_mark, "\002", $idxref, "\002" ); + local($pre,$post) = &minimize_open_tags($heading); + $index_number++ unless ($no_increment); + $no_increment = 0; + join('',"
\n" , $pre, $_); +} +} + +# Returns an index key, given the key passed as the first argument. +# Not modified for multiple indices. +sub add_idx_key { + local($key) = @_; + local($index, $next); + if (($index{$key} eq '@' )&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { $index .= "

\n
".&print_key."\n
"; } + else { $index .= "

\n
".&print_key."\n
"; } + } elsif (($index{$key})&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { + $next = "
".&print_key."\n : ". &print_idx_links; + } else { + $next = "
".&print_key."\n
". &print_idx_links; + } + $index .= $next."\n"; + $index_printed{$key} = 1; + } + + if ($sub_index{$key}) { + local($subkey, @subkeys, $subnext, $subindex); + @subkeys = sort(split("\004", $sub_index{$key})); + if ($SHORT_INDEX) { + $index .= "
".&print_key unless $index_printed{$key}; + $index .= "
\n"; + } else { + $index .= "
".&print_key."\n
" unless $index_printed{$key}; + $index .= "
\n"; + } + foreach $subkey (@subkeys) { + $index .= &add_sub_idx_key($subkey) unless ($index_printed{$subkey}); + } + $index .= "
\n"; + } + return $index; +} + +1; # Must be present as the last line. diff --git a/docs/manuals/en/console/latex2html-init.pl b/docs/manuals/en/console/latex2html-init.pl new file mode 100644 index 00000000..14b5c319 --- /dev/null +++ b/docs/manuals/en/console/latex2html-init.pl @@ -0,0 +1,10 @@ +# This file serves as a place to put initialization code and constants to +# affect the behavior of latex2html for generating the bacula manuals. + +# $LINKPOINT specifies what filename to use to link to when creating +# index.html. Not that this is a hard link. +$LINKPOINT='"$OVERALL_TITLE"'; + + +# The following must be the last line of this file. +1; diff --git a/docs/manuals/en/console/setup.sm b/docs/manuals/en/console/setup.sm new file mode 100644 index 00000000..7c88dc61 --- /dev/null +++ b/docs/manuals/en/console/setup.sm @@ -0,0 +1,23 @@ +/* + * html2latex + */ + +available { + sun4_sunos.4 + sun4_solaris.2 + rs_aix.3 + rs_aix.4 + sgi_irix +} + +description { + From Jeffrey Schaefer, Geometry Center. Translates HTML document to LaTeX +} + +install { + bin/html2latex /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex + bin/html2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.tag + bin/html2latex-local.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex-local.tag + bin/webtex2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/webtex2latex.tag + man/man1/html2latex.1 /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.1 +} diff --git a/docs/manuals/en/console/translate_images.pl b/docs/manuals/en/console/translate_images.pl new file mode 100755 index 00000000..c7225118 --- /dev/null +++ b/docs/manuals/en/console/translate_images.pl @@ -0,0 +1,185 @@ +#!/usr/bin/perl -w +# +use strict; + +# Used to change the names of the image files generated by latex2html from imgxx.png +# to meaningful names. Provision is made to go either from or to the meaningful names. 
+# The meaningful names are obtained from a file called imagename_translations, which +# is generated by extensions to latex2html in the make_image_file subroutine in +# bacula.perl. + +# Opens the file imagename_translations and reads the contents into a hash. +# The hash is creaed with the imgxx.png files as the key if processing TO +# meaningful filenames, and with the meaningful filenames as the key if +# processing FROM meaningful filenames. +# Then opens the html file(s) indicated in the command-line arguments and +# changes all image references according to the translations described in the +# above file. Finally, it renames the image files. +# +# Original creation: 3-27-05 by Karl Cunningham. +# Modified 5-21-05 to go FROM and TO meaningful filenames. +# +my $TRANSFILE = "imagename_translations"; +my $path; + +# Loads the contents of $TRANSFILE file into the hash referenced in the first +# argument. The hash is loaded to translate old to new if $direction is 0, +# otherwise it is loaded to translate new to old. In this context, the +# 'old' filename is the meaningful name, and the 'new' filename is the +# imgxx.png filename. It is assumed that the old image is the one that +# latex2html has used as the source to create the imgxx.png filename. +# The filename extension is taken from the file +sub read_transfile { + my ($trans,$direction) = @_; + + if (!open IN,"<$path$TRANSFILE") { + print "WARNING: Cannot open image translation file $path$TRANSFILE for reading\n"; + print " Image filename translation aborted\n\n"; + exit 0; + } + + while () { + chomp; + my ($new,$old) = split(/\001/); + + # Old filenames will usually have a leading ./ which we don't need. + $old =~ s/^\.\///; + + # The filename extension of the old filename must be made to match + # the new filename because it indicates the encoding format of the image. 
+ my ($ext) = $new =~ /(\.[^\.]*)$/; + $old =~ s/\.[^\.]*$/$ext/; + if ($direction == 0) { + $trans->{$new} = $old; + } else { + $trans->{$old} = $new; + } + } + close IN; +} + +# Translates the image names in the file given as the first argument, according to +# the translations in the hash that is given as the second argument. +# The file contents are read in entirely into a string, the string is processed, and +# the file contents are then written. No particular care is taken to ensure that the +# file is not lost if a system failure occurs at an inopportune time. It is assumed +# that the html files being processed here can be recreated on demand. +# +# Links to other files are added to the %filelist for processing. That way, +# all linked files will be processed (assuming they are local). +sub translate_html { + my ($filename,$trans,$filelist) = @_; + my ($contents,$out,$this,$img,$dest); + my $cnt = 0; + + # If the filename is an external link ignore it. And drop any file:// from + # the filename. + $filename =~ /^(http|ftp|mailto)\:/ and return 0; + $filename =~ s/^file\:\/\///; + # Load the contents of the html file. + if (!open IF,"<$path$filename") { + print "WARNING: Cannot open $path$filename for reading\n"; + print " Image Filename Translation aborted\n\n"; + exit 0; + } + + while () { + $contents .= $_; + } + close IF; + + # Now do the translation... + # First, search for an image filename. + while ($contents =~ /\<\s*IMG[^\>]*SRC=\"/si) { + $contents = $'; + $out .= $` . $&; + + # The next thing is an image name. Get it and translate it. + $contents =~ /^(.*?)\"/s; + $contents = $'; + $this = $&; + $img = $1; + # If the image is in our list of ones to be translated, do it + # and feed the result to the output. + $cnt += $this =~ s/$img/$trans->{$img}/ if (defined($trans->{$img})); + $out .= $this; + } + $out .= $contents; + + # Now send the translated text to the html file, overwriting what's there. 
+ open OF,">$path$filename" or die "Cannot open $path$filename for writing\n"; + print OF $out; + close OF; + + # Now look for any links to other files and add them to the list of files to do. + while ($out =~ /\<\s*A[^\>]*HREF=\"(.*?)\"/si) { + $out = $'; + $dest = $1; + # Drop an # and anything after it. + $dest =~ s/\#.*//; + $filelist->{$dest} = '' if $dest; + } + return $cnt; +} + +# REnames the image files spefified in the %translate hash. +sub rename_images { + my $translate = shift; + my ($response); + + foreach (keys(%$translate)) { + if (! $translate->{$_}) { + print " WARNING: No destination Filename for $_\n"; + } else { + $response = `mv -f $path$_ $path$translate->{$_} 2>&1`; + $response and print "ERROR from system $response\n"; + } + } +} + +################################################# +############# MAIN ############################# +################################################ + +# %filelist starts out with keys from the @ARGV list. As files are processed, +# any links to other files are added to the %filelist. A hash of processed +# files is kept so we don't do any twice. + +# The first argument must be either --to_meaningful_names or --from_meaningful_names + +my (%translate,$search_regex,%filelist,%completed,$thisfile); +my ($cnt,$direction); + +my $arg0 = shift(@ARGV); +$arg0 =~ /^(--to_meaningful_names|--from_meaningful_names)$/ or + die "ERROR: First argument must be either \'--to_meaningful_names\' or \'--from_meaningful_names\'\n"; + +$direction = ($arg0 eq '--to_meaningful_names') ? 0 : 1; + +(@ARGV) or die "ERROR: Filename(s) to process must be given as arguments\n"; + +# Use the first argument to get the path to the file of translations. +my $tmp = $ARGV[0]; +($path) = $tmp =~ /(.*\/)/; +$path = '' unless $path; + +read_transfile(\%translate,$direction); + +foreach (@ARGV) { + # Strip the path from the filename, and use it later on. 
+ if (s/(.*\/)//) { + $path = $1; + } else { + $path = ''; + } + $filelist{$_} = ''; + + while ($thisfile = (keys(%filelist))[0]) { + $cnt += translate_html($thisfile,\%translate,\%filelist) if (!exists($completed{$thisfile})); + delete($filelist{$thisfile}); + $completed{$thisfile} = ''; + } + print "translate_images.pl: $cnt image filenames translated ",($direction)?"from":"to"," meaningful names\n"; +} + +rename_images(\%translate); diff --git a/docs/manuals/en/console/update_version b/docs/manuals/en/console/update_version new file mode 100755 index 00000000..5c2e0092 --- /dev/null +++ b/docs/manuals/en/console/update_version @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +. ./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/en/console/update_version.in b/docs/manuals/en/console/update_version.in new file mode 100644 index 00000000..2766245f --- /dev/null +++ b/docs/manuals/en/console/update_version.in @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +. 
./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/en/console/version.tex.in b/docs/manuals/en/console/version.tex.in new file mode 100644 index 00000000..ff66dfc6 --- /dev/null +++ b/docs/manuals/en/console/version.tex.in @@ -0,0 +1 @@ +@VERSION@ (@DATE@) diff --git a/docs/manuals/en/developers/Makefile.in b/docs/manuals/en/developers/Makefile.in new file mode 100644 index 00000000..eb2c5f0f --- /dev/null +++ b/docs/manuals/en/developers/Makefile.in @@ -0,0 +1,106 @@ +# +# +# Makefile for LaTeX +# +# To build everything do +# make tex +# make web +# make html +# make dvipdf +# +# or simply +# +# make +# + +IMAGES=../../../images + +DOC=developers + +first_rule: all + +all: tex web html dvipdf + +.SUFFIXES: .tex .html +.PHONY: +.DONTCARE: + + +tex: + @cp -fp ${IMAGES}/hires/*.eps . + touch ${DOC}.idx ${DOC}i-general.tex + -latex -interaction=batchmode ${DOC}.tex + makeindex ${DOC}.idx >/dev/null 2>/dev/null + -latex -interaction=batchmode ${DOC}.tex + @rm -f *.eps *.old + +pdf: + @echo "Making ${DOC} pdf" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdf ${DOC}.dvi ${DOC}.pdf + @rm -f *.eps *.old + +dvipdf: + @echo "Making ${DOC} pdfm" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdfm -p a4 ${DOC}.dvi + @rm -f *.eps *.old + +html: + @echo "Making ${DOC} html" + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @touch ${DOC}.html + @(if [ -f imagename_translations ] ; then \ + ./translate_images.pl --from_meaningful_names ${DOC}.html; \ + fi) + latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \ + ${DOC} >/dev/null + ./translate_images.pl --to_meaningful_names ${DOC}.html + @rm -f *.eps *.gif *.jpg *.old + +web: + @echo "Making ${DOC} web" + @mkdir -p ${DOC} + @rm -f ${DOC}/* + @cp -fp ${IMAGES}/*.eps . 
+ @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @cp -fp ${IMAGES}/*.eps ${IMAGES}/*.png ${DOC}/ + @rm -f ${DOC}/next.eps ${DOC}/next.png ${DOC}/prev.eps ${DOC}/prev.png ${DOC}/up.eps ${DOC}/up.png + @(if [ -f ${DOC}/imagename_translations ] ; then \ + ./translate_images.pl --to_meaningful_names ${DOC}/Developer*Guide.html; \ + fi) + @rm -rf ${DOC}/*.html + latex2html -split 4 -local_icons -t "Developer's Guide" -long_titles 4 \ + -contents_in_nav -toc_stars -white -notransparent ${DOC} >/dev/null + ./translate_images.pl --to_meaningful_names ${DOC}/Developer*Guide.html + @cp -f ${DOC}/${DOC}_Guide.html ${DOC}/index.html + @rm -f *.eps *.gif *.jpg ${DOC}/*.eps *.old + @rm -f ${DOC}/idle.png + @rm -f ${DOC}/win32-*.png ${DOC}/wx-console*.png ${DOC}/xp-*.png + @rm -f ${DOC}/*.pl ${DOC}/*.log ${DOC}/*.aux ${DOC}/*.idx + @rm -f ${DOC}/*.out WARNINGS + +texcheck: + ./check_tex.pl ${DOC}.tex + +main_configs: + pic2graph -density 100 main_configs.png + +clean: + @rm -f 1 2 3 + @rm -f *.png *.gif *.jpg *.eps + @rm -f *.pdf *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.html *.backup *.pdf *.ps *.dvi *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd imagename_translations + @rm -f *.old WARNINGS *.out *.toc *.idx + @rm -f images.pl labels.pl internals.pl + @rm -rf ${DOC} + @rm -f images.tex ${DOC}i.tex + @rm -f ${DOC}i-*.tex + + +distclean: clean + @rm -f ${DOC}.html ${DOC}.pdf + @rm -f Makefile version.tex diff --git a/docs/manuals/en/developers/catalog.tex b/docs/manuals/en/developers/catalog.tex new file mode 100644 index 00000000..f67866b5 --- /dev/null +++ b/docs/manuals/en/developers/catalog.tex @@ -0,0 +1,939 @@ +%% +%% + +\chapter{Catalog Services} +\label{_ChapterStart30} +\index[general]{Services!Catalog } +\index[general]{Catalog Services } + +\section{General} +\index[general]{General } +\addcontentsline{toc}{subsection}{General} + +This chapter is intended to be a technical discussion of the Catalog services 
+and as such is not targeted at end users but rather at developers and system +administrators that want or need to know more of the working details of {\bf +Bacula}. + +The {\bf Bacula Catalog} services consist of the programs that provide the SQL +database engine for storage and retrieval of all information concerning files +that were backed up and their locations on the storage media. + +We have investigated the possibility of using the following SQL engines for +Bacula: Beagle, mSQL, GNU SQL, PostgreSQL, SQLite, Oracle, and MySQL. Each +presents certain problems with either licensing or maturity. At present, we +have chosen for development purposes to use MySQL, PostgreSQL and SQLite. +MySQL was chosen because it is fast, proven to be reliable, widely used, and +actively being developed. MySQL is released under the GNU GPL license. +PostgreSQL was chosen because it is a full-featured, very mature database, and +because Dan Langille did the Bacula driver for it. PostgreSQL is distributed +under the BSD license. SQLite was chosen because it is small, efficient, and +can be directly embedded in {\bf Bacula} thus requiring much less effort from +the system administrator or person building {\bf Bacula}. In our testing +SQLite has performed very well, and for the functions that we use, it has +never encountered any errors except that it does not appear to handle +databases larger than 2GBytes. That said, we would not recommend it for +serious production use. + +The Bacula SQL code has been written in a manner that will allow it to be +easily modified to support any of the current SQL database systems on the +market (for example: mSQL, iODBC, unixODBC, Solid, OpenLink ODBC, EasySoft +ODBC, InterBase, Oracle8, Oracle7, and DB2). + +If you do not specify either {\bf \verb{--{with-mysql} or {\bf \verb{--{with-postgresql} or +{\bf \verb{--{with-sqlite} on the ./configure line, Bacula will use its minimalist +internal database. 
This database is kept for build reasons but is no longer +supported. Bacula {\bf requires} one of the three databases (MySQL, +PostgreSQL, or SQLite) to run. + +\subsection{Filenames and Maximum Filename Length} +\index[general]{Filenames and Maximum Filename Length } +\index[general]{Length!Filenames and Maximum Filename } +\addcontentsline{toc}{subsubsection}{Filenames and Maximum Filename Length} + +In general, either MySQL, PostgreSQL or SQLite permit storing arbitrary long +path names and file names in the catalog database. In practice, there still +may be one or two places in the Catalog interface code that restrict the +maximum path length to 512 characters and the maximum file name length to 512 +characters. These restrictions are believed to have been removed. Please note, +these restrictions apply only to the Catalog database and thus to your ability +to list online the files saved during any job. All information received and +stored by the Storage daemon (normally on tape) allows and handles arbitrarily +long path and filenames. + +\subsection{Installing and Configuring MySQL} +\index[general]{MySQL!Installing and Configuring } +\index[general]{Installing and Configuring MySQL } +\addcontentsline{toc}{subsubsection}{Installing and Configuring MySQL} + +For the details of installing and configuring MySQL, please see the +\ilink{Installing and Configuring MySQL}{_ChapterStart} chapter of +this manual. + +\subsection{Installing and Configuring PostgreSQL} +\index[general]{PostgreSQL!Installing and Configuring } +\index[general]{Installing and Configuring PostgreSQL } +\addcontentsline{toc}{subsubsection}{Installing and Configuring PostgreSQL} + +For the details of installing and configuring PostgreSQL, please see the +\ilink{Installing and Configuring PostgreSQL}{_ChapterStart10} +chapter of this manual. 
+ +\subsection{Installing and Configuring SQLite} +\index[general]{Installing and Configuring SQLite } +\index[general]{SQLite!Installing and Configuring } +\addcontentsline{toc}{subsubsection}{Installing and Configuring SQLite} + +For the details of installing and configuring SQLite, please see the +\ilink{Installing and Configuring SQLite}{_ChapterStart33} chapter of +this manual. + +\subsection{Internal Bacula Catalog} +\index[general]{Catalog!Internal Bacula } +\index[general]{Internal Bacula Catalog } +\addcontentsline{toc}{subsubsection}{Internal Bacula Catalog} + +Please see the +\ilink{Internal Bacula Database}{_ChapterStart42} chapter of this +manual for more details. + +\subsection{Database Table Design} +\index[general]{Design!Database Table } +\index[general]{Database Table Design } +\addcontentsline{toc}{subsubsection}{Database Table Design} + +All discussions that follow pertain to the MySQL database. The details for the +PostgreSQL and SQLite databases are essentially identical except for that all +fields in the SQLite database are stored as ASCII text and some of the +database creation statements are a bit different. The details of the internal +Bacula catalog are not discussed here. + +Because the Catalog database may contain very large amounts of data for large +sites, we have made a modest attempt to normalize the data tables to reduce +redundant information. While reducing the size of the database significantly, +it does, unfortunately, add some complications to the structures. + +In simple terms, the Catalog database must contain a record of all Jobs run by +Bacula, and for each Job, it must maintain a list of all files saved, with +their File Attributes (permissions, create date, ...), and the location and +Media on which the file is stored. This is seemingly a simple task, but it +represents a huge amount interlinked data. Note: the list of files and their +attributes is not maintained when using the internal Bacula database. 
The data +stored in the File records, which allows the user or administrator to obtain a +list of all files backed up during a job, is by far the largest volume of +information put into the Catalog database. + +Although the Catalog database has been designed to handle backup data for +multiple clients, some users may want to maintain multiple databases, one for +each machine to be backed up. This reduces the risk of confusion of accidental +restoring a file to the wrong machine as well as reducing the amount of data +in a single database, thus increasing efficiency and reducing the impact of a +lost or damaged database. + +\section{Sequence of Creation of Records for a Save Job} +\index[general]{Sequence of Creation of Records for a Save Job } +\index[general]{Job!Sequence of Creation of Records for a Save } +\addcontentsline{toc}{subsection}{Sequence of Creation of Records for a Save +Job} + +Start with StartDate, ClientName, Filename, Path, Attributes, MediaName, +MediaCoordinates. (PartNumber, NumParts). In the steps below, ``Create new'' +means to create a new record whether or not it is unique. ``Create unique'' +means each record in the database should be unique. Thus, one must first +search to see if the record exists, and only if not should a new one be +created, otherwise the existing RecordId should be used. 
+ +\begin{enumerate} +\item Create new Job record with StartDate; save JobId +\item Create unique Media record; save MediaId +\item Create unique Client record; save ClientId +\item Create unique Filename record; save FilenameId +\item Create unique Path record; save PathId +\item Create unique Attribute record; save AttributeId + store ClientId, FilenameId, PathId, and Attributes +\item Create new File record + store JobId, AttributeId, MediaCoordinates, etc +\item Repeat steps 4 through 8 for each file +\item Create a JobMedia record; save MediaId +\item Update Job record filling in EndDate and other Job statistics + \end{enumerate} + +\section{Database Tables} +\index[general]{Database Tables } +\index[general]{Tables!Database } +\addcontentsline{toc}{subsection}{Database Tables} + +\addcontentsline{lot}{table}{Filename Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf Filename } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{l| }{\bf Data Type } +& \multicolumn{1}{l| }{\bf Remark } \\ + \hline +{FilenameId } & {integer } & {Primary Key } \\ + \hline +{Name } & {Blob } & {Filename } +\\ \hline + +\end{longtable} + +The {\bf Filename} table shown above contains the name of each file backed up +with the path removed. If different directories or machines contain the same +filename, only one copy will be saved in this table. + +\ + +\addcontentsline{lot}{table}{Path Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf Path } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{PathId } & {integer } & {Primary Key } \\ + \hline +{Path } & {Blob } & {Full Path } +\\ \hline + +\end{longtable} + +The {\bf Path} table contains shown above the path or directory names of all +directories on the system or systems. The filename and any MSDOS disk name are +stripped off. 
As with the filename, only one copy of each directory name is +kept regardless of how many machines or drives have the same directory. These +path names should be stored in Unix path name format. + +Some simple testing on a Linux file system indicates that separating the +filename and the path may be more complication than is warranted by the space +savings. For example, this system has a total of 89,097 files, 60,467 of which +have unique filenames, and there are 4,374 unique paths. + +Finding all those files and doing two stats() per file takes an average wall +clock time of 1 min 35 seconds on a 400MHz machine running RedHat 6.1 Linux. + +Finding all those files and putting them directly into a MySQL database with +the path and filename defined as TEXT, which is variable length up to 65,535 +characters takes 19 mins 31 seconds and creates a 27.6 MByte database. + +Doing the same thing, but inserting them into Blob fields with the filename +indexed on the first 30 characters and the path name indexed on the 255 (max) +characters takes 5 mins 18 seconds and creates a 5.24 MB database. Rerunning +the job (with the database already created) takes about 2 mins 50 seconds. + +Running the same as the last one (Path and Filename Blob), but Filename +indexed on the first 30 characters and the Path on the first 50 characters +(linear search done there after) takes 5 mins on the average and creates a 3.4 +MB database. Rerunning with the data already in the DB takes 3 mins 35 +seconds. + +Finally, saving only the full path name rather than splitting the path and the +file, and indexing it on the first 50 characters takes 6 mins 43 seconds and +creates a 7.35 MB database. 
+ +\ + +\addcontentsline{lot}{table}{File Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf File } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{FileId } & {integer } & {Primary Key } \\ + \hline +{FileIndex } & {integer } & {The sequential file number in the Job } \\ + \hline +{JobId } & {integer } & {Link to Job Record } \\ + \hline +{PathId } & {integer } & {Link to Path Record } \\ + \hline +{FilenameId } & {integer } & {Link to Filename Record } \\ + \hline +{MarkId } & {integer } & {Used to mark files during Verify Jobs } \\ + \hline +{LStat } & {tinyblob } & {File attributes in base64 encoding } \\ + \hline +{MD5 } & {tinyblob } & {MD5 signature in base64 encoding } +\\ \hline + +\end{longtable} + +The {\bf File} table shown above contains one entry for each file backed up by +Bacula. Thus a file that is backed up multiple times (as is normal) will have +multiple entries in the File table. This will probably be the table with the +most number of records. Consequently, it is essential to keep the size of this +record to an absolute minimum. At the same time, this table must contain all +the information (or pointers to the information) about the file and where it +is backed up. Since a file may be backed up many times without having changed, +the path and filename are stored in separate tables. + +This table contains by far the largest amount of information in the Catalog +database, both from the stand point of number of records, and the stand point +of total database size. As a consequence, the user must take care to +periodically reduce the number of File records using the {\bf retention} +command in the Console program. 
+ +\ + +\addcontentsline{lot}{table}{Job Table Layout} +\begin{longtable}{|l|l|p{2.5in}|} + \hline +\multicolumn{3}{|l| }{\bf Job } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{JobId } & {integer } & {Primary Key } \\ + \hline +{Job } & {tinyblob } & {Unique Job Name } \\ + \hline +{Name } & {tinyblob } & {Job Name } \\ + \hline +{PurgedFiles } & {tinyint } & {Used by Bacula for purging/retention periods +} \\ + \hline +{Type } & {binary(1) } & {Job Type: Backup, Copy, Clone, Archive, Migration +} \\ + \hline +{Level } & {binary(1) } & {Job Level } \\ + \hline +{ClientId } & {integer } & {Client index } \\ + \hline +{JobStatus } & {binary(1) } & {Job Termination Status } \\ + \hline +{SchedTime } & {datetime } & {Time/date when Job scheduled } \\ + \hline +{StartTime } & {datetime } & {Time/date when Job started } \\ + \hline +{EndTime } & {datetime } & {Time/date when Job ended } \\ + \hline +{JobTDate } & {bigint } & {Start day in Unix format but 64 bits; used for +Retention period. } \\ + \hline +{VolSessionId } & {integer } & {Unique Volume Session ID } \\ + \hline +{VolSessionTime } & {integer } & {Unique Volume Session Time } \\ + \hline +{JobFiles } & {integer } & {Number of files saved in Job } \\ + \hline +{JobBytes } & {bigint } & {Number of bytes saved in Job } \\ + \hline +{JobErrors } & {integer } & {Number of errors during Job } \\ + \hline +{JobMissingFiles } & {integer } & {Number of files not saved (not yet used) } +\\ + \hline +{PoolId } & {integer } & {Link to Pool Record } \\ + \hline +{FileSetId } & {integer } & {Link to FileSet Record } \\ + \hline +{PurgedFiles } & {tiny integer } & {Set when all File records purged } \\ + \hline +{HasBase } & {tiny integer } & {Set when Base Job run } +\\ \hline + +\end{longtable} + +The {\bf Job} table contains one record for each Job run by Bacula. 
Thus +normally, there will be one per day per machine added to the database. Note, +the JobId is used to index Job records in the database, and it often is shown +to the user in the Console program. However, care must be taken with its use +as it is not unique from database to database. For example, the user may have +a database for Client data saved on machine Rufus and another database for +Client data saved on machine Roxie. In this case, the two databases will each +have JobIds that match those in another database. For a unique reference to a +Job, see Job below. + +The Name field of the Job record corresponds to the Name resource record given +in the Director's configuration file. Thus it is a generic name, and it will +be normal to find many Jobs (or even all Jobs) with the same Name. + +The Job field contains a combination of the Name and the schedule time of the +Job by the Director. Thus for a given Director, even with multiple Catalog +databases, the Job will contain a unique name that represents the Job. + +For a given Storage daemon, the VolSessionId and VolSessionTime form a unique +identification of the Job. This will be the case even if multiple Directors +are using the same Storage daemon. 
+ +The Job Type (or simply Type) can have one of the following values: + +\addcontentsline{lot}{table}{Job Types} +\begin{longtable}{|l|l|} + \hline +\multicolumn{1}{|c| }{\bf Value } & \multicolumn{1}{c| }{\bf Meaning } \\ + \hline +{B } & {Backup Job } \\ + \hline +{V } & {Verify Job } \\ + \hline +{R } & {Restore Job } \\ + \hline +{C } & {Console program (not in database) } \\ + \hline +{D } & {Admin Job } \\ + \hline +{A } & {Archive Job (not implemented) } +\\ \hline + +\end{longtable} + +The JobStatus field specifies how the job terminated, and can be one of the +following: + +\addcontentsline{lot}{table}{Job Statuses} +\begin{longtable}{|l|l|} + \hline +\multicolumn{1}{|c| }{\bf Value } & \multicolumn{1}{c| }{\bf Meaning } \\ + \hline +{C } & {Created but not yet running } \\ + \hline +{R } & {Running } \\ + \hline +{B } & {Blocked } \\ + \hline +{T } & {Terminated normally } \\ + \hline +{E } & {Terminated in Error } \\ + \hline +{e } & {Non-fatal error } \\ + \hline +{f } & {Fatal error } \\ + \hline +{D } & {Verify Differences } \\ + \hline +{A } & {Canceled by the user } \\ + \hline +{F } & {Waiting on the File daemon } \\ + \hline +{S } & {Waiting on the Storage daemon } \\ + \hline +{m } & {Waiting for a new Volume to be mounted } \\ + \hline +{M } & {Waiting for a Mount } \\ + \hline +{s } & {Waiting for Storage resource } \\ + \hline +{j } & {Waiting for Job resource } \\ + \hline +{c } & {Waiting for Client resource } \\ + \hline +{d } & {Waiting for Maximum jobs } \\ + \hline +{t } & {Waiting for Start Time } \\ + \hline +{p } & {Waiting for higher priority job to finish } +\\ \hline + +\end{longtable} + +\ + +\addcontentsline{lot}{table}{File Sets Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf FileSet } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type\ +\ \ } & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{FileSetId } & {integer } & {Primary Key } \\ + \hline +{FileSet } & 
{tinyblob } & {FileSet name } \\ + \hline +{MD5 } & {tinyblob } & {MD5 checksum of FileSet } \\ + \hline +{CreateTime } & {datetime } & {Time and date Fileset created } +\\ \hline + +\end{longtable} + +The {\bf FileSet} table contains one entry for each FileSet that is used. The +MD5 signature is kept to ensure that if the user changes anything inside the +FileSet, it will be detected and the new FileSet will be used. This is +particularly important when doing an incremental update. If the user deletes a +file or adds a file, we need to ensure that a Full backup is done prior to the +next incremental. + +\ + +\addcontentsline{lot}{table}{JobMedia Table Layout} +\begin{longtable}{|l|l|p{2.5in}|} + \hline +\multicolumn{3}{|l| }{\bf JobMedia } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type\ +\ \ } & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{JobMediaId } & {integer } & {Primary Key } \\ + \hline +{JobId } & {integer } & {Link to Job Record } \\ + \hline +{MediaId } & {integer } & {Link to Media Record } \\ + \hline +{FirstIndex } & {integer } & {The index (sequence number) of the first file +written for this Job to the Media } \\ + \hline +{LastIndex } & {integer } & {The index of the last file written for this +Job to the Media } \\ + \hline +{StartFile } & {integer } & {The physical media (tape) file number of the +first block written for this Job } \\ + \hline +{EndFile } & {integer } & {The physical media (tape) file number of the +last block written for this Job } \\ + \hline +{StartBlock } & {integer } & {The number of the first block written for +this Job } \\ + \hline +{EndBlock } & {integer } & {The number of the last block written for this +Job } \\ + \hline +{VolIndex } & {integer } & {The Volume use sequence number within the Job } +\\ \hline + +\end{longtable} + +The {\bf JobMedia} table contains one entry at the following: start of +the job, start of each new tape file, start of each new tape, end of the 
+job. Since by default, a new tape file is written every 2GB, in general, +you will have more than 2 JobMedia records per Job. The number can be +varied by changing the "Maximum File Size" specified in the Device +resource. This record allows Bacula to efficiently position close to +(within 2GB) any given file in a backup. For restoring a full Job, +these records are not very important, but if you want to retrieve +a single file that was written near the end of a 100GB backup, the +JobMedia records can speed it up by orders of magnitude by permitting +forward spacing files and blocks rather than reading the whole 100GB +backup. + + + +\ + +\addcontentsline{lot}{table}{Media Table Layout} +\begin{longtable}{|l|l|p{2.4in}|} + \hline +\multicolumn{3}{|l| }{\bf Media } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type\ +\ \ } & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{MediaId } & {integer } & {Primary Key } \\ + \hline +{VolumeName } & {tinyblob } & {Volume name } \\ + \hline +{Slot } & {integer } & {Autochanger Slot number or zero } \\ + \hline +{PoolId } & {integer } & {Link to Pool Record } \\ + \hline +{MediaType } & {tinyblob } & {The MediaType supplied by the user } \\ + \hline +{FirstWritten } & {datetime } & {Time/date when first written } \\ + \hline +{LastWritten } & {datetime } & {Time/date when last written } \\ + \hline +{LabelDate } & {datetime } & {Time/date when tape labeled } \\ + \hline +{VolJobs } & {integer } & {Number of jobs written to this media } \\ + \hline +{VolFiles } & {integer } & {Number of files written to this media } \\ + \hline +{VolBlocks } & {integer } & {Number of blocks written to this media } \\ + \hline +{VolMounts } & {integer } & {Number of times media mounted } \\ + \hline +{VolBytes } & {bigint } & {Number of bytes saved in Job } \\ + \hline +{VolErrors } & {integer } & {Number of errors during Job } \\ + \hline +{VolWrites } & {integer } & {Number of writes to media } \\ + \hline 
+{MaxVolBytes } & {bigint } & {Maximum bytes to put on this media } \\ + \hline +{VolCapacityBytes } & {bigint } & {Capacity estimate for this volume } \\ + \hline +{VolStatus } & {enum } & {Status of media: Full, Archive, Append, Recycle, +Read-Only, Disabled, Error, Busy } \\ + \hline +{Recycle } & {tinyint } & {Whether or not Bacula can recycle the Volumes: +Yes, No } \\ + \hline +{VolRetention } & {bigint } & {64 bit seconds until expiration } \\ + \hline +{VolUseDuration } & {bigint } & {64 bit seconds volume can be used } \\ + \hline +{MaxVolJobs } & {integer } & {maximum jobs to put on Volume } \\ + \hline +{MaxVolFiles } & {integer } & {maximum EOF marks to put on Volume } +\\ \hline + +\end{longtable} + +The {\bf Volume} table (internally referred to as the Media table) contains +one entry for each volume, that is each tape, cassette (8mm, DLT, DAT, ...), +or file on which information is or was backed up. There is one Volume record +created for each of the NumVols specified in the Pool resource record. 
+ +\ + +\addcontentsline{lot}{table}{Pool Table Layout} +\begin{longtable}{|l|l|p{2.4in}|} + \hline +\multicolumn{3}{|l| }{\bf Pool } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{PoolId } & {integer } & {Primary Key } \\ + \hline +{Name } & {Tinyblob } & {Pool Name } \\ + \hline +{NumVols } & {Integer } & {Number of Volumes in the Pool } \\ + \hline +{MaxVols } & {Integer } & {Maximum Volumes in the Pool } \\ + \hline +{UseOnce } & {tinyint } & {Use volume once } \\ + \hline +{UseCatalog } & {tinyint } & {Set to use catalog } \\ + \hline +{AcceptAnyVolume } & {tinyint } & {Accept any volume from Pool } \\ + \hline +{VolRetention } & {bigint } & {64 bit seconds to retain volume } \\ + \hline +{VolUseDuration } & {bigint } & {64 bit seconds volume can be used } \\ + \hline +{MaxVolJobs } & {integer } & {max jobs on volume } \\ + \hline +{MaxVolFiles } & {integer } & {max EOF marks to put on Volume } \\ + \hline +{MaxVolBytes } & {bigint } & {max bytes to write on Volume } \\ + \hline +{AutoPrune } & {tinyint } & {yes|no for autopruning } \\ + \hline +{Recycle } & {tinyint } & {yes|no for allowing auto recycling of Volume } +\\ + \hline +{PoolType } & {enum } & {Backup, Copy, Cloned, Archive, Migration } \\ + \hline +{LabelFormat } & {Tinyblob } & {Label format } +\\ \hline + +\end{longtable} + +The {\bf Pool} table contains one entry for each media pool controlled by +Bacula in this database. One media record exists for each of the NumVols +contained in the Pool. The PoolType is a Bacula defined keyword. The MediaType +is defined by the administrator, and corresponds to the MediaType specified in +the Director's Storage definition record. The CurrentVol is the sequence +number of the Media record for the current volume. 
+ +\ + +\addcontentsline{lot}{table}{Client Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf Client } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{ClientId } & {integer } & {Primary Key } \\ + \hline +{Name } & {TinyBlob } & {File Services Name } \\ + \hline +{UName } & {TinyBlob } & {uname -a from Client (not yet used) } \\ + \hline +{AutoPrune } & {tinyint } & {yes|no for autopruning } \\ + \hline +{FileRetention } & {bigint } & {64 bit seconds to retain Files } \\ + \hline +{JobRetention } & {bigint } & {64 bit seconds to retain Job } +\\ \hline + +\end{longtable} + +The {\bf Client} table contains one entry for each machine backed up by Bacula +in this database. Normally the Name is a fully qualified domain name. + +\ + +\addcontentsline{lot}{table}{Unsaved Files Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf UnsavedFiles } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{UnsavedId } & {integer } & {Primary Key } \\ + \hline +{JobId } & {integer } & {JobId corresponding to this record } \\ + \hline +{PathId } & {integer } & {Id of path } \\ + \hline +{FilenameId } & {integer } & {Id of filename } +\\ \hline + +\end{longtable} + +The {\bf UnsavedFiles} table contains one entry for each file that was not +saved. Note! This record is not yet implemented. 
+ +\ + +\addcontentsline{lot}{table}{Counter Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf Counter } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{Counter } & {tinyblob } & {Counter name } \\ + \hline +{MinValue } & {integer } & {Start/Min value for counter } \\ + \hline +{MaxValue } & {integer } & {Max value for counter } \\ + \hline +{CurrentValue } & {integer } & {Current counter value } \\ + \hline +{WrapCounter } & {tinyblob } & {Name of another counter } +\\ \hline + +\end{longtable} + +The {\bf Counter} table contains one entry for each permanent counter defined +by the user. + +\ + +\addcontentsline{lot}{table}{Version Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf Version } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{VersionId } & {integer } & {Primary Key } +\\ \hline + +\end{longtable} + +The {\bf Version} table defines the Bacula database version number. Bacula +checks this number before reading the database to ensure that it is compatible +with the Bacula binary file. + +\ + +\addcontentsline{lot}{table}{Base Files Table Layout} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{3}{|l| }{\bf BaseFiles } \\ + \hline +\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type +} & \multicolumn{1}{c| }{\bf Remark } \\ + \hline +{BaseId } & {integer } & {Primary Key } \\ + \hline +{BaseJobId } & {integer } & {JobId of Base Job } \\ + \hline +{JobId } & {integer } & {Reference to Job } \\ + \hline +{FileId } & {integer } & {Reference to File } \\ + \hline +{FileIndex } & {integer } & {File Index number } +\\ \hline + +\end{longtable} + +The {\bf BaseFiles} table contains all the File references for a particular +JobId that point to a Base file -- i.e. 
they were previously saved and hence +were not saved in the current JobId but in BaseJobId under FileId. FileIndex +is the index of the file, and is used for optimization of Restore jobs to +prevent the need to read the FileId record when creating the in memory tree. +This record is not yet implemented. + +\ + +\subsection{MySQL Table Definition} +\index[general]{MySQL Table Definition } +\index[general]{Definition!MySQL Table } +\addcontentsline{toc}{subsubsection}{MySQL Table Definition} + +The commands used to create the MySQL tables are as follows: + +\footnotesize +\begin{verbatim} +USE bacula; +CREATE TABLE Filename ( + FilenameId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + Name BLOB NOT NULL, + PRIMARY KEY(FilenameId), + INDEX (Name(30)) + ); +CREATE TABLE Path ( + PathId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + Path BLOB NOT NULL, + PRIMARY KEY(PathId), + INDEX (Path(50)) + ); +CREATE TABLE File ( + FileId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + FileIndex INTEGER UNSIGNED NOT NULL DEFAULT 0, + JobId INTEGER UNSIGNED NOT NULL REFERENCES Job, + PathId INTEGER UNSIGNED NOT NULL REFERENCES Path, + FilenameId INTEGER UNSIGNED NOT NULL REFERENCES Filename, + MarkId INTEGER UNSIGNED NOT NULL DEFAULT 0, + LStat TINYBLOB NOT NULL, + MD5 TINYBLOB NOT NULL, + PRIMARY KEY(FileId), + INDEX (JobId), + INDEX (PathId), + INDEX (FilenameId) + ); +CREATE TABLE Job ( + JobId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + Job TINYBLOB NOT NULL, + Name TINYBLOB NOT NULL, + Type BINARY(1) NOT NULL, + Level BINARY(1) NOT NULL, + ClientId INTEGER NOT NULL REFERENCES Client, + JobStatus BINARY(1) NOT NULL, + SchedTime DATETIME NOT NULL, + StartTime DATETIME NOT NULL, + EndTime DATETIME NOT NULL, + JobTDate BIGINT UNSIGNED NOT NULL, + VolSessionId INTEGER UNSIGNED NOT NULL DEFAULT 0, + VolSessionTime INTEGER UNSIGNED NOT NULL DEFAULT 0, + JobFiles INTEGER UNSIGNED NOT NULL DEFAULT 0, + JobBytes BIGINT UNSIGNED NOT NULL, + JobErrors INTEGER UNSIGNED NOT NULL DEFAULT 0, + 
JobMissingFiles INTEGER UNSIGNED NOT NULL DEFAULT 0, + PoolId INTEGER UNSIGNED NOT NULL REFERENCES Pool, + FileSetId INTEGER UNSIGNED NOT NULL REFERENCES FileSet, + PurgedFiles TINYINT NOT NULL DEFAULT 0, + HasBase TINYINT NOT NULL DEFAULT 0, + PRIMARY KEY(JobId), + INDEX (Name(128)) + ); +CREATE TABLE FileSet ( + FileSetId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + FileSet TINYBLOB NOT NULL, + MD5 TINYBLOB NOT NULL, + CreateTime DATETIME NOT NULL, + PRIMARY KEY(FileSetId) + ); +CREATE TABLE JobMedia ( + JobMediaId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + JobId INTEGER UNSIGNED NOT NULL REFERENCES Job, + MediaId INTEGER UNSIGNED NOT NULL REFERENCES Media, + FirstIndex INTEGER UNSIGNED NOT NULL DEFAULT 0, + LastIndex INTEGER UNSIGNED NOT NULL DEFAULT 0, + StartFile INTEGER UNSIGNED NOT NULL DEFAULT 0, + EndFile INTEGER UNSIGNED NOT NULL DEFAULT 0, + StartBlock INTEGER UNSIGNED NOT NULL DEFAULT 0, + EndBlock INTEGER UNSIGNED NOT NULL DEFAULT 0, + VolIndex INTEGER UNSIGNED NOT NULL DEFAULT 0, + PRIMARY KEY(JobMediaId), + INDEX (JobId, MediaId) + ); +CREATE TABLE Media ( + MediaId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + VolumeName TINYBLOB NOT NULL, + Slot INTEGER NOT NULL DEFAULT 0, + PoolId INTEGER UNSIGNED NOT NULL REFERENCES Pool, + MediaType TINYBLOB NOT NULL, + FirstWritten DATETIME NOT NULL, + LastWritten DATETIME NOT NULL, + LabelDate DATETIME NOT NULL, + VolJobs INTEGER UNSIGNED NOT NULL DEFAULT 0, + VolFiles INTEGER UNSIGNED NOT NULL DEFAULT 0, + VolBlocks INTEGER UNSIGNED NOT NULL DEFAULT 0, + VolMounts INTEGER UNSIGNED NOT NULL DEFAULT 0, + VolBytes BIGINT UNSIGNED NOT NULL DEFAULT 0, + VolErrors INTEGER UNSIGNED NOT NULL DEFAULT 0, + VolWrites INTEGER UNSIGNED NOT NULL DEFAULT 0, + VolCapacityBytes BIGINT UNSIGNED NOT NULL, + VolStatus ENUM('Full', 'Archive', 'Append', 'Recycle', 'Purged', + 'Read-Only', 'Disabled', 'Error', 'Busy', 'Used', 'Cleaning') NOT NULL, + Recycle TINYINT NOT NULL DEFAULT 0, + VolRetention BIGINT UNSIGNED NOT NULL DEFAULT 
0, + VolUseDuration BIGINT UNSIGNED NOT NULL DEFAULT 0, + MaxVolJobs INTEGER UNSIGNED NOT NULL DEFAULT 0, + MaxVolFiles INTEGER UNSIGNED NOT NULL DEFAULT 0, + MaxVolBytes BIGINT UNSIGNED NOT NULL DEFAULT 0, + InChanger TINYINT NOT NULL DEFAULT 0, + MediaAddressing TINYINT NOT NULL DEFAULT 0, + VolReadTime BIGINT UNSIGNED NOT NULL DEFAULT 0, + VolWriteTime BIGINT UNSIGNED NOT NULL DEFAULT 0, + PRIMARY KEY(MediaId), + INDEX (PoolId) + ); +CREATE TABLE Pool ( + PoolId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + Name TINYBLOB NOT NULL, + NumVols INTEGER UNSIGNED NOT NULL DEFAULT 0, + MaxVols INTEGER UNSIGNED NOT NULL DEFAULT 0, + UseOnce TINYINT NOT NULL, + UseCatalog TINYINT NOT NULL, + AcceptAnyVolume TINYINT DEFAULT 0, + VolRetention BIGINT UNSIGNED NOT NULL, + VolUseDuration BIGINT UNSIGNED NOT NULL, + MaxVolJobs INTEGER UNSIGNED NOT NULL DEFAULT 0, + MaxVolFiles INTEGER UNSIGNED NOT NULL DEFAULT 0, + MaxVolBytes BIGINT UNSIGNED NOT NULL, + AutoPrune TINYINT DEFAULT 0, + Recycle TINYINT DEFAULT 0, + PoolType ENUM('Backup', 'Copy', 'Cloned', 'Archive', 'Migration', 'Scratch') NOT NULL, + LabelFormat TINYBLOB, + Enabled TINYINT DEFAULT 1, + ScratchPoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool, + RecyclePoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool, + UNIQUE (Name(128)), + PRIMARY KEY (PoolId) + ); +CREATE TABLE Client ( + ClientId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT, + Name TINYBLOB NOT NULL, + Uname TINYBLOB NOT NULL, /* full uname -a of client */ + AutoPrune TINYINT DEFAULT 0, + FileRetention BIGINT UNSIGNED NOT NULL, + JobRetention BIGINT UNSIGNED NOT NULL, + UNIQUE (Name(128)), + PRIMARY KEY(ClientId) + ); +CREATE TABLE BaseFiles ( + BaseId INTEGER UNSIGNED AUTO_INCREMENT, + BaseJobId INTEGER UNSIGNED NOT NULL REFERENCES Job, + JobId INTEGER UNSIGNED NOT NULL REFERENCES Job, + FileId INTEGER UNSIGNED NOT NULL REFERENCES File, + FileIndex INTEGER UNSIGNED, + PRIMARY KEY(BaseId) + ); +CREATE TABLE UnsavedFiles ( + UnsavedId INTEGER UNSIGNED 
AUTO_INCREMENT, + JobId INTEGER UNSIGNED NOT NULL REFERENCES Job, + PathId INTEGER UNSIGNED NOT NULL REFERENCES Path, + FilenameId INTEGER UNSIGNED NOT NULL REFERENCES Filename, + PRIMARY KEY (UnsavedId) + ); +CREATE TABLE Version ( + VersionId INTEGER UNSIGNED NOT NULL + ); +-- Initialize Version +INSERT INTO Version (VersionId) VALUES (7); +CREATE TABLE Counters ( + Counter TINYBLOB NOT NULL, + MinValue INTEGER, + MaxValue INTEGER, + CurrentValue INTEGER, + WrapCounter TINYBLOB NOT NULL, + PRIMARY KEY (Counter(128)) + ); +\end{verbatim} +\normalsize diff --git a/docs/manuals/en/developers/check_tex.pl b/docs/manuals/en/developers/check_tex.pl new file mode 100755 index 00000000..e12d51be --- /dev/null +++ b/docs/manuals/en/developers/check_tex.pl @@ -0,0 +1,152 @@ +#!/usr/bin/perl -w +# Finds potential problems in tex files, and issues warnings to the console +# about what it finds. Takes a list of files as its only arguments, +# and does checks on all the files listed. The assumption is that these are +# valid (or close to valid) LaTeX files. It follows \include statements +# recursively to pick up any included tex files. +# +# +# +# Currently the following checks are made: +# +# -- Multiple hyphens not inside a verbatim environment (or \verb). These +# should be placed inside a \verb{} contruct so they will not be converted +# to single hyphen by latex and latex2html. + + +# Original creation 3-8-05 by Karl Cunningham karlc -at- keckec -dot- com +# +# + +use strict; + +# The following builds the test string to identify and change multiple +# hyphens in the tex files. Several constructs are identified but only +# multiple hyphens are changed; the others are fed to the output +# unchanged. +my $b = '\\\\begin\\*?\\s*\\{\\s*'; # \begin{ +my $e = '\\\\end\\*?\\s*\\{\\s*'; # \end{ +my $c = '\\s*\\}'; # closing curly brace + +# This captures entire verbatim environments. These are passed to the output +# file unchanged. +my $verbatimenv = $b . "verbatim" . $c . ".*?" 
. $e . "verbatim" . $c; + +# This captures \verb{..{ constructs. They are passed to the output unchanged. +my $verb = '\\\\verb\\*?(.).*?\\1'; + +# This captures multiple hyphens with a leading and trailing space. These are not changed. +my $hyphsp = '\\s\\-{2,}\\s'; + +# This identifies other multiple hyphens. +my $hyphens = '\\-{2,}'; + +# This identifies \hyperpage{..} commands, which should be ignored. +my $hyperpage = '\\\\hyperpage\\*?\\{.*?\\}'; + +# This builds the actual test string from the above strings. +#my $teststr = "$verbatimenv|$verb|$tocentry|$hyphens"; +my $teststr = "$verbatimenv|$verb|$hyphsp|$hyperpage|$hyphens"; + + +sub get_includes { + # Get a list of include files from the top-level tex file. The first + # argument is a pointer to the list of files found. The rest of the + # arguments is a list of filenames to check for includes. + my $files = shift; + my ($fileline,$includefile,$includes); + + while (my $filename = shift) { + # Get a list of all the html files in the directory. + open my $if,"<$filename" or die "Cannot open input file $filename\n"; + $fileline = 0; + $includes = 0; + while (<$if>) { + chomp; + $fileline++; + # If a file is found in an include, process it. + if (($includefile) = /\\include\s*\{(.*?)\}/) { + $includes++; + # Append .tex to the filename + $includefile .= '.tex'; + + # If the include file has already been processed, issue a warning + # and don't do it again. + my $found = 0; + foreach (@$files) { + if ($_ eq $includefile) { + $found = 1; + last; + } + } + if ($found) { + print "$includefile found at line $fileline in $filename was previously included\n"; + } else { + # The file has not been previously found. Save it and + # recursively process it. + push (@$files,$includefile); + get_includes($files,$includefile); + } + } + } + close IF; + } +} + + +sub check_hyphens { + my (@files) = @_; + my ($filedata,$this,$linecnt,$before); + + # Build the test string to check for the various environments. 
+ # We only do the conversion if the multiple hyphens are outside of a + # verbatim environment (either \begin{verbatim}...\end{verbatim} or + # \verb{--}). Capture those environments and pass them to the output + # unchanged. + + foreach my $file (@files) { + # Open the file and load the whole thing into $filedata. A bit wasteful but + # easier to deal with, and we don't have a problem with speed here. + $filedata = ""; + open IF,"<$file" or die "Cannot open input file $file"; + while () { + $filedata .= $_; + } + close IF; + + # Set up to process the file data. + $linecnt = 1; + + # Go through the file data from beginning to end. For each match, save what + # came before it and what matched. $filedata now becomes only what came + # after the match. + # Chech the match to see if it starts with a multiple-hyphen. If so + # warn the user. Keep track of line numbers so they can be output + # with the warning message. + while ($filedata =~ /$teststr/os) { + $this = $&; + $before = $`; + $filedata = $'; + $linecnt += $before =~ tr/\n/\n/; + + # Check if the multiple hyphen is present outside of one of the + # acceptable constructs. + if ($this =~ /^\-+/) { + print "Possible unwanted multiple hyphen found in line ", + "$linecnt of file $file\n"; + } + $linecnt += $this =~ tr/\n/\n/; + } + } +} +################################################################## +# MAIN #### +################################################################## + +my (@includes,$cnt); + +# Examine the file pointed to by the first argument to get a list of +# includes to test. 
+get_includes(\@includes,@ARGV); + +check_hyphens(@includes); diff --git a/docs/manuals/en/developers/daemonprotocol.tex b/docs/manuals/en/developers/daemonprotocol.tex new file mode 100644 index 00000000..0354bbd5 --- /dev/null +++ b/docs/manuals/en/developers/daemonprotocol.tex @@ -0,0 +1,284 @@ +%% +%% + +\chapter{Daemon Protocol} +\label{_ChapterStart2} +\index{Protocol!Daemon } +\index{Daemon Protocol } + +\section{General} +\index{General } +\addcontentsline{toc}{subsection}{General} + +This document describes the protocols used between the various daemons. As +Bacula has developed, it has become quite out of date. The general idea still +holds true, but the details of the fields for each command, and indeed the +commands themselves have changed considerably. + +It is intended to be a technical discussion of the general daemon protocols +and as such is not targeted at end users but rather at developers and system +administrators that want or need to know more of the working details of {\bf +Bacula}. + +\section{Low Level Network Protocol} +\index{Protocol!Low Level Network } +\index{Low Level Network Protocol } +\addcontentsline{toc}{subsection}{Low Level Network Protocol} + +At the lowest level, the network protocol is handled by {\bf BSOCK} packets +which contain a lot of information about the status of the network connection: +who is at the other end, etc. Each basic {\bf Bacula} network read or write +actually consists of two low level network read/writes. The first write always +sends four bytes of data in machine independent byte order. If data is to +follow, the first four bytes are a positive non-zero integer indicating the +length of the data that follow in the subsequent write. If the four byte +integer is zero or negative, it indicates a special request, a sort of network +signaling capability. In this case, no data packet will follow. The low level +BSOCK routines expect that only a single thread is accessing the socket at a +time. 
It is advised that multiple threads do not read/write the same socket. +If you must do this, you must provide some sort of locking mechanism. It would +not be appropriate for efficiency reasons to make every call to the BSOCK +routines lock and unlock the packet. + +\section{General Daemon Protocol} +\index{General Daemon Protocol } +\index{Protocol!General Daemon } +\addcontentsline{toc}{subsection}{General Daemon Protocol} + +In general, all the daemons follow the following global rules. There may be +exceptions depending on the specific case. Normally, one daemon will be +sending commands to another daemon (specifically, the Director to the Storage +daemon and the Director to the File daemon). + +\begin{itemize} +\item Commands are always ASCII commands that are upper/lower case dependent + as well as space sensitive. +\item All binary data is converted into ASCII (either with printf statements + or using base64 encoding). +\item All responses to commands sent are always prefixed with a return + numeric code where codes in the 1000's are reserved for the Director, the + 2000's are reserved for the File daemon, and the 3000's are reserved for the +Storage daemon. +\item Any response that is not prefixed with a numeric code is a command (or + subcommand if you like) coming from the other end. For example, while the + Director is corresponding with the Storage daemon, the Storage daemon can +request Catalog services from the Director. This convention permits each side +to send commands to the other daemon while simultaneously responding to +commands. +\item Any response that is of zero length, depending on the context, either + terminates the data stream being sent or terminates command mode prior to + closing the connection. +\item Any response that is of negative length is a special sign that normally + requires a response. 
For example, during data transfer from the File daemon + to the Storage daemon, normally the File daemon sends continuously without +intervening reads. However, periodically, the File daemon will send a packet +of length -1 indicating that the current data stream is complete and that the +Storage daemon should respond to the packet with an OK, ABORT JOB, PAUSE, +etc. This permits the File daemon to efficiently send data while at the same +time occasionally ``polling'' the Storage daemon for his status or any +special requests. + +Currently, these negative lengths are specific to the daemon, but shortly, +the range 0 to -999 will be standard daemon wide signals, while -1000 to +-1999 will be for Director use, -2000 to -2999 for the File daemon, and +-3000 to -3999 for the Storage daemon. +\end{itemize} + +\section{The Protocol Used Between the Director and the Storage Daemon} +\index{Daemon!Protocol Used Between the Director and the Storage } +\index{Protocol Used Between the Director and the Storage Daemon } +\addcontentsline{toc}{subsection}{Protocol Used Between the Director and the +Storage Daemon} + +Before sending commands to the File daemon, the Director opens a Message +channel with the Storage daemon, identifies itself and presents its password. +If the password check is OK, the Storage daemon accepts the Director. The +Director then passes the Storage daemon, the JobId to be run as well as the +File daemon authorization (append, read all, or read for a specific session). +The Storage daemon will then pass back to the Director an enabling key for this +JobId that must be presented by the File daemon when opening the job. Until +this process is complete, the Storage daemon is not available for use by File +daemons. 
+ +\footnotesize +\begin{verbatim} +SD: listens +DR: makes connection +DR: Hello calling +SD: 3000 OK Hello +DR: JobId=nnn Allow=(append, read) Session=(*, SessionId) + (Session not implemented yet) +SD: 3000 OK Job Authorization= +DR: use device= media_type= + pool_name= pool_type= +SD: 3000 OK use device +\end{verbatim} +\normalsize + +For the Director to be authorized, the \lt{}Director-name\gt{} and the +\lt{}password\gt{} must match the values in one of the Storage daemon's +Director resources (there may be several Directors that can access a single +Storage daemon). + +\section{The Protocol Used Between the Director and the File Daemon} +\index{Daemon!Protocol Used Between the Director and the File } +\index{Protocol Used Between the Director and the File Daemon } +\addcontentsline{toc}{subsection}{Protocol Used Between the Director and the +File Daemon} + +A typical conversation might look like the following: + +\footnotesize +\begin{verbatim} +FD: listens +DR: makes connection +DR: Hello calling +FD: 2000 OK Hello +DR: JobId=nnn Authorization= +FD: 2000 OK Job +DR: storage address = port = + name = mediatype = +FD: 2000 OK storage +DR: include +DR: +DR: + ... +DR: Null packet +FD: 2000 OK include +DR: exclude +DR: +DR: + ... +DR: Null packet +FD: 2000 OK exclude +DR: full +FD: 2000 OK full +DR: save +FD: 2000 OK save +FD: Attribute record for each file as sent to the + Storage daemon (described above). +FD: Null packet +FD: + e.g. + 3000 OK Volumes = + 3001 Volume = + + 3002 Volume data = + + ... additional Volume / Volume data pairs for volumes 2 .. 
n +FD: Null packet +FD: close socket +\end{verbatim} +\normalsize + +\section{The Save Protocol Between the File Daemon and the Storage Daemon} +\index{Save Protocol Between the File Daemon and the Storage Daemon } +\index{Daemon!Save Protocol Between the File Daemon and the Storage } +\addcontentsline{toc}{subsection}{Save Protocol Between the File Daemon and +the Storage Daemon} + +Once the Director has sent a {\bf save} command to the File daemon, the File +daemon will contact the Storage daemon to begin the save. + +In what follows: FD: refers to information sent via the network from the File +daemon to the Storage daemon, and SD: refers to information sent from the +Storage daemon to the File daemon. + +\subsection{Command and Control Information} +\index{Information!Command and Control } +\index{Command and Control Information } +\addcontentsline{toc}{subsubsection}{Command and Control Information} + +Command and control information is exchanged in human readable ASCII commands. + + +\footnotesize +\begin{verbatim} +FD: listens +SD: makes connection +FD: append open session = [] +SD: 3000 OK ticket = +FD: append data +SD: 3000 OK data address = port = +\end{verbatim} +\normalsize + +\subsection{Data Information} +\index{Information!Data } +\index{Data Information } +\addcontentsline{toc}{subsubsection}{Data Information} + +The Data information consists of the file attributes and data to the Storage +daemon. For the most part, the data information is sent one way: from the File +daemon to the Storage daemon. This allows the File daemon to transfer +information as fast as possible without a lot of handshaking and network +overhead. + +However, from time to time, the File daemon needs to do a sort of checkpoint +of the situation to ensure that everything is going well with the Storage +daemon.
To do so, the File daemon sends a packet with a negative length +indicating that he wishes the Storage daemon to respond by sending a packet of +information to the File daemon. The File daemon then waits to receive a packet +from the Storage daemon before continuing. + +All data sent are in binary format except for the header packet, which is in +ASCII. There are two packet types used in data transfer mode: a header packet, +the contents of which are known to the Storage daemon, and a data packet, the +contents of which are never examined by the Storage daemon. + +The first data packet to the Storage daemon will be an ASCII header packet +consisting of the following data. + +\lt{}File-Index\gt{} \lt{}Stream-Id\gt{} \lt{}Info\gt{} where {\bf +\lt{}File-Index\gt{}} is a sequential number beginning from one that +increments with each file (or directory) sent. + +where {\bf \lt{}Stream-Id\gt{}} will be 1 for the Attributes record and 2 for +uncompressed File data. 3 is reserved for the MD5 signature for the file. + +where {\bf \lt{}Info\gt{}} transmits information about the Stream to the +Storage Daemon. It is a character string field where each character has a +meaning. The only character currently defined is 0 (zero), which is simply a +place holder (a no op). In the future, there may be codes indicating +compressed data, encrypted data, etc. + +Immediately following the header packet, the Storage daemon will expect any +number of data packets. The series of data packets is terminated by a zero +length packet, which indicates to the Storage daemon that the next packet will +be another header packet. As previously mentioned, a negative length packet is +a request for the Storage daemon to temporarily enter command mode and send a +reply to the File daemon.
Thus an actual conversation might contain the +following exchanges: + +\footnotesize +\begin{verbatim} +FD: <1 1 0> (header packet) +FD: +FD: Null packet +FD: <1 2 0> +FD: +FD: Packet length = -1 +SD: 3000 OK +FD: <2 1 0> +FD: +FD: Null packet +FD: <2 2 0> +FD: +FD: Null packet +FD: Null packet +FD: append end session +SD: 3000 OK end +FD: append close session +SD: 3000 OK Volumes = +SD: 3001 Volume = + +SD: 3002 Volume data = + +SD: ... additional Volume / Volume data pairs for + volumes 2 .. n +FD: close socket +\end{verbatim} +\normalsize + +The information returned to the File daemon by the Storage daemon in response +to the {\bf append close session} is transmitted in turn to the Director. diff --git a/docs/manuals/en/developers/developers.css b/docs/manuals/en/developers/developers.css new file mode 100644 index 00000000..d1824aff --- /dev/null +++ b/docs/manuals/en/developers/developers.css @@ -0,0 +1,30 @@ +/* Century Schoolbook font is very similar to Computer Modern Math: cmmi */ +.MATH { font-family: "Century Schoolbook", serif; } +.MATH I { font-family: "Century Schoolbook", serif; font-style: italic } +.BOLDMATH { font-family: "Century Schoolbook", serif; font-weight: bold } + +/* implement both fixed-size and relative sizes */ +SMALL.XTINY { font-size : xx-small } +SMALL.TINY { font-size : x-small } +SMALL.SCRIPTSIZE { font-size : smaller } +SMALL.FOOTNOTESIZE { font-size : small } +SMALL.SMALL { } +BIG.LARGE { } +BIG.XLARGE { font-size : large } +BIG.XXLARGE { font-size : x-large } +BIG.HUGE { font-size : larger } +BIG.XHUGE { font-size : xx-large } + +/* heading styles */ +H1 { } +H2 { } +H3 { } +H4 { } +H5 { } + +/* mathematics styles */ +DIV.displaymath { } /* math displays */ +TD.eqno { } /* equation-number cells */ + + +/* document-specific styles come next */ diff --git a/docs/manuals/en/developers/developers.tex b/docs/manuals/en/developers/developers.tex new file mode 100644 index 00000000..840b1a0a --- /dev/null +++ 
b/docs/manuals/en/developers/developers.tex @@ -0,0 +1,88 @@ +%% +%% + +\documentclass[11pt,a4paper]{report} +\usepackage{html} +\usepackage{float} +\usepackage{graphicx} +\usepackage{bacula} +\usepackage{longtable} +\usepackage{makeidx} +\usepackage{index} +\usepackage{setspace} +\usepackage{hyperref} +\usepackage{url} + + +\makeindex +\newindex{general}{idx}{ind}{General Index} + +\sloppy + +\begin{document} +\sloppy + +\newfont{\bighead}{cmr17 at 36pt} +\parskip 10pt +\parindent 0pt + +\title{\includegraphics{./bacula-logo.eps} \\ \bigskip + \Huge{Developers' Guide} + \begin{center} + \large{It comes in the night and sucks + the essence from your computers. } + \end{center} +} + + +\author{Kern Sibbald} +\date{\vspace{1.0in}\today \\ + This manual documents Bacula version \input{version} \\ + \vspace{0.2in} + Copyright \copyright 1999-2007, Free Software Foundation Europe + e.V. \\ + \vspace{0.2in} + Permission is granted to copy, distribute and/or modify this document under the terms of the + GNU Free Documentation License, Version 1.2 published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU Free Documentation License". 
+} + + +\maketitle + +\clearpage +\tableofcontents +\clearpage +\listoffigures +\clearpage +\listoftables +\clearpage + +\include{generaldevel} +\include{platformsupport} +\include{daemonprotocol} +\include{director} +\include{file} +\include{storage} +\include{catalog} +\include{mediaformat} +\include{porting} +\include{gui-interface} +\include{tls-techdoc} +\include{regression} +\include{md5} +\include{mempool} +\include{netprotocol} +\include{smartall} +\include{fdl} + + +% The following line tells link_resolver.pl to not include these files: +% nolinks developersi baculai-dir baculai-fd baculai-sd baculai-console baculai-main + +% pull in the index +\clearpage +\printindex + +\end{document} diff --git a/docs/manuals/en/developers/director.tex b/docs/manuals/en/developers/director.tex new file mode 100644 index 00000000..d8c4cd0f --- /dev/null +++ b/docs/manuals/en/developers/director.tex @@ -0,0 +1,18 @@ +%% +%% + +\chapter{Director Services Daemon} +\label{_ChapterStart6} +\index{Daemon!Director Services } +\index{Director Services Daemon } +\addcontentsline{toc}{section}{Director Services Daemon} + +This chapter is intended to be a technical discussion of the Director services +and as such is not targeted at end users but rather at developers and system +administrators that want or need to know more of the working details of {\bf +Bacula}. + +The {\bf Bacula Director} services consist of the program that supervises all +the backup and restore operations. + +To be written ... 
diff --git a/docs/manuals/en/developers/fdl.tex b/docs/manuals/en/developers/fdl.tex new file mode 100644 index 00000000..9304bb60 --- /dev/null +++ b/docs/manuals/en/developers/fdl.tex @@ -0,0 +1,511 @@ +%---------The file header--------------------------------------------- + +%% \usepackage[english]{babel} %language selection +%% \usepackage[T1]{fontenc} + +%%\pagenumbering{arabic} + +%% \usepackage{hyperref} +%% \hypersetup{colorlinks, +%% citecolor=black, +%% filecolor=black, +%% linkcolor=black, +%% urlcolor=black, +%% pdftex} + + +%--------------------------------------------------------------------- +\chapter{GNU Free Documentation License} +\index[general]{GNU Free Documentation License} +\index[general]{License!GNU Free Documentation} +\addcontentsline{toc}{section}{GNU Free Documentation License} + +%\label{label_fdl} + + \begin{center} + + Version 1.2, November 2002 + + + Copyright \copyright 2000,2001,2002 Free Software Foundation, Inc. + + \bigskip + + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + \bigskip + + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. +\end{center} + + +\begin{center} +{\bf\large Preamble} +\end{center} + +The purpose of this License is to make a manual, textbook, or other +functional and useful document "free" in the sense of freedom: to +assure everyone the effective freedom to copy and redistribute it, +with or without modifying it, either commercially or noncommercially. +Secondarily, this License preserves for the author and publisher a way +to get credit for their work, while not being considered responsible +for modifications made by others. + +This License is a kind of "copyleft", which means that derivative +works of the document must themselves be free in the same sense. It +complements the GNU General Public License, which is a copyleft +license designed for free software.
+ +We have designed this License in order to use it for manuals for free +software, because free software needs free documentation: a free +program should come with manuals providing the same freedoms that the +software does. But this License is not limited to software manuals; +it can be used for any textual work, regardless of subject matter or +whether it is published as a printed book. We recommend this License +principally for works whose purpose is instruction or reference. + + +\begin{center} +{\Large\bf 1. APPLICABILITY AND DEFINITIONS} +\addcontentsline{toc}{section}{1. APPLICABILITY AND DEFINITIONS} +\end{center} + +This License applies to any manual or other work, in any medium, that +contains a notice placed by the copyright holder saying it can be +distributed under the terms of this License. Such a notice grants a +world-wide, royalty-free license, unlimited in duration, to use that +work under the conditions stated herein. The \textbf{"Document"}, below, +refers to any such manual or work. Any member of the public is a +licensee, and is addressed as \textbf{"you"}. You accept the license if you +copy, modify or distribute the work in a way requiring permission +under copyright law. + +A \textbf{"Modified Version"} of the Document means any work containing the +Document or a portion of it, either copied verbatim, or with +modifications and/or translated into another language. + +A \textbf{"Secondary Section"} is a named appendix or a front-matter section of +the Document that deals exclusively with the relationship of the +publishers or authors of the Document to the Document's overall subject +(or to related matters) and contains nothing that could fall directly +within that overall subject. (Thus, if the Document is in part a +textbook of mathematics, a Secondary Section may not explain any +mathematics.) 
The relationship could be a matter of historical +connection with the subject or with related matters, or of legal, +commercial, philosophical, ethical or political position regarding +them. + +The \textbf{"Invariant Sections"} are certain Secondary Sections whose titles +are designated, as being those of Invariant Sections, in the notice +that says that the Document is released under this License. If a +section does not fit the above definition of Secondary then it is not +allowed to be designated as Invariant. The Document may contain zero +Invariant Sections. If the Document does not identify any Invariant +Sections then there are none. + +The \textbf{"Cover Texts"} are certain short passages of text that are listed, +as Front-Cover Texts or Back-Cover Texts, in the notice that says that +the Document is released under this License. A Front-Cover Text may +be at most 5 words, and a Back-Cover Text may be at most 25 words. + +A \textbf{"Transparent"} copy of the Document means a machine-readable copy, +represented in a format whose specification is available to the +general public, that is suitable for revising the document +straightforwardly with generic text editors or (for images composed of +pixels) generic paint programs or (for drawings) some widely available +drawing editor, and that is suitable for input to text formatters or +for automatic translation to a variety of formats suitable for input +to text formatters. A copy made in an otherwise Transparent file +format whose markup, or absence of markup, has been arranged to thwart +or discourage subsequent modification by readers is not Transparent. +An image format is not Transparent if used for any substantial amount +of text. A copy that is not "Transparent" is called \textbf{"Opaque"}. 
+ +Examples of suitable formats for Transparent copies include plain +ASCII without markup, Texinfo input format, LaTeX input format, SGML +or XML using a publicly available DTD, and standard-conforming simple +HTML, PostScript or PDF designed for human modification. Examples of +transparent image formats include PNG, XCF and JPG. Opaque formats +include proprietary formats that can be read and edited only by +proprietary word processors, SGML or XML for which the DTD and/or +processing tools are not generally available, and the +machine-generated HTML, PostScript or PDF produced by some word +processors for output purposes only. + +The \textbf{"Title Page"} means, for a printed book, the title page itself, +plus such following pages as are needed to hold, legibly, the material +this License requires to appear in the title page. For works in +formats which do not have any title page as such, "Title Page" means +the text near the most prominent appearance of the work's title, +preceding the beginning of the body of the text. + +A section \textbf{"Entitled XYZ"} means a named subunit of the Document whose +title either is precisely XYZ or contains XYZ in parentheses following +text that translates XYZ in another language. (Here XYZ stands for a +specific section name mentioned below, such as \textbf{"Acknowledgements"}, +\textbf{"Dedications"}, \textbf{"Endorsements"}, or \textbf{"History"}.) +To \textbf{"Preserve the Title"} +of such a section when you modify the Document means that it remains a +section "Entitled XYZ" according to this definition. + +The Document may include Warranty Disclaimers next to the notice which +states that this License applies to the Document. These Warranty +Disclaimers are considered to be included by reference in this +License, but only as regards disclaiming warranties: any other +implication that these Warranty Disclaimers may have is void and has +no effect on the meaning of this License. + + +\begin{center} +{\Large\bf 2. 
VERBATIM COPYING} +\addcontentsline{toc}{section}{2. VERBATIM COPYING} +\end{center} + +You may copy and distribute the Document in any medium, either +commercially or noncommercially, provided that this License, the +copyright notices, and the license notice saying this License applies +to the Document are reproduced in all copies, and that you add no other +conditions whatsoever to those of this License. You may not use +technical measures to obstruct or control the reading or further +copying of the copies you make or distribute. However, you may accept +compensation in exchange for copies. If you distribute a large enough +number of copies you must also follow the conditions in section 3. + +You may also lend copies, under the same conditions stated above, and +you may publicly display copies. + + +\begin{center} +{\Large\bf 3. COPYING IN QUANTITY} +\addcontentsline{toc}{section}{3. COPYING IN QUANTITY} +\end{center} + + +If you publish printed copies (or copies in media that commonly have +printed covers) of the Document, numbering more than 100, and the +Document's license notice requires Cover Texts, you must enclose the +copies in covers that carry, clearly and legibly, all these Cover +Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on +the back cover. Both covers must also clearly and legibly identify +you as the publisher of these copies. The front cover must present +the full title with all words of the title equally prominent and +visible. You may add other material on the covers in addition. +Copying with changes limited to the covers, as long as they preserve +the title of the Document and satisfy these conditions, can be treated +as verbatim copying in other respects. + +If the required texts for either cover are too voluminous to fit +legibly, you should put the first ones listed (as many as fit +reasonably) on the actual cover, and continue the rest onto adjacent +pages. 
+ +If you publish or distribute Opaque copies of the Document numbering +more than 100, you must either include a machine-readable Transparent +copy along with each Opaque copy, or state in or with each Opaque copy +a computer-network location from which the general network-using +public has access to download using public-standard network protocols +a complete Transparent copy of the Document, free of added material. +If you use the latter option, you must take reasonably prudent steps, +when you begin distribution of Opaque copies in quantity, to ensure +that this Transparent copy will remain thus accessible at the stated +location until at least one year after the last time you distribute an +Opaque copy (directly or through your agents or retailers) of that +edition to the public. + +It is requested, but not required, that you contact the authors of the +Document well before redistributing any large number of copies, to give +them a chance to provide you with an updated version of the Document. + + +\begin{center} +{\Large\bf 4. MODIFICATIONS} +\addcontentsline{toc}{section}{4. MODIFICATIONS} +\end{center} + +You may copy and distribute a Modified Version of the Document under +the conditions of sections 2 and 3 above, provided that you release +the Modified Version under precisely this License, with the Modified +Version filling the role of the Document, thus licensing distribution +and modification of the Modified Version to whoever possesses a copy +of it. In addition, you must do these things in the Modified Version: + +\begin{itemize} +\item[A.] + Use in the Title Page (and on the covers, if any) a title distinct + from that of the Document, and from those of previous versions + (which should, if there were any, be listed in the History section + of the Document). You may use the same title as a previous version + if the original publisher of that version gives permission. + +\item[B.] 
+ List on the Title Page, as authors, one or more persons or entities + responsible for authorship of the modifications in the Modified + Version, together with at least five of the principal authors of the + Document (all of its principal authors, if it has fewer than five), + unless they release you from this requirement. + +\item[C.] + State on the Title page the name of the publisher of the + Modified Version, as the publisher. + +\item[D.] + Preserve all the copyright notices of the Document. + +\item[E.] + Add an appropriate copyright notice for your modifications + adjacent to the other copyright notices. + +\item[F.] + Include, immediately after the copyright notices, a license notice + giving the public permission to use the Modified Version under the + terms of this License, in the form shown in the Addendum below. + +\item[G.] + Preserve in that license notice the full lists of Invariant Sections + and required Cover Texts given in the Document's license notice. + +\item[H.] + Include an unaltered copy of this License. + +\item[I.] + Preserve the section Entitled "History", Preserve its Title, and add + to it an item stating at least the title, year, new authors, and + publisher of the Modified Version as given on the Title Page. If + there is no section Entitled "History" in the Document, create one + stating the title, year, authors, and publisher of the Document as + given on its Title Page, then add an item describing the Modified + Version as stated in the previous sentence. + +\item[J.] + Preserve the network location, if any, given in the Document for + public access to a Transparent copy of the Document, and likewise + the network locations given in the Document for previous versions + it was based on. These may be placed in the "History" section. + You may omit a network location for a work that was published at + least four years before the Document itself, or if the original + publisher of the version it refers to gives permission. 
+ +\item[K.] + For any section Entitled "Acknowledgements" or "Dedications", + Preserve the Title of the section, and preserve in the section all + the substance and tone of each of the contributor acknowledgements + and/or dedications given therein. + +\item[L.] + Preserve all the Invariant Sections of the Document, + unaltered in their text and in their titles. Section numbers + or the equivalent are not considered part of the section titles. + +\item[M.] + Delete any section Entitled "Endorsements". Such a section + may not be included in the Modified Version. + +\item[N.] + Do not retitle any existing section to be Entitled "Endorsements" + or to conflict in title with any Invariant Section. + +\item[O.] + Preserve any Warranty Disclaimers. +\end{itemize} + +If the Modified Version includes new front-matter sections or +appendices that qualify as Secondary Sections and contain no material +copied from the Document, you may at your option designate some or all +of these sections as invariant. To do this, add their titles to the +list of Invariant Sections in the Modified Version's license notice. +These titles must be distinct from any other section titles. + +You may add a section Entitled "Endorsements", provided it contains +nothing but endorsements of your Modified Version by various +parties--for example, statements of peer review or that the text has +been approved by an organization as the authoritative definition of a +standard. + +You may add a passage of up to five words as a Front-Cover Text, and a +passage of up to 25 words as a Back-Cover Text, to the end of the list +of Cover Texts in the Modified Version. Only one passage of +Front-Cover Text and one of Back-Cover Text may be added by (or +through arrangements made by) any one entity. 
If the Document already +includes a cover text for the same cover, previously added by you or +by arrangement made by the same entity you are acting on behalf of, +you may not add another; but you may replace the old one, on explicit +permission from the previous publisher that added the old one. + +The author(s) and publisher(s) of the Document do not by this License +give permission to use their names for publicity for or to assert or +imply endorsement of any Modified Version. + + +\begin{center} +{\Large\bf 5. COMBINING DOCUMENTS} +\addcontentsline{toc}{section}{5. COMBINING DOCUMENTS} +\end{center} + + +You may combine the Document with other documents released under this +License, under the terms defined in section 4 above for modified +versions, provided that you include in the combination all of the +Invariant Sections of all of the original documents, unmodified, and +list them all as Invariant Sections of your combined work in its +license notice, and that you preserve all their Warranty Disclaimers. + +The combined work need only contain one copy of this License, and +multiple identical Invariant Sections may be replaced with a single +copy. If there are multiple Invariant Sections with the same name but +different contents, make the title of each such section unique by +adding at the end of it, in parentheses, the name of the original +author or publisher of that section if known, or else a unique number. +Make the same adjustment to the section titles in the list of +Invariant Sections in the license notice of the combined work. + +In the combination, you must combine any sections Entitled "History" +in the various original documents, forming one section Entitled +"History"; likewise combine any sections Entitled "Acknowledgements", +and any sections Entitled "Dedications". You must delete all sections +Entitled "Endorsements". + +\begin{center} +{\Large\bf 6. COLLECTIONS OF DOCUMENTS} +\addcontentsline{toc}{section}{6. 
COLLECTIONS OF DOCUMENTS} +\end{center} + +You may make a collection consisting of the Document and other documents +released under this License, and replace the individual copies of this +License in the various documents with a single copy that is included in +the collection, provided that you follow the rules of this License for +verbatim copying of each of the documents in all other respects. + +You may extract a single document from such a collection, and distribute +it individually under this License, provided you insert a copy of this +License into the extracted document, and follow this License in all +other respects regarding verbatim copying of that document. + + +\begin{center} +{\Large\bf 7. AGGREGATION WITH INDEPENDENT WORKS} +\addcontentsline{toc}{section}{7. AGGREGATION WITH INDEPENDENT WORKS} +\end{center} + + +A compilation of the Document or its derivatives with other separate +and independent documents or works, in or on a volume of a storage or +distribution medium, is called an "aggregate" if the copyright +resulting from the compilation is not used to limit the legal rights +of the compilation's users beyond what the individual works permit. +When the Document is included in an aggregate, this License does not +apply to the other works in the aggregate which are not themselves +derivative works of the Document. + +If the Cover Text requirement of section 3 is applicable to these +copies of the Document, then if the Document is less than one half of +the entire aggregate, the Document's Cover Texts may be placed on +covers that bracket the Document within the aggregate, or the +electronic equivalent of covers if the Document is in electronic form. +Otherwise they must appear on printed covers that bracket the whole +aggregate. + + +\begin{center} +{\Large\bf 8. TRANSLATION} +\addcontentsline{toc}{section}{8. 
TRANSLATION} +\end{center} + + +Translation is considered a kind of modification, so you may +distribute translations of the Document under the terms of section 4. +Replacing Invariant Sections with translations requires special +permission from their copyright holders, but you may include +translations of some or all Invariant Sections in addition to the +original versions of these Invariant Sections. You may include a +translation of this License, and all the license notices in the +Document, and any Warranty Disclaimers, provided that you also include +the original English version of this License and the original versions +of those notices and disclaimers. In case of a disagreement between +the translation and the original version of this License or a notice +or disclaimer, the original version will prevail. + +If a section in the Document is Entitled "Acknowledgements", +"Dedications", or "History", the requirement (section 4) to Preserve +its Title (section 1) will typically require changing the actual +title. + + +\begin{center} +{\Large\bf 9. TERMINATION} +\addcontentsline{toc}{section}{9. TERMINATION} +\end{center} + + +You may not copy, modify, sublicense, or distribute the Document except +as expressly provided for under this License. Any other attempt to +copy, modify, sublicense or distribute the Document is void, and will +automatically terminate your rights under this License. However, +parties who have received copies, or rights, from you under this +License will not have their licenses terminated so long as such +parties remain in full compliance. + + +\begin{center} +{\Large\bf 10. FUTURE REVISIONS OF THIS LICENSE} +\addcontentsline{toc}{section}{10. FUTURE REVISIONS OF THIS LICENSE} +\end{center} + + +The Free Software Foundation may publish new, revised versions +of the GNU Free Documentation License from time to time. 
Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. See +http://www.gnu.org/copyleft/. + +Each version of the License is given a distinguishing version number. +If the Document specifies that a particular numbered version of this +License "or any later version" applies to it, you have the option of +following the terms and conditions either of that specified version or +of any later version that has been published (not as a draft) by the +Free Software Foundation. If the Document does not specify a version +number of this License, you may choose any version ever published (not +as a draft) by the Free Software Foundation. + + +\begin{center} +{\Large\bf ADDENDUM: How to use this License for your documents} +\addcontentsline{toc}{section}{ADDENDUM: How to use this License for your documents} +\end{center} + +To use this License in a document you have written, include a copy of +the License in the document and put the following copyright and +license notices just after the title page: + +\bigskip +\begin{quote} + Copyright \copyright YEAR YOUR NAME. + Permission is granted to copy, distribute and/or modify this document + under the terms of the GNU Free Documentation License, Version 1.2 + or any later version published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU + Free Documentation License". +\end{quote} +\bigskip + +If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts, +replace the "with...Texts." line with this: + +\bigskip +\begin{quote} + with the Invariant Sections being LIST THEIR TITLES, with the + Front-Cover Texts being LIST, and with the Back-Cover Texts being LIST. 
+\end{quote} +\bigskip + +If you have Invariant Sections without Cover Texts, or some other +combination of the three, merge those two alternatives to suit the +situation. + +If your document contains nontrivial examples of program code, we +recommend releasing these examples in parallel under your choice of +free software license, such as the GNU General Public License, +to permit their use in free software. + +%--------------------------------------------------------------------- diff --git a/docs/manuals/en/developers/file.tex b/docs/manuals/en/developers/file.tex new file mode 100644 index 00000000..ee89577b --- /dev/null +++ b/docs/manuals/en/developers/file.tex @@ -0,0 +1,68 @@ +%% +%% + +\chapter{File Services Daemon} +\label{_ChapterStart11} +\index{File Services Daemon } +\index{Daemon!File Services } +\addcontentsline{toc}{section}{File Services Daemon} + +Please note, this section is somewhat out of date as the code has evolved +significantly. The basic idea has not changed though. + +This chapter is intended to be a technical discussion of the File daemon +services and as such is not targeted at end users but rather at developers and +system administrators that want or need to know more of the working details of +{\bf Bacula}. + +The {\bf Bacula File Services} consist of the programs that run on the system +to be backed up and provide the interface between the Host File system and +Bacula -- in particular, the Director and the Storage services. + +When time comes for a backup, the Director gets in touch with the File daemon +on the client machine and hands it a set of ``marching orders'' which, if +written in English, might be something like the following: + +OK, {\bf File daemon}, it's time for your daily incremental backup. I want you +to get in touch with the Storage daemon on host archive.mysite.com and perform +the following save operations with the designated options. 
You'll note that +I've attached include and exclude lists and patterns you should apply when +backing up the file system. As this is an incremental backup, you should save +only files modified since the time you started your last backup which, as you +may recall, was 2000-11-19-06:43:38. Please let me know when you're done and +how it went. Thank you. + +So, having been handed everything it needs to decide what to dump and where to +store it, the File daemon doesn't need to have any further contact with the +Director until the backup is complete providing there are no errors. If there +are errors, the error messages will be delivered immediately to the Director. +While the backup is proceeding, the File daemon will send the file coordinates +and data for each file being backed up to the Storage daemon, which will in +turn pass the file coordinates to the Director to put in the catalog. + +During a {\bf Verify} of the catalog, the situation is different, since the +File daemon will have an exchange with the Director for each file, and will +not contact the Storage daemon. + +A {\bf Restore} operation will be very similar to the {\bf Backup} except that +during the {\bf Restore} the Storage daemon will not send storage coordinates +to the Director since the Director presumably already has them. On the other +hand, any error messages from either the Storage daemon or File daemon will +normally be sent directly to the Director (this, of course, depends on how +the Message resource is defined). + +\section{Commands Received from the Director for a Backup} +\index{Backup!Commands Received from the Director for a } +\index{Commands Received from the Director for a Backup } +\addcontentsline{toc}{subsection}{Commands Received from the Director for a +Backup} + +To be written ...
+ +\section{Commands Received from the Director for a Restore} +\index{Commands Received from the Director for a Restore } +\index{Restore!Commands Received from the Director for a } +\addcontentsline{toc}{subsection}{Commands Received from the Director for a +Restore} + +To be written ... diff --git a/docs/manuals/en/developers/fix_tex.pl b/docs/manuals/en/developers/fix_tex.pl new file mode 100755 index 00000000..98657576 --- /dev/null +++ b/docs/manuals/en/developers/fix_tex.pl @@ -0,0 +1,184 @@ +#!/usr/bin/perl -w +# Fixes various things within tex files. + +use strict; + +my %args; + + +sub get_includes { + # Get a list of include files from the top-level tex file. + my (@list,$file); + + foreach my $filename (@_) { + $filename or next; + # Start with the top-level latex file so it gets checked too. + push (@list,$filename); + + # Get a list of all the html files in the directory. + open IF,"<$filename" or die "Cannot open input file $filename"; + while () { + chomp; + push @list,"$1.tex" if (/\\include\{(.*?)\}/); + } + + close IF; + } + return @list; +} + +sub convert_files { + my (@files) = @_; + my ($linecnt,$filedata,$output,$itemcnt,$indentcnt,$cnt); + + $cnt = 0; + foreach my $file (@files) { + # Open the file and load the whole thing into $filedata. A bit wasteful but + # easier to deal with, and we don't have a problem with speed here. + $filedata = ""; + open IF,"<$file" or die "Cannot open input file $file"; + while () { + $filedata .= $_; + } + close IF; + + # We look for a line that starts with \item, and indent the two next lines (if not blank) + # by three spaces. + my $linecnt = 3; + $indentcnt = 0; + $output = ""; + # Process a line at a time. + foreach (split(/\n/,$filedata)) { + $_ .= "\n"; # Put back the return. + # If this line is less than the third line past the \item command, + # and the line isn't blank and doesn't start with whitespace + # add three spaces to the start of the line. Keep track of the number + # of lines changed. 
+ if ($linecnt < 3 and !/^\\item/) { + if (/^[^\n\s]/) { + $output .= " " . $_; + $indentcnt++; + } else { + $output .= $_; + } + $linecnt++; + } else { + $linecnt = 3; + $output .= $_; + } + /^\\item / and $linecnt = 1; + } + + + # This is an item line. We need to process it too. If inside a \begin{description} environment, convert + # \item {\bf xxx} to \item [xxx] or \item [{xxx}] (if xxx contains '[' or ']'. + $itemcnt = 0; + $filedata = $output; + $output = ""; + my ($before,$descrip,$this,$between); + + # Find any \begin{description} environment + while ($filedata =~ /(\\begin[\s\n]*\{[\s\n]*description[\s\n]*\})(.*?)(\\end[\s\n]*\{[\s\n]*description[\s\n]*\})/s) { + $output .= $` . $1; + $filedata = $3 . $'; + $descrip = $2; + + # Search for \item {\bf xxx} + while ($descrip =~ /\\item[\s\n]*\{[\s\n]*\\bf[\s\n]*/s) { + $descrip = $'; + $output .= $`; + ($between,$descrip) = find_matching_brace($descrip); + if (!$descrip) { + $linecnt = $output =~ tr/\n/\n/; + print STDERR "Missing matching curly brace at line $linecnt in $file\n" if (!$descrip); + } + + # Now do the replacement. + $between = '{' . $between . '}' if ($between =~ /\[|\]/); + $output .= "\\item \[$between\]"; + $itemcnt++; + } + $output .= $descrip; + } + $output .= $filedata; + + # If any hyphens or \item commnads were converted, save the file. + if ($indentcnt or $itemcnt) { + open OF,">$file" or die "Cannot open output file $file"; + print OF $output; + close OF; + print "$indentcnt indent", ($indentcnt == 1) ? "" : "s"," added in $file\n"; + print "$itemcnt item", ($itemcnt == 1) ? "" : "s"," Changed in $file\n"; + } + + $cnt += $indentcnt + $itemcnt; + } + return $cnt; +} + +sub find_matching_brace { + # Finds text up to the next matching brace. Assumes that the input text doesn't contain + # the opening brace, but we want to find text up to a matching closing one. 
+ # Returns the text between the matching braces, followed by the rest of the text following + # (which does not include the matching brace). + # + my $str = shift; + my ($this,$temp); + my $cnt = 1; + + while ($cnt) { + # Ignore verbatim constructs involving curly braces, or if the character preceding + # the curly brace is a backslash. + if ($str =~ /\\verb\*?\{.*?\{|\\verb\*?\}.*?\}|\{|\}/s) { + $this .= $`; + $str = $'; + $temp = $&; + + if ((substr($this,-1,1) eq '\\') or + $temp =~ /^\\verb/) { + $this .= $temp; + next; + } + + $cnt += ($temp eq '{') ? 1 : -1; + # If this isn't the matching curly brace ($cnt > 0), include the brace. + $this .= $temp if ($cnt); + } else { + # No matching curly brace found. + return ($this . $str,''); + } + } + return ($this,$str); +} + +sub check_arguments { + # Checks command-line arguments for ones starting with -- puts them into + # a hash called %args and removes them from @ARGV. + my $args = shift; + my $i; + + for ($i = 0; $i < $#ARGV; $i++) { + $ARGV[$i] =~ /^\-+/ or next; + $ARGV[$i] =~ s/^\-+//; + $args{$ARGV[$i]} = ""; + delete ($ARGV[$i]); + + } +} + +################################################################## +# MAIN #### +################################################################## + +my @includes; +my $cnt; + +check_arguments(\%args); +die "No Files given to Check\n" if ($#ARGV < 0); + +# Examine the file pointed to by the first argument to get a list of +# includes to test. 
+@includes = get_includes(@ARGV); + +$cnt = convert_files(@includes); +print "No lines changed\n" unless $cnt; diff --git a/docs/manuals/en/developers/generaldevel.tex b/docs/manuals/en/developers/generaldevel.tex new file mode 100644 index 00000000..9404e57e --- /dev/null +++ b/docs/manuals/en/developers/generaldevel.tex @@ -0,0 +1,1403 @@ +%% +%% + +\chapter{Bacula Developer Notes} +\label{_ChapterStart10} +\index{Bacula Developer Notes} +\index{Notes!Bacula Developer} +\addcontentsline{toc}{section}{Bacula Developer Notes} + +\section{General} +\index{General} +\addcontentsline{toc}{subsection}{General} + +This document is intended mostly for developers and describes the general +framework of making Bacula source changes. + +\subsection{Contributions} +\index{Contributions} +\addcontentsline{toc}{subsubsection}{Contributions} + +Contributions from programmers are broken into two groups. The first are +contributions that are aids and not essential to Bacula. In general, these +will be scripts or will go into an examples or contributions directory. +For these kinds of non-essential contributions there is no obligation to do +a copyright assignment as described below. However, a copyright assignment +would still be appreciated. + +The second class of contributions are those which will be integrated with +Bacula and become an essential part. Within this class of contributions, there +are two hurdles to surmount. One is getting your patch accepted, and two is +dealing with copyright issues. The following text describes some of the +requirements for such code. + +\subsection{Patches} +\index{Patches} +\addcontentsline{toc}{subsubsection}{Patches} + +Subject to the copyright assignment described below, your patches should be +sent in {\bf diff -u} format relative to the current contents of the Source +Forge SVN, which is the easiest to understand and integrate. +Please be sure to use the Bacula indenting standard (see below). 
+If you have checked out the source with SVN, you can get a diff using: + +\begin{verbatim} +svn update +svn diff > change.patch +\end{verbatim} + +If you plan on doing significant development work over a period of time, +after having your first patch reviewed and approved, you will be eligible +for having developer SVN access so that you can commit your changes +directly to the SVN repository. To do so, you will need a userid on Source +Forge. + +\subsection{Copyrights} +\index{Copyrights} +\addcontentsline{toc}{subsubsection}{Copyrights} + +To avoid future problems concerning changing licensing or +copyrights, all code contributions more than a handful of lines +must be in the Public Domain or have the copyright transferred to +the Free Software Foundation Europe e.V. with a Fiduciary License +Agreement (FLA) as in the current code. Note, prior to +November 2004, the code was copyrighted by Kern Sibbald and John +Walker. After November 2004, the code was copyrighted by Kern +Sibbald, then on the 15th of November 2006, the copyright was +transferred to the Free Software Foundation Europe e.V. + +Your name should be clearly indicated as the author of the code, and you +must be extremely careful not to violate any copyrights or use other +people's code without acknowledging it. The purpose of this requirement is +to avoid future copyright, patent, or intellectual property problems. +Please read the LICENSE agreement in the main source code +directory. When you sign the Fiduciary License Agreement (FLA) +and send it in, you are agreeing to the terms of that LICENSE +file. + +To understand the possible source of future problems, please +examine the difficulties Mozilla is (was?) having finding +previous contributors at \elink{ +http://www.mozilla.org/MPL/missing.html} +{http://www.mozilla.org/MPL/missing.html}. The other important issue is to +avoid copyright, patent, or intellectual property violations as are currently +(May 2003) being claimed by SCO against IBM. 
+ +Although the copyright will be held by the Free Software +Foundation Europe e.V., each developer is expected to indicate +that he wrote and/or modified a particular module (or file) and +any other sources. The copyright assignment may seem a bit +unusual, but in reality, it is not. Most large projects require +this. + +If you have any doubts about this, please don't hesitate to ask. The +objective is to assure the long term survival of the Bacula project. + +Items not needing a copyright assignment are: most small changes, +enhancements, or bug fixes of 5-10 lines of code, which amount to +less than 20\% of any particular file. + +\subsection{Copyright Assignment -- Fiduciary License Agreement} +\index{Copyright Assignment} +\index{Assignment!Copyright} +\addcontentsline{toc}{subsubsection}{Copyright Assignment -- Fiduciary License Agreement} + +Since this is not a commercial enterprise, and we prefer to believe in +everyone's good faith, previously developers could assign the copyright by +explicitly acknowledging that they do so in their first submission. This +was sufficient if the developer is independent, or an employee of a +not-for-profit organization or a university. However, in an effort to +ensure that the Bacula code is really clean, beginning in August 2006, all +previous and future developers with SVN access will be asked to submit a +copyright assignment (or Fiduciary License Agreement -- FLA), +which means you agree to the LICENSE in the main source +directory. It also means that you receive back the right to use +the code that you have submitted. + +Any developer who wants to contribute and is employed by a company should +either list the employer as the owner of the code, or get +explicit permission from him to sign the copyright assignment. +This is because in many +countries, all work that an employee does whether on company time or in the +employee's free time is considered to be Intellectual Property of the +company. 
Obtaining official approval or an FLA from the company will avoid +misunderstandings between the employee, the company, and the Bacula +project. A good number of companies have already followed this procedure. + +The Fiduciary License Agreement is posted on the Bacula web site at: +\elink{http://www.bacula.org/FLA-bacula.en.pdf}{http://www.bacula.org/FLA-bacula.en.pdf} + +The instructions for filling out this agreement are also at: +\elink{http://www.bacula.org/?page=fsfe}{http://www.bacula.org/?page=fsfe} + +It should be filled out, then sent to: + +\begin{verbatim} + Free Software Foundation Europe + Freedom Task Force + Sumatrastrasse 25 + 8006 Zürich + Switzerland +\end{verbatim} + +Please note that the above address is different from the officially +registered office mentioned in the document. When you send in such a +complete document, please notify me: kern at sibbald dot com. + + + +\section{The Development Cycle} +\index{Development Cycle} +\index{Cycle!Development} +\addcontentsline{toc}{subsubsection}{Development Cycle} + +As I noted in the 1.38 ReleaseNotes, version 1.38 was different from prior +versions because it had a lot more contributions. I expect that this trend +will continue. As a consequence, I am going to modify how I normally do +development, and instead of making a list of all the features that I will +implement in the next version, I will personally sign up for one (maybe +two) projects at a time, and when they are complete, I will release a new +version. + +The difference is that I will have more time to review the new code that is +being contributed, and will be able to devote more time to a smaller number +of projects (1.38 had too many new features for me to handle correctly). 
+ +I expect that future release schedules will be much the same, and the +number of new features will also be much the same providing that the +contributions continue to come -- and they show no signs of let up :-) + +\index{Feature Requests} +{\bf Feature Requests:} \\ +In addition, I would like to "formalize" the feature requests a bit. + +Instead of me maintaining an informal list of everything I run into +(kernstodo), I would like to maintain a "formal" list of projects. This +means that all new feature requests, including those recently discussed on +the email lists, must be formally submitted and approved. + +Formal submission of feature requests will take two forms: \\ +1. non-mandatory, but highly recommended is to discuss proposed new features +on the mailing list.\\ +2. Formal submission of a Feature Request in a special format. +I'll give an example of this below, but you can also find it on the web +site under "Support -\gt{} Feature Requests". Since it takes a bit of time to +properly fill out a Feature Request form, you probably should check on the email list +first. + +Once the Feature Request is received by the keeper of the projects list, it +will be sent to me, and I will either accept it, send it back +asking for clarification, send it to the email list asking for opinions, or +reject it. + +If it is accepted, it will go in the "projects" file (a simple ASCII file) +maintained in the main Bacula source directory. + +{\bf Implementation of Feature Requests:}\\ +Any qualified developer can sign up for a project. The project must have +an entry in the projects file, and the developer's name will appear in the +Status field. + +{\bf How Feature Requests are accepted:}\\ +Acceptance of Feature Requests depends on several things: \\ +1. feedback from users. If it is negative, the Feature Request will probably not be +accepted. \\ +2. the difficulty of the project. 
A project that is so +difficult that I cannot imagine finding someone to implement probably won't +be accepted. \\ + 3. whether or not the Feature Request fits within the +current strategy of Bacula (for example a Feature Request that requests changing the +tape to tar format would not be accepted, ...) + +{\bf How Feature Requests are prioritized:}\\ +Once a Feature Request is accepted, it needs to be implemented. If you +can find a developer for it, or one signs up for implementing it, then the +Feature Request becomes top priority (at least for that developer). + +Between releases of Bacula, we will generally solicit Feature Request input +for the next version, and by way of this email, we suggest that you +discuss and send in your Feature Requests for the next release. Please +verify that the Feature Request is not in the current list (attached to this email). + +Once users have had several weeks to submit Feature Requests, the keeper of the +projects list will +organize them, and request users to vote on them. This will allow +prioritizing the Feature Requests. Having a priority is one thing, but +getting it implemented is another thing -- we are hoping that the Bacula +community will take more responsibility for assuring the implementation of +accepted Feature Requests. + +Feature Request format: +\begin{verbatim} +============= Empty Feature Request form =========== +Item n: One line summary ... + Date: Date submitted + Origin: Name and email of originator. + Status: + + What: More detailed explanation ... + + Why: Why it is important ... + + Notes: Additional notes or features (omit if not used) +============== End Feature Request form ============== +\end{verbatim} + +\begin{verbatim} +============= Example Completed Feature Request form =========== +Item 1: Implement a Migration job type that will move the job + data from one device to another. + Origin: Sponsored by Riege Software International GmbH. 
Contact: + Daniel Holtkamp + Date: 28 October 2005 + Status: Partially coded in 1.37 -- much more to do. Assigned to + Kern. + + What: The ability to copy, move, or archive data that is on a + device to another device is very important. + + Why: An ISP might want to backup to disk, but after 30 days + migrate the data to tape backup and delete it from + disk. Bacula should be able to handle this + automatically. It needs to know what was put where, + and when, and what to migrate -- it is a bit like + retention periods. Doing so would allow space to be + freed up for current backups while maintaining older + data on tape drives. + + Notes: Migration could be triggered by: + Number of Jobs + Number of Volumes + Age of Jobs + Highwater size (keep total size) + Lowwater mark +================================================= +\end{verbatim} + + +\section{Bacula Code Submissions and Projects} +\index{Submissions and Projects} +\addcontentsline{toc}{subsection}{Code Submissions and Projects} + +Getting code implemented in Bacula works roughly as follows: + +\begin{itemize} + +\item Kern is the project manager, but prefers not to be a "gate keeper". + This means that the developers are expected to be self-motivated, + and once they have experience submit directly to the SVN. However, + it is a good idea to have your patches reviewed prior to submitting, + and it is a bad idea to submit monster patches because no one will + be able to properly review them. See below for more details on this. + +\item There are growing numbers of contributions (very good). + +\item Some contributions come in the form of relatively small patches, + which Kern reviews, integrates, documents, tests, and maintains. 
+ +\item All Bacula developers take full + responsibility for writing the code, posting as patches so that I can + review it as time permits, integrating it at an appropriate time, + responding to my requests for tweaking it (name changes, ...), + document it in the code, document it in the manual (even though + their mother tongue is not English), test it, develop and commit + regression scripts, and answer in a timely fashion all bug reports -- + even occasionally accepting additional bugs :-) + + This is a sustainable way of going forward with Bacula, and the + direction that the project will be taking more and more. For + example, in the past, we have had some very dedicated programmers + who did major projects. However, these + programmers due to outside obligations (job responsibilities change of + job, school duties, ...) could not continue to maintain the code. In + those cases, the code suffers from lack of maintenance, sometimes I + patch it, sometimes not. In the end, the code gets dropped from the + project (there are two such contributions that are heading in that + direction). Whenever possible, we would like to avoid this, and + ensure a continuation of the code and a sharing of the development, + debugging, documentation, and maintenance responsibilities. +\end{itemize} + +\section{Patches for Released Versions} +\index{Patches for Released Versions} +\addcontentsline{toc}{subsection}{Patches for Released Versions} +If you fix a bug in a released version, you should, unless it is +an absolutely trivial bug, create and release a patch file for the +bug. The procedure is as follows: + +Fix the bug in the branch and in the trunk. + +Make a patch file for the branch and add the branch patch to +the patches directory in both the branch and the trunk. +The name should be 2.2.4-xxx.patch where xxx is unique, in this case it can +be "restore", e.g. 2.2.4-restore.patch. 
+ Add to the top of the +file a brief description and instructions for applying it -- see for example +2.2.4-poll-mount.patch. The best way to create the patch file is as +follows: + +\begin{verbatim} + (edit) 2.2.4-restore.patch + (input description) + (end edit) + + svn diff >>2.2.4-restore.patch +\end{verbatim} + +check to make sure no extra junk got put into the patch file (i.e. +it should have the patch for that bug only). + +If there is not a bug report on the problem, create one, then add the +patch to the bug report. + +Then upload it to the 2.2.x release of bacula-patches. + +So, in the end, the patch file is: +\begin{itemize} +\item Attached to the bug report + +\item In Branch-2.2/bacula/patches/... + +\item In the trunk + +\item Loaded on Source Forge bacula-patches 2.2.x release. When + you add it, click on the check box to send an Email so that all the + users that are monitoring SF patches get notified. +\end{itemize} + + + +\section{SVN Usage} +\index{SVN Usage} +\addcontentsline{toc}{subsection}{SVN Usage} + +Please note that if you are familiar with CVS, SVN is very +similar (and better), but there can be a few surprising +differences. + +The *entire* Bacula SourceForge.net Subversion repository can be +checked out through SVN with the following command: + +\begin{verbatim} +svn checkout https://bacula.svn.sourceforge.net/svnroot/bacula bacula +\end{verbatim} + +With the above command, you will get everything, which is a very large +amount of data: + +\begin{verbatim} +branches/ + Branch-1.32a/ + ... + Branch-2.0/ + import/ + vendor/ +tags/ + Release-1.1/ + ... + Release-2.0.2/ +trunk/ + bacula/ + docs/ + gui/ + regress/ + rescue/ +\end{verbatim} + +Note, you should NEVER commit code to any checkout that you have +done of a tag. All tags (e.g. Release-1.1, ... Release-2.0.2) +should be considered read-only. + +You may commit code to the most recent item in +branches (in the above the most recent one is Branch-2.0). 
If +you want to commit code to an older branch, then please contact +Kern first. + +You may create your own tags and/or branches, but they should +have a name clearly distinctive from Branch-, Release-, or Beta-, +which are official names used by the project. If you create a +tag, then you should NEVER commit code to it, for the same +reason noted above -- it should serve as a marker for something +you released. If you create a branch, then you are free to +commit to it as you wish. + +You may, of course, commit to the trunk. + +In summary: + +\begin{verbatim} +branches + Branch-nnn +tags + Release-nnn + Beta-nnn +\end{verbatim} + +are reserved names to be created only by the project manager (or +with his OK), where the nnn is any sequence of numbers and +periods (e.g. 2.0, 2.0.1, ...). + +In addition all tags even those that you create are read-only +forever. Typically tags represent release points either in the +trunk or in a branch. + + +Coming back to getting source code. +If you only want the current Bacula source code, you could use: + +\begin{verbatim} +svn checkout https://bacula.svn.sourceforge.net/svnroot/bacula/trunk/bacula bacula +\end{verbatim} + +To view what is in the SVN, point your browser at the following URL: +http://bacula.svn.sourceforge.net/viewvc/bacula/ + +Many of the Subversion (svn) commands are almost identical to those that +you have used for cvs, but some (such as a checkout) can have surprising +results, so you should take a careful look at the documentation. 
+ +Robert has kindly provided the following documentation on the new +svn repository and how to use it: + +Here is the list of branches: +\begin{verbatim} + Branch-1.32a + Branch-1.32e + Branch-1.34.2 + Branch-1.34.5 + Branch-1.36 + Branch-1.36.1 + Branch-1.36.2 + Branch-1.38 + Branch-2.0 + import + vendor +\end{verbatim} + +The list of tags is: +\begin{verbatim} + Release-1.1 Release-1.19 Release-1.19a Release-1.19b + Release-1.20 Release-1.21 Release-1.22 Release-1.23 + Release-1.23a Release-1.24 Release-1.25 Release-1.25a + Release-1.26 Release-1.27 Release-1.27a Release-1.27b + Release-1.27c Release-1.28 Release-1.29 Release-1.30 + Release-1.31 Release-1.31a Release-1.32 Release-1.32a + Release-1.32b Release-1.32c Release-1.32d Release-1.32e + Release-1.32f Release-1.32f-2 Release-1.32f-3 Release-1.32f-4 + Release-1.32f-5 Release-1.34.0 Release-1.34.1 Release-1.34.3 + Release-1.34.4 Release-1.34.5 Release-1.34.6 Release-1.35.1 + Release-1.35.2 Release-1.35.3 Release-1.35.6 Release-1.35.7 + Release-1.35.8 Release-1.36.0 Release-1.36.1 Release-1.36.2 + Release-1.36.3 Release-1.38.0 Release-1.38.1 Release-1.38.10 + Release-1.38.11 Release-1.38.2 Release-1.38.3 Release-1.38.4 + Release-1.38.5 Release-1.38.6 Release-1.38.7 Release-1.38.8 + Release-1.38.9 Release-1.8.1 Release-1.8.2 Release-1.8.3 + Release-1.8.4 Release-1.8.5 Release-1.8.6 Release-2.0.0 + Release-2.0.1 Release-2.0.2 +\end{verbatim} + +Here is a list of commands to get you started. The recommended book is +"Version Control with Subversion", by Ben Collins-Sussmann, +Brian W. Fitzpatrick, and Michael Pilato, O'Reilly. 
The book is +Open Source, so it is also available on line at: + +\begin{verbatim} + http://svnbook.red-bean.com +\end{verbatim} + +Get a list of commands + +\begin{verbatim} + svn help +\end{verbatim} + +Get a help with a command + +\begin{verbatim} + svn help command +\end{verbatim} + +Checkout the HEAD revision of all modules from the project into the +directory bacula-new + +\begin{verbatim} + svn co https://bacula.svn.sourceforge.net/svnroot/bacula/trunk bacula.new +\end{verbatim} + +Checkout the HEAD revision of the bacula module into the bacula subdirectory + +\begin{verbatim} + svn checkout https://bacula.svn.sourceforge.net/svnroot/bacula/trunk/bacula +\end{verbatim} + +See which files have changed in the working copy + +\begin{verbatim} + svn status +\end{verbatim} + +See which files are out of date + +\begin{verbatim} + svn status -u +\end{verbatim} + +Add a new file file.c + +\begin{verbatim} + svn add file.c +\end{verbatim} + +Create a new directory + +\begin{verbatim} + svn mkdir newdir +\end{verbatim} + +Delete an obsolete file + +\begin{verbatim} + svn delete file.c +\end{verbatim} + +Rename a file + +\begin{verbatim} + svn move file.c newfile.c +\end{verbatim} + +Move a file to a new location + +\begin{verbatim} + svn move file.c ../newdir/file.c +\end{verbatim} + +Copy a file retaining the original history in the new file + +\begin{verbatim} + svn copy file.c newfile.c +\end{verbatim} + +Update the working copy with the outstanding changes + +\begin{verbatim} + svn update +\end{verbatim} + +Compare working copy with the repository + +\begin{verbatim} + svn diff file.c +\end{verbatim} + +Commit the changes in the local working copy + +\begin{verbatim} + svn commit +\end{verbatim} + +Specify which files are ignored in the current directory + +\begin{verbatim} + svn propedit svn:ignore . 
+\end{verbatim} + +Mark a file to be executable + +\begin{verbatim} + svn propset svn:executable '*' prog.sh +\end{verbatim} + +Unmark a file as executable + +\begin{verbatim} + svn propdel svn:executable prog.sh +\end{verbatim} + +List a file's properties + +\begin{verbatim} + svn proplist file.c +\end{verbatim} + +Create a branch for a new version + +\begin{verbatim} + svn copy https://bacula.svn.sourceforge.net/svnroot/bacula/trunk \ + https://bacula.svn.sourceforge.net/svnroot/bacula/branches/Branch-2.1 +\end{verbatim} + +Tag a version for a new release + +\begin{verbatim} + svn copy https://bacula.svn.sourceforge.net/svnroot/bacula/branches/Branch-2.1 \ + https://bacula.svn.sourceforge.net/svnroot/bacula/branches/Release-2.1 +\end{verbatim} + + +Let's say you are working in the directory scripts. You would then do: + +\begin{verbatim} +cd scripts +(edit some files) +\end{verbatim} + +when you are happy with your changes, you can do the following: + +\begin{verbatim} +cd bacula (to your top level directory) +svn diff > my-changes.patch +\end{verbatim} + +When the command is done, you can look in the file my-changes.patch +and you will see all the changes you have made to your copy of the +repository. Make sure that you understand all the changes that +it reports before proceeding. If you modified files that you +do not want to commit to the main repository, you can simply delete +them from your local directory, and they will be restored from the +repository with the "svn update" that is shown below. Normally, you +should not find changes to files that you do not want to commit, and +if you find yourself in that position a lot, you are probably doing +something wrong. + +Let's assume that now you want to commit your changes to the main +SVN repository. 
+ +First do: + +\begin{verbatim} +cd bacula +svn update +\end{verbatim} + +When you do this, it will pull any changes made by other developers into +your local copy of the repository, and it will check for conflicts. If there +are any, it will tell you, and you will need to resolve them. The problems +of resolving conflicts are a bit more than this document can cover, but +you can examine the files it claims have conflicts and look for \lt{}\lt{}\lt{}\lt{} +or look in the .rej files that it creates. If you have problems, just ask +on the developer's list. + +Note, doing the above "svn update" is not absolutely necessary. There are +times when you may be working on code and you want to commit it, but you +explicitly do not want to move up to the latest version of the code in +the SVN. If that is the case, you can simply skip the "svn update" and +do the commit shown below. If the commit fails because of a conflict, it +will tell you, and you must resolve the conflict before it will permit +you to do the commit. + +Once your local copy of the repository has been updated, you can now +commit your changes: + +\begin{verbatim} +svn commit -m "Some comment about what you changed" +\end{verbatim} + +or if you really only want to commit a single file, you can +do: + +\begin{verbatim} +svn commit -m "comment" scripts/file-I-edited +\end{verbatim} + +Note, if you have done a build in your directory, or you have added +other new files, the commit will update only the files that are +actually in the repository. For example, none of the object files +are stored in the repository, so when you do a commit, those object +files will simply be ignored. + +If you want to add new files or remove files from the main SVN +repository, and you are not experienced with SVN, please ask Kern +to do it. If you follow the simple steps above, it is unlikely that +you will do any damage to the repository, and if you do, it is always +possible for us to recover, but it can be painful. 
+ +If you are only working in one subdirectory of say the bacula project, +for example, the scripts directory, you can do your commit from +that subdirectory, and only the changes in that directory and all its +subdirectories will be committed. This can be helpful for translators. +If you are doing a French translation, you will be working in +docs/manual-fr, and if you are always cd'ed into that directory when +doing your commits, your commit will effect only that directory. As +long as you are careful only to change files that you want changed, +you have little to worry about. + +\section{Subversion Resources} +\index{Subversion (svn) Resources} +\addcontentsline{toc}{subsection}{Subversion Resources} + +\begin{verbatim} +cvs2svn Statistics: +------------------ +Total CVS Files: 3286 +Total CVS Revisions: 28924 +Total Unique Tags: 63 +Total Unique Branches: 11 +CVS Repos Size in KB: 232421 +Total SVN Commits: 4116 +First Revision Date: Tue Apr 23 12:42:57 2002 +Last Revision Date: Tue Feb 6 06:37:57 2007 +\end{verbatim} + +The new Subversion repository size on Robert's machine: + +\begin{verbatim} +4.0K bacula-tst/dav +12K bacula-tst/locks +40K bacula-tst/hooks +16K bacula-tst/conf +190M bacula-tst/db/revs +17M bacula-tst/db/revprops +4.0K bacula-tst/db/transactions +206M bacula-tst/db +206M bacula-tst +\end{verbatim} + + +Main Subversion Web Page +\elink{http://subversion.tigris.org}{http://subversion.tigris.org} + +Subversion Book +\elink{http://svnbook.red-bean.com}{http://svnbook.red-bean.com} + +Subversion Clients +\elink{http://subversion.tigris.org/project\_packages.html}{http://subversion.tigris.org/project\_packages.html} + + (For Windows users the TortoiseSVN package is awesome) + +GUI UNIX client link +\elink{http://rapidsvn.tigris.org/}{http://rapidsvn.tigris.org/} + +A nice KDE GUI client: +kdesvn + + + +\section{Developing Bacula} +\index{Developing Bacula} +\index{Bacula!Developing} +\addcontentsline{toc}{subsubsection}{Developing Bacula} + 
+Typically the simplest way to develop Bacula is to open one xterm window +pointing to the source directory you wish to update; a second xterm window at +the top source directory level, and a third xterm window at the bacula +directory \lt{}top\gt{}/src/bacula. After making source changes in one of the +directories, in the top source directory xterm, build the source, and start +the daemons by entering: + +make and + +./startit then enter: + +./console or + +./gnome-console to start the Console program. Enter any commands for testing. +For example: run kernsverify full. + +Note, the instructions here to use {\bf ./startit} are different from using a +production system where the administrator starts Bacula by entering {\bf +./bacula start}. This difference allows a development version of {\bf Bacula} +to be run on a computer at the same time that a production system is running. +The {\bf ./startit} script starts {\bf Bacula} using a different set of +configuration files, and thus permits avoiding conflicts with any production +system. + +To make additional source changes, exit from the Console program, and in the +top source directory, stop the daemons by entering: + +./stopit then repeat the process. + +\subsection{Debugging} +\index{Debugging} +\addcontentsline{toc}{subsubsection}{Debugging} + +Probably the first thing to do is to turn on debug output. + +A good place to start is with a debug level of 20 as in {\bf ./startit -d20}. +The startit command starts all the daemons with the same debug level. +Alternatively, you can start the appropriate daemon with the debug level you +want. If you really need more info, a debug level of 60 is not bad, and for +just about everything a level of 200. 
+ +\subsection{Using a Debugger} +\index{Using a Debugger} +\index{Debugger!Using a} +\addcontentsline{toc}{subsubsection}{Using a Debugger} + +If you have a serious problem such as a segmentation fault, it can usually be +found quickly using a good multiple thread debugger such as {\bf gdb}. For +example, suppose you get a segmentation violation in {\bf bacula-dir}. You +might use the following to find the problem: + +\lt{}start the Storage and File daemons\gt{} +cd dird +gdb ./bacula-dir +run -f -s -c ./dird.conf +\lt{}it dies with a segmentation fault\gt{} +where +The {\bf -f} option is specified on the {\bf run} command to inhibit {\bf +dird} from going into the background. You may also want to add the {\bf -s} +option to the run command to disable signals which can potentially interfere +with the debugging. + +As an alternative to using the debugger, each {\bf Bacula} daemon has a built +in back trace feature when a serious error is encountered. It calls the +debugger on itself, produces a back trace, and emails the report to the +developer. For more details on this, please see the chapter in the main Bacula +manual entitled ``What To Do When Bacula Crashes (Kaboom)''. + +\subsection{Memory Leaks} +\index{Leaks!Memory} +\index{Memory Leaks} +\addcontentsline{toc}{subsubsection}{Memory Leaks} + +Because Bacula runs routinely and unattended on client and server machines, it +may run for a long time. As a consequence, from the very beginning, Bacula +uses SmartAlloc to ensure that there are no memory leaks. To make detection of +memory leaks effective, all Bacula code that dynamically allocates memory MUST +have a way to release it. In general when the memory is no longer needed, it +should be immediately released, but in some cases, the memory will be held +during the entire time that Bacula is executing. In that case, there MUST be a +routine that can be called at termination time that releases the memory. In +this way, we will be able to detect memory leaks. 
Be sure to immediately +correct any and all memory leaks that are printed at the termination of the +daemons. + +\subsection{Special Files} +\index{Files!Special} +\index{Special Files} +\addcontentsline{toc}{subsubsection}{Special Files} + +Kern uses files named 1, 2, ... 9 with any extension as scratch files. Thus +any files with these names are subject to being rudely deleted at any time. + +\subsection{When Implementing Incomplete Code} +\index{Code!When Implementing Incomplete} +\index{When Implementing Incomplete Code} +\addcontentsline{toc}{subsubsection}{When Implementing Incomplete Code} + +Please identify all incomplete code with a comment that contains + +\begin{verbatim} +***FIXME*** +\end{verbatim} + +where there are three asterisks (*) before and after the word +FIXME (in capitals) and no intervening spaces. This is important as it allows +new programmers to easily recognize where things are partially implemented. + +\subsection{Bacula Source File Structure} +\index{Structure!Bacula Source File} +\index{Bacula Source File Structure} +\addcontentsline{toc}{subsubsection}{Bacula Source File Structure} + +The distribution generally comes as a tar file of the form {\bf +bacula.x.y.z.tar.gz} where x, y, and z are the version, release, and update +numbers respectively. 
+ +Once you detar this file, you will have a directory structure as follows: + +\footnotesize +\begin{verbatim} +| +Tar file: +|- depkgs + |- mtx (autochanger control program + tape drive info) + |- sqlite (SQLite database program) + +Tar file: +|- depkgs-win32 + |- pthreads (Native win32 pthreads library -- dll) + |- zlib (Native win32 zlib library) + |- wx (wxWidgets source code) + +Project bacula: +|- bacula (main source directory containing configuration + | and installation files) + |- autoconf (automatic configuration files, not normally used + | by users) + |- intl (programs used to translate) + |- platforms (OS specific installation files) + |- redhat (Red Hat installation) + |- solaris (Sun installation) + |- freebsd (FreeBSD installation) + |- irix (Irix installation -- not tested) + |- unknown (Default if system not identified) + |- po (translations of source strings) + |- src (source directory; contains global header files) + |- cats (SQL catalog database interface directory) + |- console (bacula user agent directory) + |- dird (Director daemon) + |- filed (Unix File daemon) + |- win32 (Win32 files to make bacula-fd be a service) + |- findlib (Unix file find library for File daemon) + |- gnome-console (GNOME version of console program) + |- lib (General Bacula library) + |- stored (Storage daemon) + |- tconsole (Tcl/tk console program -- not yet working) + |- testprogs (test programs -- normally only in Kern's tree) + |- tools (Various tool programs) + |- win32 (Native Win32 File daemon) + |- baculafd (Visual Studio project file) + |- compat (compatibility interface library) + |- filed (links to src/filed) + |- findlib (links to src/findlib) + |- lib (links to src/lib) + |- console (beginning of native console program) + |- wx-console (wxWidget console Win32 specific parts) + |- wx-console (wxWidgets console main source program) + +Project regress: +|- regress (Regression scripts) + |- bin (temporary directory to hold Bacula installed binaries) + |- 
build (temporary directory to hold Bacula source) + |- scripts (scripts and .conf files) + |- tests (test scripts) + |- tmp (temporary directory for temp files) + |- working (temporary working directory for Bacula daemons) + +Project docs: +|- docs (documentation directory) + |- developers (Developer's guide) + |- home-page (Bacula's home page source) + |- manual (html document directory) + |- manual-fr (French translation) + |- manual-de (German translation) + |- techlogs (Technical development notes); + +Project rescue: +|- rescue (Bacula rescue CDROM) + |- linux (Linux rescue CDROM) + |- cdrom (Linux rescue CDROM code) + ... + |- solaris (Solaris rescue -- incomplete) + |- freebsd (FreeBSD rescue -- incomplete) + +Project gui: +|- gui (Bacula GUI projects) + |- bacula-web (Bacula web php management code) + |- bimagemgr (Web application for burning CDROMs) + + +\end{verbatim} +\normalsize + +\subsection{Header Files} +\index{Header Files} +\index{Files!Header} +\addcontentsline{toc}{subsubsection}{Header Files} + +Please carefully follow the scheme defined below as it permits in general only +two header file includes per C file, and thus vastly simplifies programming. +With a large complex project like Bacula, it isn't always easy to ensure that +the right headers are invoked in the right order (there are a few kludges to +make this happen -- i.e. in a few include files because of the chicken and egg +problem, certain references to typedefs had to be replaced with {\bf void} ). + +Every file should include {\bf bacula.h}. It pulls in just about everything, +with very few exceptions. If you have system dependent ifdefing, please do it +in {\bf baconfig.h}. The version number and date are kept in {\bf version.h}. + +Each of the subdirectories (console, cats, dird, filed, findlib, lib, stored, +...) contains a single directory dependent include file generally the name of +the directory, which should be included just after the include of {\bf +bacula.h}. 
This file (for example, for the dird directory, it is {\bf dird.h}) +contains either definitions of things generally needed in this directory, or +it includes the appropriate header files. It always includes {\bf protos.h}. +See below. + +Each subdirectory contains a header file named {\bf protos.h}, which contains +the prototypes for subroutines exported by files in that directory. {\bf +protos.h} is always included by the main directory dependent include file. + +\subsection{Programming Standards} +\index{Standards!Programming} +\index{Programming Standards} +\addcontentsline{toc}{subsubsection}{Programming Standards} + +For the most part, all code should be written in C unless there is a burning +reason to use C++, and then only the simplest C++ constructs will be used. +Note, Bacula is slowly evolving to use more and more C++. + +Code should have some documentation -- not a lot, but enough so that I can +understand it. Look at the current code, and you will see that I document more +than most, but am definitely not a fanatic. + +I prefer simple linear code where possible. Gotos are strongly discouraged +except for handling an error to either bail out or to retry some code, and +such use of gotos can vastly simplify the program. + +Remember this is a C program that is migrating to a {\bf tiny} subset of C++, +so be conservative in your use of C++ features. + +\subsection{Do Not Use} +\index{Use!Do Not} +\index{Do Not Use} +\addcontentsline{toc}{subsubsection}{Do Not Use} + +\begin{itemize} + \item STL -- it is totally incomprehensible. +\end{itemize} + +\subsection{Avoid if Possible} +\index{Possible!Avoid if} +\index{Avoid if Possible} +\addcontentsline{toc}{subsubsection}{Avoid if Possible} + +\begin{itemize} +\item Using {\bf void *} because this generally means that one must + using casting, and in C++ casting is rather ugly. 
It is OK to use + void * to pass structure address where the structure is not known + to the routines accepting the packet (typically callback routines). + However, declaring "void *buf" is a bad idea. Please use the + correct types whenever possible. + +\item Using undefined storage specifications such as (short, int, long, + long long, size\_t ...). The problem with all these is that the number of bytes + they allocate depends on the compiler and the system. Instead use + Bacula's types (int8\_t, uint8\_t, int32\_t, uint32\_t, int64\_t, and + uint64\_t). This guarantees that the variables are given exactly the + size you want. Please try at all possible to avoid using size\_t ssize\_t + and the such. They are very system dependent. However, some system + routines may need them, so their use is often unavoidable. + +\item Returning a malloc'ed buffer from a subroutine -- someone will forget + to release it. + +\item Heap allocation (malloc) unless needed -- it is expensive. Use + POOL\_MEM instead. + +\item Templates -- they can create portability problems. + +\item Fancy or tricky C or C++ code, unless you give a good explanation of + why you used it. + +\item Too much inheritance -- it can complicate the code, and make reading it + difficult (unless you are in love with colons) + +\end{itemize} + +\subsection{Do Use Whenever Possible} +\index{Possible!Do Use Whenever} +\index{Do Use Whenever Possible} +\addcontentsline{toc}{subsubsection}{Do Use Whenever Possible} + +\begin{itemize} +\item Locking and unlocking within a single subroutine. + +\item A single point of exit from all subroutines. A goto is + perfectly OK to use to get out early, but only to a label + named bail\_out, and possibly an ok\_out. See current code + examples. + +\item Malloc and free within a single subroutine. + +\item Comments and global explanations on what your code or algorithm does. 
+ +\end{itemize} + +\subsection{Indenting Standards} +\index{Standards!Indenting} +\index{Indenting Standards} +\addcontentsline{toc}{subsubsection}{Indenting Standards} + +I cannot stand code indented 8 columns at a time. This makes the code +unreadable. Even 4 at a time uses a lot of space, so I have adopted indenting +3 spaces at every level. Note, indention is the visual appearance of the +source on the page, while tabbing is replacing a series of up to 8 spaces from +a tab character. + +The closest set of parameters for the Linux {\bf indent} program that will +produce reasonably indented code are: + +\footnotesize +\begin{verbatim} +-nbad -bap -bbo -nbc -br -brs -c36 -cd36 -ncdb -ce -ci3 -cli0 +-cp36 -d0 -di1 -ndj -nfc1 -nfca -hnl -i3 -ip0 -l85 -lp -npcs +-nprs -npsl -saf -sai -saw -nsob -nss -nbc -ncs -nbfda +\end{verbatim} +\normalsize + +You can put the above in your .indent.pro file, and then just invoke indent on +your file. However, be warned. This does not produce perfect indenting, and it +will mess up C++ class statements pretty badly. + +Braces are required in all if statements (missing in some very old code). To +avoid generating too many lines, the first brace appears on the first line +(e.g. of an if), and the closing brace is on a line by itself. E.g. + +\footnotesize +\begin{verbatim} + if (abc) { + some_code; + } +\end{verbatim} +\normalsize + +Just follow the convention in the code. Originally I indented case clauses +under a switch(), but now I prefer non-indented cases. + +\footnotesize +\begin{verbatim} + switch (code) { + case 'A': + do something + break; + case 'B': + again(); + break; + default: + break; + } +\end{verbatim} +\normalsize + +Avoid using // style comments except for temporary code or turning off debug +code. Standard C comments are preferred (this also keeps the code closer to +C). + +Attempt to keep all lines less than 85 characters long so that the whole line +of code is readable at one time. 
This is not a rigid requirement. + +Always put a brief description at the top of any new file created describing +what it does and including your name and the date it was first written. Please +don't forget any Copyrights and acknowledgments if it isn't 100\% your code. +Also, include the Bacula copyright notice that is in {\bf src/c}. + +In general you should have two includes at the top of the an include for the +particular directory the code is in, for includes are needed, but this should +be rare. + +In general (except for self-contained packages), prototypes should all be put +in {\bf protos.h} in each directory. + +Always put space around assignment and comparison operators. + +\footnotesize +\begin{verbatim} + a = 1; + if (b >= 2) { + cleanup(); + } +\end{verbatim} +\normalsize + +but your can compress things in a {\bf for} statement: + +\footnotesize +\begin{verbatim} + for (i=0; i < del.num_ids; i++) { + ... +\end{verbatim} +\normalsize + +Don't overuse the inline if (?:). A full {\bf if} is preferred, except in a +print statement, e.g.: + +\footnotesize +\begin{verbatim} + if (ua->verbose \&& del.num_del != 0) { + bsendmsg(ua, _("Pruned %d %s on Volume %s from catalog.\n"), del.num_del, + del.num_del == 1 ? "Job" : "Jobs", mr->VolumeName); + } +\end{verbatim} +\normalsize + +Leave a certain amount of debug code (Dmsg) in code you submit, so that future +problems can be identified. This is particularly true for complicated code +likely to break. However, try to keep the debug code to a minimum to avoid +bloating the program and above all to keep the code readable. + +Please keep the same style in all new code you develop. If you include code +previously written, you have the option of leaving it with the old indenting +or re-indenting it. If the old code is indented with 8 spaces, then please +re-indent it to Bacula standards. + +If you are using {\bf vim}, simply set your tabstop to 8 and your shiftwidth +to 3. 
+ +\subsection{Tabbing} +\index{Tabbing} +\addcontentsline{toc}{subsubsection}{Tabbing} + +Tabbing (inserting the tab character in place of spaces) is as normal on all +Unix systems -- a tab is converted space up to the next column multiple of 8. +My editor converts strings of spaces to tabs automatically -- this results in +significant compression of the files. Thus, you can remove tabs by replacing +them with spaces if you wish. Please don't confuse tabbing (use of tab +characters) with indenting (visual alignment of the code). + +\subsection{Don'ts} +\index{Don'ts} +\addcontentsline{toc}{subsubsection}{Don'ts} + +Please don't use: + +\footnotesize +\begin{verbatim} +strcpy() +strcat() +strncpy() +strncat(); +sprintf() +snprintf() +\end{verbatim} +\normalsize + +They are system dependent and un-safe. These should be replaced by the Bacula +safe equivalents: + +\footnotesize +\begin{verbatim} +char *bstrncpy(char *dest, char *source, int dest_size); +char *bstrncat(char *dest, char *source, int dest_size); +int bsnprintf(char *buf, int32_t buf_len, const char *fmt, ...); +int bvsnprintf(char *str, int32_t size, const char *format, va_list ap); +\end{verbatim} +\normalsize + +See src/lib/bsys.c for more details on these routines. + +Don't use the {\bf \%lld} or the {\bf \%q} printf format editing types to edit +64 bit integers -- they are not portable. Instead, use {\bf \%s} with {\bf +edit\_uint64()}. For example: + +\footnotesize +\begin{verbatim} + char buf[100]; + uint64_t num = something; + char ed1[50]; + bsnprintf(buf, sizeof(buf), "Num=%s\n", edit_uint64(num, ed1)); +\end{verbatim} +\normalsize + +The edit buffer {\bf ed1} must be at least 27 bytes long to avoid overflow. +See src/lib/edit.c for more details. If you look at the code, don't start +screaming that I use {\bf lld}. I actually use subtle trick taught to me by +John Walker. 
The {\bf lld} that appears in the editing routine is actually +{\bf \#define}d to what is needed on your OS (usually ``lld'' or ``q'') and +is defined in autoconf/configure.in for each OS. C string concatenation causes +the appropriate string to be concatenated to the ``\%''. + +Also please don't use the STL or Templates or any complicated C++ code. + +\subsection{Message Classes} +\index{Classes!Message} +\index{Message Classes} +\addcontentsline{toc}{subsubsection}{Message Classes} + +Currently, there are five classes of messages: Debug, Error, Job, Memory, +and Queued. + +\subsection{Debug Messages} +\index{Messages!Debug} +\index{Debug Messages} +\addcontentsline{toc}{subsubsection}{Debug Messages} + +Debug messages are designed to be turned on at a specified debug level and are +always sent to STDOUT. They are designed to only be used in the development +debug process. They are coded as: + +DmsgN(level, message, arg1, ...) where the N is a number indicating how many +arguments are to be substituted into the message (i.e. it is a count of the +number of arguments you have in your message -- generally the number of percent +signs (\%)). {\bf level} is the debug level at which you wish the message to +be printed. message is the debug message to be printed, and arg1, ... are the +arguments to be substituted. Since not all compilers support \#defines with +varargs, you must explicitly specify how many arguments you have. + +When the debug message is printed, it will automatically be prefixed by the +name of the daemon which is running, the filename where the Dmsg is, and the +line number within the file. 
+ +Some actual examples are: + +Dmsg2(20, ``MD5len=\%d MD5=\%s\textbackslash{}n'', strlen(buf), buf); + +Dmsg1(9, ``Created client \%s record\textbackslash{}n'', client->hdr.name); + +\subsection{Error Messages} +\index{Messages!Error} +\index{Error Messages} +\addcontentsline{toc}{subsubsection}{Error Messages} + +Error messages are messages that are related to the daemon as a whole rather +than a particular job. For example, an out of memory condition may generate an +error message. They should be very rarely needed. In general, you should be +using Job and Job Queued messages (Jmsg and Qmsg). They are coded as: + +EmsgN(error-code, level, message, arg1, ...) As with debug messages, you must +explicitly code the number of arguments to be substituted in the message. error-code +indicates the severity or class of error, and it may be one of the following: + +\addcontentsline{lot}{table}{Message Error Code Classes} +\begin{longtable}{lp{3in}} +{{\bf M\_ABORT} } & {Causes the daemon to immediately abort. This should be +used only in extreme cases. It attempts to produce a traceback. } \\ +{{\bf M\_ERROR\_TERM} } & {Causes the daemon to immediately terminate. This +should be used only in extreme cases. It does not produce a traceback. } \\ +{{\bf M\_FATAL} } & {Causes the daemon to terminate the current job, but the +daemon keeps running } \\ +{{\bf M\_ERROR} } & {Reports the error. The daemon and the job continue +running } \\ +{{\bf M\_WARNING} } & {Reports a warning message. The daemon and the job +continue running } \\ +{{\bf M\_INFO} } & {Reports an informational message.} + +\end{longtable} + +There are other error message classes, but they are in a state of being +redesigned or deprecated, so please do not use them. Some actual examples are: + + +Emsg1(M\_ABORT, 0, ``Cannot create message thread: \%s\textbackslash{}n'', +strerror(status)); + +Emsg3(M\_WARNING, 0, ``Connect to File daemon \%s at \%s:\%d failed. 
Retrying +...\textbackslash{}n'', client-\gt{}hdr.name, client-\gt{}address, +client-\gt{}port); + +Emsg3(M\_FATAL, 0, ``bdird\lt{}filed: bad response from Filed to \%s command: +\%d \%s\textbackslash{}n'', cmd, n, strerror(errno)); + +\subsection{Job Messages} +\index{Job Messages} +\index{Messages!Job} +\addcontentsline{toc}{subsubsection}{Job Messages} + +Job messages are messages that pertain to a particular job such as a file that +could not be saved, or the number of files and bytes that were saved. They +are coded as: +\begin{verbatim} +Jmsg(jcr, M_FATAL, 0, "Text of message"); +\end{verbatim} +A Jmsg with M\_FATAL will fail the job. The Jmsg() takes varargs so can +have any number of arguments to be substituted in a printf like format. +Output from the Jmsg() will go to the Job report. +
+ +If the Jmsg is followed with a number such as Jmsg1(...), the number +indicates the number of arguments to be substituted (varargs is not +standard for \#defines), and what is more important is that the file and +line number will be prefixed to the message. This permits a sort of debug +from user's output. + +\subsection{Queued Job Messages} +\index{Queued Job Messages} +\index{Messages!Job} +\addcontentsline{toc}{subsubsection}{Queued Job Messages} +Queued Job messages are similar to Jmsg()s except that the message is +Queued rather than immediately dispatched. This is necessary within the +network subroutines and in the message editing routines. This is to prevent +recursive loops, and to ensure that messages can be delivered even in the +event of a network error. + + +\subsection{Memory Messages} +\index{Messages!Memory} +\index{Memory Messages} +\addcontentsline{toc}{subsubsection}{Memory Messages} + +Memory messages are messages that are edited into a memory buffer. Generally +they are used in low level routines such as the low level device file dev.c in +the Storage daemon or in the low level Catalog routines. These routines do not +generally have access to the Job Control Record and so they return error +messages reformatted in a memory buffer. Mmsg() is the way to do this. diff --git a/docs/manuals/en/developers/index.perl b/docs/manuals/en/developers/index.perl new file mode 100644 index 00000000..bc4e1b60 --- /dev/null +++ b/docs/manuals/en/developers/index.perl @@ -0,0 +1,564 @@ +# This module does multiple indices, supporting the style of the LaTex 'index' +# package. + +# Version Information: +# 16-Feb-2005 -- Original Creation. Karl E. Cunningham +# 14-Mar-2005 -- Clarified and Consolidated some of the code. +# Changed to smoothly handle single and multiple indices. + +# Two LaTeX index formats are supported... +# --- SINGLE INDEX --- +# \usepackage{makeidx} +# \makeindex +# \index{entry1} +# \index{entry2} +# \index{entry3} +# ... 
+# \printindex +# +# --- MULTIPLE INDICES --- +# +# \usepackage{makeidx} +# \usepackage{index} +# \makeindex -- latex2html doesn't care but LaTeX does. +# \newindex{ref1}{ext1}{ext2}{title1} +# \newindex{ref2}{ext1}{ext2}{title2} +# \newindex{ref3}{ext1}{ext2}{title3} +# \index[ref1]{entry1} +# \index[ref1]{entry2} +# \index[ref3]{entry3} +# \index[ref2]{entry4} +# \index{entry5} +# \index[ref3]{entry6} +# ... +# \printindex[ref1] +# \printindex[ref2] +# \printindex[ref3] +# \printindex +# ___________________ +# +# For the multiple-index style, each index is identified by the ref argument to \newindex, \index, +# and \printindex. A default index is allowed, which is indicated by omitting the optional +# argument. The default index does not require a \newindex command. As \index commands +# are encountered, their entries are stored according +# to the ref argument. When the \printindex command is encountered, the stored index +# entries for that argument are retrieved and printed. The title for each index is taken +# from the last argument in the \newindex command. +# While processing \index and \printindex commands, if no argument is given the index entries +# are built into a default index. The title of the default index is simply "Index". +# This makes the difference between single- and multiple-index processing trivial. +# +# Another method can be used by omitting the \printindex command and just using \include to +# pull in index files created by the makeindex program. These files will start with +# \begin{theindex}. This command is used to determine where to print the index. Using this +# approach, the indices will be output in the same order as the newindex commands were +# originally found (see below). Using a combination of \printindex and \include{indexfile} has not +# been tested and may produce undesireable results. +# +# The index data are stored in a hash for later sorting and output. 
As \printindex +# commands are handled, the order in which they were found in the tex filea is saved, +# associated with the ref argument to \printindex. +# +# We use the original %index hash to store the index data into. We append a \002 followed by the +# name of the index to isolate the entries in different indices from each other. This is necessary +# so that different indices can have entries with the same name. For the default index, the \002 is +# appended without the name. +# +# Since the index order in the output cannot be determined if the \include{indexfile} +# command is used, the order will be assumed from the order in which the \newindex +# commands were originally seen in the TeX files. This order is saved as well as the +# order determined from any printindex{ref} commands. If \printindex commnads are used +# to specify the index output, that order will be used. If the \include{idxfile} command +# is used, the order of the original newindex commands will be used. In this case the +# default index will be printed last since it doesn't have a corresponding \newindex +# command and its order cannot be determined. Mixing \printindex and \include{idxfile} +# commands in the same file is likely to produce less than satisfactory results. +# +# +# The hash containing index data is named %indices. It contains the following data: +#{ +# 'title' => { +# $ref1 => $indextitle , +# $ref2 => $indextitle , +# ... +# }, +# 'newcmdorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index. +# 'printindorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index. +#} + + +# Globals to handle multiple indices. +my %indices; + +# This tells the system to use up to 7 words in index entries. +$WORDS_IN_INDEX = 10; + +# KEC 2-18-05 +# Handles the \newindex command. This is called if the \newindex command is +# encountered in the LaTex source. Gets the index ref and title from the arguments. 
+# Saves the index ref and title. +# Note that we are called once to handle multiple \newindex commands that are +# newline-separated. +sub do_cmd_newindex { + my $data = shift; + # The data is sent to us as fields delimited by their ID #'s. We extract the + # fields. + foreach my $line (split("\n",$data)) { + my @fields = split (/(?:\<\#\d+?\#\>)+/,$line); + + # The index name and title are the second and fourth fields in the data. + if ($line =~ /^ \001 + # @ -> \002 + # | -> \003 + $* = 1; $str =~ s/\n\s*/ /g; $* = 0; # remove any newlines + # protect \001 occurring with images + $str =~ s/\001/\016/g; # 0x1 to 0xF + $str =~ s/\\\\/\011/g; # Double backslash -> 0xB + $str =~ s/\\;SPMquot;/\012/g; # \;SPMquot; -> 0xC + $str =~ s/;SPMquot;!/\013/g; # ;SPMquot; -> 0xD + $str =~ s/!/\001/g; # Exclamation point -> 0x1 + $str =~ s/\013/!/g; # 0xD -> Exclaimation point + $str =~ s/;SPMquot;@/\015/g; # ;SPMquot;@ to 0xF + $str =~ s/@/\002/g; # At sign -> 0x2 + $str =~ s/\015/@/g; # 0xF to At sign + $str =~ s/;SPMquot;\|/\017/g; # ;SMPquot;| to 0x11 + $str =~ s/\|/\003/g; # Vertical line to 0x3 + $str =~ s/\017/|/g; # 0x11 to vertical line + $str =~ s/;SPMquot;(.)/\1/g; # ;SPMquot; -> whatever the next character is + $str =~ s/\012/;SPMquot;/g; # 0x12 to ;SPMquot; + $str =~ s/\011/\\\\/g; # 0x11 to double backslash + local($key_part, $pageref) = split("\003", $str, 2); + + # For any keys of the form: blablabla!blablabla, which want to be split at the + # exclamation point, replace the ! with a comma and a space. We don't do it + # that way for this index. + $key_part =~ s/\001/, /g; + local(@keys) = split("\001", $key_part); + # If TITLE is not yet available use $before. 
+ $TITLE = $saved_title if (($saved_title)&&(!($TITLE)||($TITLE eq $default_title))); + $TITLE = $before unless $TITLE; + # Save the reference + local($words) = ''; + if ($SHOW_SECTION_NUMBERS) { $words = &make_idxnum; } + elsif ($SHORT_INDEX) { $words = &make_shortidxname; } + else { $words = &make_idxname; } + local($super_key) = ''; + local($sort_key, $printable_key, $cur_key); + foreach $key (@keys) { + $key =~ s/\016/\001/g; # revert protected \001s + ($sort_key, $printable_key) = split("\002", $key); + # + # RRM: 16 May 1996 + # any \label in the printable-key will have already + # created a label where the \index occurred. + # This has to be removed, so that the desired label + # will be found on the Index page instead. + # + if ($printable_key =~ /tex2html_anchor_mark/ ) { + $printable_key =~ s/><\/A>$cross_ref_mark/ + $printable_key =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/ + do { ($label,$id) = ($1,$2); + $ref_label = $external_labels{$label} unless + ($ref_label = $ref_files{$label}); + '"' . "$ref_label#$label" . '">' . + &get_ref_mark($label,$id)} + /geo; + } + $printable_key =~ s/<\#[^\#>]*\#>//go; + #RRM + # recognise \char combinations, for a \backslash + # + $printable_key =~ s/\&\#;\'134/\\/g; # restore \\s + $printable_key =~ s/\&\#;\`
/\\/g; # ditto + $printable_key =~ s/\&\#;*SPMquot;92/\\/g; # ditto + # + # $sort_key .= "@$printable_key" if !($printable_key); # RRM + $sort_key .= "@$printable_key" if !($sort_key); # RRM + $sort_key =~ tr/A-Z/a-z/; + if ($super_key) { + $cur_key = $super_key . "\001" . $sort_key; + $sub_index{$super_key} .= $cur_key . "\004"; + } else { + $cur_key = $sort_key; + } + + # Append the $index_name to the current key with a \002 delimiter. This will + # allow the same index entry to appear in more than one index. + $index_key = $cur_key . "\002$index_name"; + + $index{$index_key} .= ""; + + # + # RRM, 15 June 1996 + # if there is no printable key, but one is known from + # a previous index-entry, then use it. + # + if (!($printable_key) && ($printable_key{$index_key})) + { $printable_key = $printable_key{$index_key}; } +# if (!($printable_key) && ($printable_key{$cur_key})) +# { $printable_key = $printable_key{$cur_key}; } + # + # do not overwrite the printable_key if it contains an anchor + # + if (!($printable_key{$index_key} =~ /tex2html_anchor_mark/ )) + { $printable_key{$index_key} = $printable_key || $key; } +# if (!($printable_key{$cur_key} =~ /tex2html_anchor_mark/ )) +# { $printable_key{$cur_key} = $printable_key || $key; } + + $super_key = $cur_key; + } + # + # RRM + # page-ranges, from |( and |) and |see + # + if ($pageref) { + if ($pageref eq "\(" ) { + $pageref = ''; + $next .= " from "; + } elsif ($pageref eq "\)" ) { + $pageref = ''; + local($next) = $index{$index_key}; +# local($next) = $index{$cur_key}; + # $next =~ s/[\|] *$//; + $next =~ s/(\n )?\| $//; + $index{$index_key} = "$next to "; +# $index{$cur_key} = "$next to "; + } + } + + if ($pageref) { + $pageref =~ s/\s*$//g; # remove trailing spaces + if (!$pageref) { $pageref = ' ' } + $pageref =~ s/see/see <\/i> /g; + # + # RRM: 27 Dec 1996 + # check if $pageref corresponds to a style command. + # If so, apply it to the $words. 
+ # + local($tmp) = "do_cmd_$pageref"; + if (defined &$tmp) { + $words = &$tmp("<#0#>$words<#0#>"); + $words =~ s/<\#[^\#]*\#>//go; + $pageref = ''; + } + } + # + # RRM: 25 May 1996 + # any \label in the pageref section will have already + # created a label where the \index occurred. + # This has to be removed, so that the desired label + # will be found on the Index page instead. + # + if ($pageref) { + if ($pageref =~ /tex2html_anchor_mark/ ) { + $pageref =~ s/><\/A>
$cross_ref_mark/ + $pageref =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/ + do { ($label,$id) = ($1,$2); + $ref_files{$label} = ''; # ???? RRM + if ($index_labels{$label}) { $ref_label = ''; } + else { $ref_label = $external_labels{$label} + unless ($ref_label = $ref_files{$label}); + } + '"' . "$ref_label#$label" . '">' . &get_ref_mark($label,$id)}/geo; + } + $pageref =~ s/<\#[^\#>]*\#>//go; + + if ($pageref eq ' ') { $index{$index_key}='@'; } + else { $index{$index_key} .= $pageref . "\n | "; } + } else { + local($thisref) = &make_named_href('',"$CURRENT_FILE#$br_id",$words); + $thisref =~ s/\n//g; + $index{$index_key} .= $thisref."\n | "; + } + #print "\nREF: $sort_key : $index_key :$index{$index_key}"; + + #join('',"$anchor_invisible_mark<\/A>",$_); + + "$anchor_invisible_mark<\/A>"; +} + + +# KEC. -- Copied from makeidx.perl, then modified to do multiple indices. +# Feeds the index entries to the output. This is called for each index to be built. +# +# Generates a list of lookup keys for index entries, from both %printable_keys +# and %index keys. +# Sorts the keys according to index-sorting rules. +# Removes keys with a 0x01 token. (duplicates?) +# Builds a string to go to the index file. +# Adds the index entries to the string if they belong in this index. +# Keeps track of which index is being worked on, so only the proper entries +# are included. +# Places the index just built in to the output at the proper place. +{ my $index_number = 0; +sub add_real_idx { + print "\nDoing the index ... Index Number $index_number\n"; + local($key, @keys, $next, $index, $old_key, $old_html); + my ($idx_ref,$keyref); + # RRM, 15.6.96: index constructed from %printable_key, not %index + @keys = keys %printable_key; + + while (/$idx_mark/) { + # Get the index reference from what follows the $idx_mark and + # remove it from the string. 
+ s/$idxmark\002(.*?)\002/$idxmark/; + $idx_ref = $1; + $index = ''; + # include non- makeidx index-entries + foreach $key (keys %index) { + next if $printable_key{$key}; + $old_key = $key; + if ($key =~ s/###(.*)$//) { + next if $printable_key{$key}; + push (@keys, $key); + $printable_key{$key} = $key; + if ($index{$old_key} =~ /HREF="([^"]*)"/i) { + $old_html = $1; + $old_html =~ /$dd?([^#\Q$dd\E]*)#/; + $old_html = $1; + } else { $old_html = '' } + $index{$key} = $index{$old_key} . $old_html."\n | "; + }; + } + @keys = sort makeidx_keysort @keys; + @keys = grep(!/\001/, @keys); + my $cnt = 0; + foreach $key (@keys) { + my ($keyref) = $key =~ /.*\002(.*)/; + next unless ($idx_ref eq $keyref); # KEC. + $index .= &add_idx_key($key); + $cnt++; + } + print "$cnt Index Entries Added\n"; + $index = '
'.$index unless ($index =~ /^\s*/); + $index_number++; # KEC. + if ($SHORT_INDEX) { + print "(compact version with Legend)"; + local($num) = ( $index =~ s/\ 50 ) { + s/$idx_mark/$preindex
\n$index\n<\/DL>$preindex/o; + } else { + s/$idx_mark/$preindex
\n$index\n<\/DL>/o; + } + } else { + s/$idx_mark/
\n$index\n<\/DL>/o; } + } +} +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# The bibliography and the index should be treated as separate sections +# in their own HTML files. The \bibliography{} command acts as a sectioning command +# that has the desired effect. But when the bibliography is constructed +# manually using the thebibliography environment, or when using the +# theindex environment it is not possible to use the normal sectioning +# mechanism. This subroutine inserts a \bibliography{} or a dummy +# \textohtmlindex command just before the appropriate environments +# to force sectioning. +sub add_bbl_and_idx_dummy_commands { + local($id) = $global{'max_id'}; + + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$bbl_cnt++; $1/eg; + ## if ($bbl_cnt == 1) { + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$id++; "\\bibliography$O$id$C$O$id$C $1"/geo; + #} + $global{'max_id'} = $id; + # KEC. Modified to global substitution to place multiple index tokens. + s/[\\]begin\s*($O\d+$C)\s*theindex/\\textohtmlindex$1/go; + # KEC. Modified to pick up the optional argument to \printindex + s/[\\]printindex\s*(\[.*?\])?/ + do { (defined $1) ? "\\textohtmlindex $1" : "\\textohtmlindex []"; } /ego; + &lib_add_bbl_and_idx_dummy_commands() if defined(&lib_add_bbl_and_idx_dummy_commands); +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# For each textohtmlindex mark found, determine the index titles and headers. +# We place the index ref in the header so the proper index can be generated later. +# For the default index, the index ref is blank. +# +# One problem is that this routine is called twice.. Once for processing the +# command as originally seen, and once for processing the command when +# doing the name for the index file. We can detect that by looking at the +# id numbers (or ref) surrounding the \theindex command, and not incrementing +# index_number unless a new id (or ref) is seen. 
This has the side effect of +# having to unconventionally start the index_number at -1. But it works. +# +# Gets the title from the list of indices. +# If this is the first index, save the title in $first_idx_file. This is what's referenced +# in the navigation buttons. +# Increment the index_number for next time. +# If the indexname command is defined or a newcommand defined for indexname, do it. +# Save the index TITLE in the toc +# Save the first_idx_file into the idxfile. This goes into the nav buttons. +# Build index_labels if needed. +# Create the index headings and put them in the output stream. + +{ my $index_number = 0; # Will be incremented before use. + my $first_idx_file; # Static + my $no_increment = 0; + +sub do_cmd_textohtmlindex { + local($_) = @_; + my ($idxref,$idxnum,$index_name); + + # We get called from make_name with the first argument = "\001noincrement". This is a sign + # to not increment $index_number the next time we are called. We get called twice, once + # my make_name and once by process_command. Unfortunately, make_name calls us just to set the name + # but doesn't use the result so we get called a second time by process_command. This works fine + # except for cases where there are multiple indices except if they aren't named, which is the case + # when the index is inserted by an include command in latex. In these cases we are only able to use + # the index number to decide which index to draw from, and we don't know how to increment that index + # number if we get called a variable number of times for the same index, as is the case between + # making html (one output file) and web (multiple output files) output formats. + if (/\001noincrement/) { + $no_increment = 1; + return; + } + + # Remove (but save) the index reference + s/^\s*\[(.*?)\]/{$idxref = $1; "";}/e; + + # If we have an $idxref, the index name was specified. In this case, we have all the + # information we need to carry on. 
Otherwise, we need to get the idxref + # from the $index_number and set the name to "Index". + if ($idxref) { + $index_name = $indices{'title'}{$idxref}; + } else { + if (defined ($idxref = $indices{'newcmdorder'}->[$index_number])) { + $index_name = $indices{'title'}{$idxref}; + } else { + $idxref = ''; + $index_name = "Index"; + } + } + + $idx_title = "Index"; # The name displayed in the nav bar text. + + # Only set $idxfile if we are at the first index. This will point the + # navigation panel to the first index file rather than the last. + $first_idx_file = $CURRENT_FILE if ($index_number == 0); + $idxfile = $first_idx_file; # Pointer for the Index button in the nav bar. + $toc_sec_title = $index_name; # Index link text in the toc. + $TITLE = $toc_sec_title; # Title for this index, from which its filename is built. + if (%index_labels) { &make_index_labels(); } + if (($SHORT_INDEX) && (%index_segment)) { &make_preindex(); } + else { $preindex = ''; } + local $idx_head = $section_headings{'textohtmlindex'}; + local($heading) = join('' + , &make_section_heading($TITLE, $idx_head) + , $idx_mark, "\002", $idxref, "\002" ); + local($pre,$post) = &minimize_open_tags($heading); + $index_number++ unless ($no_increment); + $no_increment = 0; + join('',"
\n" , $pre, $_); +} +} + +# Returns an index key, given the key passed as the first argument. +# Not modified for multiple indices. +sub add_idx_key { + local($key) = @_; + local($index, $next); + if (($index{$key} eq '@' )&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { $index .= "

\n
".&print_key."\n
"; } + else { $index .= "

\n
".&print_key."\n
"; } + } elsif (($index{$key})&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { + $next = "
".&print_key."\n : ". &print_idx_links; + } else { + $next = "
".&print_key."\n
". &print_idx_links; + } + $index .= $next."\n"; + $index_printed{$key} = 1; + } + + if ($sub_index{$key}) { + local($subkey, @subkeys, $subnext, $subindex); + @subkeys = sort(split("\004", $sub_index{$key})); + if ($SHORT_INDEX) { + $index .= "
".&print_key unless $index_printed{$key}; + $index .= "
\n"; + } else { + $index .= "
".&print_key."\n
" unless $index_printed{$key}; + $index .= "
\n"; + } + foreach $subkey (@subkeys) { + $index .= &add_sub_idx_key($subkey) unless ($index_printed{$subkey}); + } + $index .= "
\n"; + } + return $index; +} + +1; # Must be present as the last line. diff --git a/docs/manuals/en/developers/latex2html-init.pl b/docs/manuals/en/developers/latex2html-init.pl new file mode 100644 index 00000000..14b5c319 --- /dev/null +++ b/docs/manuals/en/developers/latex2html-init.pl @@ -0,0 +1,10 @@ +# This file serves as a place to put initialization code and constants to +# affect the behavior of latex2html for generating the bacula manuals. + +# $LINKPOINT specifies what filename to use to link to when creating +# index.html. Not that this is a hard link. +$LINKPOINT='"$OVERALL_TITLE"'; + + +# The following must be the last line of this file. +1; diff --git a/docs/manuals/en/developers/md5.tex b/docs/manuals/en/developers/md5.tex new file mode 100644 index 00000000..aed995b4 --- /dev/null +++ b/docs/manuals/en/developers/md5.tex @@ -0,0 +1,184 @@ +%% +%% + +\chapter{Bacula MD5 Algorithm} +\label{MD5Chapter} +\addcontentsline{toc}{section}{} + +\section{Command Line Message Digest Utility } +\index{Utility!Command Line Message Digest } +\index{Command Line Message Digest Utility } +\addcontentsline{toc}{subsection}{Command Line Message Digest Utility} + + +This page describes {\bf md5}, a command line utility usable on either Unix or +MS-DOS/Windows, which generates and verifies message digests (digital +signatures) using the MD5 algorithm. This program can be useful when +developing shell scripts or Perl programs for software installation, file +comparison, and detection of file corruption and tampering. 
+ +\subsection{Name} +\index{Name} +\addcontentsline{toc}{subsubsection}{Name} + +{\bf md5} - generate / check MD5 message digest + +\subsection{Synopsis} +\index{Synopsis } +\addcontentsline{toc}{subsubsection}{Synopsis} + +{\bf md5} [ {\bf -c}{\it signature} ] [ {\bf -u} ] [ {\bf -d}{\it input\_text} +| {\it infile} ] [ {\it outfile} ] + +\subsection{Description} +\index{Description } +\addcontentsline{toc}{subsubsection}{Description} + +A {\it message digest} is a compact digital signature for an arbitrarily long +stream of binary data. An ideal message digest algorithm would never generate +the same signature for two different sets of input, but achieving such +theoretical perfection would require a message digest as long as the input +file. Practical message digest algorithms compromise in favour of a digital +signature of modest size created with an algorithm designed to make +preparation of input text with a given signature computationally infeasible. +Message digest algorithms have much in common with techniques used in +encryption, but to a different end; verification that data have not been +altered since the signature was published. + +Many older programs requiring digital signatures employed 16 or 32 bit {\it +cyclical redundancy codes} (CRC) originally developed to verify correct +transmission in data communication protocols, but these short codes, while +adequate to detect the kind of transmission errors for which they were +intended, are insufficiently secure for applications such as electronic +commerce and verification of security related software distributions. 
+ +The most commonly used present-day message digest algorithm is the 128 bit MD5 +algorithm, developed by Ron Rivest of the +\elink{MIT}{http://web.mit.edu/} +\elink{Laboratory for Computer Science}{http://www.lcs.mit.edu/} and +\elink{RSA Data Security, Inc.}{http://www.rsa.com/} The algorithm, with a +reference implementation, was published as Internet +\elink{RFC 1321}{http://www.fourmilab.ch/md5/rfc1321.html} in April 1992, and +was placed into the public domain at that time. Message digest algorithms such +as MD5 are not deemed ``encryption technology'' and are not subject to the +export controls some governments impose on other data security products. +(Obviously, the responsibility for obeying the laws in the jurisdiction in +which you reside is entirely your own, but many common Web and Mail utilities +use MD5, and I am unaware of any restrictions on their distribution and use.) + +The MD5 algorithm has been implemented in numerous computer languages +including C, +\elink{Perl}{http://www.perl.org/}, and +\elink{Java}{http://www.javasoft.com/}; if you're writing a program in such a +language, track down a suitable subroutine and incorporate it into your +program. The program described on this page is a {\it command line} +implementation of MD5, intended for use in shell scripts and Perl programs (it +is much faster than computing an MD5 signature directly in Perl). This {\bf +md5} program was originally developed as part of a suite of tools intended to +monitor large collections of files (for example, the contents of a Web site) +to detect corruption of files and inadvertent (or perhaps malicious) changes. 
+That task is now best accomplished with more comprehensive packages such as +\elink{Tripwire}{ftp://coast.cs.purdue.edu/pub/COAST/Tripwire/}, but the +command line {\bf md5} component continues to prove useful for verifying +correct delivery and installation of software packages, comparing the contents +of two different systems, and checking for changes in specific files. + +\subsection{Options} +\index{Options } +\addcontentsline{toc}{subsubsection}{Options} + +\begin{description} + +\item [{\bf -c}{\it signature} ] + \index{-csignature } + Computes the signature of the specified {\it infile} or the string supplied +by the {\bf -d} option and compares it against the specified {\it signature}. +If the two signatures match, the exit status will be zero, otherwise the exit +status will be 1. No signature is written to {\it outfile} or standard +output; only the exit status is set. The signature to be checked must be +specified as 32 hexadecimal digits. + +\item [{\bf -d}{\it input\_text} ] + \index{-dinput\_text } + A signature is computed for the given {\it input\_text} (which must be quoted +if it contains white space characters) instead of input from {\it infile} or +standard input. If input is specified with the {\bf -d} option, no {\it +infile} should be specified. + +\item [{\bf -u} ] + Print how-to-call information. + \end{description} + +\subsection{Files} +\index{Files } +\addcontentsline{toc}{subsubsection}{Files} + +If no {\it infile} or {\bf -d} option is specified or {\it infile} is a single +``-'', {\bf md5} reads from standard input; if no {\it outfile} is given, or +{\it outfile} is a single ``-'', output is sent to standard output. Input and +output are processed strictly serially; consequently {\bf md5} may be used in +pipelines. 
+
+\subsection{Bugs}
+\index{Bugs }
+\addcontentsline{toc}{subsubsection}{Bugs}
+
+The mechanism used to set standard input to binary mode may be specific to
+Microsoft C; if you rebuild the DOS/Windows version of the program from source
+using another compiler, be sure to verify binary files work properly when read
+via redirection or a pipe.
+
+This program has not been tested on a machine on which {\tt int} and/or {\tt
+long} are longer than 32 bits.
+
+\section{
+\elink{Download md5.zip}{http://www.fourmilab.ch/md5/md5.zip} (Zipped
+archive)}
+\index{Archive!Download md5.zip Zipped }
+\index{Download md5.zip (Zipped archive) }
+\addcontentsline{toc}{subsection}{Download md5.zip (Zipped archive)}
+
+The program is provided as
+\elink{md5.zip}{http://www.fourmilab.ch/md5/md5.zip}, a
+\elink{Zipped}{http://www.pkware.com/} archive containing a ready-to-run
+Win32 command-line executable program, {\tt md5.exe} (compiled using Microsoft
+Visual C++ 5.0), and in source code form along with a {\tt Makefile} to build
+the program under Unix.
+
+\subsection{See Also}
+\index{ALSO!SEE }
+\index{See Also }
+\addcontentsline{toc}{subsubsection}{SEE ALSO}
+
+{\bf sum}(1)
+
+\subsection{Exit Status}
+\index{Status!Exit }
+\index{Exit Status }
+\addcontentsline{toc}{subsubsection}{Exit Status}
+
+{\bf md5} returns status 0 if processing was completed without errors, 1 if
+the {\bf -c} option was specified and the given signature does not match that
+of the input, and 2 if processing could not be performed at all due, for
+example, to a nonexistent input file.
+
+\subsection{Copying}
+\index{Copying }
+\addcontentsline{toc}{subsubsection}{Copying}
+
+\begin{quote}
+This software is in the public domain. Permission to use, copy, modify, and
+distribute this software and its documentation for any purpose and without
+fee is hereby granted, without any conditions or restrictions. This software
+is provided ``as is'' without express or implied warranty.
+\end{quote} + +\subsection{Acknowledgements} +\index{Acknowledgements } +\addcontentsline{toc}{subsubsection}{Acknowledgements} + +The MD5 algorithm was developed by Ron Rivest. The public domain C language +implementation used in this program was written by Colin Plumb in 1993. +{\it +\elink{by John Walker}{http://www.fourmilab.ch/} +January 6th, MIM } diff --git a/docs/manuals/en/developers/mediaformat.tex b/docs/manuals/en/developers/mediaformat.tex new file mode 100644 index 00000000..cc824f78 --- /dev/null +++ b/docs/manuals/en/developers/mediaformat.tex @@ -0,0 +1,1115 @@ +%% +%% + +\chapter{Storage Media Output Format} +\label{_ChapterStart9} +\index{Format!Storage Media Output} +\index{Storage Media Output Format} +\addcontentsline{toc}{section}{Storage Media Output Format} + +\section{General} +\index{General} +\addcontentsline{toc}{subsection}{General} + +This document describes the media format written by the Storage daemon. The +Storage daemon reads and writes in units of blocks. Blocks contain records. +Each block has a block header followed by records, and each record has a +record header followed by record data. + +This chapter is intended to be a technical discussion of the Media Format and +as such is not targeted at end users but rather at developers and system +administrators that want or need to know more of the working details of {\bf +Bacula}. + +\section{Definitions} +\index{Definitions} +\addcontentsline{toc}{subsection}{Definitions} + +\begin{description} + +\item [Block] + \index{Block} + A block represents the primitive unit of information that the Storage daemon +reads and writes to a physical device. Normally, for a tape device, it will +be the same as a tape block. The Storage daemon always reads and writes +blocks. A block consists of block header information followed by records. +Clients of the Storage daemon (the File daemon) normally never see blocks. +However, some of the Storage tools (bls, bscan, bextract, ...) 
may use
+block header information. In older Bacula tape versions, a block could
+contain records (see record definition below) from multiple jobs. However,
+all blocks currently written by Bacula are block level BB02, and a given
+block contains records for only a single job. Different jobs simply have
+their own private blocks that are intermingled with the other blocks from
+other jobs on the Volume (previously the records were intermingled within
+the blocks). Having only records from a single job in any given block
+permitted moving the VolumeSessionId and VolumeSessionTime (see below) from
+each record heading to the Block header. This has two advantages: 1. a block
+can be quickly rejected based on the contents of the header without reading
+all the records. 2. because there is on the average more than one record per
+block, less data is written to the Volume for each job.
+
+\item [Record]
+ \index{Record}
+ A record consists of a Record Header, which is managed by the Storage daemon
+and Record Data, which is the data received from the Client. A record is the
+primitive unit of information sent to and from the Storage daemon by the
+Client (File daemon) programs. The details are described below.
+
+\item [JobId]
+ \index{JobId}
+ A number assigned by the Director daemon for a particular job. This number
+will be unique for that particular Director (Catalog). The daemons use this
+number to keep track of individual jobs. Within the Storage daemon, the JobId
+may not be unique if several Directors are accessing the Storage daemon
+simultaneously.
+
+\item [Session]
+ \index{Session}
+ A Session is a concept used in the Storage daemon that corresponds one to one to a
+Job with the exception that each session is uniquely identified within the
+Storage daemon by a unique SessionId/SessionTime pair (see below).
+ +\item [VolSessionId] + \index{VolSessionId} + A unique number assigned by the Storage daemon to a particular session (Job) +it is having with a File daemon. This number by itself is not unique to the +given Volume, but with the VolSessionTime, it is unique. + +\item [VolSessionTime] + \index{VolSessionTime} + A unique number assigned by the Storage daemon to a particular Storage daemon +execution. It is actually the Unix time\_t value of when the Storage daemon +began execution cast to a 32 bit unsigned integer. The combination of the +{\bf VolSessionId} and the {\bf VolSessionTime} for a given Storage daemon is +guaranteed to be unique for each Job (or session). + +\item [FileIndex] + \index{FileIndex} + A sequential number beginning at one assigned by the File daemon to the files +within a job that are sent to the Storage daemon for backup. The Storage +daemon ensures that this number is greater than zero and sequential. Note, +the Storage daemon uses negative FileIndexes to flag Session Start and End +Labels as well as End of Volume Labels. Thus, the combination of +VolSessionId, VolSessionTime, and FileIndex uniquely identifies the records +for a single file written to a Volume. + +\item [Stream] + \index{Stream} + While writing the information for any particular file to the Volume, there +can be any number of distinct pieces of information about that file, e.g. the +attributes, the file data, ... The Stream indicates what piece of data it +is, and it is an arbitrary number assigned by the File daemon to the parts +(Unix attributes, Win32 attributes, data, compressed data,\ ...) of a file +that are sent to the Storage daemon. The Storage daemon has no knowledge of +the details of a Stream; it simply represents a numbered stream of bytes. The +data for a given stream may be passed to the Storage daemon in single record, +or in multiple records. 
+ +\item [Block Header] + \index{Block Header} + A block header consists of a block identification (``BB02''), a block length +in bytes (typically 64,512) a checksum, and sequential block number. Each +block starts with a Block Header and is followed by Records. Current block +headers also contain the VolSessionId and VolSessionTime for the records +written to that block. + +\item [Record Header] + \index{Record Header} + A record header contains the Volume Session Id, the Volume Session Time, the +FileIndex, the Stream, and the size of the data record which follows. The +Record Header is always immediately followed by a Data Record if the size +given in the Header is greater than zero. Note, for Block headers of level +BB02 (version 1.27 and later), the Record header as written to tape does not +contain the Volume Session Id and the Volume Session Time as these two +fields are stored in the BB02 Block header. The in-memory record header does +have those fields for convenience. + +\item [Data Record] + \index{Data Record} + A data record consists of a binary stream of bytes and is always preceded by +a Record Header. The details of the meaning of the binary stream of bytes are +unknown to the Storage daemon, but the Client programs (File daemon) defines +and thus knows the details of each record type. + +\item [Volume Label] + \index{Volume Label} + A label placed by the Storage daemon at the beginning of each storage volume. +It contains general information about the volume. It is written in Record +format. The Storage daemon manages Volume Labels, and if the client wants, he +may also read them. + +\item [Begin Session Label] + \index{Begin Session Label} + The Begin Session Label is a special record placed by the Storage daemon on +the storage medium as the first record of an append session job with a File +daemon. 
This record is useful for finding the beginning of a particular +session (Job), since no records with the same VolSessionId and VolSessionTime +will precede this record. This record is not normally visible outside of the +Storage daemon. The Begin Session Label is similar to the Volume Label except +that it contains additional information pertaining to the Session. + +\item [End Session Label] + \index{End Session Label} + The End Session Label is a special record placed by the Storage daemon on the +storage medium as the last record of an append session job with a File +daemon. The End Session Record is distinguished by a FileIndex with a value +of minus two (-2). This record is useful for detecting the end of a +particular session since no records with the same VolSessionId and +VolSessionTime will follow this record. This record is not normally visible +outside of the Storage daemon. The End Session Label is similar to the Volume +Label except that it contains additional information pertaining to the +Session. +\end{description} + +\section{Storage Daemon File Output Format} +\index{Format!Storage Daemon File Output} +\index{Storage Daemon File Output Format} +\addcontentsline{toc}{subsection}{Storage Daemon File Output Format} + +The file storage and tape storage formats are identical except that tape +records are by default blocked into blocks of 64,512 bytes, except for the +last block, which is the actual number of bytes written rounded up to a +multiple of 1024 whereas the last record of file storage is not rounded up. +The default block size of 64,512 bytes may be overridden by the user (some +older tape drives only support block sizes of 32K). Each Session written to +tape is terminated with an End of File mark (this will be removed later). +Sessions written to file are simply appended to the end of the file. 
+ +\section{Overall Format} +\index{Format!Overall} +\index{Overall Format} +\addcontentsline{toc}{subsection}{Overall Format} + +A Bacula output file consists of Blocks of data. Each block contains a block +header followed by records. Each record consists of a record header followed +by the record data. The first record on a tape will always be the Volume Label +Record. + +No Record Header will be split across Bacula blocks. However, Record Data may +be split across any number of Bacula blocks. Obviously this will not be the +case for the Volume Label which will always be smaller than the Bacula Block +size. + +To simplify reading tapes, the Start of Session (SOS) and End of Session (EOS) +records are never split across blocks. If this is about to happen, Bacula will +write a short block before writing the session record (actually, the SOS +record should always be the first record in a block, excepting perhaps the +Volume label). + +Due to hardware limitations, the last block written to the tape may not be +fully written. If your drive permits backspace record, Bacula will backup over +the last record written on the tape, re-read it and verify that it was +correctly written. + +When a new tape is mounted Bacula will write the full contents of the +partially written block to the new tape ensuring that there is no loss of +data. When reading a tape, Bacula will discard any block that is not totally +written, thus ensuring that there is no duplication of data. In addition, +since Bacula blocks are sequentially numbered within a Job, it is easy to +ensure that no block is missing or duplicated. + +\section{Serialization} +\index{Serialization} +\addcontentsline{toc}{subsection}{Serialization} + +All Block Headers, Record Headers, and Label Records are written using +Bacula's serialization routines. These routines guarantee that the data is +written to the output volume in a machine independent format. 
+ +\section{Block Header} +\index{Header!Block} +\index{Block Header} +\addcontentsline{toc}{subsection}{Block Header} + +The format of the Block Header (version 1.27 and later) is: + +\footnotesize +\begin{verbatim} + uint32_t CheckSum; /* Block check sum */ + uint32_t BlockSize; /* Block byte size including the header */ + uint32_t BlockNumber; /* Block number */ + char ID[4] = "BB02"; /* Identification and block level */ + uint32_t VolSessionId; /* Session Id for Job */ + uint32_t VolSessionTime; /* Session Time for Job */ +\end{verbatim} +\normalsize + +The Block header is a fixed length and fixed format and is followed by Record +Headers and Record Data. The CheckSum field is a 32 bit checksum of the block +data and the block header but not including the CheckSum field. The Block +Header is always immediately followed by a Record Header. If the tape is +damaged, a Bacula utility will be able to recover as much information as +possible from the tape by recovering blocks which are valid. The Block header +is written using the Bacula serialization routines and thus is guaranteed to +be in machine independent format. See below for version 2 of the block header. + + +\section{Record Header} +\index{Header!Record} +\index{Record Header} +\addcontentsline{toc}{subsection}{Record Header} + +Each binary data record is preceded by a Record Header. The Record Header is +fixed length and fixed format, whereas the binary data record is of variable +length. The Record Header is written using the Bacula serialization routines +and thus is guaranteed to be in machine independent format. 
+ +The format of the Record Header (version 1.27 or later) is: + +\footnotesize +\begin{verbatim} + int32_t FileIndex; /* File index supplied by File daemon */ + int32_t Stream; /* Stream number supplied by File daemon */ + uint32_t DataSize; /* size of following data record in bytes */ +\end{verbatim} +\normalsize + +This record is followed by the binary Stream data of DataSize bytes, followed +by another Record Header record and the binary stream data. For the definitive +definition of this record, see record.h in the src/stored directory. + +Additional notes on the above: + +\begin{description} + +\item [The {\bf VolSessionId} ] + \index{VolSessionId} + is a unique sequential number that is assigned by the Storage Daemon to a +particular Job. This number is sequential since the start of execution of the +daemon. + +\item [The {\bf VolSessionTime} ] + \index{VolSessionTime} + is the time/date that the current execution of the Storage Daemon started. It +assures that the combination of VolSessionId and VolSessionTime is unique for +every jobs written to the tape, even if there was a machine crash between two +writes. + +\item [The {\bf FileIndex} ] + \index{FileIndex} + is a sequential file number within a job. The Storage daemon requires this +index to be greater than zero and sequential. Note, however, that the File +daemon may send multiple Streams for the same FileIndex. In addition, the +Storage daemon uses negative FileIndices to hold the Begin Session Label, the +End Session Label, and the End of Volume Label. + +\item [The {\bf Stream} ] + \index{Stream} + is defined by the File daemon and is used to identify separate parts of the +data saved for each file (Unix attributes, Win32 attributes, file data, +compressed file data, sparse file data, ...). The Storage Daemon has no idea +of what a Stream is or what it contains except that the Stream is required to +be a positive integer. 
Negative Stream numbers are used internally by the +Storage daemon to indicate that the record is a continuation of the previous +record (the previous record would not entirely fit in the block). + +For Start Session and End Session Labels (where the FileIndex is negative), +the Storage daemon uses the Stream field to contain the JobId. The current +stream definitions are: + +\footnotesize +\begin{verbatim} +#define STREAM_UNIX_ATTRIBUTES 1 /* Generic Unix attributes */ +#define STREAM_FILE_DATA 2 /* Standard uncompressed data */ +#define STREAM_MD5_SIGNATURE 3 /* MD5 signature for the file */ +#define STREAM_GZIP_DATA 4 /* GZip compressed file data */ +/* Extended Unix attributes with Win32 Extended data. Deprecated. */ +#define STREAM_UNIX_ATTRIBUTES_EX 5 /* Extended Unix attr for Win32 EX */ +#define STREAM_SPARSE_DATA 6 /* Sparse data stream */ +#define STREAM_SPARSE_GZIP_DATA 7 +#define STREAM_PROGRAM_NAMES 8 /* program names for program data */ +#define STREAM_PROGRAM_DATA 9 /* Data needing program */ +#define STREAM_SHA1_SIGNATURE 10 /* SHA1 signature for the file */ +#define STREAM_WIN32_DATA 11 /* Win32 BackupRead data */ +#define STREAM_WIN32_GZIP_DATA 12 /* Gzipped Win32 BackupRead data */ +#define STREAM_MACOS_FORK_DATA 13 /* Mac resource fork */ +#define STREAM_HFSPLUS_ATTRIBUTES 14 /* Mac OS extra attributes */ +#define STREAM_UNIX_ATTRIBUTES_ACCESS_ACL 15 /* Standard ACL attributes on UNIX */ +#define STREAM_UNIX_ATTRIBUTES_DEFAULT_ACL 16 /* Default ACL attributes on UNIX */ +\end{verbatim} +\normalsize + +\item [The {\bf DataSize} ] + \index{DataSize} + is the size in bytes of the binary data record that follows the Session +Record header. The Storage Daemon has no idea of the actual contents of the +binary data record. For standard Unix files, the data record typically +contains the file attributes or the file data. For a sparse file the first +64 bits of the file data contains the storage address for the data block. 
+\end{description} + +The Record Header is never split across two blocks. If there is not enough +room in a block for the full Record Header, the block is padded to the end +with zeros and the Record Header begins in the next block. The data record, on +the other hand, may be split across multiple blocks and even multiple physical +volumes. When a data record is split, the second (and possibly subsequent) +piece of the data is preceded by a new Record Header. Thus each piece of data +is always immediately preceded by a Record Header. When reading a record, if +Bacula finds only part of the data in the first record, it will automatically +read the next record and concatenate the data record to form a full data +record. + +\section{Version BB02 Block Header} +\index{Version BB02 Block Header} +\index{Header!Version BB02 Block} +\addcontentsline{toc}{subsection}{Version BB02 Block Header} + +Each session or Job has its own private block. As a consequence, the SessionId +and SessionTime are written once in each Block Header and not in the Record +Header. So, the second and current version of the Block Header BB02 is: + +\footnotesize +\begin{verbatim} + uint32_t CheckSum; /* Block check sum */ + uint32_t BlockSize; /* Block byte size including the header */ + uint32_t BlockNumber; /* Block number */ + char ID[4] = "BB02"; /* Identification and block level */ + uint32_t VolSessionId; /* Applies to all records */ + uint32_t VolSessionTime; /* contained in this block */ +\end{verbatim} +\normalsize + +As with the previous version, the BB02 Block header is a fixed length and +fixed format and is followed by Record Headers and Record Data. The CheckSum +field is a 32 bit CRC checksum of the block data and the block header but not +including the CheckSum field. The Block Header is always immediately followed +by a Record Header. 
If the tape is damaged, a Bacula utility will be able to +recover as much information as possible from the tape by recovering blocks +which are valid. The Block header is written using the Bacula serialization +routines and thus is guaranteed to be in machine independent format. + +\section{Version 2 Record Header} +\index{Version 2 Record Header} +\index{Header!Version 2 Record} +\addcontentsline{toc}{subsection}{Version 2 Record Header} + +Version 2 Record Header is written to the medium when using Version BB02 Block +Headers. The memory representation of the record is identical to the old BB01 +Record Header, but on the storage medium, the first two fields, namely +VolSessionId and VolSessionTime are not written. The Block Header is filled +with these values when the First user record is written (i.e. non label +record) so that when the block is written, it will have the current and unique +VolSessionId and VolSessionTime. On reading each record from the Block, the +VolSessionId and VolSessionTime is filled in the Record Header from the Block +Header. + +\section{Volume Label Format} +\index{Volume Label Format} +\index{Format!Volume Label} +\addcontentsline{toc}{subsection}{Volume Label Format} + +Tape volume labels are created by the Storage daemon in response to a {\bf +label} command given to the Console program, or alternatively by the {\bf +btape} program. Each volume is labeled with the following information +using the Bacula serialization routines, which guarantee machine byte order +independence. 
+ +For Bacula versions 1.27 and later, the Volume Label Format is: + +\footnotesize +\begin{verbatim} + char Id[32]; /* Bacula 1.0 Immortal\n */ + uint32_t VerNum; /* Label version number */ + /* VerNum 11 and greater Bacula 1.27 and later */ + btime_t label_btime; /* Time/date tape labeled */ + btime_t write_btime; /* Time/date tape first written */ + /* The following are 0 in VerNum 11 and greater */ + float64_t write_date; /* Date this label written */ + float64_t write_time; /* Time this label written */ + char VolName[128]; /* Volume name */ + char PrevVolName[128]; /* Previous Volume Name */ + char PoolName[128]; /* Pool name */ + char PoolType[128]; /* Pool type */ + char MediaType[128]; /* Type of this media */ + char HostName[128]; /* Host name of writing computer */ + char LabelProg[32]; /* Label program name */ + char ProgVersion[32]; /* Program version */ + char ProgDate[32]; /* Program build date/time */ +\end{verbatim} +\normalsize + +Note, the LabelType (Volume Label, Volume PreLabel, Session Start Label, ...) +is stored in the record FileIndex field of the Record Header and does not +appear in the data part of the record. + +\section{Session Label} +\index{Label!Session} +\index{Session Label} +\addcontentsline{toc}{subsection}{Session Label} + +The Session Label is written at the beginning and end of each session as well +as the last record on the physical medium. It has the following binary format: + + +\footnotesize +\begin{verbatim} + char Id[32]; /* Bacula Immortal ... 
*/ + uint32_t VerNum; /* Label version number */ + uint32_t JobId; /* Job id */ + uint32_t VolumeIndex; /* sequence no of vol */ + /* Prior to VerNum 11 */ + float64_t write_date; /* Date this label written */ + /* VerNum 11 and greater */ + btime_t write_btime; /* time/date record written */ + /* The following is zero VerNum 11 and greater */ + float64_t write_time; /* Time this label written */ + char PoolName[128]; /* Pool name */ + char PoolType[128]; /* Pool type */ + char JobName[128]; /* base Job name */ + char ClientName[128]; + /* Added in VerNum 10 */ + char Job[128]; /* Unique Job name */ + char FileSetName[128]; /* FileSet name */ + uint32_t JobType; + uint32_t JobLevel; +\end{verbatim} +\normalsize + +In addition, the EOS label contains: + +\footnotesize +\begin{verbatim} + /* The remainder are part of EOS label only */ + uint32_t JobFiles; + uint64_t JobBytes; + uint32_t start_block; + uint32_t end_block; + uint32_t start_file; + uint32_t end_file; + uint32_t JobErrors; +\end{verbatim} +\normalsize + +In addition, for VerNum greater than 10, the EOS label contains (in addition +to the above): + +\footnotesize +\begin{verbatim} + uint32_t JobStatus /* Job termination code */ +\end{verbatim} +\normalsize + +Note, the LabelType (Volume Label, Volume PreLabel, Session Start Label, +...) is stored in the record FileIndex field and does not appear in the data +part of the record. Also, the Stream field of the Record Header contains the +JobId. This permits quick filtering without actually reading all the session +data in many cases. + +\section{Overall Storage Format} +\index{Format!Overall Storage} +\index{Overall Storage Format} +\addcontentsline{toc}{subsection}{Overall Storage Format} + +\footnotesize +\begin{verbatim} + Current Bacula Tape Format + 6 June 2001 + Version BB02 added 28 September 2002 + Version BB01 is the old deprecated format. + A Bacula tape is composed of tape Blocks. Each block + has a Block header followed by the block data. 
Block + Data consists of Records. Records consist of Record + Headers followed by Record Data. + :=======================================================: + | | + | Block Header (24 bytes) | + | | + |-------------------------------------------------------| + | | + | Record Header (12 bytes) | + | | + |-------------------------------------------------------| + | | + | Record Data | + | | + |-------------------------------------------------------| + | | + | Record Header (12 bytes) | + | | + |-------------------------------------------------------| + | | + | ... | + Block Header: the first item in each block. The format is + shown below. + Partial Data block: occurs if the data from a previous + block spills over to this block (the normal case except + for the first block on a tape). However, this partial + data block is always preceded by a record header. + Record Header: identifies the Volume Session, the Stream + and the following Record Data size. See below for format. + Record data: arbitrary binary data. + Block Header Format BB02 + :=======================================================: + | CheckSum (uint32_t) | + |-------------------------------------------------------| + | BlockSize (uint32_t) | + |-------------------------------------------------------| + | BlockNumber (uint32_t) | + |-------------------------------------------------------| + | "BB02" (char [4]) | + |-------------------------------------------------------| + | VolSessionId (uint32_t) | + |-------------------------------------------------------| + | VolSessionTime (uint32_t) | + :=======================================================: + BB02: Serves to identify the block as a + Bacula block and also serves as a block format identifier + should we ever need to change the format. + BlockSize: is the size in bytes of the block. When reading + back a block, if the BlockSize does not agree with the + actual size read, Bacula discards the block. + CheckSum: a checksum for the Block. 
+ BlockNumber: is the sequential block number on the tape. + VolSessionId: a unique sequential number that is assigned + by the Storage Daemon to a particular Job. + This number is sequential since the start + of execution of the daemon. + VolSessionTime: the time/date that the current execution + of the Storage Daemon started. It assures + that the combination of VolSessionId and + VolSessionTime is unique for all jobs + written to the tape, even if there was a + machine crash between two writes. + Record Header Format BB02 + :=======================================================: + | FileIndex (int32_t) | + |-------------------------------------------------------| + | Stream (int32_t) | + |-------------------------------------------------------| + | DataSize (uint32_t) | + :=======================================================: + FileIndex: a sequential file number within a job. The + Storage daemon enforces this index to be + greater than zero and sequential. Note, + however, that the File daemon may send + multiple Streams for the same FileIndex. + The Storage Daemon uses negative FileIndices + to identify Session Start and End labels + as well as the End of Volume labels. + Stream: defined by the File daemon and is intended to be + used to identify separate parts of the data + saved for each file (attributes, file data, + ...). The Storage Daemon has no idea of + what a Stream is or what it contains. + DataSize: the size in bytes of the binary data record + that follows the Session Record header. + The Storage Daemon has no idea of the + actual contents of the binary data record. + For standard Unix files, the data record + typically contains the file attributes or + the file data. For a sparse file + the first 64 bits of the data contains + the storage address for the data block. 
+ Volume Label + :=======================================================: + | Id (32 bytes) | + |-------------------------------------------------------| + | VerNum (uint32_t) | + |-------------------------------------------------------| + | label_date (float64_t) | + | label_btime (btime_t VerNum 11 | + |-------------------------------------------------------| + | label_time (float64_t) | + | write_btime (btime_t VerNum 11 | + |-------------------------------------------------------| + | write_date (float64_t) | + | 0 (float64_t) VerNum 11 | + |-------------------------------------------------------| + | write_time (float64_t) | + | 0 (float64_t) VerNum 11 | + |-------------------------------------------------------| + | VolName (128 bytes) | + |-------------------------------------------------------| + | PrevVolName (128 bytes) | + |-------------------------------------------------------| + | PoolName (128 bytes) | + |-------------------------------------------------------| + | PoolType (128 bytes) | + |-------------------------------------------------------| + | MediaType (128 bytes) | + |-------------------------------------------------------| + | HostName (128 bytes) | + |-------------------------------------------------------| + | LabelProg (32 bytes) | + |-------------------------------------------------------| + | ProgVersion (32 bytes) | + |-------------------------------------------------------| + | ProgDate (32 bytes) | + |-------------------------------------------------------| + :=======================================================: + + Id: 32 byte Bacula identifier "Bacula 1.0 immortal\n" + (old version also recognized:) + Id: 32 byte Bacula identifier "Bacula 0.9 mortal\n" + LabelType (Saved in the FileIndex of the Header record). 
+ PRE_LABEL -1 Volume label on unwritten tape + VOL_LABEL -2 Volume label after tape written + EOM_LABEL -3 Label at EOM (not currently implemented) + SOS_LABEL -4 Start of Session label (format given below) + EOS_LABEL -5 End of Session label (format given below) + VerNum: 11 + label_date: Julian day tape labeled + label_time: Julian time tape labeled + write_date: Julian date tape first used (data written) + write_time: Julian time tape first used (data written) + VolName: "Physical" Volume name + PrevVolName: The VolName of the previous tape (if this tape is + a continuation of the previous one). + PoolName: Pool Name + PoolType: Pool Type + MediaType: Media Type + HostName: Name of host that is first writing the tape + LabelProg: Name of the program that labeled the tape + ProgVersion: Version of the label program + ProgDate: Date Label program built + Session Label + :=======================================================: + | Id (32 bytes) | + |-------------------------------------------------------| + | VerNum (uint32_t) | + |-------------------------------------------------------| + | JobId (uint32_t) | + |-------------------------------------------------------| + | write_btime (btime_t) VerNum 11 | + |-------------------------------------------------------| + | 0 (float64_t) VerNum 11 | + |-------------------------------------------------------| + | PoolName (128 bytes) | + |-------------------------------------------------------| + | PoolType (128 bytes) | + |-------------------------------------------------------| + | JobName (128 bytes) | + |-------------------------------------------------------| + | ClientName (128 bytes) | + |-------------------------------------------------------| + | Job (128 bytes) | + |-------------------------------------------------------| + | FileSetName (128 bytes) | + |-------------------------------------------------------| + | JobType (uint32_t) | + |-------------------------------------------------------| + | JobLevel 
(uint32_t) | + |-------------------------------------------------------| + | FileSetMD5 (50 bytes) VerNum 11 | + |-------------------------------------------------------| + Additional fields in End Of Session Label + |-------------------------------------------------------| + | JobFiles (uint32_t) | + |-------------------------------------------------------| + | JobBytes (uint32_t) | + |-------------------------------------------------------| + | start_block (uint32_t) | + |-------------------------------------------------------| + | end_block (uint32_t) | + |-------------------------------------------------------| + | start_file (uint32_t) | + |-------------------------------------------------------| + | end_file (uint32_t) | + |-------------------------------------------------------| + | JobErrors (uint32_t) | + |-------------------------------------------------------| + | JobStatus (uint32_t) VerNum 11 | + :=======================================================: + * => fields deprecated + Id: 32 byte Bacula Identifier "Bacula 1.0 immortal\n" + LabelType (in FileIndex field of Header): + EOM_LABEL -3 Label at EOM + SOS_LABEL -4 Start of Session label + EOS_LABEL -5 End of Session label + VerNum: 11 + JobId: JobId + write_btime: Bacula time/date this tape record written + write_date: Julian date tape this record written - deprecated + write_time: Julian time tape this record written - deprecated. + PoolName: Pool Name + PoolType: Pool Type + MediaType: Media Type + ClientName: Name of File daemon or Client writing this session + Not used for EOM_LABEL. 
+\end{verbatim} +\normalsize + +\section{Unix File Attributes} +\index{Unix File Attributes} +\index{Attributes!Unix File} +\addcontentsline{toc}{subsection}{Unix File Attributes} + +The Unix File Attributes packet consists of the following: + +\lt{}File-Index\gt{} \lt{}Type\gt{} +\lt{}Filename\gt{}@\lt{}File-Attributes\gt{}@\lt{}Link\gt{} +@\lt{}Extended-Attributes@\gt{} where + +\begin{description} + +\item [@] + represents a byte containing a binary zero. + +\item [FileIndex] + \index{FileIndex} + is the sequential file index starting from one assigned by the File daemon. + +\item [Type] + \index{Type} + is one of the following: + +\footnotesize +\begin{verbatim} +#define FT_LNKSAVED 1 /* hard link to file already saved */ +#define FT_REGE 2 /* Regular file but empty */ +#define FT_REG 3 /* Regular file */ +#define FT_LNK 4 /* Soft Link */ +#define FT_DIR 5 /* Directory */ +#define FT_SPEC 6 /* Special file -- chr, blk, fifo, sock */ +#define FT_NOACCESS 7 /* Not able to access */ +#define FT_NOFOLLOW 8 /* Could not follow link */ +#define FT_NOSTAT 9 /* Could not stat file */ +#define FT_NOCHG 10 /* Incremental option, file not changed */ +#define FT_DIRNOCHG 11 /* Incremental option, directory not changed */ +#define FT_ISARCH 12 /* Trying to save archive file */ +#define FT_NORECURSE 13 /* No recursion into directory */ +#define FT_NOFSCHG 14 /* Different file system, prohibited */ +#define FT_NOOPEN 15 /* Could not open directory */ +#define FT_RAW 16 /* Raw block device */ +#define FT_FIFO 17 /* Raw fifo device */ +\end{verbatim} +\normalsize + +\item [Filename] + \index{Filename} + is the fully qualified filename. + +\item [File-Attributes] + \index{File-Attributes} + consists of the 13 fields of the stat() buffer in ASCII base64 format +separated by spaces. These fields and their meanings are shown below. This +stat() packet is in Unix format, and MUST be provided (constructed) for ALL +systems. 
+ +\item [Link] + \index{Link} + when the FT code is FT\_LNK or FT\_LNKSAVED, the item in question is a Unix +link, and this field contains the fully qualified link name. When the FT code +is not FT\_LNK or FT\_LNKSAVED, this field is null. + +\item [Extended-Attributes] + \index{Extended-Attributes} + The exact format of this field is operating system dependent. It contains +additional or extended attributes of a system dependent nature. Currently, +this field is used only on WIN32 systems where it contains a ASCII base64 +representation of the WIN32\_FILE\_ATTRIBUTE\_DATA structure as defined by +Windows. The fields in the base64 representation of this structure are like +the File-Attributes separated by spaces. +\end{description} + +The File-attributes consist of the following: + +\addcontentsline{lot}{table}{File Attributes} +\begin{longtable}{|p{0.6in}|p{0.7in}|p{1in}|p{1in}|p{1.4in}|} + \hline +\multicolumn{1}{|c|}{\bf Field No. } & \multicolumn{1}{c|}{\bf Stat Name } +& \multicolumn{1}{c|}{\bf Unix } & \multicolumn{1}{c|}{\bf Win98/NT } & +\multicolumn{1}{c|}{\bf MacOS } \\ + \hline +\multicolumn{1}{|c|}{1 } & {st\_dev } & {Device number of filesystem } & +{Drive number } & {vRefNum } \\ + \hline +\multicolumn{1}{|c|}{2 } & {st\_ino } & {Inode number } & {Always 0 } & +{fileID/dirID } \\ + \hline +\multicolumn{1}{|c|}{3 } & {st\_mode } & {File mode } & {File mode } & +{777 dirs/apps; 666 docs; 444 locked docs } \\ + \hline +\multicolumn{1}{|c|}{4 } & {st\_nlink } & {Number of links to the file } & +{Number of link (only on NTFS) } & {Always 1 } \\ + \hline +\multicolumn{1}{|c|}{5 } & {st\_uid } & {Owner ID } & {Always 0 } & +{Always 0 } \\ + \hline +\multicolumn{1}{|c|}{6 } & {st\_gid } & {Group ID } & {Always 0 } & +{Always 0 } \\ + \hline +\multicolumn{1}{|c|}{7 } & {st\_rdev } & {Device ID for special files } & +{Drive No. 
} & {Always 0 } \\ + \hline +\multicolumn{1}{|c|}{8 } & {st\_size } & {File size in bytes } & {File +size in bytes } & {Data fork file size in bytes } \\ + \hline +\multicolumn{1}{|c|}{9 } & {st\_blksize } & {Preferred block size } & +{Always 0 } & {Preferred block size } \\ + \hline +\multicolumn{1}{|c|}{10 } & {st\_blocks } & {Number of blocks allocated } +& {Always 0 } & {Number of blocks allocated } \\ + \hline +\multicolumn{1}{|c|}{11 } & {st\_atime } & {Last access time since epoch } +& {Last access time since epoch } & {Last access time -66 years } \\ + \hline +\multicolumn{1}{|c|}{12 } & {st\_mtime } & {Last modify time since epoch } +& {Last modify time since epoch } & {Last access time -66 years } \\ + \hline +\multicolumn{1}{|c|}{13 } & {st\_ctime } & {Inode change time since epoch +} & {File create time since epoch } & {File create time -66 years} +\\ \hline + +\end{longtable} + +\section{Old Deprecated Tape Format} +\index{Old Deprecated Tape Format} +\index{Format!Old Deprecated Tape} +\addcontentsline{toc}{subsection}{Old Deprecated Tape Format} + +The format of the Block Header (version 1.26 and earlier) is: + +\footnotesize +\begin{verbatim} + uint32_t CheckSum; /* Block check sum */ + uint32_t BlockSize; /* Block byte size including the header */ + uint32_t BlockNumber; /* Block number */ + char ID[4] = "BB01"; /* Identification and block level */ +\end{verbatim} +\normalsize + +The format of the Record Header (version 1.26 or earlier) is: + +\footnotesize +\begin{verbatim} + uint32_t VolSessionId; /* Unique ID for this session */ + uint32_t VolSessionTime; /* Start time/date of session */ + int32_t FileIndex; /* File index supplied by File daemon */ + int32_t Stream; /* Stream number supplied by File daemon */ + uint32_t DataSize; /* size of following data record in bytes */ +\end{verbatim} +\normalsize + +\footnotesize +\begin{verbatim} + Current Bacula Tape Format + 6 June 2001 + Version BB01 is the old deprecated format. 
+ A Bacula tape is composed of tape Blocks. Each block + has a Block header followed by the block data. Block + Data consists of Records. Records consist of Record + Headers followed by Record Data. + :=======================================================: + | | + | Block Header | + | (16 bytes version BB01) | + |-------------------------------------------------------| + | | + | Record Header | + | (20 bytes version BB01) | + |-------------------------------------------------------| + | | + | Record Data | + | | + |-------------------------------------------------------| + | | + | Record Header | + | (20 bytes version BB01) | + |-------------------------------------------------------| + | | + | ... | + Block Header: the first item in each block. The format is + shown below. + Partial Data block: occurs if the data from a previous + block spills over to this block (the normal case except + for the first block on a tape). However, this partial + data block is always preceded by a record header. + Record Header: identifies the Volume Session, the Stream + and the following Record Data size. See below for format. + Record data: arbitrary binary data. + Block Header Format BB01 (deprecated) + :=======================================================: + | CheckSum (uint32_t) | + |-------------------------------------------------------| + | BlockSize (uint32_t) | + |-------------------------------------------------------| + | BlockNumber (uint32_t) | + |-------------------------------------------------------| + | "BB01" (char [4]) | + :=======================================================: + BB01: Serves to identify the block as a + Bacula block and also serves as a block format identifier + should we ever need to change the format. + BlockSize: is the size in bytes of the block. When reading + back a block, if the BlockSize does not agree with the + actual size read, Bacula discards the block. + CheckSum: a checksum for the Block. 
+ BlockNumber: is the sequential block number on the tape. + VolSessionId: a unique sequential number that is assigned + by the Storage Daemon to a particular Job. + This number is sequential since the start + of execution of the daemon. + VolSessionTime: the time/date that the current execution + of the Storage Daemon started. It assures + that the combination of VolSessionId and + VolSessionTime is unique for all jobs + written to the tape, even if there was a + machine crash between two writes. + Record Header Format BB01 (deprecated) + :=======================================================: + | VolSessionId (uint32_t) | + |-------------------------------------------------------| + | VolSessionTime (uint32_t) | + |-------------------------------------------------------| + | FileIndex (int32_t) | + |-------------------------------------------------------| + | Stream (int32_t) | + |-------------------------------------------------------| + | DataSize (uint32_t) | + :=======================================================: + VolSessionId: a unique sequential number that is assigned + by the Storage Daemon to a particular Job. + This number is sequential since the start + of execution of the daemon. + VolSessionTime: the time/date that the current execution + of the Storage Daemon started. It assures + that the combination of VolSessionId and + VolSessionTime is unique for all jobs + written to the tape, even if there was a + machine crash between two writes. + FileIndex: a sequential file number within a job. The + Storage daemon enforces this index to be + greater than zero and sequential. Note, + however, that the File daemon may send + multiple Streams for the same FileIndex. + The Storage Daemon uses negative FileIndices + to identify Session Start and End labels + as well as the End of Volume labels. + Stream: defined by the File daemon and is intended to be + used to identify separate parts of the data + saved for each file (attributes, file data, + ...). 
The Storage Daemon has no idea of + what a Stream is or what it contains. + DataSize: the size in bytes of the binary data record + that follows the Session Record header. + The Storage Daemon has no idea of the + actual contents of the binary data record. + For standard Unix files, the data record + typically contains the file attributes or + the file data. For a sparse file + the first 64 bits of the data contains + the storage address for the data block. + Volume Label + :=======================================================: + | Id (32 bytes) | + |-------------------------------------------------------| + | VerNum (uint32_t) | + |-------------------------------------------------------| + | label_date (float64_t) | + |-------------------------------------------------------| + | label_time (float64_t) | + |-------------------------------------------------------| + | write_date (float64_t) | + |-------------------------------------------------------| + | write_time (float64_t) | + |-------------------------------------------------------| + | VolName (128 bytes) | + |-------------------------------------------------------| + | PrevVolName (128 bytes) | + |-------------------------------------------------------| + | PoolName (128 bytes) | + |-------------------------------------------------------| + | PoolType (128 bytes) | + |-------------------------------------------------------| + | MediaType (128 bytes) | + |-------------------------------------------------------| + | HostName (128 bytes) | + |-------------------------------------------------------| + | LabelProg (32 bytes) | + |-------------------------------------------------------| + | ProgVersion (32 bytes) | + |-------------------------------------------------------| + | ProgDate (32 bytes) | + |-------------------------------------------------------| + :=======================================================: + + Id: 32 byte Bacula identifier "Bacula 1.0 immortal\n" + (old version also recognized:) + 
Id: 32 byte Bacula identifier "Bacula 0.9 mortal\n" + LabelType (Saved in the FileIndex of the Header record). + PRE_LABEL -1 Volume label on unwritten tape + VOL_LABEL -2 Volume label after tape written + EOM_LABEL -3 Label at EOM (not currently implemented) + SOS_LABEL -4 Start of Session label (format given below) + EOS_LABEL -5 End of Session label (format given below) + label_date: Julian day tape labeled + label_time: Julian time tape labeled + write_date: Julian date tape first used (data written) + write_time: Julian time tape first used (data written) + VolName: "Physical" Volume name + PrevVolName: The VolName of the previous tape (if this tape is + a continuation of the previous one). + PoolName: Pool Name + PoolType: Pool Type + MediaType: Media Type + HostName: Name of host that is first writing the tape + LabelProg: Name of the program that labeled the tape + ProgVersion: Version of the label program + ProgDate: Date Label program built + Session Label + :=======================================================: + | Id (32 bytes) | + |-------------------------------------------------------| + | VerNum (uint32_t) | + |-------------------------------------------------------| + | JobId (uint32_t) | + |-------------------------------------------------------| + | *write_date (float64_t) VerNum 10 | + |-------------------------------------------------------| + | *write_time (float64_t) VerNum 10 | + |-------------------------------------------------------| + | PoolName (128 bytes) | + |-------------------------------------------------------| + | PoolType (128 bytes) | + |-------------------------------------------------------| + | JobName (128 bytes) | + |-------------------------------------------------------| + | ClientName (128 bytes) | + |-------------------------------------------------------| + | Job (128 bytes) | + |-------------------------------------------------------| + | FileSetName (128 bytes) | + 
|-------------------------------------------------------| + | JobType (uint32_t) | + |-------------------------------------------------------| + | JobLevel (uint32_t) | + |-------------------------------------------------------| + | FileSetMD5 (50 bytes) VerNum 11 | + |-------------------------------------------------------| + Additional fields in End Of Session Label + |-------------------------------------------------------| + | JobFiles (uint32_t) | + |-------------------------------------------------------| + | JobBytes (uint32_t) | + |-------------------------------------------------------| + | start_block (uint32_t) | + |-------------------------------------------------------| + | end_block (uint32_t) | + |-------------------------------------------------------| + | start_file (uint32_t) | + |-------------------------------------------------------| + | end_file (uint32_t) | + |-------------------------------------------------------| + | JobErrors (uint32_t) | + |-------------------------------------------------------| + | JobStatus (uint32_t) VerNum 11 | + :=======================================================: + * => fields deprecated + Id: 32 byte Bacula Identifier "Bacula 1.0 immortal\n" + LabelType (in FileIndex field of Header): + EOM_LABEL -3 Label at EOM + SOS_LABEL -4 Start of Session label + EOS_LABEL -5 End of Session label + VerNum: 11 + JobId: JobId + write_btime: Bacula time/date this tape record written + write_date: Julian date tape this record written - deprecated + write_time: Julian time tape this record written - deprecated. + PoolName: Pool Name + PoolType: Pool Type + MediaType: Media Type + ClientName: Name of File daemon or Client writing this session + Not used for EOM_LABEL. 
+\end{verbatim} +\normalsize diff --git a/docs/manuals/en/developers/mempool.tex b/docs/manuals/en/developers/mempool.tex new file mode 100644 index 00000000..a8130200 --- /dev/null +++ b/docs/manuals/en/developers/mempool.tex @@ -0,0 +1,234 @@ +%% +%% + +\chapter{Bacula Memory Management} +\label{_ChapterStart7} +\index{Management!Bacula Memory} +\index{Bacula Memory Management} +\addcontentsline{toc}{section}{Bacula Memory Management} + +\section{General} +\index{General} +\addcontentsline{toc}{subsection}{General} + +This document describes the memory management routines that are used in Bacula +and is meant to be a technical discussion for developers rather than part of +the user manual. + +Since Bacula may be called upon to handle filenames of varying and more or +less arbitrary length, special attention needs to be used in the code to +ensure that memory buffers are sufficiently large. There are four +possibilities for memory usage within {\bf Bacula}. Each will be described in +turn. They are: + +\begin{itemize} +\item Statically allocated memory. +\item Dynamically allocated memory using malloc() and free(). +\item Non-pooled memory. +\item Pooled memory. + \end{itemize} + +\subsection{Statically Allocated Memory} +\index{Statically Allocated Memory} +\index{Memory!Statically Allocated} +\addcontentsline{toc}{subsubsection}{Statically Allocated Memory} + +Statically allocated memory is of the form: + +\footnotesize +\begin{verbatim} +char buffer[MAXSTRING]; +\end{verbatim} +\normalsize + +The use of this kind of memory is discouraged except when you are 100\% sure +that the strings to be used will be of a fixed length. One example of where +this is appropriate is for {\bf Bacula} resource names, which are currently +limited to 127 characters (MAX\_NAME\_LENGTH). Although this maximum size may +change, particularly to accommodate Unicode, it will remain a relatively small +value. 
+ +\subsection{Dynamically Allocated Memory} +\index{Dynamically Allocated Memory} +\index{Memory!Dynamically Allocated} +\addcontentsline{toc}{subsubsection}{Dynamically Allocated Memory} + +Dynamically allocated memory is obtained using the standard malloc() routines. +As in: + +\footnotesize +\begin{verbatim} +char *buf; +buf = malloc(256); +\end{verbatim} +\normalsize + +This kind of memory can be released with: + +\footnotesize +\begin{verbatim} +free(buf); +\end{verbatim} +\normalsize + +It is recommended to use this kind of memory only when you are sure that you +know the memory size needed and the memory will be used for short periods of +time -- that is it would not be appropriate to use statically allocated +memory. An example might be to obtain a large memory buffer for reading and +writing files. When {\bf SmartAlloc} is enabled, the memory obtained by +malloc() will automatically be checked for buffer overwrite (overflow) during +the free() call, and all malloc'ed memory that is not released prior to +termination of the program will be reported as Orphaned memory. + +\subsection{Pooled and Non-pooled Memory} +\index{Memory!Pooled and Non-pooled} +\index{Pooled and Non-pooled Memory} +\addcontentsline{toc}{subsubsection}{Pooled and Non-pooled Memory} + +In order to facilitate the handling of arbitrary length filenames and to +efficiently handle a high volume of dynamic memory usage, we have implemented +routines between the C code and the malloc routines. The first is called +``Pooled'' memory, and is memory, which once allocated and then released, is +not returned to the system memory pool, but rather retained in a Bacula memory +pool. The next request to acquire pooled memory will return any free memory +block. In addition, each memory block has its current size associated with the +block allowing for easy checking if the buffer is of sufficient size. 
This +kind of memory would normally be used in high volume situations (lots of +malloc()s and free()s) where the buffer length may have to frequently change +to adapt to varying filename lengths. + +The non-pooled memory is handled by routines similar to those used for pooled +memory, allowing for easy size checking. However, non-pooled memory is +returned to the system rather than being saved in the Bacula pool. This kind +of memory would normally be used in low volume situations (few malloc()s and +free()s), but where the size of the buffer might have to be adjusted +frequently. + +\paragraph*{Types of Memory Pool:} + +Currently there are four memory pool types: + +\begin{itemize} +\item PM\_NOPOOL -- non-pooled memory. +\item PM\_FNAME -- a filename pool. +\item PM\_MESSAGE -- a message buffer pool. +\item PM\_EMSG -- error message buffer pool. + \end{itemize} + +\paragraph*{Getting Memory:} + +To get memory, one uses: + +\footnotesize +\begin{verbatim} +void *get_pool_memory(pool); +\end{verbatim} +\normalsize + +where {\bf pool} is one of the above mentioned pool names. The size of the +memory returned will be determined by the system to be most appropriate for +the application. + +If you wish non-pooled memory, you may alternatively call: + +\footnotesize +\begin{verbatim} +void *get_memory(size_t size); +\end{verbatim} +\normalsize + +The buffer length will be set to the size specified, and it will be assigned +to the PM\_NOPOOL pool (no pooling). + +\paragraph*{Releasing Memory:} + +To free memory acquired by either of the above two calls, use: + +\footnotesize +\begin{verbatim} +void free_pool_memory(void *buffer); +\end{verbatim} +\normalsize + +where buffer is the memory buffer returned when the memory was acquired. If +the memory was originally allocated as type PM\_NOPOOL, it will be released to +the system, otherwise, it will be placed on the appropriate Bacula memory pool +free chain to be used in a subsequent call for memory from that pool. 
+ +\paragraph*{Determining the Memory Size:} + +To determine the memory buffer size, use: + +\footnotesize +\begin{verbatim} +size_t sizeof_pool_memory(void *buffer); +\end{verbatim} +\normalsize + +\paragraph*{Resizing Pool Memory:} + +To resize pool memory, use: + +\footnotesize +\begin{verbatim} +void *realloc_pool_memory(void *buffer); +\end{verbatim} +\normalsize + +The buffer will be reallocated, and the contents of the original buffer will +be preserved, but the address of the buffer may change. + +\paragraph*{Automatic Size Adjustment:} + +To have the system check and if necessary adjust the size of your pooled +memory buffer, use: + +\footnotesize +\begin{verbatim} +void *check_pool_memory_size(void *buffer, size_t new-size); +\end{verbatim} +\normalsize + +where {\bf new-size} is the buffer length needed. Note, if the buffer is +already equal to or larger than {\bf new-size} no buffer size change will +occur. However, if a buffer size change is needed, the original contents of +the buffer will be preserved, but the buffer address may change. Many of the +low level Bacula subroutines expect to be passed a pool memory buffer and use +this call to ensure the buffer they use is sufficiently large. + +\paragraph*{Releasing All Pooled Memory:} + +In order to avoid orphaned buffer error messages when terminating the program, +use: + +\footnotesize +\begin{verbatim} +void close_memory_pool(); +\end{verbatim} +\normalsize + +to free all unused memory retained in the Bacula memory pool. Note, any memory +not returned to the pool via free\_pool\_memory() will not be released by this +call. 
+ +\paragraph*{Pooled Memory Statistics:} + +For debugging purposes and performance tuning, the following call will print +the current memory pool statistics: + +\footnotesize +\begin{verbatim} +void print_memory_pool_stats(); +\end{verbatim} +\normalsize + +an example output is: + +\footnotesize +\begin{verbatim} +Pool Maxsize Maxused Inuse + 0 256 0 0 + 1 256 1 0 + 2 256 1 0 +\end{verbatim} +\normalsize diff --git a/docs/manuals/en/developers/netprotocol.tex b/docs/manuals/en/developers/netprotocol.tex new file mode 100644 index 00000000..45c2a8ed --- /dev/null +++ b/docs/manuals/en/developers/netprotocol.tex @@ -0,0 +1,224 @@ +%% +%% + +\chapter{TCP/IP Network Protocol} +\label{_ChapterStart5} +\index{TCP/IP Network Protocol} +\index{Protocol!TCP/IP Network} +\addcontentsline{toc}{section}{TCP/IP Network Protocol} + +\section{General} +\index{General} +\addcontentsline{toc}{subsection}{General} + +This document describes the TCP/IP protocol used by Bacula to communicate +between the various daemons and services. The definitive definition of the +protocol can be found in src/lib/bsock.h, src/lib/bnet.c and +src/lib/bnet\_server.c. + +Bacula's network protocol is basically a ``packet oriented'' protocol built on +a standard TCP/IP streams. At the lowest level all packet transfers are done +with read() and write() requests on system sockets. Pipes are not used as they +are considered unreliable for large serial data transfers between various +hosts. + +Using the routines described below (bnet\_open, bnet\_write, bnet\_recv, and +bnet\_close) guarantees that the number of bytes you write into the socket +will be received as a single record on the other end regardless of how many +low level write() and read() calls are needed. All data transferred are +considered to be binary data. 
+ +\section{bnet and Threads} +\index{Threads!bnet and} +\index{Bnet and Threads} +\addcontentsline{toc}{subsection}{bnet and Threads} + +These bnet routines work fine in a threaded environment. However, they assume +that there is only one reader or writer on the socket at any time. It is +highly recommended that only a single thread access any BSOCK packet. The +exception to this rule is when the socket is first opened and it is waiting +for a job to start. The wait in the Storage daemon is done in one thread and +then passed to another thread for subsequent handling. + +If you envision having two threads using the same BSOCK, think twice, then you +must implement some locking mechanism. However, it probably would not be +appropriate to put locks inside the bnet subroutines for efficiency reasons. + +\section{bnet\_open} +\index{Bnet\_open} +\addcontentsline{toc}{subsection}{bnet\_open} + +To establish a connection to a server, use the subroutine: + +BSOCK *bnet\_open(void *jcr, char *host, char *service, int port, int *fatal) +bnet\_open(), if successful, returns the Bacula sock descriptor pointer to be +used in subsequent bnet\_send() and bnet\_read() requests. If not successful, +bnet\_open() returns a NULL. If fatal is set on return, it means that a fatal +error occurred and that you should not repeatedly call bnet\_open(). Any error +message will generally be sent to the JCR. + +\section{bnet\_send} +\index{Bnet\_send} +\addcontentsline{toc}{subsection}{bnet\_send} + +To send a packet, one uses the subroutine: + +int bnet\_send(BSOCK *sock) This routine is equivalent to a write() except +that it handles the low level details. The data to be sent is expected to be +in sock-\gt{}msg and be sock-\gt{}msglen bytes. To send a packet, bnet\_send() +first writes four bytes in network byte order that indicate the size of the +following data packet. 
It returns: + +\footnotesize +\begin{verbatim} + Returns 0 on failure + Returns 1 on success +\end{verbatim} +\normalsize + +In the case of a failure, an error message will be sent to the JCR contained +within the bsock packet. + +\section{bnet\_fsend} +\index{Bnet\_fsend} +\addcontentsline{toc}{subsection}{bnet\_fsend} + +This form uses: + +int bnet\_fsend(BSOCK *sock, char *format, ...) and it allows you to send +formatted messages somewhat like fprintf(). The return status is the same as +bnet\_send. + +\section{Additional Error information} +\index{Information!Additional Error} +\index{Additional Error information} +\addcontentsline{toc}{subsection}{Additional Error information} + +For additional error information, you can call {\bf is\_bnet\_error(BSOCK +*bsock)} which will return 0 if there is no error or non-zero if there is an +error on the last transmission. The {\bf is\_bnet\_stop(BSOCK *bsock)} +function will return 0 if there are no errors and you can continue sending. It +will return non-zero if there are errors or the line is closed (no more +transmissions should be sent). + +\section{bnet\_recv} +\index{Bnet\_recv} +\addcontentsline{toc}{subsection}{bnet\_recv} + +To read a packet, one uses the subroutine: + +int bnet\_recv(BSOCK *sock) This routine is similar to a read() except that it +handles the low level details. bnet\_read() first reads packet length that +follows as four bytes in network byte order. The data is read into +sock-\gt{}msg and is sock-\gt{}msglen bytes. If the sock-\gt{}msg is not large +enough, bnet\_recv() realloc() the buffer. It will return an error (-2) if +maxbytes is less than the record size sent. It returns: + +\footnotesize +\begin{verbatim} + * Returns number of bytes read + * Returns 0 on end of file + * Returns -1 on hard end of file (i.e. network connection close) + * Returns -2 on error +\end{verbatim} +\normalsize + +It should be noted that bnet\_recv() is a blocking read. 
+ +\section{bnet\_sig} +\index{Bnet\_sig} +\addcontentsline{toc}{subsection}{bnet\_sig} + +To send a ``signal'' from one daemon to another, one uses the subroutine: + +int bnet\_sig(BSOCK *sock, SIGNAL) where SIGNAL is one of the following: + +\begin{enumerate} +\item BNET\_EOF - deprecated, use BNET\_EOD +\item BNET\_EOD - End of data stream, new data may follow +\item BNET\_EOD\_POLL - End of data and poll all in one +\item BNET\_STATUS - Request full status +\item BNET\_TERMINATE - Conversation terminated, doing close() +\item BNET\_POLL - Poll request, I'm hanging on a read +\item BNET\_HEARTBEAT - Heartbeat Response requested +\item BNET\_HB\_RESPONSE - Only response permitted to HB +\item BNET\_PROMPT - Prompt for UA + \end{enumerate} + +\section{bnet\_strerror} +\index{Bnet\_strerror} +\addcontentsline{toc}{subsection}{bnet\_strerror} + +Returns a formatted string corresponding to the last error that occurred. + +\section{bnet\_close} +\index{Bnet\_close} +\addcontentsline{toc}{subsection}{bnet\_close} + +The connection with the server remains open until closed by the subroutine: + +void bnet\_close(BSOCK *sock) + +\section{Becoming a Server} +\index{Server!Becoming a} +\index{Becoming a Server} +\addcontentsline{toc}{subsection}{Becoming a Server} + +The bnet\_open() and bnet\_close() routines described above are used on the +client side to establish a connection and terminate a connection with the +server. To become a server (i.e. wait for a connection from a client), use the +routine {\bf bnet\_thread\_server}. The calling sequence is a bit complicated, +please refer to the code in bnet\_server.c and the code at the beginning of +each daemon as examples of how to call it. 
+ +\section{Higher Level Conventions} +\index{Conventions!Higher Level} +\index{Higher Level Conventions} +\addcontentsline{toc}{subsection}{Higher Level Conventions} + +Within Bacula, we have established the convention that any time a single +record is passed, it is sent with bnet\_send() and read with bnet\_recv(). +Thus the normal exchange between the server (S) and the client (C) are: + +\footnotesize +\begin{verbatim} +S: wait for connection C: attempt connection +S: accept connection C: bnet_send() send request +S: bnet_recv() wait for request +S: act on request +S: bnet_send() send ack C: bnet_recv() wait for ack +\end{verbatim} +\normalsize + +Thus a single command is sent, acted upon by the server, and then +acknowledged. + +In certain cases, such as the transfer of the data for a file, all the +information or data cannot be sent in a single packet. In this case, the +convention is that the client will send a command to the server, who knows +that more than one packet will be returned. In this case, the server will +enter a loop: + +\footnotesize +\begin{verbatim} +while ((n=bnet_recv(bsock)) > 0) { + act on request +} +if (n < 0) + error +\end{verbatim} +\normalsize + +The client will perform the following: + +\footnotesize +\begin{verbatim} +bnet_send(bsock); +bnet_send(bsock); +... +bnet_sig(bsock, BNET_EOD); +\end{verbatim} +\normalsize + +Thus the client will send multiple packets and signal to the server when all +the packets have been sent by sending a zero length record. 
diff --git a/docs/manuals/en/developers/platformsupport.tex b/docs/manuals/en/developers/platformsupport.tex new file mode 100644 index 00000000..a04e56f7 --- /dev/null +++ b/docs/manuals/en/developers/platformsupport.tex @@ -0,0 +1,107 @@ +%% +%% + +\chapter{Platform Support} +\label{_PlatformChapter} +\index{Support!Platform} +\index{Platform Support} +\addcontentsline{toc}{section}{Platform Support} + +\section{General} +\index{General } +\addcontentsline{toc}{subsection}{General} + +This chapter describes the requirements for having a +supported platform (Operating System). In general, Bacula is +quite portable. It supports 32 and 64 bit architectures as well +as bigendian and littleendian machines. For full +support, the platform (Operating System) must implement POSIX Unix +system calls. However, for File daemon support only, a small +compatibility library can be written to support almost any +architecture. + +Currently Linux, FreeBSD, and Solaris are fully supported +platforms, which means that the code has been tested on those +machines and passes a full set of regression tests. + +In addition, the Windows File daemon is supported on most versions +of Windows, and finally, there are a number of other platforms +where the File daemon (client) is known to run: NetBSD, OpenBSD, +Mac OSX, SGI, ... + +\section{Requirements to become a Supported Platform} +\index{Requirements!Platform} +\index{Platform Requirements} +\addcontentsline{toc}{subsection}{Platform Requirements} + +As mentioned above, in order to become a fully supported platform, it +must support POSIX Unix system calls. In addition, the following +requirements must be met: + +\begin{itemize} +\item The principal developer (currently Kern) must have + non-root ssh access to a test machine running the platform. +\item The ideal requirements and minimum requirements + for this machine are given below. 
+\item There must be a defined platform champion who is normally + a system administrator for the machine that is available. This + person need not be a developer/programmer but must be familiar + with system administration of the platform. +\item There must be at least one person designated who will + run regression tests prior to each release. Releases occur + approximately once every 6 months, but can be more frequent. + It takes at most a day's effort to setup the regression scripts + in the beginning, and after that, they can either be run daily + or on demand before a release. Running the regression scripts + involves only one or two command line commands and is fully + automated. +\item Ideally there are one or more persons who will package + each Bacula release. +\item Ideally there are one or more developers who can respond to + and fix platform specific bugs. +\end{itemize} + +Ideal requirements for a test machine: +\begin{itemize} +\item The principal developer will have non-root ssh access to + the test machine at all times. +\item The principal developer will have a root password. +\item The test machine will provide approximately 200 MB of + disk space for continual use. +\item The test machine will have approximately 500 MB of free + disk space for temporary use. +\item The test machine will run the most common version of the OS. +\item The test machine will have an autochanger of DDS-4 technology + or later having two or more tapes. +\item The test machine will have MySQL and/or PostgreSQL database + access for account "bacula" available. +\item The test machine will have sftp access. +\item The test machine will provide an smtp server. +\end{itemize} + +Minimum requirements for a test machine: +\begin{itemize} +\item The principal developer will have non-root ssh access to + the test machine when requested approximately once a month. +\item The principal developer will not have root access. 
+\item The test machine will provide approximately 80 MB of + disk space for continual use. +\item The test machine will have approximately 300 MB of free + disk space for temporary use. +\item The test machine will run the OS. +\item The test machine will have a tape drive of DDS-4 technology + or later that can be scheduled for access. +\item The test machine will not have MySQL and/or PostgreSQL database + access. +\item The test machine will have no sftp access. +\item The test machine will provide no email access. +\end{itemize} + +Bare bones test machine requirements: +\begin{itemize} +\item The test machine is available only to a designated + test person (your own machine). +\item The designated test person runs the regression + tests on demand. +\item The test machine has a tape drive available. +\end{itemize} diff --git a/docs/manuals/en/developers/porting.tex b/docs/manuals/en/developers/porting.tex new file mode 100644 index 00000000..278f0e5d --- /dev/null +++ b/docs/manuals/en/developers/porting.tex @@ -0,0 +1,173 @@ +%% +%% + +\chapter{Bacula Porting Notes} +\label{_ChapterStart1} +\index{Notes!Bacula Porting} +\index{Bacula Porting Notes} +\addcontentsline{toc}{section}{Bacula Porting Notes} + +This document is intended mostly for developers who wish to port Bacula to a +system that is not {\bf officially} supported. + +It is hoped that Bacula clients will eventually run on every imaginable system +that needs backing up (perhaps even a Palm). It is also hoped that the Bacula +Director and Storage daemons will run on every system capable of supporting +them. + +\section{Porting Requirements} +\index{Requirements!Porting} +\index{Porting Requirements} +\addcontentsline{toc}{section}{Porting Requirements} + +In General, the following holds true: + +\begin{itemize} +\item {\bf Bacula} has been compiled and run on Linux RedHat, FreeBSD, and + Solaris systems. +\item In addition, clients exist on Win32, and Irix +\item It requires GNU C++ to compile. 
You can try with other compilers, but + you are on your own. The Irix client is built with the Irix compiler, but, in + general, you will need GNU. +\item Your compiler must provide support for 64 bit signed and unsigned + integers. +\item You will need a recent copy of the {\bf autoconf} tools loaded on your + system (version 2.13 or later). The {\bf autoconf} tools are used to build + the configuration program, but are not part of the Bacula source +distribution. +\item There are certain third party packages that Bacula needs. Except for + MySQL, they can all be found in the {\bf depkgs} and {\bf depkgs1} releases. +\item To build the Win32 binaries, we use Microsoft VC++ standard + 2003. Please see the instructions in + bacula-source/src/win32/README.win32 for more details. If you + want to use VC++ Express, please see README.vc8. Our build is + done under the most recent version of Cygwin, but Cygwin is + not used in the Bacula binaries that are produced. + Unfortunately, we do not have the resources to help you build + your own version of the Win32 FD, so you are pretty much on + your own. You can ask the bacula-devel list for help, but + please don't expect much. +\item {\bf Bacula} requires a good implementation of pthreads to work. +\item The source code has been written with portability in mind and is mostly + POSIX compatible. Thus porting to any POSIX compatible operating system + should be relatively easy. +\end{itemize} + +\section{Steps to Take for Porting} +\index{Porting!Steps to Take for} +\index{Steps to Take for Porting} +\addcontentsline{toc}{section}{Steps to Take for Porting} + +\begin{itemize} +\item The first step is to ensure that you have version 2.13 or later of the + {\bf autoconf} tools loaded. You can skip this step, but making changes to + the configuration program will be difficult or impossible. +\item Then run a {\bf ./configure} command in the main source directory and + examine the output. 
It should look something like the following: + +\footnotesize +\begin{verbatim} +Configuration on Mon Oct 28 11:42:27 CET 2002: + Host: i686-pc-linux-gnu -- redhat 7.3 + Bacula version: 1.27 (26 October 2002) + Source code location: . + Install binaries: /sbin + Install config files: /etc/bacula + C Compiler: gcc + C++ Compiler: c++ + Compiler flags: -g -O2 + Linker flags: + Libraries: -lpthread + Statically Linked Tools: no + Database found: no + Database type: Internal + Database lib: + Job Output Email: root@localhost + Traceback Email: root@localhost + SMTP Host Address: localhost + Director Port 9101 + File daemon Port 9102 + Storage daemon Port 9103 + Working directory /etc/bacula/working + SQL binaries Directory + Large file support: yes + readline support: yes + cweb support: yes /home/kern/bacula/depkgs/cweb + TCP Wrappers support: no + ZLIB support: yes + enable-smartalloc: yes + enable-gnome: no + gmp support: yes +\end{verbatim} +\normalsize + +The details depend on your system. The first thing to check is that it +properly identified your host on the {\bf Host:} line. The first part (added +in version 1.27) is the GNU four part identification of your system. The part +after the -- is your system and the system version. Generally, if your system +is not yet supported, you must correct these. +\item If the {\bf ./configure} does not function properly, you must determine + the cause and fix it. Generally, it will be because some required system + routine is not available on your machine. +\item To correct problems with detection of your system type or with routines + and libraries, you must edit the file {\bf + \lt{}bacula-src\gt{}/autoconf/configure.in}. This is the ``source'' from +which {\bf configure} is built. In general, most of the changes for your +system will be made in {\bf autoconf/aclocal.m4} in the routine {\bf +BA\_CHECK\_OPSYS} or in the routine {\bf BA\_CHECK\_OPSYS\_DISTNAME}. 
I have +already added the necessary code for most systems, but if yours shows up as +{\bf unknown} you will need to make changes. Then as mentioned above, you +will need to set a number of system dependent items in {\bf configure.in} in +the {\bf case} statement at approximately line 1050 (depending on the Bacula +release). +\item The items to in the case statement that corresponds to your system are + the following: + +\begin{itemize} +\item DISTVER -- set to the version of your operating system. Typically some + form of {\bf uname} obtains it. +\item TAPEDRIVE -- the default tape drive. Not too important as the user can + set it as an option. +\item PSCMD -- set to the {\bf ps} command that will provide the PID in the + first field and the program name in the second field. If this is not set + properly, the {\bf bacula stop} script will most likely not be able to stop +Bacula in all cases. +\item hostname -- command to return the base host name (non-qualified) of + your system. This is generally the machine name. Not too important as the + user can correct this in his configuration file. +\item CFLAGS -- set any special compiler flags needed. Many systems need a + special flag to make pthreads work. See cygwin for an example. +\item LDFLAGS -- set any special loader flags. See cygwin for an example. +\item PTHREAD\_LIB -- set for any special pthreads flags needed during + linking. See freebsd as an example. +\item lld -- set so that a ``long long int'' will be properly edited in a + printf() call. +\item llu -- set so that a ``long long unsigned'' will be properly edited in + a printf() call. +\item PFILES -- set to add any files that you may define is your platform + subdirectory. These files are used for installation of automatic system + startup of Bacula daemons. +\end{itemize} + +\item To rebuild a new version of {\bf configure} from a changed {\bf + autoconf/configure.in} you enter {\bf make configure} in the top level Bacula + source directory. 
You must have done a ./configure prior to trying to rebuild + the configure script or it will get into an infinite loop. +\item If the {\bf make configure} gets into an infinite loop, ctl-c it, then + do {\bf ./configure} (no options are necessary) and retry the {\bf make + configure}, which should now work. +\item To rebuild {\bf configure} you will need to have {\bf autoconf} version + 2.57-3 or higher loaded. Older versions of autoconf will complain about + unknown or bad options, and won't work. +\item After you have a working {\bf configure} script, you may need to make a + few system dependent changes to the way Bacula works. Generally, these are + done in {\bf src/baconfig.h}. You can find a few examples of system dependent +changes toward the end of this file. For example, on Irix systems, there is +no definition for {\bf socklen\_t}, so it is made in this file. If your +system has structure alignment requirements, check the definition of BALIGN +in this file. Currently, all Bacula allocated memory is aligned on a {\bf +double} boundary. +\item If you are having problems with Bacula's type definitions, you might + look at {\bf src/bc\_types.h} where all the types such as {\bf uint32\_t}, + {\bf uint64\_t}, etc. that Bacula uses are defined. +\end{itemize} diff --git a/docs/manuals/en/developers/setup.sm b/docs/manuals/en/developers/setup.sm new file mode 100644 index 00000000..7c88dc61 --- /dev/null +++ b/docs/manuals/en/developers/setup.sm @@ -0,0 +1,23 @@ +/* + * html2latex + */ + +available { + sun4_sunos.4 + sun4_solaris.2 + rs_aix.3 + rs_aix.4 + sgi_irix +} + +description { + From Jeffrey Schaefer, Geometry Center. 
Translates HTML document to LaTeX +} + +install { + bin/html2latex /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex + bin/html2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.tag + bin/html2latex-local.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex-local.tag + bin/webtex2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/webtex2latex.tag + man/man1/html2latex.1 /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.1 +} diff --git a/docs/manuals/en/developers/smartall.tex b/docs/manuals/en/developers/smartall.tex new file mode 100644 index 00000000..41f66f08 --- /dev/null +++ b/docs/manuals/en/developers/smartall.tex @@ -0,0 +1,432 @@ +%% +%% + +\addcontentsline{lof}{figure}{Smart Memory Allocation with Orphaned Buffer +Detection} +\includegraphics{./smartall.eps} + +\chapter{Smart Memory Allocation} +\label{_ChapterStart4} +\index{Detection!Smart Memory Allocation With Orphaned Buffer } +\index{Smart Memory Allocation With Orphaned Buffer Detection } +\addcontentsline{toc}{section}{Smart Memory Allocation With Orphaned Buffer +Detection} + +Few things are as embarrassing as a program that leaks, yet few errors are so +easy to commit or as difficult to track down in a large, complicated program +as failure to release allocated memory. SMARTALLOC replaces the standard C +library memory allocation functions with versions which keep track of buffer +allocations and releases and report all orphaned buffers at the end of program +execution. By including this package in your program during development and +testing, you can identify code that loses buffers right when it's added and +most easily fixed, rather than as part of a crisis debugging push when the +problem is identified much later in the testing cycle (or even worse, when the +code is in the hands of a customer). 
When program testing is complete, simply
+recompiling with different flags removes SMARTALLOC from your program,
+permitting it to run without speed or storage penalties.
+
+In addition to detecting orphaned buffers, SMARTALLOC also helps to find other
+common problems in management of dynamic storage including storing before the
+start or beyond the end of an allocated buffer, referencing data through a
+pointer to a previously released buffer, attempting to release a buffer twice
+or releasing storage not obtained from the allocator, and assuming the initial
+contents of storage allocated by functions that do not guarantee a known
+value. SMARTALLOC's checking does not usually add a large amount of overhead
+to a program (except for programs which use {\tt realloc()} extensively; see
+below). SMARTALLOC focuses on proper storage management rather than internal
+consistency of the heap as checked by the malloc\_debug facility available on
+some systems. SMARTALLOC does not conflict with malloc\_debug and both may be
+used together, if you wish. SMARTALLOC makes no assumptions regarding the
+internal structure of the heap and thus should be compatible with any C
+language implementation of the standard memory allocation functions.
+
+\subsection{ Installing SMARTALLOC}
+\index{SMARTALLOC!Installing }
+\index{Installing SMARTALLOC }
+\addcontentsline{toc}{subsection}{Installing SMARTALLOC}
+
+SMARTALLOC is provided as a Zipped archive,
+\elink{smartall.zip}{http://www.fourmilab.ch/smartall/smartall.zip}; see the
+download instructions below.
+
+To install SMARTALLOC in your program, simply add the statement:
+
+{\tt \#include "smartall.h"}
+
+to every C program file which calls any of the memory allocation functions
+({\tt malloc}, {\tt calloc}, {\tt free}, etc.). SMARTALLOC must be used for
+all memory allocation within a program, so include it in any include file
+used for your entire program, if you have such a thing. Next, define the
+symbol SMARTALLOC in the compilation before the inclusion of smartall.h. 
I usually do this by having my +Makefile add the ``{\tt -DSMARTALLOC}'' option to the C compiler for +non-production builds. You can define the symbol manually, if you prefer, by +adding the statement: + +{\tt \#define SMARTALLOC} + +At the point where your program is all done and ready to relinquish control to +the operating system, add the call: + +{\tt \ \ \ \ \ \ \ \ sm\_dump(}{\it datadump}{\tt );} + +where {\it datadump} specifies whether the contents of orphaned buffers are to +be dumped in addition printing to their size and place of allocation. The data +are dumped only if {\it datadump} is nonzero, so most programs will normally +use ``{\tt sm\_dump(0);}''. If a mysterious orphaned buffer appears that can't +be identified from the information this prints about it, replace the statement +with ``{\tt sm\_dump(1)};''. Usually the dump of the buffer's data will +furnish the additional clues you need to excavate and extirpate the elusive +error that left the buffer allocated. + +Finally, add the files ``smartall.h'' and ``smartall.c'' from this release to +your source directory, make dependencies, and linker input. You needn't make +inclusion of smartall.c in your link optional; if compiled with SMARTALLOC not +defined it generates no code, so you may always include it knowing it will +waste no storage in production builds. Now when you run your program, if it +leaves any buffers around when it's done, each will be reported by {\tt +sm\_dump()} on stderr as follows: + +\footnotesize +\begin{verbatim} +Orphaned buffer: 120 bytes allocated at line 50 of gutshot.c +\end{verbatim} +\normalsize + +\subsection{ Squelching a SMARTALLOC} +\index{SMARTALLOC!Squelching a } +\index{Squelching a SMARTALLOC } +\addcontentsline{toc}{subsection}{Squelching a SMARTALLOC} + +Usually, when you first install SMARTALLOC in an existing program you'll find +it nattering about lots of orphaned buffers. 
Some of these turn out to be +legitimate errors, but some are storage allocated during program +initialisation that, while dynamically allocated, is logically static storage +not intended to be released. Of course, you can get rid of the complaints +about these buffers by adding code to release them, but by doing so you're +adding unnecessary complexity and code size to your program just to silence +the nattering of a SMARTALLOC, so an escape hatch is provided to eliminate the +need to release these buffers. + +Normally all storage allocated with the functions {\tt malloc()}, {\tt +calloc()}, and {\tt realloc()} is monitored by SMARTALLOC. If you make the +function call: + +\footnotesize +\begin{verbatim} + sm_static(1); +\end{verbatim} +\normalsize + +you declare that subsequent storage allocated by {\tt malloc()}, {\tt +calloc()}, and {\tt realloc()} should not be considered orphaned if found to +be allocated when {\tt sm\_dump()} is called. I use a call on ``{\tt +sm\_static(1);}'' before I allocate things like program configuration tables +so I don't have to add code to release them at end of program time. After +allocating unmonitored data this way, be sure to add a call to: + +\footnotesize +\begin{verbatim} + sm_static(0); +\end{verbatim} +\normalsize + +to resume normal monitoring of buffer allocations. Buffers allocated while +{\tt sm\_static(1}) is in effect are not checked for having been orphaned but +all the other safeguards provided by SMARTALLOC remain in effect. You may +release such buffers, if you like; but you don't have to. + +\subsection{ Living with Libraries} +\index{Libraries!Living with } +\index{Living with Libraries } +\addcontentsline{toc}{subsection}{Living with Libraries} + +Some library functions for which source code is unavailable may gratuitously +allocate and return buffers that contain their results, or require you to pass +them buffers which they subsequently release. 
If you have source code for the +library, by far the best approach is to simply install SMARTALLOC in it, +particularly since this kind of ill-structured dynamic storage management is +the source of so many storage leaks. Without source code, however, there's no +option but to provide a way to bypass SMARTALLOC for the buffers the library +allocates and/or releases with the standard system functions. + +For each function {\it xxx} redefined by SMARTALLOC, a corresponding routine +named ``{\tt actually}{\it xxx}'' is furnished which provides direct access to +the underlying system function, as follows: + +\begin{quote} + +\begin{longtable}{ll} +\multicolumn{1}{l }{\bf Standard function } & \multicolumn{1}{l }{\bf Direct +access function } \\ +{{\tt malloc(}{\it size}{\tt )} } & {{\tt actuallymalloc(}{\it size}{\tt )} +} \\ +{{\tt calloc(}{\it nelem}{\tt ,} {\it elsize}{\tt )} } & {{\tt +actuallycalloc(}{\it nelem}, {\it elsize}{\tt )} } \\ +{{\tt realloc(}{\it ptr}{\tt ,} {\it size}{\tt )} } & {{\tt +actuallyrealloc(}{\it ptr}, {\it size}{\tt )} } \\ +{{\tt free(}{\it ptr}{\tt )} } & {{\tt actuallyfree(}{\it ptr}{\tt )} } + +\end{longtable} + +\end{quote} + +For example, suppose there exists a system library function named ``{\tt +getimage()}'' which reads a raster image file and returns the address of a +buffer containing it. Since the library routine allocates the image directly +with {\tt malloc()}, you can't use SMARTALLOC's {\tt free()}, as that call +expects information placed in the buffer by SMARTALLOC's special version of +{\tt malloc()}, and hence would report an error. 
To release the buffer you +should call {\tt actuallyfree()}, as in this code fragment: + +\footnotesize +\begin{verbatim} + struct image *ibuf = getimage("ratpack.img"); + display_on_screen(ibuf); + actuallyfree(ibuf); +\end{verbatim} +\normalsize + +Conversely, suppose we are to call a library function, ``{\tt putimage()}'', +which writes an image buffer into a file and then releases the buffer with +{\tt free()}. Since the system {\tt free()} is being called, we can't pass a +buffer allocated by SMARTALLOC's allocation routines, as it contains special +information that the system {\tt free()} doesn't expect to be there. The +following code uses {\tt actuallymalloc()} to obtain the buffer passed to such +a routine. + +\footnotesize +\begin{verbatim} + struct image *obuf = + (struct image *) actuallymalloc(sizeof(struct image)); + dump_screen_to_image(obuf); + putimage("scrdump.img", obuf); /* putimage() releases obuf */ +\end{verbatim} +\normalsize + +It's unlikely you'll need any of the ``actually'' calls except under very odd +circumstances (in four products and three years, I've only needed them once), +but they're there for the rare occasions that demand them. Don't use them to +subvert the error checking of SMARTALLOC; if you want to disable orphaned +buffer detection, use the {\tt sm\_static(1)} mechanism described above. That +way you don't forfeit all the other advantages of SMARTALLOC as you do when +using {\tt actuallymalloc()} and {\tt actuallyfree()}. + +\subsection{ SMARTALLOC Details} +\index{SMARTALLOC Details } +\index{Details!SMARTALLOC } +\addcontentsline{toc}{subsection}{SMARTALLOC Details} + +When you include ``smartall.h'' and define SMARTALLOC, the following standard +system library functions are redefined with the \#define mechanism to call +corresponding functions within smartall.c instead. (For details of the +redefinitions, please refer to smartall.h.) 
+ +\footnotesize +\begin{verbatim} + void *malloc(size_t size) + void *calloc(size_t nelem, size_t elsize) + void *realloc(void *ptr, size_t size) + void free(void *ptr) + void cfree(void *ptr) +\end{verbatim} +\normalsize + +{\tt cfree()} is a historical artifact identical to {\tt free()}. + +In addition to allocating storage in the same way as the standard library +functions, the SMARTALLOC versions expand the buffers they allocate to include +information that identifies where each buffer was allocated and to chain all +allocated buffers together. When a buffer is released, it is removed from the +allocated buffer chain. A call on {\tt sm\_dump()} is able, by scanning the +chain of allocated buffers, to find all orphaned buffers. Buffers allocated +while {\tt sm\_static(1)} is in effect are specially flagged so that, despite +appearing on the allocated buffer chain, {\tt sm\_dump()} will not deem them +orphans. + +When a buffer is allocated by {\tt malloc()} or expanded with {\tt realloc()}, +all bytes of newly allocated storage are set to the hexadecimal value 0x55 +(alternating one and zero bits). Note that for {\tt realloc()} this applies +only to the bytes added at the end of buffer; the original contents of the +buffer are not modified. Initializing allocated storage to a distinctive +nonzero pattern is intended to catch code that erroneously assumes newly +allocated buffers are cleared to zero; in fact their contents are random. The +{\tt calloc()} function, defined as returning a buffer cleared to zero, +continues to zero its buffers under SMARTALLOC. + +Buffers obtained with the SMARTALLOC functions contain a special sentinel byte +at the end of the user data area. This byte is set to a special key value +based upon the buffer's memory address. When the buffer is released, the key +is tested and if it has been overwritten an assertion in the {\tt free} +function will fail. 
This catches incorrect program code that stores beyond the
+storage allocated for the buffer. At {\tt free()} time the queue links are
+also validated and an assertion failure will occur if the program has
+destroyed them by storing before the start of the allocated storage.
+
+In addition, when a buffer is released with {\tt free()}, its contents are
+immediately destroyed by overwriting them with the hexadecimal pattern 0xAA
+(alternating bits, the one's complement of the initial value pattern). This
+will usually trip up code that keeps a pointer to a buffer that's been freed
+and later attempts to reference data within the released buffer. Incredibly,
+this is {\it legal} in the standard Unix memory allocation package, which
+permits programs to free() buffers, then raise them from the grave with {\tt
+realloc()}. Such program ``logic'' should be fixed, not accommodated, and
+SMARTALLOC brooks no such ``Lazarus buffer'' nonsense.
+
+Some C libraries allow a zero size argument in calls to {\tt malloc()}. Since
+this is far more likely to indicate a program error than a defensible
+programming stratagem, SMARTALLOC disallows it with an assertion.
+
+When the standard library {\tt realloc()} function is called to expand a
+buffer, it attempts to expand the buffer in place if possible, moving it only
+if necessary. Because SMARTALLOC must place its own private storage in the
+buffer and also to aid in error detection, its version of {\tt realloc()}
+always moves and copies the buffer except in the trivial case where the size
+of the buffer is not being changed. By forcing the buffer to move on every
+call and destroying the contents of the old buffer when it is released,
+SMARTALLOC traps programs which keep pointers into a buffer across a call on
+{\tt realloc()} which may move it. This strategy may prove very costly to
+programs which make extensive use of {\tt realloc()}. 
If this proves to be a +problem, such programs may wish to use {\tt actuallymalloc()}, {\tt +actuallyrealloc()}, and {\tt actuallyfree()} for such frequently-adjusted +buffers, trading error detection for performance. Although not specified in +the System V Interface Definition, many C library implementations of {\tt +realloc()} permit an old buffer argument of NULL, causing {\tt realloc()} to +allocate a new buffer. The SMARTALLOC version permits this. + +\subsection{ When SMARTALLOC is Disabled} +\index{When SMARTALLOC is Disabled } +\index{Disabled!When SMARTALLOC is } +\addcontentsline{toc}{subsection}{When SMARTALLOC is Disabled} + +When SMARTALLOC is disabled by compiling a program with the symbol SMARTALLOC +not defined, calls on the functions otherwise redefined by SMARTALLOC go +directly to the system functions. In addition, compile-time definitions +translate calls on the ''{\tt actually}...{\tt ()}`` functions into the +corresponding library calls; ''{\tt actuallymalloc(100)}``, for example, +compiles into ''{\tt malloc(100)}``. The two special SMARTALLOC functions, +{\tt sm\_dump()} and {\tt sm\_static()}, are defined to generate no code +(hence the null statement). Finally, if SMARTALLOC is not defined, compilation +of the file smartall.c generates no code or data at all, effectively removing +it from the program even if named in the link instructions. + +Thus, except for unusual circumstances, a program that works with SMARTALLOC +defined for testing should require no changes when built without it for +production release. + +\subsection{ The {\tt alloc()} Function} +\index{Function!alloc } +\index{Alloc() Function } +\addcontentsline{toc}{subsection}{alloc() Function} + +Many programs I've worked on use very few direct calls to {\tt malloc()}, +using the identically declared {\tt alloc()} function instead. 
Alloc detects +out-of-memory conditions and aborts, removing the need for error checking on +every call of {\tt malloc()} (and the temptation to skip checking for +out-of-memory). + +As a convenience, SMARTALLOC supplies a compatible version of {\tt alloc()} in +the file alloc.c, with its definition in the file alloc.h. This version of +{\tt alloc()} is sensitive to the definition of SMARTALLOC and cooperates with +SMARTALLOC's orphaned buffer detection. In addition, when SMARTALLOC is +defined and {\tt alloc()} detects an out of memory condition, it takes +advantage of the SMARTALLOC diagnostic information to identify the file and +line number of the call on {\tt alloc()} that failed. + +\subsection{ Overlays and Underhandedness} +\index{Underhandedness!Overlays and } +\index{Overlays and Underhandedness } +\addcontentsline{toc}{subsection}{Overlays and Underhandedness} + +String constants in the C language are considered to be static arrays of +characters accessed through a pointer constant. The arrays are potentially +writable even though their pointer is a constant. SMARTALLOC uses the +compile-time definition {\tt ./smartall.wml} to obtain the name of the file in +which a call on buffer allocation was performed. Rather than reserve space in +a buffer to save this information, SMARTALLOC simply stores the pointer to the +compiled-in text of the file name. This works fine as long as the program does +not overlay its data among modules. If data are overlayed, the area of memory +which contained the file name at the time it was saved in the buffer may +contain something else entirely when {\tt sm\_dump()} gets around to using the +pointer to edit the file name which allocated the buffer. 
+ +If you want to use SMARTALLOC in a program with overlayed data, you'll have to +modify smartall.c to either copy the file name to a fixed-length field added +to the {\tt abufhead} structure, or else allocate storage with {\tt malloc()}, +copy the file name there, and set the {\tt abfname} pointer to that buffer, +then remember to release the buffer in {\tt sm\_free}. Either of these +approaches are wasteful of storage and time, and should be considered only if +there is no alternative. Since most initial debugging is done in non-overlayed +environments, the restrictions on SMARTALLOC with data overlaying may never +prove a problem. Note that conventional overlaying of code, by far the most +common form of overlaying, poses no problems for SMARTALLOC; you need only be +concerned if you're using exotic tools for data overlaying on MS-DOS or other +address-space-challenged systems. + +Since a C language ''constant`` string can actually be written into, most C +compilers generate a unique copy of each string used in a module, even if the +same constant string appears many times. In modules that contain many calls on +allocation functions, this results in substantial wasted storage for the +strings that identify the file name. If your compiler permits optimization of +multiple occurrences of constant strings, enabling this mode will eliminate +the overhead for these strings. Of course, it's up to you to make sure +choosing this compiler mode won't wreak havoc on some other part of your +program. + +\subsection{ Test and Demonstration Program} +\index{Test and Demonstration Program } +\index{Program!Test and Demonstration } +\addcontentsline{toc}{subsection}{Test and Demonstration Program} + +A test and demonstration program, smtest.c, is supplied with SMARTALLOC. You +can build this program with the Makefile included. Please refer to the +comments in smtest.c and the Makefile for information on this program. 
If
+you're attempting to use SMARTALLOC on a new machine or with a new compiler or
+operating system, it's a wise first step to check it out with smtest first.
+
+\subsection{ Invitation to the Hack}
+\index{Hack!Invitation to the }
+\index{Invitation to the Hack }
+\addcontentsline{toc}{subsection}{Invitation to the Hack}
+
+SMARTALLOC is not intended to be a panacea for storage management problems,
+nor is it universally applicable or effective; it's another weapon in the
+arsenal of the defensive professional programmer attempting to create reliable
+products. It represents the current state of evolution of expedient debug code
+which has been used in several commercial software products which have,
+collectively, sold more than a third of a million copies in the retail market,
+and can be expected to continue to develop through time as it is applied to
+ever more demanding projects.
+
+The version of SMARTALLOC here has been tested on a Sun SPARCStation, Silicon
+Graphics Indigo2, and on MS-DOS using both Borland and Microsoft C. Moving
+from compiler to compiler requires the usual small changes to resolve disputes
+about prototyping of functions, whether the type returned by buffer allocation
+is {\tt char\ *} or {\tt void\ *}, and so forth, but following those changes
+it works in a variety of environments. I hope you'll find SMARTALLOC as useful
+for your projects as I've found it in mine.
+
+\section{ 
+\elink{}{http://www.fourmilab.ch/smartall/smartall.zip}
+\elink{Download smartall.zip}{http://www.fourmilab.ch/smartall/smartall.zip}
+(Zipped archive)}
+\index{Archive! Download smartall.zip Zipped }
+\index{ Download smartall.zip (Zipped archive) }
+\addcontentsline{toc}{section}{ Download smartall.zip (Zipped archive)}
+
+SMARTALLOC is provided as
+\elink{smartall.zip}{http://www.fourmilab.ch/smartall/smartall.zip}, a
+\elink{Zipped}{http://www.pkware.com/} archive containing source code,
+documentation, and a {\tt Makefile} to build the software under Unix. 
+ +\subsection{ Copying} +\index{Copying } +\addcontentsline{toc}{subsection}{Copying} + +\begin{quote} +SMARTALLOC is in the public domain. Permission to use, copy, modify, and +distribute this software and its documentation for any purpose and without fee +is hereby granted, without any conditions or restrictions. This software is +provided ''as is`` without express or implied warranty. +\end{quote} + +{\it +\elink{by John Walker}{http://www.fourmilab.ch} +October 30th, 1998 } diff --git a/docs/manuals/en/developers/storage.tex b/docs/manuals/en/developers/storage.tex new file mode 100644 index 00000000..e46f228c --- /dev/null +++ b/docs/manuals/en/developers/storage.tex @@ -0,0 +1,258 @@ +%% +%% + +\chapter{Storage Daemon Design} +\label{_ChapterStart3} +\index{Storage Daemon Design } +\index{Design!Storage Daemon } +\addcontentsline{toc}{section}{Storage Daemon Design} + +This chapter is intended to be a technical discussion of the Storage daemon +services and as such is not targeted at end users but rather at developers and +system administrators that want or need to know more of the working details of +{\bf Bacula}. + +This document is somewhat out of date. + +\section{SD Design Introduction} +\index{Introduction!SD Design } +\index{SD Design Introduction } +\addcontentsline{toc}{section}{SD Design Introduction} + +The Bacula Storage daemon provides storage resources to a Bacula installation. +An individual Storage daemon is associated with a physical permanent storage +device (for example, a tape drive, CD writer, tape changer or jukebox, etc.), +and may employ auxiliary storage resources (such as space on a hard disk file +system) to increase performance and/or optimize use of the permanent storage +medium. + +Any number of storage daemons may be run on a given machine; each associated +with an individual storage device connected to it, and BACULA operations may +employ storage daemons on any number of hosts connected by a network, local or +remote. 
The ability to employ remote storage daemons (with appropriate +security measures) permits automatic off-site backup, possibly to publicly +available backup repositories. + +\section{SD Development Outline} +\index{Outline!SD Development } +\index{SD Development Outline } +\addcontentsline{toc}{section}{SD Development Outline} + +In order to provide a high performance backup and restore solution that scales +to very large capacity devices and networks, the storage daemon must be able +to extract as much performance from the storage device and network with which +it interacts. In order to accomplish this, storage daemons will eventually +have to sacrifice simplicity and painless portability in favor of techniques +which improve performance. My goal in designing the storage daemon protocol +and developing the initial prototype storage daemon is to provide for these +additions in the future, while implementing an initial storage daemon which is +very simple and portable to almost any POSIX-like environment. This original +storage daemon (and its evolved descendants) can serve as a portable solution +for non-demanding backup requirements (such as single servers of modest size, +individual machines, or small local networks), while serving as the starting +point for development of higher performance configurable derivatives which use +techniques such as POSIX threads, shared memory, asynchronous I/O, buffering +to high-speed intermediate media, and support for tape changers and jukeboxes. + + +\section{SD Connections and Sessions} +\index{Sessions!SD Connections and } +\index{SD Connections and Sessions } +\addcontentsline{toc}{section}{SD Connections and Sessions} + +A client connects to a storage server by initiating a conventional TCP +connection. The storage server accepts the connection unless its maximum +number of connections has been reached or the specified host is not granted +access to the storage server. 
Once a connection has been opened, the client +may make any number of Query requests, and/or initiate (if permitted), one or +more Append sessions (which transmit data to be stored by the storage daemon) +and/or Read sessions (which retrieve data from the storage daemon). + +Most requests and replies sent across the connection are simple ASCII strings, +with status replies prefixed by a four digit status code for easier parsing. +Binary data appear in blocks stored and retrieved from the storage. Any +request may result in a single-line status reply of ``{\tt 3201\ Notification\ +pending}'', which indicates the client must send a ``Query notification'' +request to retrieve one or more notifications posted to it. Once the +notifications have been returned, the client may then resubmit the request +which resulted in the 3201 status. + +The following descriptions omit common error codes, yet to be defined, which +can occur from most or many requests due to events like media errors, +restarting of the storage daemon, etc. These details will be filled in, along +with a comprehensive list of status codes along with which requests can +produce them in an update to this document. + +\subsection{SD Append Requests} +\index{Requests!SD Append } +\index{SD Append Requests } +\addcontentsline{toc}{subsection}{SD Append Requests} + +\begin{description} + +\item [{append open session = \lt{}JobId\gt{} [ \lt{}Password\gt{} ] }] + \index{SPAN class } + A data append session is opened with the Job ID given by {\it JobId} with +client password (if required) given by {\it Password}. If the session is +successfully opened, a status of {\tt 3000\ OK} is returned with a ``{\tt +ticket\ =\ }{\it number}'' reply used to identify subsequent messages in the +session. If too many sessions are open, or a conflicting session (for +example, a read in progress when simultaneous read and append sessions are +not permitted), a status of ``{\tt 3502\ Volume\ busy}'' is returned. 
If no +volume is mounted, or the volume mounted cannot be appended to, a status of +``{\tt 3503\ Volume\ not\ mounted}'' is returned. + +\item [append data = \lt{}ticket-number\gt{} ] + \index{SPAN class } + If the append data is accepted, a status of {\tt 3000\ OK data address = +\lt{}IPaddress\gt{} port = \lt{}port\gt{}} is returned, where the {\tt +IPaddress} and {\tt port} specify the IP address and port number of the data +channel. Error status codes are {\tt 3504\ Invalid\ ticket\ number} and {\tt +3505\ Session\ aborted}, the latter of which indicates the entire append +session has failed due to a daemon or media error. + +Once the File daemon has established the connection to the data channel +opened by the Storage daemon, it will transfer a header packet followed by +any number of data packets. The header packet is of the form: + +{\tt \lt{}file-index\gt{} \lt{}stream-id\gt{} \lt{}info\gt{}} + +The details are specified in the +\ilink{Daemon Protocol}{_ChapterStart2} section of this +document. + +\item [*append abort session = \lt{}ticket-number\gt{} ] + \index{SPAN class } + The open append session with ticket {\it ticket-number} is aborted; any blocks +not yet written to permanent media are discarded. Subsequent attempts to +append data to the session will receive an error status of {\tt 3505\ +Session\ aborted}. + +\item [append end session = \lt{}ticket-number\gt{} ] + \index{SPAN class } + The open append session with ticket {\it ticket-number} is marked complete; no +further blocks may be appended. The storage daemon will give priority to +saving any buffered blocks from this session to permanent media as soon as +possible. + +\item [append close session = \lt{}ticket-number\gt{} ] + \index{SPAN class } + The append session with ticket {\it ticket} is closed. 
This message does not +receive an {\tt 3000\ OK} reply until all of the content of the session are +stored on permanent media, at which time said reply is given, followed by a +list of volumes, from first to last, which contain blocks from the session, +along with the first and last file and block on each containing session data +and the volume session key identifying data from that session in lines with +the following format: + +{\tt {\tt Volume = }\lt{}Volume-id\gt{} \lt{}start-file\gt{} +\lt{}start-block\gt{} \lt{}end-file\gt{} \lt{}end-block\gt{} +\lt{}volume-session-id\gt{}}where {\it Volume-id} is the volume label, {\it +start-file} and {\it start-block} are the file and block containing the first +data from that session on the volume, {\it end-file} and {\it end-block} are +the file and block with the last data from the session on the volume and {\it +volume-session-id} is the volume session ID for blocks from the session +stored on that volume. +\end{description} + +\subsection{SD Read Requests} +\index{SD Read Requests } +\index{Requests!SD Read } +\addcontentsline{toc}{subsection}{SD Read Requests} + +\begin{description} + +\item [Read open session = \lt{}JobId\gt{} \lt{}Volume-id\gt{} + \lt{}start-file\gt{} \lt{}start-block\gt{} \lt{}end-file\gt{} + \lt{}end-block\gt{} \lt{}volume-session-id\gt{} \lt{}password\gt{} ] +\index{SPAN class } +where {\it Volume-id} is the volume label, {\it start-file} and {\it +start-block} are the file and block containing the first data from that +session on the volume, {\it end-file} and {\it end-block} are the file and +block with the last data from the session on the volume and {\it +volume-session-id} is the volume session ID for blocks from the session +stored on that volume. + +If the session is successfully opened, a status of + +{\tt {\tt 3100\ OK Ticket\ =\ }{\it number}``} + +is returned with a reply used to identify subsequent messages in the session. 
+If too many sessions are open, or a conflicting session (for example, an +append in progress when simultaneous read and append sessions are not +permitted), a status of ''{\tt 3502\ Volume\ busy}`` is returned. If no +volume is mounted, or the volume mounted cannot be appended to, a status of +''{\tt 3503\ Volume\ not\ mounted}`` is returned. If no block with the given +volume session ID and the correct client ID number appears in the given first +file and block for the volume, a status of ''{\tt 3505\ Session\ not\ +found}`` is returned. + +\item [Read data = \lt{}Ticket\gt{} \gt{} \lt{}Block\gt{} ] + \index{SPAN class } + The specified Block of data from open read session with the specified Ticket +number is returned, with a status of {\tt 3000\ OK} followed by a ''{\tt +Length\ =\ }{\it size}`` line giving the length in bytes of the block data +which immediately follows. Blocks must be retrieved in ascending order, but +blocks may be skipped. If a block number greater than the largest stored on +the volume is requested, a status of ''{\tt 3201\ End\ of\ volume}`` is +returned. If a block number greater than the largest in the file is +requested, a status of ''{\tt 3401\ End\ of\ file}`` is returned. + +\item [Read close session = \lt{}Ticket\gt{} ] + \index{SPAN class } + The read session with Ticket number is closed. A read session may be closed +at any time; you needn't read all its blocks before closing it. +\end{description} + +{\it by +\elink{John Walker}{http://www.fourmilab.ch/} +January 30th, MM } + +\section{SD Data Structures} +\index{SD Data Structures} +\addcontentsline{toc}{section}{SD Data Structures} + +In the Storage daemon, there is a Device resource (i.e. from conf file) +that describes each physical device. When the physical device is used it +is controled by the DEVICE structure (defined in dev.h), and typically +refered to as dev in the C++ code. 
Anyone writing or reading a physical +device must ultimately get a lock on the DEVICE structure -- this controls +the device. However, multiple Jobs (defined by a JCR structure src/jcr.h) +can be writing a physical DEVICE at the same time (of course they are +sequenced by locking the DEVICE structure). There are a lot of job +dependent "device" variables that may be different for each Job such as +spooling (one job may spool and another may not, and when a job is +spooling, it must have an i/o packet open, each job has its own record and +block structures, ...), so there is a device control record or DCR that is +the primary way of interfacing to the physical device. The DCR contains +all the job specific data as well as a pointer to the Device resource +(DEVRES structure) and the physical DEVICE structure. + +Now if a job is writing to two devices (it could be writing two separate +streams to the same device), it must have two DCRs. Today, the code only +permits one. This won't be hard to change, but it is new code. 
+ +Today three jobs (threads), two physical devices each job + writes to only one device: + +\begin{verbatim} + Job1 -> DCR1 -> DEVICE1 + Job2 -> DCR2 -> DEVICE1 + Job3 -> DCR3 -> DEVICE2 +\end{verbatim} + +To be implemented three jobs, three physical devices, but + job1 is writing simultaneously to three devices: + +\begin{verbatim} + Job1 -> DCR1 -> DEVICE1 + -> DCR4 -> DEVICE2 + -> DCR5 -> DEVICE3 + Job2 -> DCR2 -> DEVICE1 + Job3 -> DCR3 -> DEVICE2 + + Job = job control record + DCR = Job control data for a specific device + DEVICE = Device only control data +\end{verbatim} + diff --git a/docs/manuals/en/developers/tls-techdoc.tex b/docs/manuals/en/developers/tls-techdoc.tex new file mode 100644 index 00000000..565869f1 --- /dev/null +++ b/docs/manuals/en/developers/tls-techdoc.tex @@ -0,0 +1,391 @@ +%% +%% + +%\author{Landon Fuller} +%\title{Bacula TLS Additions} + +\chapter{TLS} +\label{_Chapter_TLS} +\index{TLS} + +Written by Landon Fuller + +\section{Introduction to TLS} +\index{TLS Introduction} +\index{Introduction!TLS} +\addcontentsline{toc}{section}{TLS Introduction} + +This patch includes all the back-end code necessary to add complete TLS +data encryption support to Bacula. In addition, support for TLS in +Console/Director communications has been added as a proof of concept. +Adding support for the remaining daemons will be straight-forward. +Supported features of this patchset include: + +\begin{itemize} +\item Client/Server TLS Requirement Negotiation +\item TLSv1 Connections with Server and Client Certificate +Validation +\item Forward Secrecy Support via Diffie-Hellman Ephemeral Keying +\end{itemize} + +This document will refer to both ``server'' and ``client'' contexts. These +terms refer to the accepting and initiating peer, respectively. + +Diffie-Hellman anonymous ciphers are not supported by this patchset. The +use of DH anonymous ciphers increases the code complexity and places +explicit trust upon the two-way Cram-MD5 implementation.
Cram-MD5 is +subject to known plaintext attacks, and is should be considered +considerably less secure than PKI certificate-based authentication. + +Appropriate autoconf macros have been added to detect and use OpenSSL. Two +additional preprocessor defines have been added: \emph{HAVE\_TLS} and +\emph{HAVE\_OPENSSL}. All changes not specific to OpenSSL rely on +\emph{HAVE\_TLS}. OpenSSL-specific code is constrained to +\emph{src/lib/tls.c} to facilitate the support of alternative TLS +implementations. + +\section{New Configuration Directives} +\index{TLS Configuration Directives} +\index{Directives!TLS Configuration} +\addcontentsline{toc}{section}{New Configuration Directives} + +Additional configuration directives have been added to both the Console and +Director resources. These new directives are defined as follows: + +\begin{itemize} +\item \underline{TLS Enable} \emph{(yes/no)} +Enable TLS support. + +\item \underline{TLS Require} \emph{(yes/no)} +Require TLS connections. + +\item \underline{TLS Certificate} \emph{(path)} +Path to PEM encoded TLS certificate. Used as either a client or server +certificate. + +\item \underline{TLS Key} \emph{(path)} +Path to PEM encoded TLS private key. Must correspond with the TLS +certificate. + +\item \underline{TLS Verify Peer} \emph{(yes/no)} +Verify peer certificate. Instructs server to request and verify the +client's x509 certificate. Any client certificate signed by a known-CA +will be accepted unless the TLS Allowed CN configuration directive is used. +Not valid in a client context. + +\item \underline{TLS Allowed CN} \emph{(string list)} +Common name attribute of allowed peer certificates. If directive is +specified, all client certificates will be verified against this list. +This directive may be specified more than once. Not valid in a client +context. + +\item \underline{TLS CA Certificate File} \emph{(path)} +Path to PEM encoded TLS CA certificate(s). Multiple certificates are +permitted in the file. 
One of \emph{TLS CA Certificate File} or \emph{TLS +CA Certificate Dir} are required in a server context if \underline{TLS +Verify Peer} is also specified, and are always required in a client +context. + +\item \underline{TLS CA Certificate Dir} \emph{(path)} +Path to TLS CA certificate directory. In the current implementation, +certificates must be stored PEM encoded with OpenSSL-compatible hashes. +One of \emph{TLS CA Certificate File} or \emph{TLS CA Certificate Dir} are +required in a server context if \emph{TLS Verify Peer} is also specified, +and are always required in a client context. + +\item \underline{TLS DH File} \emph{(path)} +Path to PEM encoded Diffie-Hellman parameter file. If this directive is +specified, DH ephemeral keying will be enabled, allowing for forward +secrecy of communications. This directive is only valid within a server +context. To generate the parameter file, you may use openssl: +\footnotesize +\begin{verbatim} +openssl dhparam -out dh1024.pem -5 1024 +\end{verbatim} +\normalsize +\end{itemize} + +\section{TLS API Implementation} +\index{TLS API Implimentation} +\index{API Implimentation!TLS} +\addcontentsline{toc}{section}{TLS API Implementation} + +To facilitate the use of additional TLS libraries, all OpenSSL-specific +code has been implemented within \emph{src/lib/tls.c}. In turn, a generic +TLS API is exported. + +\subsection{Library Initialization and Cleanup} +\index{Library Initialization and Cleanup} +\index{Initialization and Cleanup!Library} +\addcontentsline{toc}{subsection}{Library Initialization and Cleanup} + +\footnotesize +\begin{verbatim} +int init_tls (void); +\end{verbatim} +\normalsize + +Performs TLS library initialization, including seeding of the PRNG. PRNG +seeding has not yet been implemented for win32. + +\footnotesize +\begin{verbatim} +int cleanup_tls (void); +\end{verbatim} +\normalsize + +Performs TLS library cleanup. 
+ +\subsection{Manipulating TLS Contexts} +\index{TLS Context Manipulation} +\index{Contexts!Manipulating TLS} +\addcontentsline{toc}{subsection}{Manipulating TLS Contexts} + +\footnotesize +\begin{verbatim} +TLS_CONTEXT *new_tls_context (const char *ca_certfile, + const char *ca_certdir, const char *certfile, + const char *keyfile, const char *dhfile, bool verify_peer); +\end{verbatim} +\normalsize + +Allocates and initializes a new opaque \emph{TLS\_CONTEXT} structure. The +\emph{TLS\_CONTEXT} structure maintains default TLS settings from which +\emph{TLS\_CONNECTION} structures are instantiated. In the future the +\emph{TLS\_CONTEXT} structure may be used to maintain the TLS session +cache. \emph{ca\_certfile} and \emph{ca\_certdir} arguments are used to +initialize the CA verification stores. The \emph{certfile} and +\emph{keyfile} arguments are used to initialize the local certificate and +private key. If \emph{dhfile} is non-NULL, it is used to initialize +Diffie-Hellman ephemeral keying. If \emph{verify\_peer} is \emph{true}, +client certificate validation is enabled. + +\footnotesize +\begin{verbatim} +void free_tls_context (TLS_CONTEXT *ctx); +\end{verbatim} +\normalsize + +Deallocates a previously allocated \emph{TLS\_CONTEXT} structure. + +\subsection{Performing Post-Connection Verification} +\index{TLS Post-Connection Verification} +\index{Verification!TLS Post-Connection} +\addcontentsline{toc}{subsection}{Performing Post-Connection Verification} + +\footnotesize +\begin{verbatim} +bool tls_postconnect_verify_host (TLS_CONNECTION *tls, const char *host); +\end{verbatim} +\normalsize + +Performs post-connection verification of the peer-supplied x509 +certificate. Checks whether the \emph{subjectAltName} and +\emph{commonName} attributes match the supplied \emph{host} string. +Returns \emph{true} if there is a match, \emph{false} otherwise.
+ +\footnotesize +\begin{verbatim} +bool tls_postconnect_verify_cn (TLS_CONNECTION *tls, alist *verify_list); +\end{verbatim} +\normalsize + +Performs post-connection verification of the peer-supplied x509 +certificate. Checks whether the \emph{commonName} attribute matches any +strings supplied via the \emph{verify\_list} parameter. Returns +\emph{true} if there is a match, \emph{false} otherwise. + +\subsection{Manipulating TLS Connections} +\index{TLS Connection Manipulation} +\index{Connections!Manipulating TLS} +\addcontentsline{toc}{subsection}{Manipulating TLS Connections} + +\footnotesize +\begin{verbatim} +TLS_CONNECTION *new_tls_connection (TLS_CONTEXT *ctx, int fd); +\end{verbatim} +\normalsize + +Allocates and initializes a new \emph{TLS\_CONNECTION} structure with +context \emph{ctx} and file descriptor \emph{fd}. + +\footnotesize +\begin{verbatim} +void free_tls_connection (TLS_CONNECTION *tls); +\end{verbatim} +\normalsize + +Deallocates memory associated with the \emph{tls} structure. + +\footnotesize +\begin{verbatim} +bool tls_bsock_connect (BSOCK *bsock); +\end{verbatim} +\normalsize + +Negotiates a TLS client connection via \emph{bsock}. Returns \emph{true} +if successful, \emph{false} otherwise. Will fail if there is a TLS +protocol error or an invalid certificate is presented. + +\footnotesize +\begin{verbatim} +bool tls_bsock_accept (BSOCK *bsock); +\end{verbatim} +\normalsize + +Accepts a TLS client connection via \emph{bsock}. Returns \emph{true} if +successful, \emph{false} otherwise. Will fail if there is a TLS protocol +error or an invalid certificate is presented. + +\footnotesize +\begin{verbatim} +bool tls_bsock_shutdown (BSOCK *bsock); +\end{verbatim} +\normalsize + +Issues a blocking TLS shutdown request to the peer via \emph{bsock}. This function may not wait for the peer's reply.
+ +\footnotesize +\begin{verbatim} +int tls_bsock_writen (BSOCK *bsock, char *ptr, int32_t nbytes); +\end{verbatim} +\normalsize + +Writes \emph{nbytes} from \emph{ptr} via the \emph{TLS\_CONNECTION} +associated with \emph{bsock}. Due to OpenSSL's handling of \emph{EINTR}, +\emph{bsock} is set non-blocking at the start of the function, and restored +to its original blocking state before the function returns. Less than +\emph{nbytes} may be written if an error occurs. The actual number of +bytes written will be returned. + +\footnotesize +\begin{verbatim} +int tls_bsock_readn (BSOCK *bsock, char *ptr, int32_t nbytes); +\end{verbatim} +\normalsize + +Reads \emph{nbytes} from the \emph{TLS\_CONNECTION} associated with +\emph{bsock} and stores the result in \emph{ptr}. Due to OpenSSL's +handling of \emph{EINTR}, \emph{bsock} is set non-blocking at the start of +the function, and restored to its original blocking state before the +function returns. Less than \emph{nbytes} may be read if an error occurs. +The actual number of bytes read will be returned. + +\section{Bnet API Changes} +\index{Bnet API Changes} +\index{API Changes!Bnet} +\addcontentsline{toc}{section}{Bnet API Changes} + +A minimal number of changes were required in the Bnet socket API. The BSOCK +structure was expanded to include an associated TLS\_CONNECTION structure, +as well as a flag to designate the current blocking state of the socket. +The blocking state flag is required for win32, where it does not appear +possible to discern the current blocking state of a socket. 
+ +\subsection{Negotiating a TLS Connection} +\index{Negotiating a TLS Connection} +\index{TLS Connection!Negotiating} +\addcontentsline{toc}{subsection}{Negotiating a TLS Connection} + +\emph{bnet\_tls\_server()} and \emph{bnet\_tls\_client()} were both +implemented using the new TLS API as follows: + +\footnotesize +\begin{verbatim} +int bnet_tls_client(TLS_CONTEXT *ctx, BSOCK * bsock); +\end{verbatim} +\normalsize + +Negotiates a TLS session via \emph{bsock} using the settings from +\emph{ctx}. Returns 1 if successful, 0 otherwise. + +\footnotesize +\begin{verbatim} +int bnet_tls_server(TLS_CONTEXT *ctx, BSOCK * bsock, alist *verify_list); +\end{verbatim} +\normalsize + +Accepts a TLS client session via \emph{bsock} using the settings from +\emph{ctx}. If \emph{verify\_list} is non-NULL, it is passed to +\emph{tls\_postconnect\_verify\_cn()} for client certificate verification. + +\subsection{Manipulating Socket Blocking State} +\index{Manipulating Socket Blocking State} +\index{Socket Blocking State!Manipulating} +\index{Blocking State!Socket!Manipulating} +\addcontentsline{toc}{subsection}{Manipulating Socket Blocking State} + +Three functions were added for manipulating the blocking state of a socket +on both Win32 and Unix-like systems. The Win32 code was written according +to the MSDN documentation, but has not been tested. + +These functions are prototyped as follows: + +\footnotesize +\begin{verbatim} +int bnet_set_nonblocking (BSOCK *bsock); +\end{verbatim} +\normalsize + +Enables non-blocking I/O on the socket associated with \emph{bsock}. +Returns a copy of the socket flags prior to modification. + +\footnotesize +\begin{verbatim} +int bnet_set_blocking (BSOCK *bsock); +\end{verbatim} +\normalsize + +Enables blocking I/O on the socket associated with \emph{bsock}. Returns a +copy of the socket flags prior to modification. 
+ +\footnotesize +\begin{verbatim} +void bnet_restore_blocking (BSOCK *bsock, int flags); +\end{verbatim} +\normalsize + +Restores blocking or non-blocking IO setting on the socket associated with +\emph{bsock}. The \emph{flags} argument must be the return value of either +\emph{bnet\_set\_blocking()} or \emph{bnet\_set\_nonblocking()}. + +\pagebreak + +\section{Authentication Negotiation} +\index{Authentication Negotiation} +\index{Negotiation!TLS Authentication} +\addcontentsline{toc}{section}{Authentication Negotiation} + +Backwards compatibility with the existing SSL negotiation hooks implemented +in src/lib/cram-md5.c has been maintained. The +\emph{cram\_md5\_get\_auth()} function has been modified to accept an +integer pointer argument, tls\_remote\_need. The TLS requirement +advertised by the remote host is returned via this pointer. + +After exchanging cram-md5 authentication and TLS requirements, both the +client and server independently decide whether to continue: + +\footnotesize +\begin{verbatim} +if (!cram_md5_get_auth(dir, password, &tls_remote_need) || + !cram_md5_auth(dir, password, tls_local_need)) { +[snip] +/* Verify that the remote host is willing to meet our TLS requirements */ +if (tls_remote_need < tls_local_need && tls_local_need != BNET_TLS_OK && + tls_remote_need != BNET_TLS_OK) { + sendit(_("Authorization problem:" + " Remote server did not advertise required TLS support.\n")); + auth_success = false; + goto auth_done; +} + +/* Verify that we are willing to meet the remote host's requirements */ +if (tls_remote_need > tls_local_need && tls_local_need != BNET_TLS_OK && + tls_remote_need != BNET_TLS_OK) { + sendit(_("Authorization problem:" + " Remote server requires TLS.\n")); + auth_success = false; + goto auth_done; +} +\end{verbatim} +\normalsize diff --git a/docs/manuals/en/developers/translate_images.pl b/docs/manuals/en/developers/translate_images.pl new file mode 100755 index 00000000..c7225118 --- /dev/null +++
b/docs/manuals/en/developers/translate_images.pl @@ -0,0 +1,185 @@ +#!/usr/bin/perl -w +# +use strict; + +# Used to change the names of the image files generated by latex2html from imgxx.png +# to meaningful names. Provision is made to go either from or to the meaningful names. +# The meaningful names are obtained from a file called imagename_translations, which +# is generated by extensions to latex2html in the make_image_file subroutine in +# bacula.perl. + +# Opens the file imagename_translations and reads the contents into a hash. +# The hash is creaed with the imgxx.png files as the key if processing TO +# meaningful filenames, and with the meaningful filenames as the key if +# processing FROM meaningful filenames. +# Then opens the html file(s) indicated in the command-line arguments and +# changes all image references according to the translations described in the +# above file. Finally, it renames the image files. +# +# Original creation: 3-27-05 by Karl Cunningham. +# Modified 5-21-05 to go FROM and TO meaningful filenames. +# +my $TRANSFILE = "imagename_translations"; +my $path; + +# Loads the contents of $TRANSFILE file into the hash referenced in the first +# argument. The hash is loaded to translate old to new if $direction is 0, +# otherwise it is loaded to translate new to old. In this context, the +# 'old' filename is the meaningful name, and the 'new' filename is the +# imgxx.png filename. It is assumed that the old image is the one that +# latex2html has used as the source to create the imgxx.png filename. +# The filename extension is taken from the file +sub read_transfile { + my ($trans,$direction) = @_; + + if (!open IN,"<$path$TRANSFILE") { + print "WARNING: Cannot open image translation file $path$TRANSFILE for reading\n"; + print " Image filename translation aborted\n\n"; + exit 0; + } + + while () { + chomp; + my ($new,$old) = split(/\001/); + + # Old filenames will usually have a leading ./ which we don't need. 
+ $old =~ s/^\.\///; + + # The filename extension of the old filename must be made to match + # the new filename because it indicates the encoding format of the image. + my ($ext) = $new =~ /(\.[^\.]*)$/; + $old =~ s/\.[^\.]*$/$ext/; + if ($direction == 0) { + $trans->{$new} = $old; + } else { + $trans->{$old} = $new; + } + } + close IN; +} + +# Translates the image names in the file given as the first argument, according to +# the translations in the hash that is given as the second argument. +# The file contents are read in entirely into a string, the string is processed, and +# the file contents are then written. No particular care is taken to ensure that the +# file is not lost if a system failure occurs at an inopportune time. It is assumed +# that the html files being processed here can be recreated on demand. +# +# Links to other files are added to the %filelist for processing. That way, +# all linked files will be processed (assuming they are local). +sub translate_html { + my ($filename,$trans,$filelist) = @_; + my ($contents,$out,$this,$img,$dest); + my $cnt = 0; + + # If the filename is an external link ignore it. And drop any file:// from + # the filename. + $filename =~ /^(http|ftp|mailto)\:/ and return 0; + $filename =~ s/^file\:\/\///; + # Load the contents of the html file. + if (!open IF,"<$path$filename") { + print "WARNING: Cannot open $path$filename for reading\n"; + print " Image Filename Translation aborted\n\n"; + exit 0; + } + + while () { + $contents .= $_; + } + close IF; + + # Now do the translation... + # First, search for an image filename. + while ($contents =~ /\<\s*IMG[^\>]*SRC=\"/si) { + $contents = $'; + $out .= $` . $&; + + # The next thing is an image name. Get it and translate it. + $contents =~ /^(.*?)\"/s; + $contents = $'; + $this = $&; + $img = $1; + # If the image is in our list of ones to be translated, do it + # and feed the result to the output. 
+ $cnt += $this =~ s/$img/$trans->{$img}/ if (defined($trans->{$img})); + $out .= $this; + } + $out .= $contents; + + # Now send the translated text to the html file, overwriting what's there. + open OF,">$path$filename" or die "Cannot open $path$filename for writing\n"; + print OF $out; + close OF; + + # Now look for any links to other files and add them to the list of files to do. + while ($out =~ /\<\s*A[^\>]*HREF=\"(.*?)\"/si) { + $out = $'; + $dest = $1; + # Drop an # and anything after it. + $dest =~ s/\#.*//; + $filelist->{$dest} = '' if $dest; + } + return $cnt; +} + +# REnames the image files spefified in the %translate hash. +sub rename_images { + my $translate = shift; + my ($response); + + foreach (keys(%$translate)) { + if (! $translate->{$_}) { + print " WARNING: No destination Filename for $_\n"; + } else { + $response = `mv -f $path$_ $path$translate->{$_} 2>&1`; + $response and print "ERROR from system $response\n"; + } + } +} + +################################################# +############# MAIN ############################# +################################################ + +# %filelist starts out with keys from the @ARGV list. As files are processed, +# any links to other files are added to the %filelist. A hash of processed +# files is kept so we don't do any twice. + +# The first argument must be either --to_meaningful_names or --from_meaningful_names + +my (%translate,$search_regex,%filelist,%completed,$thisfile); +my ($cnt,$direction); + +my $arg0 = shift(@ARGV); +$arg0 =~ /^(--to_meaningful_names|--from_meaningful_names)$/ or + die "ERROR: First argument must be either \'--to_meaningful_names\' or \'--from_meaningful_names\'\n"; + +$direction = ($arg0 eq '--to_meaningful_names') ? 0 : 1; + +(@ARGV) or die "ERROR: Filename(s) to process must be given as arguments\n"; + +# Use the first argument to get the path to the file of translations. 
+my $tmp = $ARGV[0]; +($path) = $tmp =~ /(.*\/)/; +$path = '' unless $path; + +read_transfile(\%translate,$direction); + +foreach (@ARGV) { + # Strip the path from the filename, and use it later on. + if (s/(.*\/)//) { + $path = $1; + } else { + $path = ''; + } + $filelist{$_} = ''; + + while ($thisfile = (keys(%filelist))[0]) { + $cnt += translate_html($thisfile,\%translate,\%filelist) if (!exists($completed{$thisfile})); + delete($filelist{$thisfile}); + $completed{$thisfile} = ''; + } + print "translate_images.pl: $cnt image filenames translated ",($direction)?"from":"to"," meaningful names\n"; +} + +rename_images(\%translate); diff --git a/docs/manuals/en/developers/update_version b/docs/manuals/en/developers/update_version new file mode 100755 index 00000000..5c2e0092 --- /dev/null +++ b/docs/manuals/en/developers/update_version @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +. ./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/en/developers/update_version.in b/docs/manuals/en/developers/update_version.in new file mode 100644 index 00000000..2766245f --- /dev/null +++ b/docs/manuals/en/developers/update_version.in @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +. 
./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/en/developers/version.tex.in b/docs/manuals/en/developers/version.tex.in new file mode 100644 index 00000000..ff66dfc6 --- /dev/null +++ b/docs/manuals/en/developers/version.tex.in @@ -0,0 +1 @@ +@VERSION@ (@DATE@) diff --git a/docs/manuals/en/install/Makefile.in b/docs/manuals/en/install/Makefile.in new file mode 100644 index 00000000..0edc87f6 --- /dev/null +++ b/docs/manuals/en/install/Makefile.in @@ -0,0 +1,139 @@ +# +# +# Makefile for LaTeX +# +# To build everything do +# make tex +# make web +# make html +# make dvipdf +# +# or simply +# +# make +# +# for rapid development do: +# make tex +# make show +# +# +# If you are having problems getting "make" to work, debugging it is +# easier if can see the output from latex, which is normally redirected +# to /dev/null. To see it, do the following: +# +# cd docs/manual +# make tex +# latex bacula.tex +# +# typically the latex command will stop indicating the error (e.g. a +# missing \ in front of a _ or a missing { or ] ... +# +# The following characters must be preceded by a backslash +# to be entered as printable characters: +# +# # $ % & ~ _ ^ \ { } +# + +IMAGES=../../../images + +DOC=install + +first_rule: all + +all: tex web dvipdf mini-clean + +.SUFFIXES: .tex .html +.PHONY: +.DONTCARE: + + +tex: + @./update_version + @echo "Making version `cat version.tex`" + @cp -fp ${IMAGES}/hires/*.eps . + @touch ${DOC}i-dir.tex ${DOC}i-fd.tex ${DOC}i-sd.tex \ + ${DOC}i-console.tex ${DOC}i-general.tex + latex -interaction=batchmode ${DOC}.tex + makeindex ${DOC}.idx -o ${DOC}.ind 2>/dev/null + makeindex ${DOC}.ddx -o ${DOC}.dnd >/dev/null 2>/dev/null + makeindex ${DOC}.fdx -o ${DOC}.fnd >/dev/null 2>/dev/null + makeindex ${DOC}.sdx -o ${DOC}.snd >/dev/null 2>/dev/null + makeindex ${DOC}.cdx -o ${DOC}.cnd >/dev/null 2>/dev/null + latex -interaction=batchmode ${DOC}.tex + +pdf: + @echo "Making pdfm" + @cp -fp ${IMAGES}/hires/*.eps . 
+ dvipdfm -p a4 ${DOC}.dvi + +dvipdf: + @echo "Making dvi to pdf" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdf ${DOC}.dvi ${DOC}.pdf + +html: + @echo " " + @echo "Making html" + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @(if [ -f imagename_translations ] ; then \ + ./translate_images.pl --from_meaningful_names ${DOC}.html; \ + fi) + latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \ + -init_file latex2html-init.pl ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}.html + @echo "Done making html" + +web: + @echo "Making web" + @mkdir -p ${DOC} + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @cp -fp ${IMAGES}/*.eps ${DOC}/ + @cp -fp ${IMAGES}/*.eps ${IMAGES}/*.png ${DOC}/ + @rm -f ${DOC}/xp-*.png + @rm -f ${DOC}/next.eps ${DOC}/next.png ${DOC}/prev.eps ${DOC}/prev.png ${DOC}/up.eps ${DOC}/up.png + @rm -rf ${DOC}/*.html + latex2html -split 3 -local_icons -t "Bacula Installation and Configuration Guide" -long_titles 4 \ + -toc_stars -contents_in_nav -init_file latex2html-init.pl -white -notransparent ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}/Bacula_Instal_Config_Guide.html + @echo "Done making web" +show: + xdvi ${DOC} + +texcheck: + ./check_tex.pl ${DOC}.tex + +main_configs: + pic2graph -density 100 main_configs.png + +mini-clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.gif *.jpg *.eps + @rm -f *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.backup *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd *.old *.out + @rm -f ${DOC}/*.gif ${DOC}/*.jpg ${DOC}/*.eps + @rm -f ${DOC}/*.aux ${DOC}/*.cp ${DOC}/*.fn ${DOC}/*.ky ${DOC}/*.log ${DOC}/*.pg + @rm -f ${DOC}/*.backup ${DOC}/*.ilg ${DOC}/*.lof ${DOC}/*.lot + @rm -f ${DOC}/*.cdx ${DOC}/*.cnd ${DOC}/*.ddx ${DOC}/*.ddn ${DOC}/*.fdx ${DOC}/*.fnd ${DOC}/*.ind ${DOC}/*.sdx ${DOC}/*.snd + @rm -f ${DOC}/*.dnd ${DOC}/*.old ${DOC}/*.out + @rm 
-f ${DOC}/WARNINGS + + +clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.png *.gif *.jpg *.eps + @rm -f *.pdf *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.html *.backup *.ps *.dvi *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd imagename_translations + @rm -f *.old WARNINGS *.out *.toc *.idx + @rm -f ${DOC}i-*.tex + @rm -rf ${DOC} + + +distclean: clean + @rm -f images.pl labels.pl internals.pl + @rm -f Makefile version.tex diff --git a/docs/manuals/en/install/Makefile.save b/docs/manuals/en/install/Makefile.save new file mode 100644 index 00000000..8a1708ab --- /dev/null +++ b/docs/manuals/en/install/Makefile.save @@ -0,0 +1,101 @@ +# +# +# Makefile for LaTeX +# +# To build everything do +# make tex +# make web +# make html +# make dvipdf +# +# or simply +# +# make +# + +IMAGES=../../../images + +first_rule: bacula + +bacula: tex web html dvipdf + +.SUFFIXES: .tex .html +.PHONY: +.DONTCARE: + + +tex: + @cp -fp ${IMAGES}/hires/*.eps . + touch install.idx installi-general.tex + -latex -interaction=batchmode install.tex + makeindex install.idx >/dev/null 2>/dev/null + -latex -interaction=batchmode install.tex + +pdf: + @echo "Making install pdf" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdf install.dvi install.pdf + @rm -f *.eps *.old + +dvipdf: + @echo "Making install pdfm" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdfm -p a4 install.dvi + @rm -f *.eps *.old + +html: + @echo "Making install html" + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @(if [ -f imagename_translations ] ; then \ + ./translate_images.pl --from_meaningful_names install.html; \ + fi) + latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \ + install >/dev/null + ./translate_images.pl --to_meaningful_names install.html + @rm -f *.eps *.gif *.jpg *.old + +web: + @echo "Making install web" + @mkdir -p install + @rm -f install/* + @cp -fp ${IMAGES}/*.eps . 
+ @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @cp -fp ${IMAGES}/*.eps ${IMAGES}/*.png install/ + @rm -f install/next.eps install/next.png install/prev.eps install/prev.png install/up.eps install/up.png + @(if [ -f install/imagename_translations ] ; then \ + ./translate_images.pl --to_meaningful_names install/Bacula_Users_Guide.html; \ + fi) + @rm -rf install/*.html + latex2html -split 3 -local_icons -t "Developer's Guide" \ + -long_titles 4 -contents_in_nav -toc_stars -white \ + -notransparent install >/dev/null + ./translate_images.pl --to_meaningful_names install/install_Guide.html + @cp -f install/install_Guide.html install/index.html + @rm -f *.eps *.gif *.jpg install/*.eps *.old + @rm -f install/idle.png + @rm -f install/win32-*.png install/wx-console*.png install/xp-*.png + @rm -f install/*.pl install/*.log install/*.aux install/*.idx + @rm -f install/*.out WARNINGS + +texcheck: + ./check_tex.pl install.tex + +main_configs: + pic2graph -density 100 main_configs.png + +clean: + @rm -f 1 2 3 + @rm -f *.png *.gif *.jpg *.eps + @rm -f *.pdf *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.html *.backup *.pdf *.ps *.dvi *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd imagename_translations + @rm -f *.old WARNINGS *.out *.toc *.idx + @rm -f images.pl labels.pl internals.pl + @rm -rf install + @rm -f images.tex installi-general.tex + + +distclean: clean + @rm -f install.html install.pdf diff --git a/docs/manuals/en/install/autochangerres.tex b/docs/manuals/en/install/autochangerres.tex new file mode 100644 index 00000000..98563c77 --- /dev/null +++ b/docs/manuals/en/install/autochangerres.tex @@ -0,0 +1,107 @@ +%% +\chapter{Autochanger Resource} +\index[sd]{Autochanger Resource} +\index[sd]{Resource!Autochanger} + +The Autochanger resource supports single or multiple drive +autochangers by grouping one or more Device resources +into one unit called an autochanger in Bacula (often referred to +as a "tape 
library" by autochanger manufacturers). + +If you have an Autochanger, and you want it to function correctly, +you {\bf must} have an Autochanger resource in your Storage +conf file, and your Director's Storage directives that want to +use an Autochanger {\bf must} refer to the Autochanger resource name. +In previous versions of Bacula, the Director's Storage directives +referred directly to Device resources that were autochangers. +In version 1.38.0 and later, referring directly to Device resources +will not work for Autochangers. + +\begin{description} +\item [Name = \lt{}Autochanger-Name\gt{}] + \index[sd]{Name} + Specifies the Name of the Autochanger. This name is used in the + Director's Storage definition to refer to the autochanger. This + directive is required. + +\item [Device = \lt{}Device-name1, device-name2, ...\gt{}] + Specifies the names of the Device resource or resources that correspond + to the autochanger drive. If you have a multiple drive autochanger, you + must specify multiple Device names, each one referring to a separate + Device resource that contains a Drive Index specification that + corresponds to the drive number base zero. You may specify multiple + device names on a single line separated by commas, and/or you may + specify multiple Device directives. This directive is required. + +\item [Changer Device = {\it name-string}] + \index[sd]{Changer Device} + The specified {\bf name-string} gives the system file name of the autochanger + device name. If specified in this resource, the Changer Device name + is not needed in the Device resource. If it is specified in the Device + resource (see above), it will take precedence over one specified in + the Autochanger resource. + +\item [Changer Command = {\it name-string}] + \index[sd]{Changer Command } + The {\bf name-string} specifies an external program to be called that will + automatically change volumes as required by {\bf Bacula}. 
Most frequently, + you will specify the Bacula supplied {\bf mtx-changer} script as follows. + If it is specified here, it need not be specified in the Device + resource. If it is also specified in the Device resource, it will take + precedence over the one specified in the Autochanger resource. + +\end{description} + +The following is an example of a valid Autochanger resource definition: + +\footnotesize +\begin{verbatim} +Autochanger { + Name = "DDS-4-changer" + Device = DDS-4-1, DDS-4-2, DDS-4-3 + Changer Device = /dev/sg0 + Changer Command = "/etc/bacula/mtx-changer %c %o %S %a %d" +} +Device { + Name = "DDS-4-1" + Drive Index = 0 + Autochanger = yes + ... +} +Device { + Name = "DDS-4-2" + Drive Index = 1 + Autochanger = yes + ... +Device { + Name = "DDS-4-3" + Drive Index = 2 + Autochanger = yes + Autoselect = no + ... +} +\end{verbatim} +\normalsize + +Please note that it is important to include the {\bf Autochanger = yes} directive +in each Device definition that belongs to an Autochanger. A device definition +should not belong to more than one Autochanger resource. Also, your Device +directive in the Storage resource of the Director's conf file should have +the Autochanger's resource name rather than a name of one of the Devices. + +If you have a drive that physically belongs to an Autochanger but you don't want +to have it automatically used when Bacula references the Autochanger for backups, +for example, you want to reserve it for restores, you can add the directive: + +\footnotesize +\begin{verbatim} +Autoselect = no +\end{verbatim} +\normalsize + +to the Device resource for that drive. In that case, Bacula will not automatically +select that drive when accessing the Autochanger. You can, still use the drive +by referencing it by the Device name directly rather than the Autochanger name. 
An example +of such a definition is shown above for the Device DDS-4-3, which will not be +selected when the name DDS-4-changer is used in a Storage definition, but will +be used if DDS-4-3 is used. diff --git a/docs/manuals/en/install/check_tex.pl b/docs/manuals/en/install/check_tex.pl new file mode 100755 index 00000000..e12d51be --- /dev/null +++ b/docs/manuals/en/install/check_tex.pl @@ -0,0 +1,152 @@ +#!/usr/bin/perl -w +# Finds potential problems in tex files, and issues warnings to the console +# about what it finds. Takes a list of files as its only arguments, +# and does checks on all the files listed. The assumption is that these are +# valid (or close to valid) LaTeX files. It follows \include statements +# recursively to pick up any included tex files. +# +# +# +# Currently the following checks are made: +# +# -- Multiple hyphens not inside a verbatim environment (or \verb). These +# should be placed inside a \verb{} contruct so they will not be converted +# to single hyphen by latex and latex2html. + + +# Original creation 3-8-05 by Karl Cunningham karlc -at- keckec -dot- com +# +# + +use strict; + +# The following builds the test string to identify and change multiple +# hyphens in the tex files. Several constructs are identified but only +# multiple hyphens are changed; the others are fed to the output +# unchanged. +my $b = '\\\\begin\\*?\\s*\\{\\s*'; # \begin{ +my $e = '\\\\end\\*?\\s*\\{\\s*'; # \end{ +my $c = '\\s*\\}'; # closing curly brace + +# This captures entire verbatim environments. These are passed to the output +# file unchanged. +my $verbatimenv = $b . "verbatim" . $c . ".*?" . $e . "verbatim" . $c; + +# This captures \verb{..{ constructs. They are passed to the output unchanged. +my $verb = '\\\\verb\\*?(.).*?\\1'; + +# This captures multiple hyphens with a leading and trailing space. These are not changed. +my $hyphsp = '\\s\\-{2,}\\s'; + +# This identifies other multiple hyphens. 
+my $hyphens = '\\-{2,}'; + +# This identifies \hyperpage{..} commands, which should be ignored. +my $hyperpage = '\\\\hyperpage\\*?\\{.*?\\}'; + +# This builds the actual test string from the above strings. +#my $teststr = "$verbatimenv|$verb|$tocentry|$hyphens"; +my $teststr = "$verbatimenv|$verb|$hyphsp|$hyperpage|$hyphens"; + + +sub get_includes { + # Get a list of include files from the top-level tex file. The first + # argument is a pointer to the list of files found. The rest of the + # arguments is a list of filenames to check for includes. + my $files = shift; + my ($fileline,$includefile,$includes); + + while (my $filename = shift) { + # Get a list of all the html files in the directory. + open my $if,"<$filename" or die "Cannot open input file $filename\n"; + $fileline = 0; + $includes = 0; + while (<$if>) { + chomp; + $fileline++; + # If a file is found in an include, process it. + if (($includefile) = /\\include\s*\{(.*?)\}/) { + $includes++; + # Append .tex to the filename + $includefile .= '.tex'; + + # If the include file has already been processed, issue a warning + # and don't do it again. + my $found = 0; + foreach (@$files) { + if ($_ eq $includefile) { + $found = 1; + last; + } + } + if ($found) { + print "$includefile found at line $fileline in $filename was previously included\n"; + } else { + # The file has not been previously found. Save it and + # recursively process it. + push (@$files,$includefile); + get_includes($files,$includefile); + } + } + } + close IF; + } +} + + +sub check_hyphens { + my (@files) = @_; + my ($filedata,$this,$linecnt,$before); + + # Build the test string to check for the various environments. + # We only do the conversion if the multiple hyphens are outside of a + # verbatim environment (either \begin{verbatim}...\end{verbatim} or + # \verb{--}). Capture those environments and pass them to the output + # unchanged. + + foreach my $file (@files) { + # Open the file and load the whole thing into $filedata. 
A bit wasteful but + # easier to deal with, and we don't have a problem with speed here. + $filedata = ""; + open IF,"<$file" or die "Cannot open input file $file"; + while () { + $filedata .= $_; + } + close IF; + + # Set up to process the file data. + $linecnt = 1; + + # Go through the file data from beginning to end. For each match, save what + # came before it and what matched. $filedata now becomes only what came + # after the match. + # Chech the match to see if it starts with a multiple-hyphen. If so + # warn the user. Keep track of line numbers so they can be output + # with the warning message. + while ($filedata =~ /$teststr/os) { + $this = $&; + $before = $`; + $filedata = $'; + $linecnt += $before =~ tr/\n/\n/; + + # Check if the multiple hyphen is present outside of one of the + # acceptable constructs. + if ($this =~ /^\-+/) { + print "Possible unwanted multiple hyphen found in line ", + "$linecnt of file $file\n"; + } + $linecnt += $this =~ tr/\n/\n/; + } + } +} +################################################################## +# MAIN #### +################################################################## + +my (@includes,$cnt); + +# Examine the file pointed to by the first argument to get a list of +# includes to test. 
+get_includes(\@includes,@ARGV); + +check_hyphens(@includes); diff --git a/docs/manuals/en/install/configure.tex b/docs/manuals/en/install/configure.tex new file mode 100644 index 00000000..e37773b9 --- /dev/null +++ b/docs/manuals/en/install/configure.tex @@ -0,0 +1,408 @@ +%% +%% + +\chapter{Customizing the Configuration Files} +\label{ConfigureChapter} +\index[general]{Files!Customizing the Configuration } +\index[general]{Customizing the Configuration Files } + +When each of the Bacula programs starts, it reads a configuration file +specified on the command line or the default {\bf bacula-dir.conf}, {\bf +bacula-fd.conf}, {\bf bacula-sd.conf}, or {\bf console.conf} for the Director +daemon, the File daemon, the Storage daemon, and the Console program +respectively. + +Each service (Director, Client, Storage, Console) has its own configuration +file containing a set of Resource definitions. These resources are very +similar from one service to another, but may contain different directives +(records) depending on the service. For example, in the Director's resource +file, the {\bf Director} resource defines the name of the Director, a number +of global Director parameters and his password. In the File daemon +configuration file, the {\bf Director} resource specifies which Directors are +permitted to use the File daemon. + +Before running Bacula for the first time, you must customize the configuration +files for each daemon. Default configuration files will have been created by +the installation process, but you will need to modify them to correspond to +your system. An overall view of the resources can be seen in the following: + +\addcontentsline{lof}{figure}{Bacula Objects} +\includegraphics{./bacula-objects.eps} +\\ +(thanks to Aristides Maniatis for the above graphic) +\label{ResFormat} + +\section{Character Sets} +\index[general]{Character Sets} +Bacula is designed to handle most character sets of the world, +US ASCII, German, French, Chinese, ... 
However, it does this by
+encoding everything in UTF-8, and it expects all configuration files
+(including those read on Win32 machines) to be in UTF-8 format.
+UTF-8 is typically the default on Linux machines, but not on all
+Unix machines, nor on Windows, so you must take some care to ensure
+that your locale is set properly before starting Bacula.
+
+To ensure that Bacula configuration files can be correctly read including
+foreign characters the {\bf LANG} environment variable
+must end in {\bf .UTF-8}. A full example is {\bf en\_US.UTF-8}. The
+exact syntax may vary a bit from OS to OS, and exactly how you define
+it will also vary. On most newer Win32 machines, you can use {\bf notepad}
+to edit the conf files, then choose output encoding UTF-8.
+
+Bacula assumes that all filenames are in UTF-8 format on Linux and
+Unix machines. On Win32 they are in Unicode (UTF-16), and will
+be automatically converted to UTF-8 format.
+
+\section{Resource Directive Format}
+\index[general]{Resource Directive Format }
+\index[general]{Format!Resource Directive }
+
+Although you won't need to know the details of all the directives, a basic
+knowledge of Bacula resource directives is essential. Each directive contained
+within the resource (within the braces) is composed of a keyword followed by
+an equal sign (=) followed by one or more values. The keywords must be one of
+the known Bacula resource record keywords, and it may be composed of upper or
+lower case characters and spaces.
+
+Each resource definition MUST contain a Name directive, and may optionally
+contain a Description directive. The Name directive is used to
+uniquely identify the resource. The Description directive is (will be) used
+during display of the Resource to provide easier human recognition.
For +example: + +\footnotesize +\begin{verbatim} +Director { + Name = "MyDir" + Description = "Main Bacula Director" + WorkingDirectory = "$HOME/bacula/bin/working" +} +\end{verbatim} +\normalsize + +Defines the Director resource with the name "MyDir" and a working directory +\$HOME/bacula/bin/working. In general, if you want spaces in a name to the +right of the first equal sign (=), you must enclose that name within double +quotes. Otherwise quotes are not generally necessary because once defined, +quoted strings and unquoted strings are all equal. + +\label{Comments} +\subsection{Comments} +\index[general]{Comments} + +When reading the configuration file, blank lines are ignored and everything +after a hash sign (\#) until the end of the line is taken to be a comment. A +semicolon (;) is a logical end of line, and anything after the semicolon is +considered as the next statement. If a statement appears on a line by itself, +a semicolon is not necessary to terminate it, so generally in the examples in +this manual, you will not see many semicolons. +\label{Case1} + +\subsection{Upper and Lower Case and Spaces} +\index[general]{Spaces!Upper/Lower Case} +\index[general]{Upper and Lower Case and Spaces} + +Case (upper/lower) and spaces are totally ignored in the resource directive +keywords (the part before the equal sign). + +Within the keyword (i.e. before the equal sign), spaces are not significant. +Thus the keywords: {\bf name}, {\bf Name}, and {\bf N a m e} are all +identical. + +Spaces after the equal sign and before the first character of the value are +ignored. + +In general, spaces within a value are significant (not ignored), and if the +value is a name, you must enclose the name in double quotes for the spaces to +be accepted. Names may contain up to 127 characters. Currently, a name may +contain any ASCII character. 
Within a quoted string, any character following a +backslash (\textbackslash{}) is taken as itself (handy for inserting +backslashes and double quotes (")). + +Please note, however, that Bacula resource names as well as certain other +names (e.g. Volume names) must contain only letters (including ISO accented +letters), numbers, and a few special characters (space, underscore, ...). +All other characters and punctuation are invalid. + +\label{Includes} +\subsection{Including other Configuration Files} +\index[general]{Including other Configuration Files } +\index[general]{Files!Including other Configuration } +\index[general]{Using @ to include other files} +\index[general]{@{\bf filename}} + +If you wish to break your configuration file into smaller pieces, you can do +so by including other files using the syntax @{\bf filename} where {\bf +filename} is the full path and filename of another file. The @filename +specification can be given anywhere a primitive token would appear. + +\label{DataTypes} +\subsection{Recognized Primitive Data Types} +\index[general]{Types!Recognized Primitive Data } +\index[general]{Recognized Primitive Data Types } + +When parsing the resource directives, Bacula classifies the data according to +the types listed below. The first time you read this, it may appear a bit +overwhelming, but in reality, it is all pretty logical and straightforward. + +\begin{description} + +\item [name] + \index[fd]{name} + A keyword or name consisting of alphanumeric characters, including the +hyphen, underscore, and dollar characters. The first character of a {\bf +name} must be a letter. A name has a maximum length currently set to 127 +bytes. Typically keywords appear on the left side of an equal (i.e. they are +Bacula keywords -- i.e. Resource names or directive names). Keywords may not +be quoted. 
+ +\item [name-string] + \index[fd]{name-string} + A name-string is similar to a name, except that the name may be quoted and +can thus contain additional characters including spaces. Name strings are +limited to 127 characters in length. Name strings are typically used on the +right side of an equal (i.e. they are values to be associated with a keyword). + + +\item [string] + \index[fd]{string} + A quoted string containing virtually any character including spaces, or a +non-quoted string. A string may be of any length. Strings are typically +values that correspond to filenames, directories, or system command names. A +backslash (\textbackslash{}) turns the next character into itself, so to +include a double quote in a string, you precede the double quote with a +backslash. Likewise to include a backslash. + +\item [directory] + \index[dir]{directory} + A directory is either a quoted or non-quoted string. A directory will be +passed to your standard shell for expansion when it is scanned. Thus +constructs such as {\bf \$HOME} are interpreted to be their correct values. + +\item [password] + \index[dir]{password} + This is a Bacula password and it is stored internally in MD5 hashed format. + +\item [integer] + \index[dir]{integer} + A 32 bit integer value. It may be positive or negative. + +\item [positive integer] + \index[dir]{positive integer } + A 32 bit positive integer value. + +\item [long integer] + \index[dir]{long integer} + A 64 bit integer value. Typically these are values such as bytes that can +exceed 4 billion and thus require a 64 bit value. + +\item [yes|no] + \index[dir]{yes or no } + Either a {\bf yes} or a {\bf no}. + +\label{Size1} +\item [size] +\index[dir]{size} +A size specified as bytes. Typically, this is a floating point scientific +input format followed by an optional modifier. The floating point input is +stored as a 64 bit integer value. If a modifier is present, it must +immediately follow the value with no intervening spaces. 
The following +modifiers are permitted: + +\begin{description} +\item [k] + 1,024 (kilobytes) + +\item [kb] + 1,000 (kilobytes) + +\item [m] + 1,048,576 (megabytes) + +\item [mb] + 1,000,000 (megabytes) + +\item [g] + 1,073,741,824 (gigabytes) + +\item [gb] + 1,000,000,000 (gigabytes) +\end{description} + +\label{Time} +\item [time] +\index[dir]{time} +A time or duration specified in seconds. The time is stored internally as +a 64 bit integer value, but it is specified in two parts: a number part and +a modifier part. The number can be an integer or a floating point number. +If it is entered in floating point notation, it will be rounded to the +nearest integer. The modifier is mandatory and follows the number part, +either with or without intervening spaces. The following modifiers are +permitted: + +\begin{description} + +\item [seconds] + \index[dir]{seconds} + seconds + +\item [minutes] + \index[dir]{minutes} + minutes (60 seconds) + +\item [hours] + \index[dir]{hours } + hours (3600 seconds) + +\item [days] + \index[dir]{days} + days (3600*24 seconds) + +\item [weeks] + \index[dir]{weeks} + weeks (3600*24*7 seconds) + +\item [months] + \index[dir]{months } + months (3600*24*30 seconds) + +\item [quarters] + \index[dir]{quarters } + quarters (3600*24*91 seconds) + +\item [years] + \index[dir]{years } + years (3600*24*365 seconds) +\end{description} + +Any abbreviation of these modifiers is also permitted (i.e. {\bf seconds} +may be specified as {\bf sec} or {\bf s}). A specification of {\bf m} will +be taken as months. + +The specification of a time may have as many number/modifier parts as you +wish. For example: + +\footnotesize +\begin{verbatim} +1 week 2 days 3 hours 10 mins +1 month 2 days 30 sec + +\end{verbatim} +\normalsize + +are valid date specifications. + +\end{description} + +\label{ResTypes} +\section{Resource Types} +\index[general]{Types!Resource } +\index[general]{Resource Types } + +The following table lists all current Bacula resource types. 
It shows what +resources must be defined for each service (daemon). The default configuration +files will already contain at least one example of each permitted resource, so +you need not worry about creating all these kinds of resources from scratch. + +\addcontentsline{lot}{table}{Resource Types} +\begin{longtable}{|l|l|l|l|l|} + \hline +\multicolumn{1}{|c| }{\bf Resource } & \multicolumn{1}{c| }{\bf Director } & +\multicolumn{1}{c| }{\bf Client } & \multicolumn{1}{c| }{\bf Storage } & +\multicolumn{1}{c| }{\bf Console } \\ + \hline +{Autochanger } & {No } & {No } & {Yes } & {No } \\ +\hline +{Catalog } & {Yes } & {No } & {No } & {No } \\ + \hline +{Client } & {Yes } & {Yes } & {No } & {No } \\ + \hline +{Console } & {Yes } & {No } & {No } & {Yes } \\ + \hline +{Device } & {No } & {No } & {Yes } & {No } \\ + \hline +{Director } & {Yes } & {Yes } & {Yes } & {Yes } \\ + \hline +{FileSet } & {Yes } & {No } & {No } & {No } \\ + \hline +{Job } & {Yes } & {No } & {No } & {No } \\ + \hline +{JobDefs } & {Yes } & {No } & {No } & {No } \\ + \hline +{Message } & {Yes } & {Yes } & {Yes } & {No } \\ + \hline +{Pool } & {Yes } & {No } & {No } & {No } \\ + \hline +{Schedule } & {Yes } & {No } & {No } & {No } \\ + \hline +{Storage } & {Yes } & {No } & {Yes } & {No } +\\ \hline + +\end{longtable} + +\section{Names, Passwords and Authorization} +\label{Names} +\index[general]{Authorization!Names Passwords and } +\index[general]{Names, Passwords and Authorization } +\index[general]{Passwords} + +In order for one daemon to contact another daemon, it must authorize itself +with a password. In most cases, the password corresponds to a particular name, +so both the name and the password must match to be authorized. Passwords are +plain text, any text. They are not generated by any special process; just +use random text. + +The default configuration files are automatically defined for correct +authorization with random passwords. 
If you add to or modify these files, you
+will need to take care to keep them consistent.
+
+Here is sort of a picture of what names/passwords in which files/Resources
+must match up:
+
+\includegraphics{./Conf-Diagram.eps}
+
+In the left column, you will find the Director, Storage, and Client resources,
+with their names and passwords -- these are all in {\bf bacula-dir.conf}. In
+the right column are where the corresponding values should be found in the
+Console, Storage daemon (SD), and File daemon (FD) configuration files.
+
+Please note that the Address, {\bf fd-sd}, that appears in the Storage
+resource of the Director, preceded with an asterisk in the above example, is
+passed to the File daemon in symbolic form. The File daemon then resolves it
+to an IP address. For this reason, you must use either an IP address or a
+fully qualified name. A name such as {\bf localhost}, not being a fully
+qualified name, will resolve in the File daemon to the localhost of the File
+daemon, which is most likely not what is desired. The password used for the
+File daemon to authorize with the Storage daemon is a temporary password
+unique to each Job created by the daemons and is not specified in any .conf
+file.
+
+\section{Detailed Information for each Daemon}
+\index[general]{Detailed Information for each Daemon }
+\index[general]{Daemon!Detailed Information for each }
+
+The details of each Resource and the directives permitted therein are
+described in the following chapters.
+
+The following configuration files must be defined:
+
+\begin{itemize}
+\item
+ \ilink{Console}{ConsoleConfChapter} -- to define the resources for
+ the Console program (user interface to the Director). It defines which
+Directors are available so that you may interact with them.
+\item
+ \ilink{Director}{DirectorChapter} -- to define the resources
+ necessary for the Director. You define all the Clients and Storage daemons
+that you use in this configuration file.
+\item + \ilink{Client}{FiledConfChapter} -- to define the resources for + each client to be backed up. That is, you will have a separate Client +resource file on each machine that runs a File daemon. +\item + \ilink{Storage}{StoredConfChapter} -- to define the resources to + be used by each Storage daemon. Normally, you will have a single Storage +daemon that controls your tape drive or tape drives. However, if you have +tape drives on several machines, you will have at least one Storage daemon +per machine. +\end{itemize} diff --git a/docs/manuals/en/install/consoleconf.tex b/docs/manuals/en/install/consoleconf.tex new file mode 100644 index 00000000..563c81ad --- /dev/null +++ b/docs/manuals/en/install/consoleconf.tex @@ -0,0 +1,356 @@ +%% +%% + +\chapter{Console Configuration} +\label{ConsoleConfChapter} +\index[general]{Configuration!Console} +\index[general]{Console Configuration} + +\section{General} +\index[general]{General} + +The Console configuration file is the simplest of all the configuration files, +and in general, you should not need to change it except for the password. It +simply contains the information necessary to contact the Director or +Directors. + +For a general discussion of the syntax of configuration files and their +resources including the data types recognized by {\bf Bacula}, please see +the \ilink{Configuration}{ConfigureChapter} chapter of this manual. + +The following Console Resource definition must be defined: + +\section{The Director Resource} +\label{DirectorResource3} +\index[general]{Director Resource} +\index[general]{Resource!Director} + +The Director resource defines the attributes of the Director running on the +network. You may have multiple Director resource specifications in a single +Console configuration file. If you have more than one, you will be prompted to +choose one when you start the {\bf Console} program. + +\begin{description} +\item [Director] + \index[console]{Director} + Start of the Director directives. 
+ +\item [Name = \lt{}name\gt{}] + \index[console]{Name} + The director name used to select among different Directors, otherwise, this + name is not used. + +\item [DIRPort = \lt{}port-number\gt{}] + \index[dir]{DIRPort} + Specify the port to use to connect to the Director. This value will most + likely already be set to the value you specified on the {\bf + \verb:--:with-base-port} option of the {\bf ./configure} command. This port must be + identical to the {\bf DIRport} specified in the {\bf Director} resource of + the \ilink{Director's configuration}{DirectorChapter} file. The + default is 9101 so this directive is not normally specified. + +\item [Address = \lt{}address\gt{}] + \index[dir]{Address} + Where the address is a host name, a fully qualified domain name, or a network + address used to connect to the Director. + +\item [Password = \lt{}password\gt{}] + \index[dir]{Password} + Where the password is the password needed for the Director to accept the + Console connection. This password must be identical to the {\bf Password} + specified in the {\bf Director} resource of the + \ilink{Director's configuration}{DirectorChapter} file. This + directive is required. +\end{description} + +An actual example might be: + +\footnotesize +\begin{verbatim} +Director { + Name = HeadMan + address = rufus.cats.com + password = xyz1erploit +} +\end{verbatim} +\normalsize + +\section{The ConsoleFont Resource} +\index[general]{Resource!ConsoleFont} +\index[general]{ConsoleFont Resource} + +The ConsoleFont resource is available only in the GNOME version of the +console. It permits you to define the font that you want used to display in +the main listing window. + +\begin{description} + +\item [ConsoleFont] + \index[console]{ConsoleFont} + Start of the ConsoleFont directives. + +\item [Name = \lt{}name\gt{}] + \index[console]{Name} + The name of the font. 
+
+\item [Font = \lt{}Pango Font Name\gt{}]
+ \index[console]{Font}
+ The string value given here defines the desired font. It is specified in the
+ Pango format. For example, the default specification is:
+
+\footnotesize
+\begin{verbatim}
+Font = "LucidaTypewriter 9"
+\end{verbatim}
+\normalsize
+
+\end{description}
+
+Thanks to Phil Stracchino for providing the code for this feature.
+
+A different example might be:
+
+\footnotesize
+\begin{verbatim}
+ConsoleFont {
+ Name = Default
+ Font = "Monospace 10"
+}
+\end{verbatim}
+\normalsize
+
+\section{The Console Resource}
+\label{ConsoleResource}
+\index[general]{Console Resource}
+\index[general]{Resource!Console}
+
+As of Bacula version 1.33 and higher, there are three different kinds of
+consoles, which the administrator or user can use to interact with the
+Director. These three kinds of consoles comprise three different security
+levels.
+
+\begin{itemize}
+\item The first console type is an {\bf anonymous} or {\bf default} console,
+ which has full privileges. There is no console resource necessary for this
+ type since the password is specified in the Director resource. This is the
+ kind of console that was initially implemented in versions prior to 1.33 and
+ remains valid. Typically you would use it only for administrators.
+
+\item The second type of console, and new to version 1.33 and higher is a
+ "named" or "restricted" console defined within a Console resource in
+ both the Director's configuration file and in the Console's
+ configuration file. Both the names and the passwords in these two
+ entries must match much as is the case for Client programs.
+
+ This second type of console begins with absolutely no privileges except
+ those explicitly specified in the Director's Console resource. Note,
+ the definition of what these restricted consoles can do is determined
+ by the Director's conf file.
+ + Thus you may define within the Director's conf file multiple Consoles + with different names and passwords, sort of like multiple users, each + with different privileges. As a default, these consoles can do + absolutely nothing -- no commands what so ever. You give them + privileges or rather access to commands and resources by specifying + access control lists in the Director's Console resource. This gives the + administrator fine grained control over what particular consoles (or + users) can do. + +\item The third type of console is similar to the above mentioned + restricted console in that it requires a Console resource definition in + both the Director and the Console. In addition, if the console name, + provided on the {\bf Name =} directive, is the same as a Client name, + the user of that console is permitted to use the {\bf SetIP} command to + change the Address directive in the Director's client resource to the IP + address of the Console. This permits portables or other machines using + DHCP (non-fixed IP addresses) to "notify" the Director of their current + IP address. + +\end{itemize} + +The Console resource is optional and need not be specified. However, if it is +specified, you can use ACLs (Access Control Lists) in the Director's +configuration file to restrict the particular console (or user) to see only +information pertaining to his jobs or client machine. + +You may specify as many Console resources in the console's conf file. If +you do so, generally the first Console resource will be used. However, if +you have multiple Director resources (i.e. you want to connect to different +directors), you can bind one of your Console resources to a particular +Director resource, and thus when you choose a particular Director, the +appropriate Console configuration resource will be used. See the "Director" +directive in the Console resource described below for more information. 
+ +Note, the Console resource is optional, but can be useful for +restricted consoles as noted above. + +\begin{description} +\item [Console] + \index[console]{Console} + Start of the Console resource. + +\item [Name = \lt{}name\gt{}] + \index[console]{Name} + The Console name used to allow a restricted console to change + its IP address using the SetIP command. The SetIP command must + also be defined in the Director's conf CommandACL list. + + +\item [Password = \lt{}password\gt{}] + \index[console]{Password} + If this password is supplied, then the password specified in the + Director resource of you Console conf will be ignored. See below + for more details. + +\item [Director = \lt{}director-resource-name\gt{}] + If this directive is specified, this Console resource will be + used by bconsole when that particular director is selected + when first starting bconsole. I.e. it binds a particular console + resource with its name and password to a particular director. + +\item [Heartbeat Interval = \lt{}time-interval\gt{}] + \index[console]{Heartbeat Interval} + \index[console]{Directive!Heartbeat} + This directive is optional and if specified will cause the Console to + set a keepalive interval (heartbeat) in seconds on each of the sockets + to communicate with the Director. It is implemented only on systems + (Linux, ...) that provide the {\bf setsockopt} TCP\_KEEPIDLE function. + The default value is zero, which means no change is made to the socket. + +\end{description} + + +The following configuration files were supplied by Phil Stracchino. For +example, if we define the following in the user's bconsole.conf file (or +perhaps the bwx-console.conf file): + +\footnotesize +\begin{verbatim} +Director { + Name = MyDirector + DIRport = 9101 + Address = myserver + Password = "XXXXXXXXXXX" # no, really. this is not obfuscation. 
+} + + +Console { + Name = restricted-user + Password = "UntrustedUser" +} +\end{verbatim} +\normalsize + +Where the Password in the Director section is deliberately incorrect, and the +Console resource is given a name, in this case {\bf restricted-user}. Then +in the Director's bacula-dir.conf file (not directly accessible by the user), +we define: + +\footnotesize +\begin{verbatim} +Console { + Name = restricted-user + Password = "UntrustedUser" + JobACL = "Restricted Client Save" + ClientACL = restricted-client + StorageACL = main-storage + ScheduleACL = *all* + PoolACL = *all* + FileSetACL = "Restricted Client's FileSet" + CatalogACL = DefaultCatalog + CommandACL = run +} +\end{verbatim} +\normalsize + +the user logging into the Director from his Console will get logged in as {\bf +restricted-user}, and he will only be able to see or access a Job with the +name {\bf Restricted Client Save} a Client with the name {\bf +restricted-client}, a Storage device {\bf main-storage}, any Schedule or Pool, +a FileSet named {\bf Restricted Client's FileSet}, a Catalog named {\bf +DefaultCatalog}, and the only command he can use in the Console is the {\bf +run} command. In other words, this user is rather limited in what he can see +and do with Bacula. + +The following is an example of a bconsole conf file that can access +several Directors and has different Consoles depending on the director: + +\footnotesize +\begin{verbatim} +Director { + Name = MyDirector + DIRport = 9101 + Address = myserver + Password = "XXXXXXXXXXX" # no, really. this is not obfuscation. +} + +Director { + Name = SecondDirector + DIRport = 9101 + Address = secondserver + Password = "XXXXXXXXXXX" # no, really. this is not obfuscation. 
+} + +Console { + Name = restricted-user + Password = "UntrustedUser" + Director = MyDirector +} + +Console { + Name = restricted-user + Password = "A different UntrustedUser" + Director = SecondDirector +} +\end{verbatim} +\normalsize + +The second Director referenced at "secondserver" might look +like the following: + +\footnotesize +\begin{verbatim} +Console { + Name = restricted-user + Password = "A different UntrustedUser" + JobACL = "Restricted Client Save" + ClientACL = restricted-client + StorageACL = second-storage + ScheduleACL = *all* + PoolACL = *all* + FileSetACL = "Restricted Client's FileSet" + CatalogACL = RestrictedCatalog + CommandACL = run, restore + WhereACL = "/" +} +\end{verbatim} +\normalsize + + + +\section{Console Commands} +\index[general]{Console Commands} +\index[general]{Commands!Console} + +For more details on running the console and its commands, please see the +\ilink{Bacula Console}{_ConsoleChapter} chapter of this manual. + +\section{Sample Console Configuration File} +\label{SampleConfiguration2} +\index[general]{File!Sample Console Configuration} +\index[general]{Sample Console Configuration File} + +An example Console configuration file might be the following: + +\footnotesize +\begin{verbatim} +# +# Bacula Console Configuration File +# +Director { + Name = HeadMan + address = "my_machine.my_domain.com" + Password = Console_password +} +\end{verbatim} +\normalsize diff --git a/docs/manuals/en/install/critical.tex b/docs/manuals/en/install/critical.tex new file mode 100644 index 00000000..30462e39 --- /dev/null +++ b/docs/manuals/en/install/critical.tex @@ -0,0 +1,130 @@ +%% +%% + +\chapter{Critical Items to Implement Before Production} +\label{CriticalChapter} +\index[general]{Production!Critical Items to Implement Before } +\index[general]{Critical Items to Implement Before Production } + +We recommend you take your time before implementing a production a Bacula +backup system since Bacula is a rather complex program, and if 
you make a +mistake, you may suddenly find that you cannot restore your files in case +of a disaster. This is especially true if you have not previously used a +major backup product. + +If you follow the instructions in this chapter, you will have covered most of +the major problems that can occur. It goes without saying that if you ever +find that we have left out an important point, please inform us, so +that we can document it to the benefit of everyone. + +\label{Critical} +\section{Critical Items} +\index[general]{Critical Items } +\index[general]{Items!Critical } + +The following assumes that you have installed Bacula, you more or less +understand it, you have at least worked through the tutorial or have +equivalent experience, and that you have set up a basic production +configuration. If you haven't done the above, please do so and then come back +here. The following is a sort of checklist that points with perhaps a brief +explanation of why you should do it. In most cases, you will find the +details elsewhere in the manual. The order is more or less the order you +would use in setting up a production system (if you already are in +production, use the checklist anyway). + +\begin{itemize} +\item Test your tape drive for compatibility with Bacula by using the test + command in the \ilink{btape}{btape} program. +\item Better than doing the above is to walk through the nine steps in the + \ilink{Tape Testing}{TapeTestingChapter} chapter of the manual. It + may take you a bit of time, but it will eliminate surprises. +\item Test the end of tape handling of your tape drive by using the + fill command in the \ilink{btape}{btape} program. +\item If you are using a Linux 2.4 kernel, make sure that /lib/tls is disabled. Bacula + does not work with this library. See the second point under + \ilink{ Supported Operating Systems.}{SupportedOSes} +\item Do at least one restore of files. 
If you backup multiple OS types + (Linux, Solaris, HP, MacOS, FreeBSD, Win32, ...), + restore files from each system type. The + \ilink{Restoring Files}{RestoreChapter} chapter shows you how. +\item Write a bootstrap file to a separate system for each backup job. The + Write Bootstrap directive is described in the + \ilink{Director Configuration}{writebootstrap} chapter of the + manual, and more details are available in the + \ilink{Bootstrap File}{BootstrapChapter} chapter. Also, the default + bacula-dir.conf comes with a Write Bootstrap directive defined. This allows + you to recover the state of your system as of the last backup. +\item Backup your catalog. An example of this is found in the default + bacula-dir.conf file. The backup script is installed by default and + should handle any database, though you may want to make your own local + modifications. See also \ilink{Backing Up Your Bacula Database - + Security Considerations }{BackingUpBaculaSecurityConsiderations} for more + information. +\item Write a bootstrap file for the catalog. An example of this is found in + the default bacula-dir.conf file. This will allow you to quickly restore your + catalog in the event it is wiped out -- otherwise it is many excruciating + hours of work. +\item Make a copy of the bacula-dir.conf, bacula-sd.conf, and + bacula-fd.conf files that you are using on your server. Put it in a safe + place (on another machine) as these files can be difficult to + reconstruct if your server dies. +\item Make a Bacula Rescue CDROM! See the + \ilink{Disaster Recovery Using a Bacula Rescue + CDROM}{RescueChapter} chapter. It is trivial to make such a CDROM, + and it can make system recovery in the event of a lost hard disk infinitely + easier. +\item Bacula assumes all filenames are in UTF-8 format. This is important + when saving the filenames to the catalog. 
For Win32 machine, Bacula will + automatically convert from Unicode to UTF-8, but on Unix, Linux, *BSD, + and MacOS X machines, you must explicitly ensure that your locale is set + properly. Typically this means that the {bf LANG} environment variable + must end in {\bf .UTF-8}. An full example is {\bf en\_US.UTF-8}. The + exact syntax may vary a bit from OS to OS, and exactly how you define it + will also vary. + + On most modern Win32 machines, you can edit the conf files with {\bf + notebook} and choose output encoding UTF-8. +\end{itemize} + +\section{Recommended Items} +\index[general]{Items!Recommended } +\index[general]{Recommended Items } + +Although these items may not be critical, they are recommended and will help +you avoid problems. + +\begin{itemize} +\item Read the \ilink{Quick Start Guide to Bacula}{QuickStartChapter} +\item After installing and experimenting with Bacula, read and work carefully + through the examples in the + \ilink{Tutorial}{TutorialChapter} chapter of this manual. +\item Learn what each of the \ilink{Bacula Utility Programs}{_UtilityChapter} + does. +\item Set up reasonable retention periods so that your catalog does not grow + to be too big. See the following three chapters:\\ + \ilink{Recycling your Volumes}{RecyclingChapter},\\ + \ilink{Basic Volume Management}{DiskChapter},\\ + \ilink{Using Pools to Manage Volumes}{PoolsChapter}. +\item Perform a bare metal recovery using the Bacula Rescue CDROM. See the + \ilink{Disaster Recovery Using a Bacula Rescue CDROM}{RescueChapter} + chapter. +\end{itemize} + +If you absolutely must implement a system where you write a different +tape each night and take it offsite in the morning. We recommend that you do +several things: +\begin{itemize} +\item Write a bootstrap file of your backed up data and a bootstrap file + of your catalog backup to a floppy disk or a CDROM, and take that with + the tape. 
If this is not possible, try to write those files to another + computer or offsite computer, or send them as email to a friend. If none + of that is possible, at least print the bootstrap files and take that + offsite with the tape. Having the bootstrap files will make recovery + much easier. +\item It is better not to force Bacula to load a particular tape each day. + Instead, let Bacula choose the tape. If you need to know what tape to + mount, you can print a list of recycled and appendable tapes daily, and + select any tape from that list. Bacula may propose a particular tape + for use that it considers optimal, but it will accept any valid tape + from the correct pool. +\end{itemize} diff --git a/docs/manuals/en/install/dirdconf.tex b/docs/manuals/en/install/dirdconf.tex new file mode 100644 index 00000000..c823d640 --- /dev/null +++ b/docs/manuals/en/install/dirdconf.tex @@ -0,0 +1,3377 @@ +%% +%% + +\chapter{Configuring the Director} +\label{DirectorChapter} +\index[general]{Director!Configuring the} +\index[general]{Configuring the Director} + +Of all the configuration files needed to run {\bf Bacula}, the Director's is +the most complicated, and the one that you will need to modify the most often +as you add clients or modify the FileSets. + +For a general discussion of configuration files and resources including the +data types recognized by {\bf Bacula}. Please see the +\ilink{Configuration}{ConfigureChapter} chapter of this manual. + +\section{Director Resource Types} +\index[general]{Types!Director Resource} +\index[general]{Director Resource Types} + +Director resource type may be one of the following: + +Job, JobDefs, Client, Storage, Catalog, Schedule, FileSet, Pool, Director, or +Messages. We present them here in the most logical order for defining them: + +Note, everything revolves around a job and is tied to a job in one +way or another. 
+ +\begin{itemize} +\item + \ilink{Director}{DirectorResource4} -- to define the Director's + name and its access password used for authenticating the Console program. + Only a single Director resource definition may appear in the Director's + configuration file. If you have either {\bf /dev/random} or {\bf bc} on your + machine, Bacula will generate a random password during the configuration + process, otherwise it will be left blank. +\item + \ilink{Job}{JobResource} -- to define the backup/restore Jobs + and to tie together the Client, FileSet and Schedule resources to be used + for each Job. Normally, you will Jobs of different names corresponding + to each client (i.e. one Job per client, but a different one with a different name + for each client). +\item + \ilink{JobDefs}{JobDefsResource} -- optional resource for + providing defaults for Job resources. +\item + \ilink{Schedule}{ScheduleResource} -- to define when a Job is to + be automatically run by {\bf Bacula's} internal scheduler. You + may have any number of Schedules, but each job will reference only + one. +\item + \ilink{FileSet}{FileSetResource} -- to define the set of files + to be backed up for each Client. You may have any number of + FileSets but each Job will reference only one. +\item + \ilink{Client}{ClientResource2} -- to define what Client is to be + backed up. You will generally have multiple Client definitions. Each + Job will reference only a single client. +\item + \ilink{Storage}{StorageResource2} -- to define on what physical + device the Volumes should be mounted. You may have one or + more Storage definitions. +\item + \ilink{Pool}{PoolResource} -- to define the pool of Volumes + that can be used for a particular Job. Most people use a + single default Pool. However, if you have a large number + of clients or volumes, you may want to have multiple Pools. + Pools allow you to restrict a Job (or a Client) to use + only a particular set of Volumes. 
+\item + \ilink{Catalog}{CatalogResource} -- to define in what database to + keep the list of files and the Volume names where they are backed up. + Most people only use a single catalog. However, if you want to + scale the Director to many clients, multiple catalogs can be helpful. + Multiple catalogs require a bit more management because in general + you must know what catalog contains what data. Currently, all + Pools are defined in each catalog. This restriction will be removed + in a later release. +\item + \ilink{Messages}{MessagesChapter} -- to define where error and + information messages are to be sent or logged. You may define + multiple different message resources and hence direct particular + classes of messages to different users or locations (files, ...). +\end{itemize} + +\section{The Director Resource} +\label{DirectorResource4} +\index[general]{Director Resource} +\index[general]{Resource!Director} + +The Director resource defines the attributes of the Directors running on the +network. In the current implementation, there is only a single Director +resource, but the final design will contain multiple Directors to maintain +index and media database redundancy. + +\begin{description} + +\item [Director] + \index[dir]{Director} + Start of the Director resource. One and only one director resource must be +supplied. + +\item [Name = \lt{}name\gt{}] + \index[dir]{Name} + \index[dir]{Directive!Name} + The director name used by the system administrator. This directive is +required. + +\item [Description = \lt{}text\gt{}] + \index[dir]{Description} + \index[dir]{Directive!Description} + The text field contains a description of the Director that will be displayed +in the graphical user interface. This directive is optional. + +\item [Password = \lt{}UA-password\gt{}] + \index[dir]{Password} + \index[dir]{Directive!Password} + Specifies the password that must be supplied for the default Bacula + Console to be authorized. 
The same password must appear in the {\bf + Director} resource of the Console configuration file. For added + security, the password is never passed across the network but instead a + challenge response hash code created with the password. This directive + is required. If you have either {\bf /dev/random} or {\bf bc} on your + machine, Bacula will generate a random password during the configuration + process, otherwise it will be left blank and you must manually supply + it. + + The password is plain text. It is not generated through any special + process but as noted above, it is better to use random text for + security reasons. + +\item [Messages = \lt{}Messages-resource-name\gt{}] + \index[dir]{Messages} + \index[dir]{Directive!Messages} + The messages resource specifies where to deliver Director messages that are + not associated with a specific Job. Most messages are specific to a job and + will be directed to the Messages resource specified by the job. However, + there are a few messages that can occur when no job is running. This + directive is required. + +\item [Working Directory = \lt{}Directory\gt{}] + \index[dir]{Working Directory} + \index[dir]{Directive!Working Directory} + This directive is mandatory and specifies a directory in which the Director + may put its status files. This directory should be used only by Bacula but + may be shared by other Bacula daemons. However, please note, if this + directory is shared with other Bacula daemons (the File daemon and Storage + daemon), you must ensure that the {\bf Name} given to each daemon is + unique so that the temporary filenames used do not collide. By default + the Bacula configure process creates unique daemon names by postfixing them + with -dir, -fd, and -sd. Standard shell expansion of the {\bf + Directory} is done when the configuration file is read so that values such + as {\bf \$HOME} will be properly expanded. This directive is required. 
+ The working directory specified must already exist and be + readable and writable by the Bacula daemon referencing it. + + If you have specified a Director user and/or a Director group on your + ./configure line with {\bf {-}{-}with-dir-user} and/or + {\bf {-}{-}with-dir-group} the Working Directory owner and group will + be set to those values. + +\item [Pid Directory = \lt{}Directory\gt{}] + \index[dir]{Pid Directory} + \index[dir]{Directive!Pid Directory} + This directive is mandatory and specifies a directory in which the Director + may put its process Id file. The process Id file is used to shutdown + Bacula and to prevent multiple copies of Bacula from running simultaneously. + Standard shell expansion of the {\bf Directory} is done when the + configuration file is read so that values such as {\bf \$HOME} will be + properly expanded. + + The PID directory specified must already exist and be + readable and writable by the Bacula daemon referencing it + + Typically on Linux systems, you will set this to: {\bf /var/run}. If you are + not installing Bacula in the system directories, you can use the {\bf Working + Directory} as defined above. This directive is required. + +\item [Scripts Directory = \lt{}Directory\gt{}] + \index[dir]{Scripts Directory} + \index[dir]{Directive!Scripts Directory} + This directive is optional and, if defined, specifies a directory in + which the Director will look for the Python startup script {\bf + DirStartup.py}. This directory may be shared by other Bacula daemons. + Standard shell expansion of the directory is done when the configuration + file is read so that values such as {\bf \$HOME} will be properly + expanded. + +\item [QueryFile = \lt{}Path\gt{}] + \index[dir]{QueryFile} + \index[dir]{Directive!QueryFile} + This directive is mandatory and specifies a directory and file in which + the Director can find the canned SQL statements for the {\bf Query} + command of the Console. 
Standard shell expansion of the {\bf Path} is + done when the configuration file is read so that values such as {\bf + \$HOME} will be properly expanded. This directive is required. + +\label{DirMaxConJobs} +\item [Maximum Concurrent Jobs = \lt{}number\gt{}] +\index[dir]{Maximum Concurrent Jobs} +\index[dir]{Directive!Maximum Concurrent Jobs} +\index[general]{Simultaneous Jobs} +\index[general]{Concurrent Jobs} + where \lt{}number\gt{} is the maximum number of total Director Jobs that + should run concurrently. The default is set to 1, but you may set it to a + larger number. + + Please note that the Volume format becomes much more complicated with + multiple simultaneous jobs, consequently, restores can take much longer if + Bacula must sort through interleaved volume blocks from multiple simultaneous + jobs. This can be avoided by having each simultaneously running job write to + a different volume or by using data spooling, which will first spool the data + to disk simultaneously, then write each spool file to the volume in + sequence. + + There may also still be some cases where directives such as {\bf Maximum + Volume Jobs} are not properly synchronized with multiple simultaneous jobs + (subtle timing issues can arise), so careful testing is recommended. + + At the current time, there is no configuration parameter set to limit the + number of console connections. A maximum of five simultaneous console + connections are permitted. + +\item [FD Connect Timeout = \lt{}time\gt{}] + \index[dir]{FD Connect Timeout} + \index[dir]{Directive!FD Connect Timeout} + where {\bf time} is the time that the Director should continue + attempting to contact the File daemon to start a job, and after which + the Director will cancel the job. The default is 30 minutes. 
+ +\item [SD Connect Timeout = \lt{}time\gt{}] + \index[dir]{SD Connect Timeout} + \index[dir]{Directive!SD Connect Timeout} + where {\bf time} is the time that the Director should continue + attempting to contact the Storage daemon to start a job, and after which + the Director will cancel the job. The default is 30 minutes. + +\item [DirAddresses = \lt{}IP-address-specification\gt{}] + \index[dir]{DirAddresses} + \index[dir]{Address} + \index[general]{Address} + \index[dir]{Directive!DirAddresses} + Specify the ports and addresses on which the Director daemon will listen + for Bacula Console connections. Probably the simplest way to explain + this is to show an example: + +\footnotesize +\begin{verbatim} + DirAddresses = { + ip = { addr = 1.2.3.4; port = 1205;} + ipv4 = { + addr = 1.2.3.4; port = http;} + ipv6 = { + addr = 1.2.3.4; + port = 1205; + } + ip = { + addr = 1.2.3.4 + port = 1205 + } + ip = { addr = 1.2.3.4 } + ip = { addr = 201:220:222::2 } + ip = { + addr = bluedot.thun.net + } +} +\end{verbatim} +\normalsize + +where ip, ip4, ip6, addr, and port are all keywords. Note, that the address +can be specified as either a dotted quadruple, or IPv6 colon notation, or as +a symbolic name (only in the ip specification). Also, port can be specified +as a number or as the mnemonic value from the /etc/services file. If a port +is not specified, the default will be used. If an ip section is specified, +the resolution can be made either by IPv4 or IPv6. If ip4 is specified, then +only IPv4 resolutions will be permitted, and likewise with ip6. + +Please note that if you use the DirAddresses directive, you must +not use either a DirPort or a DirAddress directive in the same +resource. + +\item [DirPort = \lt{}port-number\gt{}] + \index[dir]{DirPort} + \index[dir]{Directive!DirPort} + Specify the port (a positive integer) on which the Director daemon will + listen for Bacula Console connections. 
This same port number must be + specified in the Director resource of the Console configuration file. The + default is 9101, so normally this directive need not be specified. This + directive should not be used if you specify DirAddresses (not plural) + directive. + +\item [DirAddress = \lt{}IP-Address\gt{}] + \index[dir]{DirAddress} + \index[dir]{Directive!DirAddress} + This directive is optional, but if it is specified, it will cause the + Director server (for the Console program) to bind to the specified {\bf + IP-Address}, which is either a domain name or an IP address specified as a + dotted quadruple in string or quoted string format. If this directive is not + specified, the Director will bind to any available address (the default). + Note, unlike the DirAddresses specification noted above, this directive only + permits a single address to be specified. This directive should not be used if you + specify a DirAddresses (note plural) directive. + + + +\end{description} + +The following is an example of a valid Director resource definition: + +\footnotesize +\begin{verbatim} +Director { + Name = HeadMan + WorkingDirectory = "$HOME/bacula/bin/working" + Password = UA_password + PidDirectory = "$HOME/bacula/bin/working" + QueryFile = "$HOME/bacula/bin/query.sql" + Messages = Standard +} +\end{verbatim} +\normalsize + +\section{The Job Resource} +\label{JobResource} +\index[general]{Resource!Job} +\index[general]{Job Resource} + +The Job resource defines a Job (Backup, Restore, ...) that Bacula must +perform. Each Job resource definition contains the name of a Client and +a FileSet to backup, the Schedule for the Job, where the data +are to be stored, and what media Pool can be used. In effect, each Job +resource must specify What, Where, How, and When or FileSet, Storage, +Backup/Restore/Level, and Schedule respectively. Note, the FileSet must +be specified for a restore job for historical reasons, but it is no longer used. 
+ +Only a single type ({\bf Backup}, {\bf Restore}, ...) can be specified for any +job. If you want to backup multiple FileSets on the same Client or multiple +Clients, you must define a Job for each one. + +Note, you define only a single Job to do the Full, Differential, and +Incremental backups since the different backup levels are tied together by +a unique Job name. Normally, you will have only one Job per Client, but +if a client has a really huge number of files (more than several million), +you might want to split it into to Jobs each with a different FileSet +covering only part of the total files. + + +\begin{description} + +\item [Job] + \index[dir]{Job} + \index[dir]{Directive!Job} + Start of the Job resource. At least one Job resource is required. + +\item [Name = \lt{}name\gt{}] + \index[dir]{Name} + \index[dir]{Directive!Name} + The Job name. This name can be specified on the {\bf Run} command in the + console program to start a job. If the name contains spaces, it must be + specified between quotes. It is generally a good idea to give your job the + same name as the Client that it will backup. This permits easy + identification of jobs. + + When the job actually runs, the unique Job Name will consist of the name you + specify here followed by the date and time the job was scheduled for + execution. This directive is required. + +\item [Enabled = \lt{}yes|no\gt{}] + \index[dir]{Enable} + \index[dir]{Directive!Enable} + This directive allows you to enable or disable automatic execution + via the scheduler of a Job. + +\item [Type = \lt{}job-type\gt{}] + \index[dir]{Type} + \index[dir]{Directive!Type} + The {\bf Type} directive specifies the Job type, which may be one of the + following: {\bf Backup}, {\bf Restore}, {\bf Verify}, or {\bf Admin}. This + directive is required. Within a particular Job Type, there are also Levels + as discussed in the next item. + +\begin{description} + +\item [Backup] + \index[dir]{Backup} + Run a backup Job. 
Normally you will have at least one Backup job for each + client you want to save. Normally, unless you turn off cataloging, most all + the important statistics and data concerning files backed up will be placed + in the catalog. + +\item [Restore] + \index[dir]{Restore} + Run a restore Job. Normally, you will specify only one Restore job + which acts as a sort of prototype that you will modify using the console + program in order to perform restores. Although certain basic + information from a Restore job is saved in the catalog, it is very + minimal compared to the information stored for a Backup job -- for + example, no File database entries are generated since no Files are + saved. + + {\bf Restore} jobs cannot be + automatically started by the scheduler as is the case for Backup, Verify + and Admin jobs. To restore files, you must use the {\bf restore} command + in the console. + + +\item [Verify] + \index[dir]{Verify} + Run a verify Job. In general, {\bf verify} jobs permit you to compare the + contents of the catalog to the file system, or to what was backed up. In + addition, to verifying that a tape that was written can be read, you can + also use {\bf verify} as a sort of tripwire intrusion detection. + +\item [Admin] + \index[dir]{Admin} + Run an admin Job. An {\bf Admin} job can be used to periodically run catalog + pruning, if you do not want to do it at the end of each {\bf Backup} Job. + Although an Admin job is recorded in the catalog, very little data is saved. +\end{description} + +\label{Level} + +\item [Level = \lt{}job-level\gt{}] +\index[dir]{Level} +\index[dir]{Directive!Level} + The Level directive specifies the default Job level to be run. Each + different Job Type (Backup, Restore, ...) has a different set of Levels + that can be specified. The Level is normally overridden by a different + value that is specified in the {\bf Schedule} resource. 
This directive + is not required, but must be specified either by a {\bf Level} directive + or as an override specified in the {\bf Schedule} resource. + +For a {\bf Backup} Job, the Level may be one of the following: + +\begin{description} + +\item [Full] +\index[dir]{Full} + When the Level is set to Full all files in the FileSet whether or not + they have changed will be backed up. + +\item [Incremental] + \index[dir]{Incremental} + When the Level is set to Incremental all files specified in the FileSet + that have changed since the last successful backup of the the same Job + using the same FileSet and Client, will be backed up. If the Director + cannot find a previous valid Full backup then the job will be upgraded + into a Full backup. When the Director looks for a valid backup record + in the catalog database, it looks for a previous Job with: + +\begin{itemize} +\item The same Job name. +\item The same Client name. +\item The same FileSet (any change to the definition of the FileSet such as + adding or deleting a file in the Include or Exclude sections constitutes a + different FileSet. +\item The Job was a Full, Differential, or Incremental backup. +\item The Job terminated normally (i.e. did not fail or was not canceled). +\end{itemize} + + If all the above conditions do not hold, the Director will upgrade the + Incremental to a Full save. Otherwise, the Incremental backup will be + performed as requested. + + The File daemon (Client) decides which files to backup for an + Incremental backup by comparing start time of the prior Job (Full, + Differential, or Incremental) against the time each file was last + "modified" (st\_mtime) and the time its attributes were last + "changed"(st\_ctime). If the file was modified or its attributes + changed on or after this start time, it will then be backed up. + + Some virus scanning software may change st\_ctime while + doing the scan. 
For example, if the virus scanning program attempts to
+ reset the access time (st\_atime), which Bacula does not use, it will
+ cause st\_ctime to change and hence Bacula will backup the file during
+ an Incremental or Differential backup. In the case of Sophos virus
+ scanning, you can prevent it from resetting the access time (st\_atime)
+ and hence changing st\_ctime by using the {\bf \verb:--:no-reset-atime}
+ option. For other software, please see their manual.
+
+ When Bacula does an Incremental backup, all modified files that are
+ still on the system are backed up. However, any file that has been
+ deleted since the last Full backup remains in the Bacula catalog, which
+ means that if between a Full save and the time you do a restore, some
+ files are deleted, those deleted files will also be restored. The
+ deleted files will no longer appear in the catalog after doing another
+ Full save. However, to remove deleted files from the catalog during an
+ Incremental backup is quite a time consuming process and not currently
+ implemented in Bacula.
+
+ In addition, if you move a directory rather than copy it, the files in
+ it do not have their modification time (st\_mtime) or their attribute
+ change time (st\_ctime) changed. As a consequence, those files will
+ probably not be backed up by an Incremental or Differential backup which
+ depend solely on these time stamps. If you move a directory, and wish
+ it to be properly backed up, it is generally preferable to copy it, then
+ delete the original.
+
+\item [Differential]
+ \index[dir]{Differential}
+ When the Level is set to Differential
+ all files specified in the FileSet that have changed since the last
+ successful Full backup of the same Job will be backed up.
+ If the Director cannot find a
+ valid previous Full backup for the same Job, FileSet, and Client,
+ then the Differential job will be upgraded into a Full backup. 
+
+ When the Director looks for a valid Full backup record in the catalog
+ database, it looks for a previous Job with:
+
+\begin{itemize}
+\item The same Job name.
+\item The same Client name.
+\item The same FileSet (any change to the definition of the FileSet such as
+ adding or deleting a file in the Include or Exclude sections constitutes a
+ different FileSet).
+\item The Job was a FULL backup.
+\item The Job terminated normally (i.e. did not fail or was not canceled).
+\end{itemize}
+
+ If all the above conditions do not hold, the Director will upgrade the
+ Differential to a Full save. Otherwise, the Differential backup will be
+ performed as requested.
+
+ The File daemon (Client) decides which files to backup for a
+ differential backup by comparing the start time of the prior Full backup
+ Job against the time each file was last "modified" (st\_mtime) and the
+ time its attributes were last "changed" (st\_ctime). If the file was
+ modified or its attributes were changed on or after this start time, it
+ will then be backed up. The start time used is displayed after the {\bf
+ Since} on the Job report. In rare cases, using the start time of the
+ prior backup may cause some files to be backed up twice, but it ensures
+ that no change is missed. As with the Incremental option, you should
+ ensure that the clocks on your server and client are synchronized or as
+ close as possible to avoid the possibility of a file being skipped.
+ Note, on versions 1.33 or greater Bacula automatically makes the
+ necessary adjustments to the time between the server and the client so
+ that the times Bacula uses are synchronized.
+
+ When Bacula does a Differential backup, all modified files that are
+ still on the system are backed up. However, any file that has been
+ deleted since the last Full backup remains in the Bacula catalog, which
+ means that if between a Full save and the time you do a restore, some
+ files are deleted, those deleted files will also be restored. 
The
+ deleted files will no longer appear in the catalog after doing another
+ Full save. However, to remove deleted files from the catalog during a
+ Differential backup is quite a time consuming process and not currently
+ implemented in Bacula. It is, however, a planned future feature.
+
+ As noted above, if you move a directory rather than copy it, the
+ files in it do not have their modification time (st\_mtime) or
+ their attribute change time (st\_ctime) changed. As a
+ consequence, those files will probably not be backed up by an
+ Incremental or Differential backup which depend solely on these
+ time stamps. If you move a directory, and wish it to be
+ properly backed up, it is generally preferable to copy it, then
+ delete the original. Alternatively, you can move the directory, then
+ use the {\bf touch} program to update the timestamps.
+
+ Every once in a while, someone asks why we need Differential
+ backups as long as Incremental backups pick up all changed files.
+ There are possibly many answers to this question, but the one
+ that is the most important for me is that a Differential backup
+ effectively merges
+ all the Incremental and Differential backups since the last Full backup
+ into a single Differential backup. This has two effects: 1. It gives
+ some redundancy since the old backups could be used if the merged backup
+ cannot be read. 2. More importantly, it reduces the number of Volumes
+ that are needed to do a restore, effectively eliminating the need to read
+ all the volumes on which the preceding Incremental and Differential
+ backups since the last Full are done.
+
+\end{description}
+
+For a {\bf Restore} Job, no level needs to be specified.
+
+For a {\bf Verify} Job, the Level may be one of the following:
+
+\begin{description}
+
+\item [InitCatalog]
+\index[dir]{InitCatalog}
+ does a scan of the specified {\bf FileSet} and stores the file
+ attributes in the Catalog database. 
Since no file data is saved, you
+ might ask why you would want to do this. It turns out to be a very
+ simple and easy way to have a {\bf Tripwire} like feature using {\bf
+ Bacula}. In other words, it allows you to save the state of a set of
+ files defined by the {\bf FileSet} and later check to see if those files
+ have been modified or deleted and if any new files have been added.
+ This can be used to detect system intrusion. Typically you would
+ specify a {\bf FileSet} that contains the set of system files that
+ should not change (e.g. /sbin, /boot, /lib, /bin, ...). Normally, you
+ run the {\bf InitCatalog} level verify one time when your system is
+ first set up, and then once again after each modification (upgrade) to
+ your system. Thereafter, when you want to check the state of your
+ system files, you use a {\bf Verify} {\bf level = Catalog}. This
+ compares the results of your {\bf InitCatalog} with the current state of
+ the files.
+
+\item [Catalog]
+\index[dir]{Catalog}
+ Compares the current state of the files against the state previously
+ saved during an {\bf InitCatalog}. Any discrepancies are reported. The
+ items reported are determined by the {\bf verify} options specified on
+ the {\bf Include} directive in the specified {\bf FileSet} (see the {\bf
+ FileSet} resource below for more details). Typically this command will
+ be run once a day (or night) to check for any changes to your system
+ files.
+
+ Please note! If you run two Verify Catalog jobs on the same client at
+ the same time, the results will certainly be incorrect. This is because
+ Verify Catalog modifies the Catalog database while running in order to
+ track new files.
+
+\item [VolumeToCatalog]
+\index[dir]{VolumeToCatalog}
+ This level causes Bacula to read the file attribute data written to the
+ Volume from the last Job. The file attribute data are compared to the
+ values saved in the Catalog database and any differences are reported. 
+ This is similar to the {\bf Catalog} level except that instead of + comparing the disk file attributes to the catalog database, the + attribute data written to the Volume is read and compared to the catalog + database. Although the attribute data including the signatures (MD5 or + SHA1) are compared, the actual file data is not compared (it is not in + the catalog). + + Please note! If you run two Verify VolumeToCatalog jobs on the same + client at the same time, the results will certainly be incorrect. This + is because the Verify VolumeToCatalog modifies the Catalog database + while running. + +\item [DiskToCatalog] +\index[dir]{DiskToCatalog} + This level causes Bacula to read the files as they currently are on + disk, and to compare the current file attributes with the attributes + saved in the catalog from the last backup for the job specified on the + {\bf VerifyJob} directive. This level differs from the {\bf Catalog} + level described above by the fact that it doesn't compare against a + previous Verify job but against a previous backup. When you run this + level, you must supply the verify options on your Include statements. + Those options determine what attribute fields are compared. + + This command can be very useful if you have disk problems because it + will compare the current state of your disk against the last successful + backup, which may be several jobs. + + Note, the current implementation (1.32c) does not identify files that + have been deleted. +\end{description} + +\item [Verify Job = \lt{}Job-Resource-Name\gt{}] + \index[dir]{Verify Job} + \index[dir]{Directive!Verify Job} + If you run a verify job without this directive, the last job run will be + compared with the catalog, which means that you must immediately follow + a backup by a verify command. If you specify a {\bf Verify Job} Bacula + will find the last job with that name that ran. 
This permits you to run + all your backups, then run Verify jobs on those that you wish to be + verified (most often a {\bf VolumeToCatalog}) so that the tape just + written is re-read. + +\item [JobDefs = \lt{}JobDefs-Resource-Name\gt{}] +\index[dir]{JobDefs} +\index[dir]{Directive!JobDefs} + If a JobDefs-Resource-Name is specified, all the values contained in the + named JobDefs resource will be used as the defaults for the current Job. + Any value that you explicitly define in the current Job resource, will + override any defaults specified in the JobDefs resource. The use of + this directive permits writing much more compact Job resources where the + bulk of the directives are defined in one or more JobDefs. This is + particularly useful if you have many similar Jobs but with minor + variations such as different Clients. A simple example of the use of + JobDefs is provided in the default bacula-dir.conf file. + +\item [Bootstrap = \lt{}bootstrap-file\gt{}] +\index[dir]{Bootstrap} +\index[dir]{Directive!Bootstrap} + The Bootstrap directive specifies a bootstrap file that, if provided, + will be used during {\bf Restore} Jobs and is ignored in other Job + types. The {\bf bootstrap} file contains the list of tapes to be used + in a restore Job as well as which files are to be restored. + Specification of this directive is optional, and if specified, it is + used only for a restore job. In addition, when running a Restore job + from the console, this value can be changed. + + If you use the {\bf Restore} command in the Console program, to start a + restore job, the {\bf bootstrap} file will be created automatically from + the files you select to be restored. + + For additional details of the {\bf bootstrap} file, please see + \ilink{Restoring Files with the Bootstrap File}{BootstrapChapter} chapter + of this manual. 
+ +\label{writebootstrap} +\item [Write Bootstrap = \lt{}bootstrap-file-specification\gt{}] +\index[dir]{Write Bootstrap} +\index[dir]{Directive!Write Bootstrap} + The {\bf writebootstrap} directive specifies a file name where Bacula + will write a {\bf bootstrap} file for each Backup job run. This + directive applies only to Backup Jobs. If the Backup job is a Full + save, Bacula will erase any current contents of the specified file + before writing the bootstrap records. If the Job is an Incremental + or Differential + save, Bacula will append the current bootstrap record to the end of the + file. + + Using this feature, permits you to constantly have a bootstrap file that + can recover the current state of your system. Normally, the file + specified should be a mounted drive on another machine, so that if your + hard disk is lost, you will immediately have a bootstrap record + available. Alternatively, you should copy the bootstrap file to another + machine after it is updated. Note, it is a good idea to write a separate + bootstrap file for each Job backed up including the job that backs up + your catalog database. + + If the {\bf bootstrap-file-specification} begins with a vertical bar + (|), Bacula will use the specification as the name of a program to which + it will pipe the bootstrap record. It could for example be a shell + script that emails you the bootstrap record. + + On versions 1.39.22 or greater, before opening the file or executing the + specified command, Bacula performs + \ilink{character substitution}{character substitution} like in RunScript + directive. To automatically manage your bootstrap files, you can use + this in your {\bf JobDefs} resources: +\begin{verbatim} +JobDefs { + Write Bootstrap = "%c_%n.bsr" + ... +} +\end{verbatim} + + For more details on using this file, please see the chapter entitled + \ilink{The Bootstrap File}{BootstrapChapter} of this manual. 
+ +\item [Client = \lt{}client-resource-name\gt{}] +\index[dir]{Client} +\index[dir]{Directive!Client} + The Client directive specifies the Client (File daemon) that will be used in + the current Job. Only a single Client may be specified in any one Job. The + Client runs on the machine to be backed up, and sends the requested files to + the Storage daemon for backup, or receives them when restoring. For + additional details, see the + \ilink{Client Resource section}{ClientResource2} of this chapter. + This directive is required. + +\item [FileSet = \lt{}FileSet-resource-name\gt{}] +\index[dir]{FileSet} +\index[dir]{FileSet} + The FileSet directive specifies the FileSet that will be used in the + current Job. The FileSet specifies which directories (or files) are to + be backed up, and what options to use (e.g. compression, ...). Only a + single FileSet resource may be specified in any one Job. For additional + details, see the \ilink{FileSet Resource section}{FileSetResource} of + this chapter. This directive is required. + +\item [Messages = \lt{}messages-resource-name\gt{}] +\index[dir]{Messages} +\index[dir]{Directive!Messages} + The Messages directive defines what Messages resource should be used for + this job, and thus how and where the various messages are to be + delivered. For example, you can direct some messages to a log file, and + others can be sent by email. For additional details, see the + \ilink{Messages Resource}{MessagesChapter} Chapter of this manual. This + directive is required. + +\item [Pool = \lt{}pool-resource-name\gt{}] +\index[dir]{Pool} +\index[dir]{Directive!Pool} + The Pool directive defines the pool of Volumes where your data can be + backed up. Many Bacula installations will use only the {\bf Default} + pool. However, if you want to specify a different set of Volumes for + different Clients or different Jobs, you will probably want to use + Pools. 
For additional details, see the \ilink{Pool Resource + section}{PoolResource} of this chapter. This directive is required. + +\item [Full Backup Pool = \lt{}pool-resource-name\gt{}] +\index[dir]{Full Backup Pool} +\index[dir]{Directive!Full Backup Pool} + The {\it Full Backup Pool} specifies a Pool to be used for Full backups. + It will override any Pool specification during a Full backup. This + directive is optional. + +\item [Differential Backup Pool = \lt{}pool-resource-name\gt{}] +\index[dir]{Differential Backup Pool} +\index[dir]{Directive!Differential Backup Pool} + The {\it Differential Backup Pool} specifies a Pool to be used for + Differential backups. It will override any Pool specification during a + Differential backup. This directive is optional. + +\item [Incremental Backup Pool = \lt{}pool-resource-name\gt{}] +\index[dir]{Incremental Backup Pool} +\index[dir]{Directive!Incremental Backup Pool} + The {\it Incremental Backup Pool} specifies a Pool to be used for + Incremental backups. It will override any Pool specification during an + Incremental backup. This directive is optional. + +\item [Schedule = \lt{}schedule-name\gt{}] +\index[dir]{Schedule} +\index[dir]{Directive!Schedule} + The Schedule directive defines what schedule is to be used for the Job. + The schedule in turn determines when the Job will be automatically + started and what Job level (i.e. Full, Incremental, ...) is to be run. + This directive is optional, and if left out, the Job can only be started + manually using the Console program. Although you may specify only a + single Schedule resource for any one job, the Schedule resource may + contain multiple {\bf Run} directives, which allow you to run the Job at + many different times, and each {\bf run} directive permits overriding + the default Job Level Pool, Storage, and Messages resources. This gives + considerable flexibility in what can be done with a single Job. 
For + additional details, see the \ilink{Schedule Resource + Chapter}{ScheduleResource} of this manual. + + +\item [Storage = \lt{}storage-resource-name\gt{}] +\index[dir]{Storage} +\index[dir]{Directive!Storage} + The Storage directive defines the name of the storage services where you + want to backup the FileSet data. For additional details, see the + \ilink{Storage Resource Chapter}{StorageResource2} of this manual. + The Storage resource may also be specified in the Job's Pool resource, + in which case the value in the Pool resource overrides any value + in the Job. This Storage resource definition is not required by either + the Job resource or in the Pool, but it must be specified in + one or the other, if not an error will result. + +\item [Max Start Delay = \lt{}time\gt{}] +\index[dir]{Max Start Delay} +\index[dir]{Directive!Max Start Delay} + The time specifies the maximum delay between the scheduled time and the + actual start time for the Job. For example, a job can be scheduled to + run at 1:00am, but because other jobs are running, it may wait to run. + If the delay is set to 3600 (one hour) and the job has not begun to run + by 2:00am, the job will be canceled. This can be useful, for example, + to prevent jobs from running during day time hours. The default is 0 + which indicates no limit. + +\item [Max Run Time = \lt{}time\gt{}] +\index[dir]{Max Run Time} +\index[dir]{Directive!Max Run Time} + The time specifies the maximum allowed time that a job may run, counted + from when the job starts, ({\bf not} necessarily the same as when the + job was scheduled). This directive is implemented in version 1.33 and + later. 
+ +\item [Max Wait Time = \lt{}time\gt{}] +\index[dir]{Max Wait Time} +\index[dir]{Directive!Max Wait Time} + The time specifies the maximum allowed time that a job may block waiting + for a resource (such as waiting for a tape to be mounted, or waiting for + the storage or file daemons to perform their duties), counted from the + when the job starts, ({\bf not} necessarily the same as when the job was + scheduled). This directive is implemented only in version 1.33 and + later. + +\item [Incremental Max Wait Time = \lt{}time\gt{}] +\index[dir]{Incremental Max Wait Time} +\index[dir]{Directive!Incremental Max Wait Time} + The time specifies the maximum allowed time that an Incremental backup + job may block waiting for a resource (such as waiting for a tape to be + mounted, or waiting for the storage or file daemons to perform their + duties), counted from the when the job starts, ({\bf not} necessarily + the same as when the job was scheduled). Please note that if there is a + {\bf Max Wait Time} it may also be applied to the job. + +\item [Differential Max Wait Time = \lt{}time\gt{}] +\index[dir]{Differential Max Wait Time} +\index[dir]{Directive!Differential Max Wait Time} + The time specifies the maximum allowed time that a Differential backup + job may block waiting for a resource (such as waiting for a tape to be + mounted, or waiting for the storage or file daemons to perform their + duties), counted from the when the job starts, ({\bf not} necessarily + the same as when the job was scheduled). Please note that if there is a + {\bf Max Wait Time} it may also be applied to the job. 
+
+\label{PreferMountedVolumes}
+\item [Prefer Mounted Volumes = \lt{}yes|no\gt{}]
+\index[dir]{Prefer Mounted Volumes}
+\index[dir]{Directive!Prefer Mounted Volumes}
+ If the Prefer Mounted Volumes directive is set to {\bf yes} (default
+ yes), the Storage daemon is requested to select either an Autochanger or
+ a drive with a valid Volume already mounted in preference to a drive
+ that is not ready. This means that all jobs will attempt to append
+ to the same Volume (providing the Volume is appropriate -- right Pool,
+ ... for that job). If no drive with a suitable Volume is available, it
+ will select the first available drive. Note, any Volume that has
+ been requested to be mounted, will be considered valid as a mounted
+ volume by another job. Thus if multiple jobs start at the same time
+ and they all prefer mounted volumes, the first job will request the
+ mount, and the other jobs will use the same volume.
+
+ If the directive is set to {\bf no}, the Storage daemon will prefer
+ finding an unused drive, otherwise, each job started will append to the
+ same Volume (assuming the Pool is the same for all jobs). Setting
+ Prefer Mounted Volumes to no can be useful for those sites
+ with multiple drive autochangers that prefer to maximize backup
+ throughput at the expense of using additional drives and Volumes.
+ This means that the job will prefer to use an unused drive rather
+ than use a drive that is already in use.
+
+\item [Prune Jobs = \lt{}yes|no\gt{}]
+\index[dir]{Prune Jobs}
+\index[dir]{Directive!Prune Jobs}
+ Normally, pruning of Jobs from the Catalog is specified on a Client by
+ Client basis in the Client resource with the {\bf AutoPrune} directive.
+ If this directive is specified (not normally) and the value is {\bf
+ yes}, it will override the value specified in the Client resource. The
+ default is {\bf no}. 
+ + +\item [Prune Files = \lt{}yes|no\gt{}] +\index[dir]{Prune Files} +\index[dir]{Directive!Prune Files} + Normally, pruning of Files from the Catalog is specified on a Client by + Client basis in the Client resource with the {\bf AutoPrune} directive. + If this directive is specified (not normally) and the value is {\bf + yes}, it will override the value specified in the Client resource. The + default is {\bf no}. + +\item [Prune Volumes = \lt{}yes|no\gt{}] +\index[dir]{Prune Volumes} +\index[dir]{Directive!Prune Volumes} + Normally, pruning of Volumes from the Catalog is specified on a Client + by Client basis in the Client resource with the {\bf AutoPrune} + directive. If this directive is specified (not normally) and the value + is {\bf yes}, it will override the value specified in the Client + resource. The default is {\bf no}. + +\item [RunScript \{\lt{}body-of-runscript\gt{}\}] + \index[dir]{RunScript} + \index[dir]{Directive!Run Script} + + This directive is implemented in version 1.39.22 and later. + The RunScript directive behaves like a resource in that it + requires opening and closing braces around a number of directives + that make up the body of the runscript. + + The specified {\bf Command} (see below for details) is run as an + external program prior or after the current Job. This is optional. 
+
+ The following options may be specified in the body
+ of the runscript:\\
+
+\begin{tabular}{|c|c|c|l}
+Options & Value & Default & Information \\
+\hline
+\hline
+Runs On Success & Yes/No & {\it Yes} & Run command if JobStatus is successful\\
+\hline
+Runs On Failure & Yes/No & {\it No} & Run command if JobStatus isn't successful\\
+\hline
+Runs On Client & Yes/No & {\it Yes} & Run command on client\\
+\hline
+Runs When & Before|After|Always & {\it Never} & When run commands\\
+\hline
+Fail Job On Error & Yes/No & {\it Yes} & Fail job if script returns
+ something different from 0 \\
+\hline
+Command & & & Path to your script\\
+\hline
+\end{tabular}
+ \\
+
+ Any output sent by the command to standard output will be included in the
+ Bacula job report. The command string must be a valid program name or name
+ of a shell script.
+
+ In addition, the command string is parsed then fed to the OS,
+ which means that the path will be searched to execute your specified
+ command, but there is no shell interpretation, as a consequence, if you
+ invoke complicated commands or want any shell features such as redirection
+ or piping, you must call a shell script and do it inside that script.
+
+ Before submitting the specified command to the operating system, Bacula
+ performs character substitution of the following characters:
+
+\label{character substitution}
+\footnotesize
+\begin{verbatim}
+ %% = %
+ %c = Client's name
+ %d = Director's name
+ %e = Job Exit Status
+ %i = JobId
+ %j = Unique Job id
+ %l = Job Level
+ %n = Job name
+ %s = Since time
+ %t = Job type (Backup, ...)
+ %v = Volume name
+
+\end{verbatim}
+\normalsize
+
+The Job Exit Status code \%e edits the following values:
+
+\index[dir]{Exit Status}
+\begin{itemize}
+\item OK
+\item Error
+\item Fatal Error
+\item Canceled
+\item Differences
+\item Unknown term code
+\end{itemize}
+
+ Thus if you edit it on a command line, you will need to enclose
+ it within some sort of quotes. 
+
+
+You can use the following shortcuts:\\
+
+\begin{tabular}{|c|c|c|c|c|c}
+Keyword & RunsOnSuccess & RunsOnFailure & FailJobOnError & Runs On Client & RunsWhen \\
+\hline
+Run Before Job & & & Yes & No & Before \\
+\hline
+Run After Job & Yes & No & & No & After \\
+\hline
+Run After Failed Job & No & Yes & & No & After \\
+\hline
+Client Run Before Job & & & Yes & Yes & Before \\
+\hline
+Client Run After Job & Yes & No & & Yes & After \\
+\end{tabular}
+
+Examples:
+\begin{verbatim}
+RunScript {
+ RunsWhen = Before
+ FailJobOnError = No
+ Command = "/etc/init.d/apache stop"
+}
+
+RunScript {
+ RunsWhen = After
+ RunsOnFailure = yes
+ Command = "/etc/init.d/apache start"
+}
+\end{verbatim}
+
+ {\bf Special Windows Considerations}
+
+ In addition, for a Windows client on version 1.33 and above, please take
+ note that you must ensure a correct path to your script. The script or
+ program can be a .com, .exe or a .bat file. If you just put the program
+ name in then Bacula will search using the same rules that cmd.exe uses
+ (current directory, Bacula bin directory, and PATH). It will even try the
+ different extensions in the same order as cmd.exe.
+ The command can be anything that cmd.exe or command.com will recognize
+ as an executable file.
+
+ However, if you have slashes in the program name then Bacula figures you
+ are fully specifying the name, so you must also explicitly add the three
+ character extension.
+
+ The command is run in a Win32 environment, so Unix like commands will not
+ work unless you have installed and properly configured Cygwin in addition
+ to and separately from Bacula.
+
+ The System \%Path\% will be searched for the command. (under the
+ environment variable dialog you have both System Environment and
+ User Environment, we believe that only the System environment will be
+ available to bacula-fd, if it is running as a service.) 
+ + System environment variables can be referenced with \%var\% and + used as either part of the command name or arguments. + + So if you have a script in the Bacula\\bin directory then the following lines + should work fine: + +\footnotesize +\begin{verbatim} + Client Run Before Job = systemstate +or + Client Run Before Job = systemstate.bat +or + Client Run Before Job = "systemstate" +or + Client Run Before Job = "systemstate.bat" +or + ClientRunBeforeJob = "\"C:/Program Files/Bacula/systemstate.bat\"" +\end{verbatim} +\normalsize + +The outer set of quotes is removed when the configuration file is parsed. +You need to escape the inner quotes so that they are there when the code +that parses the command line for execution runs so it can tell what the +program name is. + + +\footnotesize +\begin{verbatim} +ClientRunBeforeJob = "\"C:/Program Files/Software + Vendor/Executable\" /arg1 /arg2 \"foo bar\"" +\end{verbatim} +\normalsize + + The special characters +\begin{verbatim} +&<>()@^| +\end{verbatim} + will need to be quoted, + if they are part of a filename or argument. + + If someone is logged in, a blank "command" window running the commands + will be present during the execution of the command. + + Some Suggestions from Phil Stracchino for running on Win32 machines with + the native Win32 File daemon: + + \begin{enumerate} + \item You might want the ClientRunBeforeJob directive to specify a .bat + file which runs the actual client-side commands, rather than trying + to run (for example) regedit /e directly. + \item The batch file should explicitly 'exit 0' on successful completion. 
+ \item The path to the batch file should be specified in Unix form: + + ClientRunBeforeJob = "c:/bacula/bin/systemstate.bat" + + rather than DOS/Windows form: + + ClientRunBeforeJob = + +"c:\textbackslash{}bacula\textbackslash{}bin\textbackslash{}systemstate.bat" + INCORRECT + \end{enumerate} + +For Win32, please note that there are certain limitations: + +ClientRunBeforeJob = "C:/Program Files/Bacula/bin/pre-exec.bat" + +Lines like the above do not work because there are limitations of +cmd.exe that is used to execute the command. +Bacula prefixes the string you supply with {\bf cmd.exe /c }. To test that +your command works you should type {\bf cmd /c "C:/Program Files/test.exe"} at a +cmd prompt and see what happens. Once the command is correct insert a +backslash (\textbackslash{}) before each double quote ("), and +then put quotes around the whole thing when putting it in +the director's .conf file. You either need to have only one set of quotes +or else use the short name and don't put quotes around the command path. + +Below is the output from cmd's help as it relates to the command line +passed to the /c option. + + + If /C or /K is specified, then the remainder of the command line after + the switch is processed as a command line, where the following logic is + used to process quote (") characters: + +\begin{enumerate} +\item + If all of the following conditions are met, then quote characters + on the command line are preserved: + \begin{itemize} + \item no /S switch. + \item exactly two quote characters. + \item no special characters between the two quote characters, + where special is one of: +\begin{verbatim} +&<>()@^| +\end{verbatim} + \item there are one or more whitespace characters between the + the two quote characters. + \item the string between the two quote characters is the name + of an executable file. 
+ \end{itemize} + +\item Otherwise, old behavior is to see if the first character is + a quote character and if so, strip the leading character and + remove the last quote character on the command line, preserving + any text after the last quote character. + +\end{enumerate} + + +The following example of the use of the Client Run Before Job directive was +submitted by a user:\\ +You could write a shell script to back up a DB2 database to a FIFO. The shell +script is: + +\footnotesize +\begin{verbatim} + #!/bin/sh + # ===== backupdb.sh + DIR=/u01/mercuryd + + mkfifo $DIR/dbpipe + db2 BACKUP DATABASE mercuryd TO $DIR/dbpipe WITHOUT PROMPTING & + sleep 1 +\end{verbatim} +\normalsize + +The following line in the Job resource in the bacula-dir.conf file: +\footnotesize +\begin{verbatim} + Client Run Before Job = "su - mercuryd -c \"/u01/mercuryd/backupdb.sh '%t' +'%l'\"" +\end{verbatim} +\normalsize + +When the job is run, you will get messages from the output of the script +stating that the backup has started. Even though the command being run is +backgrounded with \&, the job will block until the "db2 BACKUP DATABASE" +command, thus the backup stalls. + +To remedy this situation, the "db2 BACKUP DATABASE" line should be changed to +the following: + +\footnotesize +\begin{verbatim} + db2 BACKUP DATABASE mercuryd TO $DIR/dbpipe WITHOUT PROMPTING > $DIR/backup.log +2>&1 < /dev/null & +\end{verbatim} +\normalsize + +It is important to redirect the input and outputs of a backgrounded command to +/dev/null to prevent the script from blocking. + +\item [Run Before Job = \lt{}command\gt{}] +\index[dir]{Run Before Job} +\index[dir]{Directive!Run Before Job} +\index[dir]{Directive!Run Before Job} +The specified {\bf command} is run as an external program prior to running the +current Job. This directive is not required, but if it is defined, and if the +exit code of the program run is non-zero, the current Bacula job will be +canceled. 
+
+\begin{verbatim}
+Run Before Job = "echo test"
+\end{verbatim}
+ it is equivalent to:
+\begin{verbatim}
+RunScript {
+ Command = "echo test"
+ RunsOnClient = No
+ RunsWhen = Before
+}
+\end{verbatim}
+
+ Lutz Kittler has pointed out that using the RunBeforeJob directive can be a
+ simple way to modify your schedules during a holiday. For example, suppose
+ that you normally do Full backups on Fridays, but Thursday and Friday are
+ holidays. To avoid having to change tapes between Thursday and Friday when
+ no one is in the office, you can create a RunBeforeJob that returns a
+ non-zero status on Thursday and zero on all other days. That way, the
+ Thursday job will not run, and on Friday the tape you inserted on Wednesday
+ before leaving will be used.
+
+\item [Run After Job = \lt{}command\gt{}]
+\index[dir]{Run After Job}
+\index[dir]{Directive!Run After Job}
+ The specified {\bf command} is run as an external program if the current
+ job terminates normally (without error or without being canceled). This
+ directive is not required. If the exit code of the program run is
+ non-zero, Bacula will print a warning message. Before submitting the
+ specified command to the operating system, Bacula performs character
+ substitution as described above for the {\bf RunScript} directive.
+
+ An example of the use of this directive is given in the
+ \ilink{Tips Chapter}{JobNotification} of this manual.
+
+ See the {\bf Run After Failed Job} directive if you
+ want to run a script after the job has terminated with any
+ non-normal status.
+
+\item [Run After Failed Job = \lt{}command\gt{}]
+\index[dir]{Run After Failed Job}
+\index[dir]{Directive!Run After Failed Job}
+ The specified {\bf command} is run as an external program after the current
+ job terminates with any error status. This directive is not required. The
+ command string must be a valid program name or name of a shell script. If
+ the exit code of the program run is non-zero, Bacula will print a
+ warning message.
Before submitting the specified command to the
+ operating system, Bacula performs character substitution as described above
+ for the {\bf RunScript} directive. Note, if you wish your script
+ to run regardless of the exit status of the Job, you can use this:
+\begin{verbatim}
+RunScript {
+ Command = "echo test"
+ RunsWhen = After
+ RunsOnFailure = yes
+ RunsOnClient = no
+ RunsOnSuccess = yes # default, you can drop this line
+}
+\end{verbatim}
+
+ An example of the use of this directive is given in the
+ \ilink{Tips Chapter}{JobNotification} of this manual.
+
+
+\item [Client Run Before Job = \lt{}command\gt{}]
+\index[dir]{Client Run Before Job}
+\index[dir]{Directive!Client Run Before Job}
+ This directive is the same as {\bf Run Before Job} except that the
+ program is run on the client machine. The same restrictions apply to
+ Unix systems as noted above for the {\bf RunScript}.
+
+\item [Client Run After Job = \lt{}command\gt{}]
+ \index[dir]{Client Run After Job}
+ \index[dir]{Directive!Client Run After Job}
+ The specified {\bf command} is run on the client machine as soon
+ as data spooling is complete in order to allow restarting applications
+ on the client as soon as possible.
+
+ Note, please see the notes above in {\bf RunScript}
+ concerning Windows clients.
+
+\item [Rerun Failed Levels = \lt{}yes|no\gt{}]
+ \index[dir]{Rerun Failed Levels}
+ \index[dir]{Directive!Rerun Failed Levels}
+ If this directive is set to {\bf yes} (default no), and Bacula detects that
+ a previous job at a higher level (i.e. Full or Differential) has failed,
+ the current job level will be upgraded to the higher level. This is
+ particularly useful for Laptops where they may often be unreachable, and if
+ a prior Full save has failed, you wish the very next backup to be a Full
+ save rather than whatever level it is started as.
+ + There are several points that must be taken into account when using this + directive: first, a failed job is defined as one that has not terminated + normally, which includes any running job of the same name (you need to + ensure that two jobs of the same name do not run simultaneously); + secondly, the {\bf Ignore FileSet Changes} directive is not considered + when checking for failed levels, which means that any FileSet change will + trigger a rerun. + +\item [Spool Data = \lt{}yes|no\gt{}] + \index[dir]{Spool Data} + \index[dir]{Directive!Spool Data} + + If this directive is set to {\bf yes} (default no), the Storage daemon will + be requested to spool the data for this Job to disk rather than write it + directly to tape. Once all the data arrives or the spool files' maximum sizes + are reached, the data will be despooled and written to tape. Spooling data + prevents tape shoe-shine (start and stop) during + Incremental saves. If you are writing to a disk file using this option + will probably just slow down the backup jobs. + + NOTE: When this directive is set to yes, Spool Attributes is also + automatically set to yes. + +\item [Spool Attributes = \lt{}yes|no\gt{}] + \index[dir]{Spool Attributes} + \index[dir]{Directive!Spool Attributes} + \index[dir]{slow} + \index[general]{slow} + \index[dir]{Backups!slow} + \index[general]{Backups!slow} + The default is set to {\bf no}, which means that the File attributes are + sent by the Storage daemon to the Director as they are stored on tape. + However, if you want to avoid the possibility that database updates will + slow down writing to the tape, you may want to set the value to {\bf + yes}, in which case the Storage daemon will buffer the File attributes + and Storage coordinates to a temporary file in the Working Directory, + then when writing the Job data to the tape is completed, the attributes + and storage coordinates will be sent to the Director. 
+
+ NOTE: When Spool Data is set to yes, Spool Attributes is also
+ automatically set to yes.
+
+\item [Where = \lt{}directory\gt{}]
+ \index[dir]{Where}
+ \index[dir]{Directive!Where}
+ This directive applies only to a Restore job and specifies a prefix to
+ the directory name of all files being restored. This permits files to
+ be restored in a different location from which they were saved. If {\bf
+ Where} is not specified or is set to a forward slash ({\bf /}), the files will
+ be restored to their original location. By default, we have set {\bf
+ Where} in the example configuration files to be {\bf
+ /tmp/bacula-restores}. This is to prevent accidental overwriting of
+ your files.
+
+\item [Add Prefix = \lt{}directory\gt{}]
+ \label{confaddprefix}
+ \index[dir]{AddPrefix}
+ \index[dir]{Directive!AddPrefix}
+ This directive applies only to a Restore job and specifies a prefix to the
+ directory name of all files being restored. This will use \ilink{File
+ Relocation}{filerelocation} feature implemented in Bacula 2.1.8 or later.
+
+\item [Add Suffix = \lt{}extension\gt{}]
+ \index[dir]{AddSuffix}
+ \index[dir]{Directive!AddSuffix}
+ This directive applies only to a Restore job and specifies a suffix to all
+ files being restored. This will use \ilink{File Relocation}{filerelocation}
+ feature implemented in Bacula 2.1.8 or later.
+
+ Using \texttt{Add Suffix=.old}, \texttt{/etc/passwd} will be restored to
+ \texttt{/etc/passwd.old}
+
+\item [Strip Prefix = \lt{}directory\gt{}]
+ \index[dir]{StripPrefix}
+ \index[dir]{Directive!StripPrefix}
+ This directive applies only to a Restore job and specifies a prefix to remove
+ from the directory name of all files being restored. This will use the
+ \ilink{File Relocation}{filerelocation} feature implemented in Bacula 2.1.8
+ or later.
+
+ Using \texttt{Strip Prefix=/etc}, \texttt{/etc/passwd} will be restored to
+ \texttt{/passwd}
+
+ Under Windows, if you want to restore \texttt{c:/files} to \texttt{d:/files},
+ you can use:
+
+\begin{verbatim}
+ Strip Prefix = c:
+ Add Prefix = d:
+\end{verbatim}
+
+\item [RegexWhere = \lt{}expressions\gt{}]
+ \index[dir]{RegexWhere}
+ \index[dir]{Directive!RegexWhere}
+ This directive applies only to a Restore job and specifies a regex filename
+ manipulation of all files being restored. This will use \ilink{File
+ Relocation}{filerelocation} feature implemented in Bacula 2.1.8 or later.
+
+ For more information about how to use this option, see
+ \ilink{this}{useregexwhere}.
+
+\item [Replace = \lt{}replace-option\gt{}]
+ \index[dir]{Replace}
+ \index[dir]{Directive!Replace}
+ This directive applies only to a Restore job and specifies what happens
+ when Bacula wants to restore a file or directory that already exists.
+ You have the following options for {\bf replace-option}:
+
+\begin{description}
+
+\item [always]
+ \index[dir]{always}
+ when the file to be restored already exists, it is deleted and then
+ replaced by the copy that was backed up.
+
+\item [ifnewer]
+\index[dir]{ifnewer}
+ if the backed up file (on tape) is newer than the existing file, the
+ existing file is deleted and replaced by the back up.
+
+\item [ifolder]
+ \index[dir]{ifolder}
+ if the backed up file (on tape) is older than the existing file, the
+ existing file is deleted and replaced by the back up.
+
+\item [never]
+ \index[dir]{never}
+ if the backed up file already exists, Bacula skips restoring this file.
+\end{description}
+
+\item [Prefix Links=\lt{}yes|no\gt{}]
+ \index[dir]{Prefix Links}
+ \index[dir]{Directive!Prefix Links}
+ If a {\bf Where} path prefix is specified for a recovery job, apply it
+ to absolute links as well. The default is {\bf No}.
When set to {\bf + Yes} then while restoring files to an alternate directory, any absolute + soft links will also be modified to point to the new alternate + directory. Normally this is what is desired -- i.e. everything is self + consistent. However, if you wish to later move the files to their + original locations, all files linked with absolute names will be broken. + +\item [Maximum Concurrent Jobs = \lt{}number\gt{}] + \index[dir]{Maximum Concurrent Jobs} + \index[dir]{Directive!Maximum Concurrent Jobs} + where \lt{}number\gt{} is the maximum number of Jobs from the current + Job resource that can run concurrently. Note, this directive limits + only Jobs with the same name as the resource in which it appears. Any + other restrictions on the maximum concurrent jobs such as in the + Director, Client, or Storage resources will also apply in addition to + the limit specified here. The default is set to 1, but you may set it + to a larger number. We strongly recommend that you read the WARNING + documented under \ilink{ Maximum Concurrent Jobs}{DirMaxConJobs} in the + Director's resource. + +\item [Reschedule On Error = \lt{}yes|no\gt{}] + \index[dir]{Reschedule On Error} + \index[dir]{Directive!Reschedule On Error} + If this directive is enabled, and the job terminates in error, the job + will be rescheduled as determined by the {\bf Reschedule Interval} and + {\bf Reschedule Times} directives. If you cancel the job, it will not + be rescheduled. The default is {\bf no} (i.e. the job will not be + rescheduled). + + This specification can be useful for portables, laptops, or other + machines that are not always connected to the network or switched on. 
+ +\item [Reschedule Interval = \lt{}time-specification\gt{}] + \index[dir]{Reschedule Interval} + \index[dir]{Directive!Reschedule Interval} + If you have specified {\bf Reschedule On Error = yes} and the job + terminates in error, it will be rescheduled after the interval of time + specified by {\bf time-specification}. See \ilink{the time + specification formats}{Time} in the Configure chapter for details of + time specifications. If no interval is specified, the job will not be + rescheduled on error. + +\item [Reschedule Times = \lt{}count\gt{}] + \index[dir]{Reschedule Times} + \index[dir]{Directive!Reschedule Times} + This directive specifies the maximum number of times to reschedule the + job. If it is set to zero (the default) the job will be rescheduled an + indefinite number of times. + +\item [Run = \lt{}job-name\gt{}] + \index[dir]{Run} + \index[dir]{Directive!Run} + \index[dir]{Clone a Job} + The Run directive (not to be confused with the Run option in a + Schedule) allows you to start other jobs or to clone jobs. By using the + cloning keywords (see below), you can backup + the same data (or almost the same data) to two or more drives + at the same time. The {\bf job-name} is normally the same name + as the current Job resource (thus creating a clone). However, it + may be any Job name, so one job may start other related jobs. + + The part after the equal sign must be enclosed in double quotes, + and can contain any string or set of options (overrides) that you + can specify when entering the Run command from the console. For + example {\bf storage=DDS-4 ...}. In addition, there are two special + keywords that permit you to clone the current job. They are {\bf level=\%l} + and {\bf since=\%s}. The \%l in the level keyword permits + entering the actual level of the current job and the \%s in the since + keyword permits putting the same time for comparison as used on the + current job. 
Note, in the case of the since keyword, the \%s must be + enclosed in double quotes, and thus they must be preceded by a backslash + since they are already inside quotes. For example: + +\begin{verbatim} + run = "Nightly-backup level=%l since=\"%s\" storage=DDS-4" +\end{verbatim} + + A cloned job will not start additional clones, so it is not + possible to recurse. + + Please note that all cloned jobs, as specified in the Run directives are + submitted for running before the original job is run (while it is being + initialized). This means that any clone job will actually start before + the original job, and may even block the original job from starting + until the original job finishes unless you allow multiple simultaneous + jobs. Even if you set a lower priority on the clone job, if no other + jobs are running, it will start before the original job. + + If you are trying to prioritize jobs by using the clone feature (Run + directive), you will find it much easier to do using a RunScript + resource, or a RunBeforeJob directive. + +\label{Priority} +\item [Priority = \lt{}number\gt{}] + \index[dir]{Priority} + \index[dir]{Directive!Priority} + This directive permits you to control the order in which your jobs will + be run by specifying a positive non-zero number. The higher the number, + the lower the job priority. Assuming you are not running concurrent jobs, + all queued jobs of priority 1 will run before queued jobs of priority 2 + and so on, regardless of the original scheduling order. + + The priority only affects waiting jobs that are queued to run, not jobs + that are already running. If one or more jobs of priority 2 are already + running, and a new job is scheduled with priority 1, the currently + running priority 2 jobs must complete before the priority 1 job is run. + + The default priority is 10. 
+
+ If you want to run concurrent jobs you should
+ keep these points in mind:
+
+\begin{itemize}
+\item See \ilink{Running Concurrent Jobs}{ConcurrentJobs} on how to setup
+ concurrent jobs.
+
+\item Bacula concurrently runs jobs of only one priority at a time. It
+ will not simultaneously run a priority 1 and a priority 2 job.
+
+\item If Bacula is running a priority 2 job and a new priority 1 job is
+ scheduled, it will wait until the running priority 2 job terminates even
+ if the Maximum Concurrent Jobs settings would otherwise allow two jobs
+ to run simultaneously.
+
+\item Suppose that bacula is running a priority 2 job and a new priority 1
+ job is scheduled and queued waiting for the running priority 2 job to
+ terminate. If you then start a second priority 2 job, the waiting
+ priority 1 job will prevent the new priority 2 job from running
+ concurrently with the running priority 2 job. That is: as long as there
+ is a higher priority job waiting to run, no new lower priority jobs will
+ start even if the Maximum Concurrent Jobs settings would normally allow
+ them to run. This ensures that higher priority jobs will be run as soon
+ as possible.
+\end{itemize}
+
+If you have several jobs of different priority, it may not be best to start
+them at exactly the same time, because Bacula must examine them one at a
+time. If by chance Bacula starts a lower priority job first, then it will run
+before your high priority jobs. If you experience this problem, you may
+avoid it by starting any higher priority jobs a few seconds before lower
+priority ones. This ensures that Bacula will examine the jobs in the
+correct order, and that your priority scheme will be respected.
+
+\label{WritePartAfterJob}
+\item [Write Part After Job = \lt{}yes|no\gt{}]
+\index[dir]{Write Part After Job}
+\index[dir]{Directive!Write Part After Job}
+ This directive is only implemented in version 1.37 and later.
+ If this directive is set to {\bf yes} (default {\bf no}), a new part file + will be created after the job is finished. + + It should be set to {\bf yes} when writing to devices that require mount + (for example DVD), so you are sure that the current part, containing + this job's data, is written to the device, and that no data is left in + the temporary file on the hard disk. However, on some media, like DVD+R + and DVD-R, a lot of space (about 10Mb) is lost every time a part is + written. So, if you run several jobs each after another, you could set + this directive to {\bf no} for all jobs, except the last one, to avoid + wasting too much space, but to ensure that the data is written to the + medium when all jobs are finished. + + This directive is ignored with tape and FIFO devices. + +\item [Heartbeat Interval = \lt{}time-interval\gt{}] + \index[dir]{Heartbeat Interval} + \index[dir]{Directive!Heartbeat} + This directive is optional and if specified will cause the Director to + set a keepalive interval (heartbeat) in seconds on each of the sockets + it opens for the Client resource. This value will override any + specified at the Director level. It is implemented only on systems + (Linux, ...) that provide the {\bf setsockopt} TCP\_KEEPIDLE function. + The default value is zero, which means no change is made to the socket. + +\end{description} + +The following is an example of a valid Job resource definition: + +\footnotesize +\begin{verbatim} +Job { + Name = "Minou" + Type = Backup + Level = Incremental # default + Client = Minou + FileSet="Minou Full Set" + Storage = DLTDrive + Pool = Default + Schedule = "MinouWeeklyCycle" + Messages = Standard +} +\end{verbatim} +\normalsize + +\section{The JobDefs Resource} +\label{JobDefsResource} +\index[general]{JobDefs Resource} +\index[general]{Resource!JobDefs} + +The JobDefs resource permits all the same directives that can appear in a Job +resource. 
However, a JobDefs resource does not create a Job, rather it can be +referenced within a Job to provide defaults for that Job. This permits you to +concisely define several nearly identical Jobs, each one referencing a JobDefs +resource which contains the defaults. Only the changes from the defaults need to +be mentioned in each Job. + +\section{The Schedule Resource} +\label{ScheduleResource} +\index[general]{Resource!Schedule} +\index[general]{Schedule Resource} + +The Schedule resource provides a means of automatically scheduling a Job as +well as the ability to override the default Level, Pool, Storage and Messages +resources. If a Schedule resource is not referenced in a Job, the Job can only +be run manually. In general, you specify an action to be taken and when. + +\begin{description} + +\item [Schedule] +\index[dir]{Schedule} +\index[dir]{Directive!Schedule} + Start of the Schedule directives. No {\bf Schedule} resource is + required, but you will need at least one if you want Jobs to be + automatically started. + +\item [Name = \lt{}name\gt{}] + \index[dir]{Name} + \index[dir]{Directive!Name} + The name of the schedule being defined. The Name directive is required. + +\item [Run = \lt{}Job-overrides\gt{} \lt{}Date-time-specification\gt{}] + \index[dir]{Run} + \index[dir]{Directive!Run} + The Run directive defines when a Job is to be run, and what overrides if + any to apply. You may specify multiple {\bf run} directives within a + {\bf Schedule} resource. If you do, they will all be applied (i.e. + multiple schedules). If you have two {\bf Run} directives that start at + the same time, two Jobs will start at the same time (well, within one + second of each other). + + The {\bf Job-overrides} permit overriding the Level, the Storage, the + Messages, and the Pool specifications provided in the Job resource. 
In + addition, the FullPool, the IncrementalPool, and the DifferentialPool + specifications permit overriding the Pool specification according to + what backup Job Level is in effect. + + By the use of overrides, you may customize a particular Job. For + example, you may specify a Messages override for your Incremental + backups that outputs messages to a log file, but for your weekly or + monthly Full backups, you may send the output by email by using a + different Messages override. + + {\bf Job-overrides} are specified as: {\bf keyword=value} where the + keyword is Level, Storage, Messages, Pool, FullPool, DifferentialPool, + or IncrementalPool, and the {\bf value} is as defined on the respective + directive formats for the Job resource. You may specify multiple {\bf + Job-overrides} on one {\bf Run} directive by separating them with one or + more spaces or by separating them with a trailing comma. For example: + +\begin{description} + +\item [Level=Full] + \index[dir]{Level} + \index[dir]{Directive!Level} + is all files in the FileSet whether or not they have changed. + +\item [Level=Incremental] + \index[dir]{Level} + \index[dir]{Directive!Level} + is all files that have changed since the last backup. + +\item [Pool=Weekly] + \index[dir]{Pool} + \index[dir]{Directive!Pool} + specifies to use the Pool named {\bf Weekly}. + +\item [Storage=DLT\_Drive] + \index[dir]{Storage} + \index[dir]{Directive!Storage} + specifies to use {\bf DLT\_Drive} for the storage device. + +\item [Messages=Verbose] + \index[dir]{Messages} + \index[dir]{Directive!Messages} + specifies to use the {\bf Verbose} message resource for the Job. + +\item [FullPool=Full] + \index[dir]{FullPool} + \index[dir]{Directive!FullPool} + specifies to use the Pool named {\bf Full} if the job is a full backup, or +is +upgraded from another type to a full backup. 
+
+\item [DifferentialPool=Differential]
+ \index[dir]{DifferentialPool}
+ \index[dir]{Directive!DifferentialPool}
+ specifies to use the Pool named {\bf Differential} if the job is a
+ differential backup.
+
+\item [IncrementalPool=Incremental]
+ \index[dir]{IncrementalPool}
+ \index[dir]{Directive!IncrementalPool}
+ specifies to use the Pool named {\bf Incremental} if the job is an
+incremental backup.
+
+\item [SpoolData=yes|no]
+ \index[dir]{SpoolData}
+ \index[dir]{Directive!SpoolData}
+ tells Bacula to request the Storage daemon to spool data to a disk file
+ before writing it to the Volume (normally a tape). Thus the data is
+ written in large blocks to the Volume rather than small blocks. This
+ directive is particularly useful when running multiple simultaneous
+ backups to tape. It prevents interleaving of the job data and reduces
+ or eliminates tape drive stop and start commonly known as "shoe-shine".
+
+\item [SpoolSize={\it bytes}]
+ \index[dir]{SpoolSize}
+ \index[dir]{Directive!SpoolSize}
+ where the bytes specify the maximum spool size for this job.
+ The default is taken from the Device Maximum Spool Size limit.
+ This directive is available only in Bacula version 2.3.5 or
+ later.
+
+\item [WritePartAfterJob=yes|no]
+ \index[dir]{WritePartAfterJob}
+ \index[dir]{Directive!WritePartAfterJob}
+ tells Bacula to request the Storage daemon to write the current part
+ file to the device when the job is finished (see \ilink{Write Part After
+ Job directive in the Job resource}{WritePartAfterJob}). Please note,
+ this directive is implemented only in version 1.37 and later. The
+ default is yes. We strongly recommend that you keep this set to yes
+ otherwise, when the last job has finished one part will remain in the
+ spool file and restore may or may not work.
+
+\end{description}
+
+{\bf Date-time-specification} determines when the Job is to be run.
The +specification is a repetition, and as a default Bacula is set to run a job at +the beginning of the hour of every hour of every day of every week of every +month of every year. This is not normally what you want, so you must specify +or limit when you want the job to run. Any specification given is assumed to +be repetitive in nature and will serve to override or limit the default +repetition. This is done by specifying masks or times for the hour, day of the +month, day of the week, week of the month, week of the year, and month when +you want the job to run. By specifying one or more of the above, you can +define a schedule to repeat at almost any frequency you want. + +Basically, you must supply a {\bf month}, {\bf day}, {\bf hour}, and {\bf +minute} the Job is to be run. Of these four items to be specified, {\bf day} +is special in that you may either specify a day of the month such as 1, 2, +... 31, or you may specify a day of the week such as Monday, Tuesday, ... +Sunday. Finally, you may also specify a week qualifier to restrict the +schedule to the first, second, third, fourth, or fifth week of the month. + +For example, if you specify only a day of the week, such as {\bf Tuesday} the +Job will be run every hour of every Tuesday of every Month. That is the {\bf +month} and {\bf hour} remain set to the defaults of every month and all +hours. + +Note, by default with no other specification, your job will run at the +beginning of every hour. If you wish your job to run more than once in any +given hour, you will need to specify multiple {\bf run} specifications each +with a different minute. + +The date/time to run the Job can be specified in the following way in +pseudo-BNF: + +\footnotesize +\begin{verbatim} + = on + = at + = 1st | 2nd | 3rd | 4th | 5th | first | + second | third | fourth | fifth + = sun | mon | tue | wed | thu | fri | sat | + sunday | monday | tuesday | wednesday | + thursday | friday | saturday + = w00 | w01 | ... 
w52 | w53 + = jan | feb | mar | apr | may | jun | jul | + aug | sep | oct | nov | dec | january | + february | ... | december + = daily + = weekly + = monthly + = hourly + = 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 0 + = | +<12hour> = 0 | 1 | 2 | ... 12 + = 0 | 1 | 2 | ... 23 + = 0 | 1 | 2 | ... 59 + = 1 | 2 | ... 31 +
'.$index unless ($index =~ /^\s*/); + $index_number++; # KEC. + if ($SHORT_INDEX) { + print "(compact version with Legend)"; + local($num) = ( $index =~ s/\ 50 ) { + s/$idx_mark/$preindex
\n$index\n<\/DL>$preindex/o; + } else { + s/$idx_mark/$preindex
\n$index\n<\/DL>/o; + } + } else { + s/$idx_mark/
\n$index\n<\/DL>/o; } + } +} +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# The bibliography and the index should be treated as separate sections +# in their own HTML files. The \bibliography{} command acts as a sectioning command +# that has the desired effect. But when the bibliography is constructed +# manually using the thebibliography environment, or when using the +# theindex environment it is not possible to use the normal sectioning +# mechanism. This subroutine inserts a \bibliography{} or a dummy +# \textohtmlindex command just before the appropriate environments +# to force sectioning. +sub add_bbl_and_idx_dummy_commands { + local($id) = $global{'max_id'}; + + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$bbl_cnt++; $1/eg; + ## if ($bbl_cnt == 1) { + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$id++; "\\bibliography$O$id$C$O$id$C $1"/geo; + #} + $global{'max_id'} = $id; + # KEC. Modified to global substitution to place multiple index tokens. + s/[\\]begin\s*($O\d+$C)\s*theindex/\\textohtmlindex$1/go; + # KEC. Modified to pick up the optional argument to \printindex + s/[\\]printindex\s*(\[.*?\])?/ + do { (defined $1) ? "\\textohtmlindex $1" : "\\textohtmlindex []"; } /ego; + &lib_add_bbl_and_idx_dummy_commands() if defined(&lib_add_bbl_and_idx_dummy_commands); +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# For each textohtmlindex mark found, determine the index titles and headers. +# We place the index ref in the header so the proper index can be generated later. +# For the default index, the index ref is blank. +# +# One problem is that this routine is called twice.. Once for processing the +# command as originally seen, and once for processing the command when +# doing the name for the index file. We can detect that by looking at the +# id numbers (or ref) surrounding the \theindex command, and not incrementing +# index_number unless a new id (or ref) is seen. 
This has the side effect of +# having to unconventionally start the index_number at -1. But it works. +# +# Gets the title from the list of indices. +# If this is the first index, save the title in $first_idx_file. This is what's referenced +# in the navigation buttons. +# Increment the index_number for next time. +# If the indexname command is defined or a newcommand defined for indexname, do it. +# Save the index TITLE in the toc +# Save the first_idx_file into the idxfile. This goes into the nav buttons. +# Build index_labels if needed. +# Create the index headings and put them in the output stream. + +{ my $index_number = 0; # Will be incremented before use. + my $first_idx_file; # Static + my $no_increment = 0; + +sub do_cmd_textohtmlindex { + local($_) = @_; + my ($idxref,$idxnum,$index_name); + + # We get called from make_name with the first argument = "\001noincrement". This is a sign + # to not increment $index_number the next time we are called. We get called twice, once + # my make_name and once by process_command. Unfortunately, make_name calls us just to set the name + # but doesn't use the result so we get called a second time by process_command. This works fine + # except for cases where there are multiple indices except if they aren't named, which is the case + # when the index is inserted by an include command in latex. In these cases we are only able to use + # the index number to decide which index to draw from, and we don't know how to increment that index + # number if we get called a variable number of times for the same index, as is the case between + # making html (one output file) and web (multiple output files) output formats. + if (/\001noincrement/) { + $no_increment = 1; + return; + } + + # Remove (but save) the index reference + s/^\s*\[(.*?)\]/{$idxref = $1; "";}/e; + + # If we have an $idxref, the index name was specified. In this case, we have all the + # information we need to carry on. 
Otherwise, we need to get the idxref + # from the $index_number and set the name to "Index". + if ($idxref) { + $index_name = $indices{'title'}{$idxref}; + } else { + if (defined ($idxref = $indices{'newcmdorder'}->[$index_number])) { + $index_name = $indices{'title'}{$idxref}; + } else { + $idxref = ''; + $index_name = "Index"; + } + } + + $idx_title = "Index"; # The name displayed in the nav bar text. + + # Only set $idxfile if we are at the first index. This will point the + # navigation panel to the first index file rather than the last. + $first_idx_file = $CURRENT_FILE if ($index_number == 0); + $idxfile = $first_idx_file; # Pointer for the Index button in the nav bar. + $toc_sec_title = $index_name; # Index link text in the toc. + $TITLE = $toc_sec_title; # Title for this index, from which its filename is built. + if (%index_labels) { &make_index_labels(); } + if (($SHORT_INDEX) && (%index_segment)) { &make_preindex(); } + else { $preindex = ''; } + local $idx_head = $section_headings{'textohtmlindex'}; + local($heading) = join('' + , &make_section_heading($TITLE, $idx_head) + , $idx_mark, "\002", $idxref, "\002" ); + local($pre,$post) = &minimize_open_tags($heading); + $index_number++ unless ($no_increment); + $no_increment = 0; + join('',"
\n" , $pre, $_); +} +} + +# Returns an index key, given the key passed as the first argument. +# Not modified for multiple indices. +sub add_idx_key { + local($key) = @_; + local($index, $next); + if (($index{$key} eq '@' )&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { $index .= "

\n
".&print_key."\n
"; } + else { $index .= "

\n
".&print_key."\n
"; } + } elsif (($index{$key})&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { + $next = "
".&print_key."\n : ". &print_idx_links; + } else { + $next = "
".&print_key."\n
". &print_idx_links; + } + $index .= $next."\n"; + $index_printed{$key} = 1; + } + + if ($sub_index{$key}) { + local($subkey, @subkeys, $subnext, $subindex); + @subkeys = sort(split("\004", $sub_index{$key})); + if ($SHORT_INDEX) { + $index .= "
".&print_key unless $index_printed{$key}; + $index .= "
\n"; + } else { + $index .= "
".&print_key."\n
" unless $index_printed{$key}; + $index .= "
\n"; + } + foreach $subkey (@subkeys) { + $index .= &add_sub_idx_key($subkey) unless ($index_printed{$subkey}); + } + $index .= "
\n"; + } + return $index; +} + +1; # Must be present as the last line. diff --git a/docs/manuals/en/install/install.css b/docs/manuals/en/install/install.css new file mode 100644 index 00000000..d1824aff --- /dev/null +++ b/docs/manuals/en/install/install.css @@ -0,0 +1,30 @@ +/* Century Schoolbook font is very similar to Computer Modern Math: cmmi */ +.MATH { font-family: "Century Schoolbook", serif; } +.MATH I { font-family: "Century Schoolbook", serif; font-style: italic } +.BOLDMATH { font-family: "Century Schoolbook", serif; font-weight: bold } + +/* implement both fixed-size and relative sizes */ +SMALL.XTINY { font-size : xx-small } +SMALL.TINY { font-size : x-small } +SMALL.SCRIPTSIZE { font-size : smaller } +SMALL.FOOTNOTESIZE { font-size : small } +SMALL.SMALL { } +BIG.LARGE { } +BIG.XLARGE { font-size : large } +BIG.XXLARGE { font-size : x-large } +BIG.HUGE { font-size : larger } +BIG.XHUGE { font-size : xx-large } + +/* heading styles */ +H1 { } +H2 { } +H3 { } +H4 { } +H5 { } + +/* mathematics styles */ +DIV.displaymath { } /* math displays */ +TD.eqno { } /* equation-number cells */ + + +/* document-specific styles come next */ diff --git a/docs/manuals/en/install/install.tex b/docs/manuals/en/install/install.tex new file mode 100644 index 00000000..3b325fe3 --- /dev/null +++ b/docs/manuals/en/install/install.tex @@ -0,0 +1,95 @@ +%% +%% +%% The following characters must be preceded by a backslash +%% to be entered as printable characters: +%% +%% # $ % & ~ _ ^ \ { } +%% + +\documentclass[11pt,a4paper]{book} +\usepackage{html} +\usepackage{float} +\usepackage{graphicx} +\usepackage{bacula} +\usepackage{longtable} +\usepackage{makeidx} +\usepackage{index} +\usepackage{setspace} +\usepackage{hyperref} +\usepackage{url} + + +\makeindex +\newindex{dir}{ddx}{dnd}{Director Index} +\newindex{fd}{fdx}{fnd}{File Daemon Index} +\newindex{sd}{sdx}{snd}{Storage Daemon Index} +\newindex{console}{cdx}{cnd}{Console Index} +\newindex{general}{idx}{ind}{General Index} 
+ +\sloppy + +\begin{document} +\sloppy + +\newfont{\bighead}{cmr17 at 36pt} +\parskip 10pt +\parindent 0pt + +\title{\includegraphics{./bacula-logo.eps} \\ \bigskip + \Huge{Bacula Installation and Configuration Guide} + \begin{center} + \large{It comes in the night and sucks + the essence from your computers. } + \end{center} +} + + +\author{Kern Sibbald} +\date{\vspace{1.0in}\today \\ + This manual documents Bacula version \input{version} \\ + \vspace{0.2in} + Copyright \copyright 1999-2007, Free Software Foundation Europe + e.V. \\ + \vspace{0.2in} + Permission is granted to copy, distribute and/or modify this document under the terms of the + GNU Free Documentation License, Version 1.2 published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU Free Documentation License". +} + +\maketitle + +\clearpage +\tableofcontents +\clearpage +\listoffigures +\clearpage +\listoftables +\clearpage + +\include{quickstart} +\include{installation} +\include{critical} +\include{configure} +\include{dirdconf} +\include{filedconf} +\include{storedconf} +\include{messagesres} +\include{consoleconf} +\include{monitorconf} +\include{security} +\include{fdl} + + +% The following line tells link_resolver.pl to not include these files: +% nolinks developersi baculai-dir baculai-fd baculai-sd baculai-console baculai-main + +% pull in the index +\clearpage +\printindex[general] +\printindex[dir] +\printindex[fd] +\printindex[sd] +\printindex[console] + +\end{document} diff --git a/docs/manuals/en/install/installation.tex b/docs/manuals/en/install/installation.tex new file mode 100644 index 00000000..953fa440 --- /dev/null +++ b/docs/manuals/en/install/installation.tex @@ -0,0 +1,1705 @@ +%% +%% + +\chapter{Installing Bacula} +\label{InstallChapter} +\index[general]{Bacula!Installing} +\index[general]{Installing Bacula} + +In general, you will need the Bacula 
source release, and if you want to run +a Windows client, you will need the Bacula Windows binary release. +However, Bacula needs certain third party packages (such as {\bf MySQL}, +{\bf PostgreSQL}, or {\bf SQLite} to build and run +properly depending on the +options you specify. Normally, {\bf MySQL} and {\bf PostgreSQL} are +packages that can be installed on your distribution. However, if you do +not have them, to simplify your task, we have combined a number of these +packages into three {\bf depkgs} releases (Dependency Packages). This can +vastly simplify your life by providing you with all the necessary packages +rather than requiring you to find them on the Web, load them, and install +them. + +\section{Source Release Files} +\index[general]{Source Files} +\index[general]{Release Files} + Beginning with Bacula 1.38.0, the source code has been broken into + four separate tar files each corresponding to a different module in + the Bacula SVN. The released files are: + +\begin{description} +\item [bacula-2.0.3.tar.gz] + This is the primary source code release for Bacula. On each + release the version number (2.0.3) will be updated. + +\item [bacula-docs-2.0.3.tar.gz] + This file contains a copy of the docs directory with the + documents prebuild. English HTML directory, single HTML + file, and pdf file. The French and German translations + are in progress, but are not built. + +\item [bacula-gui-2.0.3.tar.gz] + This file contains the non-core GUI programs. Currently, + it contains bacula-web, a PHP program for producing management + viewing of your Bacula job status in a browser; and bimagemgr + a browser program for burning CDROM images with Bacula Volumes. + +\item [bacula-rescue-2.0.0.tar.gz] + This is the Bacula Rescue CDROM code. Note, the version number + of this package is not tied to the Bacula release version, so + it will be different. 
Using this code, you can burn a CDROM
+ with your system configuration and containing a statically
+ linked version of the File daemon. This can permit you to easily
+ repartition and reformat your hard disks and reload your
+ system with Bacula in the case of a hard disk failure.
+
+ Note, this package evolves slower than the Bacula source code,
+ so there may not always be a new release of the rescue package when
+ making minor updates to the Bacula code. For example, when releasing
+ Bacula version 2.0.3, the rescue package may still be at version
+ 2.0.0 if there were no updates.
+
+\item [winbacula-2.0.3.exe]
+ This file is the 32 bit Windows installer for installing
+ the Windows client (File daemon) on a Windows machine.
+ This client will also run on 64 bit Windows machines.
+ Beginning with Bacula version 1.39.20, this executable will
+ also optionally load the Win32 Director and the Win32
+ Storage daemon.
+
+\end{description}
+
+\label{upgrading1}
+\section{Upgrading Bacula}
+\index[general]{Bacula!Upgrading}
+\index[general]{Upgrading Bacula}
+\index[general]{Upgrading}
+
+If you are upgrading from one Bacula version to another, you should first
+carefully read the ReleaseNotes of all major versions between your current
+version and the version to which you are upgrading. If the Bacula catalog
+database has been upgraded (as it is almost every major release), you will
+either need to reinitialize your database starting from scratch (not
+normally a good idea), or save an ASCII copy of your database, then proceed
+to upgrade it. If you are upgrading two major versions (e.g. 1.36 to 2.0)
+then life will be more complicated because you must do two database
+upgrades. See below for more on this.
+
+Upgrading the catalog is normally done after Bacula is built and installed
+by:
+
+\begin{verbatim}
+cd (default /etc/bacula)
+./update_bacula_tables
+\end{verbatim}
+
+This update script can also be found in the Bacula source src/cats
+directory.
+ +If there are several database upgrades between your version and the +version to which you are upgrading, you will need to apply each database +upgrade script. For your convenience, you can find all the old upgrade scripts +in the {\bf upgradedb} directory of the source code. You will need to edit the +scripts to correspond to your system configuration. The final upgrade script, +if any, can be applied as noted above. + +If you are upgrading from one major version to another, you will need to +replace all your components at the same time as generally the inter-daemon +protocol will change. However, within any particular release (e.g. version +1.32.x) unless there is an oversight or bug, the daemon protocol will not +change. If this is confusing, simply read the ReleaseNotes very carefully as +they will note if all daemons must be upgraded at the same time. + +Finally, please note that in general it is not necessary to do a +{\bf make uninstall} before doing an upgrade providing you are careful +not to change the installation directories. In fact, if you do so, you will +most likely delete all your conf files, which could be disastrous. +The normal procedure during an upgrade is simply: + +\begin{verbatim} +./configure (your options) +make +make install +\end{verbatim} + +In general none of your existing .conf or .sql files will be overwritten, +and you must do both the {\bf make} and {\bf make install} commands, a +{\bf make install} without the preceding {\bf make} will not work. + +For additional information on upgrading, please see the \ilink{Upgrading Bacula +Versions}{upgrading} in the Tips chapter of this manual. + +\section{Releases Numbering} +\index[general]{Release Numbering} +\index[general]{Version Numbering} +Every Bacula release whether beta or production has a different number +as well as the date of the release build. The numbering system follows +traditional Open Source conventions in that it is of the form. 
+
+\begin{verbatim}
+major.minor.release
+\end{verbatim}
+
+For example:
+\begin{verbatim}
+1.38.11
+\end{verbatim}
+
+where each component (major, minor, patch) is a number.
+The major number is currently 1 and normally does not change
+very frequently. The minor number starts at 0 and increases
+for each production release by 2 (i.e. it is always an
+even number for a production release), and the patch number
+starts at zero each time the minor number changes. The patch
+number is increased each time a bug fix (or fixes) is released
+to production.
+
+So, as of this date (10 September 2006), the current production Bacula
+release is version 1.38.11. If there are bug fixes, the next release
+will be 1.38.12 (i.e. the patch number has increased by one).
+
+For all patch releases where the minor version number does not change,
+the database and all the daemons will be compatible. That means that
+you can safely run a 1.38.0 Director with a 1.38.11 Client. Of course,
+in this case, the Director may have bugs that are not fixed. Generally,
+within a minor release (some minor releases are not so minor), all
+patch numbers are officially released to production. This means that while
+the current Bacula version is 1.38.11, versions 1.38.0, 1.38.1, ... 1.38.10
+have all been previously released.
+
+When the minor number is odd, it indicates that the package is under
+development and thus may not be stable. For example, while the current
+production release of Bacula is currently 1.38.11, the current development
+version is 1.39.22. All patch versions of the development code are
+available in the SVN (source repository). However, not all patch versions
+of the development code (odd minor version) are officially released. When
+they are released, they are released as beta versions (see below for a
+definition of what beta means for Bacula releases).
+
+In general when the minor number increases from one production release
+to the next (i.e.
1.38.x to 1.40.0), the catalog database must be upgraded, +the Director and Storage daemon must always be on the same minor release +number, and often (not always), the Clients must also be on the same minor +release. As often as possible, we attempt to make new releases that are +downwards compatible with prior clients, but this is not always possible. +You must check the release notes. In general, you will have fewer problems +if you always run all the components on the same minor version number (i.e. +all either 1.38.x or 1.40.x but not mixed). + + +\label{BetaReleases} +\section*{Beta Releases} +\index[general]{Beta Releases} +Towards the end of the development cycle, which typically runs +one year from a major release to another, there will be several beta +releases of the development code prior to a production release. +As noted above, beta versions always have odd minor version numbers +(e.g 1.37.x or 1.39.x). +The purpose of the beta releases is to allow early adopter users to test +the new code. Beta releases are made with the following considerations: + +\begin{itemize} +\item The code passes the regression testing on FreeBSD, Linux, and Solaris + machines. + +\item There are no known major bugs, or on the rare occasion that + there are, they will be documented or already in the bugs database. + +\item Some of the new code/features may not yet be tested. + +\item Bugs are expected to be found, especially in the new + code before the final production release. + +\item The code will have been run in production in at least one small + site (mine). + +\item The Win32 client will have been run in production at least + one night at that small site. + +\item The documentation in the manual is unlikely to be complete especially + for the new features, and the Release Notes may not be fully + organized. + +\item Beta code is not generally recommended for everyone, but + rather for early adopters. 
+\end{itemize} + + +\label{Dependency} +\section{Dependency Packages} +\index[general]{Dependency Packages} +\index[general]{Packages!Dependency} + +As discussed above, we have combined a number of third party packages that +Bacula might need into the {\bf depkgs} release. You can, +of course, get the latest packages from the original authors or +from your operating system supplier. The locations of +where we obtained the packages are in the README file in each package. +However, be aware that the packages in the depkgs files have been tested by us +for compatibility with Bacula. + +Typically, a dependency package will be named {\bf depkgs-ddMMMyy.tar.gz} +where {\bf dd} is the day we release it, {\bf MMM} +is the abbreviated month (e.g. Jan), and {\bf yy} is the year. An actual +example is: {\bf depkgs-07Apr02.tar.gz}. To install and build this package (if +needed), you do the following: + +\begin{enumerate} +\item Create a {\bf bacula} directory, into which you will place both the + Bacula source as well as the dependency package. +\item Detar the {\bf depkgs} into the {\bf bacula} directory. +\item cd bacula/depkgs +\item make +\end{enumerate} + +Although the exact composition of the dependency packages may change from time +to time, the current makeup is the following: + +\addcontentsline{lot}{table}{Dependency Packages} +\begin{longtable}{|l|l|l|} + \hline +\multicolumn{1}{|c| }{\bf 3rd Party Package} & \multicolumn{1}{c| }{\bf depkgs} + & \multicolumn{1}{c| }{\bf depkgs-qt} \\ + \hline {SQLite } & \multicolumn{1}{c| }{X } & \multicolumn{1}{c| }{ }\\ + \hline {SQLite3 } & \multicolumn{1}{c| }{X } & \multicolumn{1}{c| }{ }\\ + \hline {mtx } & \multicolumn{1}{c| }{X } & \multicolumn{1}{c| }{ } \\ + \hline {qt4 } & \multicolumn{1}{c| }{ } & \multicolumn{1}{c| }{X } \\ + \hline {qwt } & \multicolumn{1}{c| }{ } & \multicolumn{1}{c| }{X } \\ + \hline +\end{longtable} + +Note, some of these packages are quite large, so that building them can be a +bit time consuming. 
The above instructions will build all the packages
+contained in the directory. However, when building Bacula, it will take only
+those pieces that it actually needs.
+
+Alternatively, you can make just the packages that are needed. For example,
+
+\footnotesize
+\begin{verbatim}
+cd bacula/depkgs
+make sqlite
+\end{verbatim}
+\normalsize
+
+will configure and build only the SQLite package.
+
+You should build the packages that you will require in {\bf depkgs}
+prior to configuring and building Bacula, since Bacula will need
+them during the build process.
+
+For more information on the {\bf depkgs-qt} package, please read the
+INSTALL file in the main directory of that package. If you are going to
+build Qt4 using {\bf depkgs-qt}, you must source the {\bf qt4-paths} file
+included in the package prior to building Bacula. Please read the INSTALL
+file for more details.
+
+Even if you do not use SQLite, you might find it worthwhile to build {\bf mtx}
+because the {\bf tapeinfo} program that comes with it can often provide you
+with valuable information about your SCSI tape drive (e.g. compression,
+min/max block sizes, ...). Note, most distros provide {\bf mtx} as part of
+their release.
+
+The {\bf depkgs1} package is deprecated and previously contained
+readline, which should be available on all operating systems.
+
+The {\bf depkgs-win32} package is deprecated and no longer used in
+Bacula version 1.39.x and later. It was previously used to build
+the native Win32 client program, but this program is now built on Linux
+systems using cross-compiling. All the tools and third party libraries
+are automatically downloaded by executing the appropriate scripts. See
+src/win32/README.mingw32 for more details.
+ +\section{Supported Operating Systems} +\label{Systems} +\index[general]{Systems!Supported Operating} +\index[general]{Supported Operating Systems} + +Please see the +\ilink{ Supported Operating Systems}{SupportedOSes} section +of the QuickStart chapter of this manual. + +\section{Building Bacula from Source} +\label{Building} +\index[general]{Source!Building Bacula from} +\index[general]{Building Bacula from Source} + +The basic installation is rather simple. + +\begin{enumerate} +\item Install and build any {\bf depkgs} as noted above. This + should be unnecessary on most modern Operating Systems. + +\item Configure and install MySQL or PostgreSQL (if desired). + \ilink{Installing and Configuring MySQL Phase I}{MySqlChapter} or + \ilink{Installing and Configuring PostgreSQL Phase + I}{PostgreSqlChapter}. If you are installing from rpms, and are + using MySQL, please be sure to install {\bf mysql-devel}, so that the MySQL + header files are available while compiling Bacula. In addition, the MySQL + client library {\bf mysqlclient} requires the gzip compression library {\bf + libz.a} or {\bf libz.so}. If you are using rpm packages, these libraries are + in the {\bf libz-devel} package. On Debian systems, you will need to load the + {\bf zlib1g-dev} package. If you are not using rpms or debs, you will need to + find the appropriate package for your system. + + Note, if you already have a running MySQL or PostgreSQL on your system, you + can skip this phase provided that you have built the thread safe libraries. + And you have already installed the additional rpms noted above. + + SQLite is not supported on Solaris. This is because it + frequently fails with bus errors. However SQLite3 may work. + +\item Detar the Bacula source code preferably into the {\bf bacula} directory + discussed above. + +\item {\bf cd} to the directory containing the source code. + +\item ./configure (with appropriate options as described below). 
Any + path names you specify as options on the ./configure command line + must be absolute paths and not relative. + +\item Check the output of ./configure very carefully, especially the Install + binaries and Install config directories. If they are not correct, + please rerun ./configure until they are. The output from ./configure is + stored in {\bf config.out} and can be re-displayed at any time without + rerunning the ./configure by doing {\bf cat config.out}. + +\item If after running ./configure once, you decide to change options and + re-run it, that is perfectly fine, but before re-running it, you should run: + +\footnotesize +\begin{verbatim} + make distclean +\end{verbatim} +\normalsize + +so that you are sure to start from scratch and not have a mixture of the two +options. This is because ./configure caches much of the information. The {\bf +make distclean} is also critical if you move the source directory from one +machine to another. If the {\bf make distclean} fails, just ignore it and +continue on. + +\item make + If you get errors while linking in the Storage daemon directory + (src/stored), it is probably because you have not loaded the static + libraries on your system. I noticed this problem on a Solaris system. + To correct it, make sure that you have not added {\bf + {-}{-}enable-static-tools} to the {\bf ./configure} command. + + If you skip this step ({\bf make}) and proceed immediately to the {\bf + make install} you are making two serious errors: 1. your install will + fail because Bacula requires a {\bf make} before a {\bf make install}. + 2. you are depriving yourself of the chance to make sure there are no + errors before beginning to write files to your system directories. + + +\item make install + Please be sure you have done a {\bf make} before entering this command, + and that everything has properly compiled and linked without errors. 
+ + +\item If you are new to Bacula, we {\bf strongly} recommend that you skip + the next step and use the default configuration files, then run the + example program in the next chapter, then come back and modify your + configuration files to suit your particular needs. + +\item Customize the configuration files for each of the three daemons + (Directory, File, Storage) and for the Console program. For the details + of how to do this, please see \ilink{Setting Up Bacula Configuration + Files}{ConfigureChapter} in the Configuration chapter of this manual. We + recommend that you start by modifying the default configuration files + supplied, making the minimum changes necessary. Complete customization + can be done after you have Bacula up and running. Please take care when + modifying passwords, which were randomly generated, and the {\bf Name}s + as the passwords and names must agree between the configuration files + for security reasons. + +\label{CreateDatabase} +\item Create the Bacula MySQL database and tables + (if using MySQL) + \ilink{Installing and Configuring MySQL Phase II}{mysql_phase2} or + create the Bacula PostgreSQL database and tables + \ilink{Configuring PostgreSQL + II}{PostgreSQL_configure} or alternatively if you are using + SQLite \ilink{Installing and Configuring SQLite Phase II}{phase2}. + +\item Start Bacula ({\bf ./bacula start}) Note. the next chapter shows you + how to do this in detail. + +\item Interface with Bacula using the Console program + +\item For the previous two items, please follow the instructions in the + \ilink{Running Bacula}{TutorialChapter} chapter of this manual, + where you will run a simple backup and do a restore. Do this before you make + heavy modifications to the configuration files so that you are sure that + Bacula works and are familiar with it. After that changing the conf files + will be easier. 
+ +\item If after installing Bacula, you decide to "move it", that is to + install it in a different set of directories, proceed as follows: + +\footnotesize +\begin{verbatim} + make uninstall + make distclean + ./configure (your-new-options) + make + make install + +\end{verbatim} +\normalsize + +\end{enumerate} + +If all goes well, the {\bf ./configure} will correctly determine which +operating system you are running and configure the source code appropriately. +Currently, FreeBSD, Linux (Red Hat), and Solaris are supported. The Bacula +client (File daemon) is reported to work with MacOS X 10.3 is if +readline support is not enabled (default) when building the client. + +If you install Bacula on more than one system, and they are identical, you can +simply transfer the source tree to that other system and do a "make +install". However, if there are differences in the libraries or OS versions, +or you wish to install on a different OS, you should start from the original +compress tar file. If you do transfer the source tree, and you have previously +done a ./configure command, you MUST do: + +\footnotesize +\begin{verbatim} +make distclean +\end{verbatim} +\normalsize + +prior to doing your new ./configure. This is because the GNU autoconf tools +cache the configuration, and if you re-use a configuration for a Linux machine +on a Solaris, you can be sure your build will fail. To avoid this, as +mentioned above, either start from the tar file, or do a "make distclean". + +In general, you will probably want to supply a more complicated {\bf +configure} statement to ensure that the modules you want are built and that +everything is placed into the correct directories. 
+ +For example, on Fedora, Red Hat, or SuSE one could use the following: + +\footnotesize +\begin{verbatim} +CFLAGS="-g -Wall" \ + ./configure \ + --sbindir=$HOME/bacula/bin \ + --sysconfdir=$HOME/bacula/bin \ + --with-pid-dir=$HOME/bacula/bin/working \ + --with-subsys-dir=$HOME/bacula/bin/working \ + --with-mysql \ + --with-working-dir=$HOME/bacula/bin/working \ + --with-dump-email=$USER +\end{verbatim} +\normalsize + +Note, the advantage of using the above configuration to start is that +everything will be put into a single directory, which you can later delete +once you have run the examples in the next chapter and learned how Bacula +works. In addition, the above can be installed and run as non-root. + +For the developer's convenience, I have added a {\bf defaultconfig} script to +the {\bf examples} directory. This script contains the statements that you +would normally use, and each developer/user may modify them to suit his needs. +You should find additional useful examples in this directory as well. + +The {\bf \verb:--:enable-conio} or {\bf \verb:--:enable-readline} options are useful because +they provide a command line history and editing capability for the Console +program. If you have included either option in the build, either the {\bf +termcap} or the {\bf ncurses} package will be needed to link. On most +systems, including Red Hat and SuSE, you should include the ncurses package. +If Bacula's configure process finds the ncurses libraries, it will use +those rather than the termcap library. +On some systems, such as SuSE, the termcap library is not in the standard +library directory. As a consequence, the option may be disabled or you may +get an error message such as: + +\footnotesize +\begin{verbatim} +/usr/lib/gcc-lib/i586-suse-linux/3.3.1/.../ld: +cannot find -ltermcap +collect2: ld returned 1 exit status +\end{verbatim} +\normalsize + +while building the Bacula Console. 
In that case, you will need to set the {\bf
+LDFLAGS} environment variable prior to building.
+
+\footnotesize
+\begin{verbatim}
+export LDFLAGS="-L/usr/lib/termcap"
+\end{verbatim}
+\normalsize
+
+The same library requirements apply if you wish to use the readline
+subroutines for command line editing and history or
+ if you are using a MySQL library that requires encryption. If you need encryption,
+you can either export the appropriate additional library options as shown
+above or, alternatively, you can include them directly on the ./configure line
+as in:
+
+\footnotesize
+\begin{verbatim}
+LDFLAGS="-lssl -lcrypto" \
+ ./configure
+\end{verbatim}
+\normalsize
+
+On some systems such as Mandriva, readline tends to
+gobble up prompts, which makes it totally useless. If this happens to you, use
+the disable option, or if you are using version 1.33 and above try using {\bf
+\verb:--:enable-conio} to use a built-in readline replacement. You will still need
+either the termcap or the ncurses library, but it is unlikely that the {\bf conio}
+package will gobble up prompts.
+
+readline is no longer supported after version 1.34. The code within Bacula
+remains, so it should be usable, and if users submit patches for it, we will
+be happy to apply them. However, due to the fact that each version of
+readline seems to be incompatible with previous versions, and that there
+are significant differences between systems, we can no longer afford to
+support it.
+
+\section{What Database to Use?}
+\label{DB}
+\index[general]{What Database to Use?}
+\index[general]{Use!What Database to}
+
+Before building Bacula you need to decide if you want to use SQLite, MySQL, or
+PostgreSQL. If you are not already running MySQL or PostgreSQL, you might
+want to start by testing with SQLite (not supported on Solaris).
+This will greatly simplify the setup for you
+because SQLite is compiled into Bacula and requires no administration.
It +performs well and is suitable for small to medium sized installations (maximum +10-20 machines). However, we should note that a number of users have +had unexplained database corruption with SQLite. For that reason, we +recommend that you install either MySQL or PostgreSQL for production +work. + +If you wish to use MySQL as the Bacula catalog, please see the +\ilink{Installing and Configuring MySQL}{MySqlChapter} chapter of +this manual. You will need to install MySQL prior to continuing with the +configuration of Bacula. MySQL is a high quality database that is very +efficient and is suitable for any sized installation. It is slightly more +complicated than SQLite to setup and administer because it has a number of +sophisticated features such as userids and passwords. It runs as a separate +process, is truly professional and can manage a database of any size. + +If you wish to use PostgreSQL as the Bacula catalog, please see the +\ilink{Installing and Configuring PostgreSQL}{PostgreSqlChapter} +chapter of this manual. You will need to install PostgreSQL prior to +continuing with the configuration of Bacula. PostgreSQL is very similar to +MySQL, though it tends to be slightly more SQL92 compliant and has many more +advanced features such as transactions, stored procedures, and the such. It +requires a certain knowledge to install and maintain. + +If you wish to use SQLite as the Bacula catalog, please see +\ilink{Installing and Configuring SQLite}{SqlLiteChapter} chapter of +this manual. SQLite is not supported on Solaris. + +\section{Quick Start} +\index[general]{Quick Start} +\index[general]{Start!Quick} + +There are a number of options and important considerations given below +that you can skip for the moment if you have not had any problems building +Bacula with a simplified configuration as shown above. + +If the ./configure process is unable to find specific libraries (e.g. 
+libintl), you should ensure that the appropriate package is installed on
+your system. Alternatively, if the package is installed in a non-standard
+location (as far as Bacula is concerned), then there is generally an
+option listed below (or listed with "./configure {-}{-}help") that will
+permit you to specify the directory that should be searched. In other
+cases, there are options that will permit you to disable the feature
+(e.g. {-}{-}disable-nls).
+
+If you want to dive right into it, we recommend you skip to the next chapter,
+and run the example program. It will teach you a lot about Bacula and as an
+example can be installed into a single directory (for easy removal) and run as
+non-root. If you have any problems or when you want to do a real installation,
+come back to this chapter and read the details presented below.
+
+\section{Configure Options}
+\label{Options}
+\index[general]{Options!Configure}
+\index[general]{Configure Options}
+
+The following command line options are available for {\bf configure} to
+customize your installation.
+
+\begin{description}
+\item [ {-}{-}sbindir=\lt{}binary-path\gt{}]
+ \index[general]{{-}{-}sbindir}
+ Defines where the Bacula binary (executable) files will be placed during a
+ {\bf make install} command.
+
+\item [ {-}{-}sysconfdir=\lt{}config-path\gt{}]
+ \index[general]{{-}{-}sysconfdir}
+ Defines where the Bacula configuration files should be placed during a
+ {\bf make install} command.
+
+\item [ {-}{-}mandir=\lt{}path\gt{}]
+ \index[general]{{-}{-}mandir}
+ Note, as of Bacula version 1.39.14, the meaning of any path
+ specified on this option is changed from prior versions. It
+ now specifies the top level man directory.
+ Previously the mandir specified the full path to where you
+ wanted the man files installed.
+ The man files will be installed in gzip'ed format under
+ mandir/man1 and mandir/man8 as appropriate.
+ For the install to succeed you must have {\bf gzip} installed
+ on your system.
+ + By default, Bacula will install the Unix man pages in + /usr/share/man/man1 and /usr/share/man/man8. + If you wish the man page to be installed in + a different location, use this option to specify the path. + Note, the main HTML and PDF Bacula documents are in a separate + tar file that is not part of the source distribution. + +\item [ {-}{-}datadir=\lt{}path\gt{} ] + \index[general]{{-}{-}datadir} + If you translate Bacula or parts of Bacula into a different language + you may specify the location of the po files using the {\bf + {-}{-}datadir} option. You must manually install any po files as + Bacula does not (yet) automatically do so. + +\item [ {-}{-}disable-ipv6 ] + \index[general]{{-}{-}disable-ipv6} + +\item [ {-}{-}enable-smartalloc ] + \index[general]{{-}{-}enable-smartalloc} + This enables the inclusion of the Smartalloc orphaned buffer detection + code. This option is highly recommended. Because we never build + without this option, you may experience problems if it is not enabled. + In this case, simply re-enable the option. We strongly recommend + keeping this option enabled as it helps detect memory leaks. This + configuration parameter is used while building Bacula + +\item [ {-}{-}enable-bat ] + \label{enablebat} + \index[general]{{-}{-}enable-bat} + If you have Qt4 >= 4.2 installed on your computer including the + libqt4 and libqt4-devel (libqt4-dev on Debian) libraries, and you want + to use the Bacula Administration Tool (bat) GUI Console interface to + Bacula, you must specify this option. Doing so will build everything in + the {\bf src/qt-console} directory. The build with enable-bat will work + only with a full Bacula build (i.e. it will not work with a client-only + build). In addition to the Qt4 libraries, linking bat requires + the qwt package installed on your system. Please see the next + configure option (with-qwt) for how to build the qwt package. + + Qt4 is available on OpenSUSE 10.2, CentOS 5, Fedora, and Debian. 
If it + is not available on your system, you can download the {\bf depkgs-qt} + package from the Bacula Source Forge download area and build it and + the qwt package, both of which are needed to build bat. See the + INSTALL file in that package for more details. In particular, to use + the Qt4 built by {\bf depkgs-qt} you {\bf must} source the file + {\bf qt4-paths}. + +\item [ {-}{-}with-qwt=\lt{}path\gt{} ] + \index[general]{{-}{-}with-qwt} + To build bat, you need the qwt graphics package installed on + your system. The path specified must be an absolute path and + not relative. + + The qwt package is available for download from + the qwt project on Source Forge. If you wish, you may build and + install it on your system (by default in /usr/lib). + If you have done so, you would specify: + +\begin{verbatim} + --with-qwt=/usr/lib/qwt-5.0.2 +\end{verbatim} + + Alternatively, you can download the Bacula depkgs package (currently + version 11Jul07) and build it, then assuming that you have put it + into a directory named bacula, you would specify: + +\begin{verbatim} + --with-qwt=$HOME/bacula/depkgs/qwt +\end{verbatim} + + Some distributions such as Debian do not adhere to the standard of + naming the library libqwt.a or libqwt.so, and you will either need + to manually add a soft link to the name they use or use the + depkgs version, which handles the naming correctly. + + +\item [ {-}{-}enable-batch-insert ] + \index[general]{{-}{-}enable-batch-insert} + This option enables batch inserts of the attribute records (default) in + the catalog database, which is much faster (10 times or more) than + without this option for large numbers of files. However, this option + will automatically be disabled if your SQL libraries are not + thread safe. If you find that batch mode is not enabled on your Bacula + installation, then your database most likely does not support threads. + + SQLite2 is not thread safe. 
Batch insert cannot be enabled when using + SQLite2 + + On most systems, MySQL, PostgreSQL and SQLite3 are thread safe. + + To verify that your PostgreSQL is thread safe, you can try this + (change the path to point to your particular installed libpq.a; + these commands were issued on FreeBSD 6.2): + +\begin{verbatim} +$ nm /usr/local/lib/libpq.a | grep PQputCopyData +00001b08 T PQputCopyData +$ nm /usr/local/lib/libpq.a | grep mutex + U pthread_mutex_lock + U pthread_mutex_unlock + U pthread_mutex_init + U pthread_mutex_lock + U pthread_mutex_unlock +\end{verbatim} + + The above example shows a libpq that contains the required function + PQputCopyData and is thread enabled (i.e. the pthread\_mutex* entries). + If you do not see PQputCopyData, your version of PostgreSQL is too old + to allow batch insert. If you do not see the mutex entries, then thread + support has not been enabled. Our tests indicate you usually need to + change the configuration options and recompile/reinstall the PostgreSQL + client software to get thread support. + + Bacula always links to the thread safe MySQL libraries. + + As a default, Bacula runs SQLite3 with {\bf PRAGMA synchronous=OFF} + because it improves performance by more than 30 times. However, it + increases the possibility of a corrupted database. If you want more + security, please modify src/version.h appropriately (it should be + obvious when you look at the file). + + Running with Batch Insert turned on is recommended because it can + significantly improve attribute insertion times. However, it does + put a significantly larger part of the work on your SQL engine, so + you may need to pay more attention to tuning it. In particular, + Batch Insert can require large temporary table space, and consequently, + the default location (often /tmp) may run out of space causing errors. + For MySQL, the location is set in my.conf with "tmpdir". 
You may also + want to increase the memory available to your SQL engine to further + improve performance during Batch Inserts. + +\item [ {-}{-}enable-gnome ] + \index[general]{{-}{-}enable-gnome} + If you have GNOME installed on your computer including the + GNOME development libraries, and you want to use the + GNOME GUI Console interface to Bacula, you must specify this option. + Doing so will build everything in the {\bf src/gnome2-console} directory. + +\item [ {-}{-}enable-bwx-console ] + \index[general]{{-}{-}enable-bwx-console} + If you have wxWidgets installed on your computer and you want to use the + wxWidgets GUI Console interface to Bacula, you must specify this option. + Doing so will build everything in the {\bf src/wx-console} directory. + This could also be useful to users who want a GUI Console and don't want + to install GNOME, as wxWidgets can work with GTK+, Motif or even X11 + libraries. + +\item [ {-}{-}enable-tray-monitor ] + \index[general]{{-}{-}enable-tray-monitor} + If you have GTK installed on your computer, you run a graphical + environment or a window manager compatible with the FreeDesktop system + tray standard (like KDE and GNOME) and you want to use a GUI to monitor + Bacula daemons, you must specify this option. Doing so will build + everything in the {\bf src/tray-monitor} directory. Note, due to + restrictions on what can be linked with GPLed code, we were forced to + remove the egg code that dealt with the tray icons and replace it by + calls to the GTK+ API, and unfortunately, the tray icon API necessary + was not implemented until GTK version 2.10 or later. + +\item [ {-}{-}enable-static-tools] + \index[general]{{-}{-}enable-static-tools} + This option causes the linker to link the Storage daemon utility tools + ({\bf bls}, {\bf bextract}, and {\bf bscan}) statically. This permits + using them without having the shared libraries loaded. 
If you have + problems linking in the {\bf src/stored} directory, make sure you have + not enabled this option, or explicitly disable static linking by adding + {\bf \verb:--:disable-static-tools}. + +\item [ {-}{-}enable-static-fd] + \index[general]{{-}{-}enable-static-fd} + This option causes the make process to build a {\bf static-bacula-fd} in + addition to the standard File daemon. This static version will include + statically linked libraries and is required for the Bare Metal recovery. + This option is largely superseded by using {\bf make static-bacula-fd} + from within the {\bf src/filed} directory. Also, the {\bf + \verb:--:enable-client-only} option described below is useful for just + building a client so that all the other parts of the program are not + compiled. + + When linking a static binary, the linker needs the static versions + of all the libraries that are used, so frequently users will + experience linking errors when this option is used. The first + thing to do is to make sure you have the static glibc library + installed on your system. The second thing to do is to make sure + you do not specify {\bf {-}{-}openssl} or {\bf {-}{-}with-python} + on your ./configure statement as these options require additional + libraries. You may be able to enable those options, but you will + need to load additional static libraries. + + +\item [ {-}{-}enable-static-sd] + \index[general]{{-}{-}enable-static-sd} + This option causes the make process to build a {\bf static-bacula-sd} in + addition to the standard Storage daemon. This static version will + include statically linked libraries and could be useful during a Bare + Metal recovery. + + When linking a static binary, the linker needs the static versions + of all the libraries that are used, so frequently users will + experience linking errors when this option is used. The first + thing to do is to make sure you have the static glibc library + installed on your system. 
The second thing to do is to make sure + you do not specify {\bf {-}{-}openssl} or {\bf {-}{-}with-python} + on your ./configure statement as these options require additional + libraries. You may be able to enable those options, but you will + need to load additional static libraries. + + +\item [ {-}{-}enable-static-dir] + \index[general]{{-}{-}enable-static-dir} + This option causes the make process to build a {\bf static-bacula-dir} + in addition to the standard Director. This static version will include + statically linked libraries and could be useful during a Bare Metal + recovery. + + When linking a static binary, the linker needs the static versions + of all the libraries that are used, so frequently users will + experience linking errors when this option is used. The first + thing to do is to make sure you have the static glibc library + installed on your system. The second thing to do is to make sure + you do not specify {\bf {-}{-}openssl} or {\bf {-}{-}with-python} + on your ./configure statement as these options require additional + libraries. You may be able to enable those options, but you will + need to load additional static libraries. + + +\item [ {-}{-}enable-static-cons] + \index[general]{{-}{-}enable-static-cons} + This option causes the make process to build a {\bf static-console} and + a {\bf static-gnome-console} in addition to the standard console. This + static version will include statically linked libraries and could be + useful during a Bare Metal recovery. + + When linking a static binary, the linker needs the static versions + of all the libraries that are used, so frequently users will + experience linking errors when this option is used. The first + thing to do is to make sure you have the static glibc library + installed on your system. The second thing to do is to make sure + you do not specify {\bf {-}{-}openssl} or {\bf {-}{-}with-python} + on your ./configure statement as these options require additional + libraries. 
You may be able to enable those options, but you will + need to load additional static libraries. + + +\item [ {-}{-}enable-client-only] + \index[general]{{-}{-}enable-client-only} + This option causes the make process to build only the File daemon and + the libraries that it needs. None of the other daemons, storage tools, + nor the console will be built. Likewise a {\bf make install} will then + only install the File daemon. To cause all daemons to be built, you + will need to do a configuration without this option. This option + greatly facilitates building a Client on a client only machine. + + When linking a static binary, the linker needs the static versions + of all the libraries that are used, so frequently users will + experience linking errors when this option is used. The first + thing to do is to make sure you have the static glibc library + installed on your system. The second thing to do is to make sure + you do not specify {\bf {-}{-}openssl} or {\bf {-}{-}with-python} + on your ./configure statement as these options require additional + libraries. You may be able to enable those options, but you will + need to load additional static libraries. + +\item [ {-}{-}enable-build-dird] + \index[general]{{-}{-}enable-build-dird} + This option causes the make process to build the Director and the + Director's tools. By default, this option is on, but you may turn + it off by using {\bf {-}{-}disable-build-dird} to prevent the + Director from being built. + +\item [ {-}{-}enable-build-stored] + \index[general]{{-}{-}enable-build-stored} + This option causes the make process to build the Storage daemon. + By default, this option is on, but you may turn + it off by using {\bf {-}{-}disable-build-stored} to prevent the + Storage daemon from being built. + + +\item [ {-}{-}enable-largefile] + \index[general]{{-}{-}enable-largefile} + This option (default) causes Bacula to be built with 64 bit file address + support if it is available on your system. 
This permits Bacula to read and + write files greater than 2 GBytes in size. You may disable this feature and + revert to 32 bit file addresses by using {\bf \verb:--:disable-largefile}. + +\item [ {-}{-}disable-nls] + \index[general]{{-}{-}disable-nls} + By default, Bacula uses the GNU Native Language Support (NLS) libraries. On + some machines, these libraries may not be present or may not function + correctly (especially on non-Linux implementations). In such cases, you + may specify {\bf {-}{-}disable-nls} to disable use of those libraries. + In such a case, Bacula will revert to using English. + +\item [ {-}{-}disable-ipv6 ] + \index[general]{{-}{-}disable-ipv6} + By default, Bacula enables IPv6 protocol. On some systems, the files + for IPv6 may exist, but the functionality could be turned off in the + kernel. In that case, in order to correctly build Bacula, you will + explicitly need to use this option so that Bacula does not attempt + to reference OS function calls that do not exist. + +\item [ {-}{-}with-sqlite=\lt{}sqlite-path\gt{}] + \index[general]{{-}{-}with-sqlite} + This enables use of the SQLite version 2.8.x database. The {\bf + sqlite-path} is not normally specified as Bacula looks for the necessary + components in a standard location ({\bf depkgs/sqlite}). See + \ilink{Installing and Configuring SQLite}{SqlLiteChapter} chapter of + this manual for more details. SQLite is not supported on Solaris. + + See the note below under the {-}{-}with-postgresql item. + +\item [ {-}{-}with-sqlite3=\lt{}sqlite3-path\gt{}] + \index[general]{{-}{-}with-sqlite3} + This enables use of the SQLite version 3.x database. The {\bf + sqlite3-path} is not normally specified as Bacula looks for the + necessary components in a standard location ({\bf depkgs/sqlite3}). See + \ilink{Installing and Configuring SQLite}{SqlLiteChapter} chapter of + this manual for more details. SQLite3 is not supported on Solaris. 
+ +\item [ {-}{-}with-mysql=\lt{}mysql-path\gt{}] + \index[general]{{-}{-}with-mysql} + This enables building of the Catalog services for Bacula. It assumes + that MySQL is running on your system, and expects it to be installed in + the {\bf mysql-path} that you specify. Normally, if MySQL is installed + in a standard system location, you can simply use {\bf {-}{-}with-mysql} + with no path specification. If you do use this option, please proceed + to installing MySQL in the \ilink{Installing and Configuring + MySQL}{MySqlChapter} chapter before proceeding with the configuration. + + See the note below under the {-}{-}with-postgresql item. + +\item [ {-}{-}with-postgresql=\lt{}path\gt{}] + \index[general]{{-}{-}with-postgresql} + This provides an explicit path to the PostgreSQL libraries if Bacula + cannot find it by default. Normally to build with PostgreSQL, you would + simply use {\bf {-}{-}with-postgresql}. + + Note, for Bacula to be configured properly, you must specify one + of the four database options supported. That is: + {-}{-}with-sqlite, {-}{-}with-sqlite3, {-}{-}with-mysql, or + {-}{-}with-postgresql, otherwise the ./configure will fail. + +\item [ {-}{-}with-openssl=\lt{}path\gt{}] + This configuration option is necessary if you want to enable TLS (ssl), + which encrypts the communications within + Bacula or if you want to use File Daemon PKI data encryption. + Normally, the {\bf path} specification is not necessary since + the configuration searches for the OpenSSL libraries in standard system + locations. Enabling OpenSSL in Bacula permits secure communications + between the daemons and/or data encryption in the File daemon. + For more information on using TLS, please see the + \ilink{Bacula TLS -- Communications Encryption}{CommEncryption} chapter + of this manual. + For more information on using PKI data encryption, please see the + \ilink{Bacula PKI -- Data Encryption}{DataEncryption} + chapter of this manual. 
+ +\item [ {-}{-}with-python=\lt{}path\gt{}] + \index[general]{{-}{-}with-python} + This option enables Bacula support for Python. If no path is supplied, + configure will search the standard library locations for Python 2.2, + 2.3, 2.4, or 2.5. If it cannot find the library, you will need to + supply a path to your Python library directory. Please see the + \ilink{Python chapter}{PythonChapter} for the details of using Python + scripting. + +\item [ {-}{-}with-libintl-prefix=\lt{}DIR\gt{}] + \index[general]{{-}{-}with-libintl-prefix} + This option may be used to tell Bacula to search DIR/include and + DIR/lib for the libintl headers and libraries needed for Native + Language Support (NLS). + +\item [ {-}{-}enable-conio] + \index[general]{{-}{-}enable-conio} + Tells Bacula to enable building the small, light weight readline + replacement routine. It is generally much easier to configure than + readline, although, like readline, it needs either the termcap or + ncurses library. + +\item [ {-}{-}with-readline=\lt{}readline-path\gt{}] + \index[general]{{-}{-}with-readline} + Tells Bacula where {\bf readline} is installed. Normally, Bacula will + find readline if it is in a standard library. If it is not found and no + {-}{-}with-readline is specified, readline will be disabled. This + option affects the Bacula build. Readline provides the Console program + with a command line history and editing capability and is no longer + supported, so you are on your own if you have problems. + +\item [ {-}{-}enable-readline] + \index[general]{{-}{-}enable-readline} + Tells Bacula to enable readline support. It is normally disabled due to the + large number of configuration problems and the fact that the package seems to + change in incompatible ways from version to version. 
+ +\item [ {-}{-}with-tcp-wrappers=\lt{}path\gt{}] + \index[general]{{-}{-}with-tcp-wrappers} + \index[general]{TCP Wrappers} + \index[general]{Wrappers!TCP} + \index[general]{libwrappers} + This specifies that you want TCP wrappers (man hosts\_access(5)) compiled in. + The path is optional since Bacula will normally find the libraries in the + standard locations. This option affects the Bacula build. In specifying your + restrictions in the {\bf /etc/hosts.allow} or {\bf /etc/hosts.deny} files, do + not use the {\bf twist} option (hosts\_options(5)) or the Bacula process will + be terminated. Note, when setting up your {\bf /etc/hosts.allow} + or {\bf /etc/hosts.deny}, you must identify the Bacula daemon in + question with the name you give it in your conf file rather than the + name of the executable. + + For more information on configuring and testing TCP wrappers, please see the + \ilink{Configuring and Testing TCP Wrappers}{wrappers} section + in the Security Chapter. + + On SuSE, the libwrappers libraries needed to link Bacula are + contained in the tcpd-devel package. On Red Hat, the package is named + tcp\_wrappers. + +\item [ {-}{-}with-archivedir=\lt{}path\gt{} ] + \index[general]{{-}{-}with-archivedir} + The directory used for disk-based backups. Default value is /tmp. + This parameter sets the default values in the bacula-dir.conf and bacula-sd.conf + configuration files. For example, it sets the Where directive for the + default restore job and the Archive Device directive for the FileStorage + device. + + This option is designed primarily for use in regression testing. + Most users can safely ignore this option. + +\item [ {-}{-}with-working-dir=\lt{}working-directory-path\gt{} ] + \index[general]{{-}{-}with-working-dir} + This option is mandatory and specifies a directory into which Bacula may + safely place files that will remain between Bacula executions. 
For example, + if the internal database is used, Bacula will keep those files in this + directory. This option is only used to modify the daemon configuration + files. You may also accomplish the same thing by directly editing them later. + The working directory is not automatically created by the install process, so + you must ensure that it exists before using Bacula for the first time. + +\item [ {-}{-}with-base-port=\lt{}port-number\gt{}] + \index[general]{{-}{-}with-base-port} + In order to run, Bacula needs three TCP/IP ports (one for the Bacula + Console, one for the Storage daemon, and one for the File daemon). The {\bf + \verb:--:with-base-port} option will automatically assign three ports beginning at + the base port address specified. You may also change the port number in the + resulting configuration files. However, you need to take care that the + numbers correspond correctly in each of the three daemon configuration + files. The default base port is 9101, which assigns ports 9101 through 9103. + These ports (9101, 9102, and 9103) have been officially assigned to Bacula by + IANA. This option is only used to modify the daemon configuration files. You + may also accomplish the same thing by directly editing them later. + +\item [ {-}{-}with-dump-email=\lt{}email-address\gt{}] + \index[general]{{-}{-}with-dump-email} + This option specifies the email address where any core dumps should be sent. + This option is normally only used by developers. + +\item [ {-}{-}with-pid-dir=\lt{}PATH\gt{} ] + \index[general]{{-}{-}with-pid-dir} + This specifies where Bacula should place the process id file during + execution. The default is: {\bf /var/run}. This directory is not created by + the install process, so you must ensure that it exists before using Bacula + the first time. + +\item [ {-}{-}with-subsys-dir=\lt{}PATH\gt{}] + \index[general]{{-}{-}with-subsys-dir} + This specifies where Bacula should place the subsystem lock file during + execution. 
The default is {\bf /var/run/subsys}. Please make sure that you do + not specify the same directory for this directory and for the {\bf sbindir} + directory. This directory is used only within the autostart scripts. The + subsys directory is not created by the Bacula install, so you must be sure to + create it before using Bacula. + +\item [ {-}{-}with-dir-password=\lt{}Password\gt{}] + \index[general]{{-}{-}with-dir-password} + This option allows you to specify the password used to access the Director + (normally from the Console program). If it is not specified, configure will + automatically create a random password. + +\item [ {-}{-}with-fd-password=\lt{}Password\gt{} ] + \index[general]{{-}{-}with-fd-password} + This option allows you to specify the password used to access the File daemon + (normally called from the Director). If it is not specified, configure will + automatically create a random password. + +\item [ {-}{-}with-sd-password=\lt{}Password\gt{} ] + \index[general]{{-}{-}with-sd-password} + This option allows you to specify the password used to access the Storage daemon + (normally called from the Director). If it is not specified, configure will + automatically create a random password. + +\item [ {-}{-}with-dir-user=\lt{}User\gt{} ] + \index[general]{{-}{-}with-dir-user} + This option allows you to specify the Userid used to run the Director. The + Director must be started as root, but doesn't need to run as root, and + after doing preliminary initializations, it can "drop" to the UserId + specified on this option. + If you specify this option, you must + create the User prior to running {\bf make install}, because the + working directory owner will be set to {\bf User}. + +\item [ {-}{-}with-dir-group=\lt{}Group\gt{} ] + \index[general]{{-}{-}with-dir-group} + This option allows you to specify the GroupId used to run the Director. 
The + Director must be started as root, but doesn't need to run as root, and after + doing preliminary initializations, it can "drop" to the GroupId specified + on this option. + If you specify this option, you must + create the Group prior to running {\bf make install}, because the + working directory group will be set to {\bf Group}. + +\item [ {-}{-}with-sd-user=\lt{}User\gt{} ] + \index[general]{{-}{-}with-sd-user} + This option allows you to specify the Userid used to run the Storage daemon. + The Storage daemon must be started as root, but doesn't need to run as root, + and after doing preliminary initializations, it can "drop" to the UserId + specified on this option. If you use this option, you will need to take care + that the Storage daemon has access to all the devices (tape drives, ...) that + it needs. + +\item [ {-}{-}with-sd-group=\lt{}Group\gt{} ] + \index[general]{{-}{-}with-sd-group} + This option allows you to specify the GroupId used to run the Storage daemon. + The Storage daemon must be started as root, but doesn't need to run as root, + and after doing preliminary initializations, it can "drop" to the GroupId + specified on this option. + +\item [ {-}{-}with-fd-user=\lt{}User\gt{} ] + \index[general]{{-}{-}with-fd-user} + This option allows you to specify the Userid used to run the File daemon. The + File daemon must be started as root, and in most cases, it needs to run as + root, so this option is used only in very special cases, after doing + preliminary initializations, it can "drop" to the UserId specified on this + option. + +\item [ {-}{-}with-fd-group=\lt{}Group\gt{} ] + \index[general]{{-}{-}with-fd-group} + This option allows you to specify the GroupId used to run the File daemon. + The File daemon must be started as root, and in most cases, it must be run as + root, however, after doing preliminary initializations, it can "drop" to + the GroupId specified on this option. 
+ +\item [ {-}{-}with-mon-dir-password=\lt{}Password\gt{}] + \index[general]{{-}{-}with-mon-dir-password} + This option allows you to specify the password used to access the Director + from the Monitor. If it is not specified, configure will + automatically create a random password. + +\item [ {-}{-}with-mon-fd-password=\lt{}Password\gt{} ] + \index[general]{{-}{-}with-mon-fd-password} + This option allows you to specify the password used to access the File daemon + from the Monitor. If it is not specified, configure will + automatically create a random password. + +\item [ {-}{-}with-mon-sd-password=\lt{}Password\gt{} ] + \index[general]{{-}{-}with-mon-sd-password} + This option allows you to specify the password used to access the + Storage daemon from the Monitor. If it is not specified, configure will + automatically create a random password. + +\item [ {-}{-}with-db-name=\lt{}database-name\gt{} ] + \index[general]{{-}{-}with-db-name} + This option allows you to specify the database name to be used in + the conf files. The default is bacula. + +\item [ {-}{-}with-db-user=\lt{}database-user\gt{} ] + \index[general]{{-}{-}with-db-user} + This option allows you to specify the database user name to be used in + the conf files. The default is bacula. + +\end{description} + +Note, many other options are presented when you do a {\bf ./configure +\verb:--:help}, but they are not implemented. 
+ +\section{Recommended Options for Most Systems} +\index[general]{Systems!Recommended Options for Most} +\index[general]{Recommended Options for Most Systems} + +For most systems, we recommend starting with the following options: + +\footnotesize +\begin{verbatim} +./configure \ + --enable-smartalloc \ + --sbindir=$HOME/bacula/bin \ + --sysconfdir=$HOME/bacula/bin \ + --with-pid-dir=$HOME/bacula/bin/working \ + --with-subsys-dir=$HOME/bacula/bin/working \ + --with-mysql=$HOME/mysql \ + --with-working-dir=$HOME/bacula/working +\end{verbatim} +\normalsize + +If you want to install Bacula in an installation directory rather than run it +out of the build directory (as developers will do most of the time), you +should also include the \verb:--:sbindir and \verb:--:sysconfdir options with appropriate +paths. Neither are necessary if you do not use "make install" as is the case +for most development work. The install process will create the sbindir and +sysconfdir if they do not exist, but it will not automatically create the +pid-dir, subsys-dir, or working-dir, so you must ensure that they exist before +running Bacula for the first time. 
+ +\section{Red Hat} +\index[general]{Red Hat} + +Using SQLite: + +\footnotesize +\begin{verbatim} + +CFLAGS="-g -Wall" ./configure \ + --sbindir=$HOME/bacula/bin \ + --sysconfdir=$HOME/bacula/bin \ + --enable-smartalloc \ + --with-sqlite=$HOME/bacula/depkgs/sqlite \ + --with-working-dir=$HOME/bacula/working \ + --with-pid-dir=$HOME/bacula/bin/working \ + --with-subsys-dir=$HOME/bacula/bin/working \ + --enable-bat \ + --with-qwt=$HOME/bacula/depkgs/qwt \ + --enable-conio +\end{verbatim} +\normalsize + +or + +\footnotesize +\begin{verbatim} + +CFLAGS="-g -Wall" ./configure \ + --sbindir=$HOME/bacula/bin \ + --sysconfdir=$HOME/bacula/bin \ + --enable-smartalloc \ + --with-mysql=$HOME/mysql \ + --with-working-dir=$HOME/bacula/working \ + --with-pid-dir=$HOME/bacula/bin/working \ + --with-subsys-dir=$HOME/bacula/bin/working \ + --enable-gnome \ + --enable-conio +\end{verbatim} +\normalsize + +or finally, a completely traditional Red Hat Linux install: + +\footnotesize +\begin{verbatim} +CFLAGS="-g -Wall" ./configure \ + --prefix=/usr \ + --sbindir=/usr/sbin \ + --sysconfdir=/etc/bacula \ + --with-scriptdir=/etc/bacula \ + --enable-smartalloc \ + --enable-bat \ + --with-qwt=$HOME/bacula/depkgs/qwt \ + --with-mysql \ + --with-working-dir=/var/bacula \ + --with-pid-dir=/var/run \ + --enable-conio +\end{verbatim} +\normalsize + +Note, Bacula assumes that /var/bacula, /var/run, and /var/lock/subsys exist so +it will not automatically create them during the install process. + +\section{Solaris} +\index[general]{Solaris} + +To build Bacula from source, you will need the following installed on your +system (they are not by default): libiconv, gcc 3.3.2, stdc++, libgcc (for +stdc++ and gcc\_s libraries), make 3.8 or later. + +You will probably also need to: Add /usr/local/bin to PATH and add +/usr/ccs/bin to PATH for ar. + +It is possible to build Bacula on Solaris with the Solaris compiler, but +we recommend using GNU C++ if possible. 
+ +A typical configuration command might look like: + +\footnotesize +\begin{verbatim} +#!/bin/sh +CFLAGS="-g" ./configure \ + --sbindir=$HOME/bacula/bin \ + --sysconfdir=$HOME/bacula/bin \ + --with-mysql=$HOME/mysql \ + --enable-smartalloc \ + --with-pid-dir=$HOME/bacula/bin/working \ + --with-subsys-dir=$HOME/bacula/bin/working \ + --with-working-dir=$HOME/bacula/working +\end{verbatim} +\normalsize + +As mentioned above, the install process will create the sbindir and sysconfdir +if they do not exist, but it will not automatically create the pid-dir, +subsys-dir, or working-dir, so you must ensure that they exist before running +Bacula for the first time. + +Note, you may need to install the following packages to build Bacula +from source: +\footnotesize +\begin{verbatim} +SUNWbinutils, +SUNWarc, +SUNWhea, +SUNWGcc, +SUNWGnutls +SUNWGnutls-devel +SUNWGmake +SUNWgccruntime +SUNWlibgcrypt +SUNWzlib +SUNWzlibs +SUNWbinutilsS +SUNWGmakeS +SUNWlibm + +export +PATH=/usr/bin:/usr/ccs/bin:/etc:/usr/openwin/bin:/usr/local/bin:/usr/sfw/bin:/opt/sfw/bin:/usr/ucb:/usr/sbin +\end{verbatim} +\normalsize + +If you have installed special software not normally in the Solaris +libraries, such as OpenSSL, or the packages shown above, then you may need +to add {\bf /usr/sfw/lib} to the library search path. Probably the +simplest way to do so is to run: + +\footnotesize +\begin{verbatim} +setenv LDFLAGS "-L/usr/sfw/lib -R/usr/sfw/lib" +\end{verbatim} +\normalsize + +prior to running the ./configure command. + +Alternatively, you can set the LD\_LIBRARY\_PATH and/or the LD\_RUN\_PATH +environment variables appropriately. + +It is also possible to use the {\bf crle} program to set the library +search path. However, this should be used with caution. + +\section{FreeBSD} +\index[general]{FreeBSD} + +Please see: +\elink{The FreeBSD Diary}{http://www.freebsddiary.org/bacula.php} for a +detailed description on how to make Bacula work on your system. 
In addition,
+users of FreeBSD prior to 4.9-STABLE dated Mon Dec 29 15:18:01 2003 UTC who
+plan to use tape devices, please see the
+\ilink{Tape Testing Chapter}{FreeBSDTapes} of this manual for
+{\bf important} information on how to configure your tape drive for
+compatibility with Bacula.
+
+If you are using Bacula with MySQL, you should take care to compile MySQL with
+FreeBSD native threads rather than LinuxThreads, since Bacula is normally built
+with FreeBSD native threads rather than LinuxThreads. Mixing the two will
+probably not work.
+
+\section{Win32}
+\index[general]{Win32}
+
+To install the binary Win32 version of the File daemon please see the
+\ilink{Win32 Installation Chapter}{Win32Chapter} in this document.
+
+\section{One File Configure Script}
+\index[general]{Script!One File Configure}
+\index[general]{One File Configure Script}
+
+The following script could be used if you want to put everything
+in a single file:
+
+\footnotesize
+\begin{verbatim}
+#!/bin/sh
+CFLAGS="-g -Wall" \
+ ./configure \
+ --sbindir=$HOME/bacula/bin \
+ --sysconfdir=$HOME/bacula/bin \
+ --mandir=$HOME/bacula/bin \
+ --enable-smartalloc \
+ --enable-gnome \
+ --enable-bat \
+ --with-qwt=$HOME/bacula/depkgs/qwt \
+ --enable-bwx-console \
+ --enable-tray-monitor \
+ --with-pid-dir=$HOME/bacula/bin/working \
+ --with-subsys-dir=$HOME/bacula/bin/working \
+ --with-mysql \
+ --with-working-dir=$HOME/bacula/bin/working \
+ --with-dump-email=$USER@your-site.com \
+ --with-job-email=$USER@your-site.com \
+ --with-smtp-host=mail.your-site.com
+exit 0
+\end{verbatim}
+\normalsize
+
+You may also want to put the following entries in your {\bf /etc/services}
+file as it will make viewing the connections made by Bacula easier to
+recognize (i.e.
netstat -a): + +\footnotesize +\begin{verbatim} +bacula-dir 9101/tcp +bacula-fd 9102/tcp +bacula-sd 9103/tcp +\end{verbatim} +\normalsize + +\section{Installing Bacula} +\index[general]{Bacula!Installing} +\index[general]{Installing Bacula} + +Before setting up your configuration files, you will want to install Bacula in +its final location. Simply enter: + +\footnotesize +\begin{verbatim} +make install +\end{verbatim} +\normalsize + +If you have previously installed Bacula, the old binaries will be overwritten, +but the old configuration files will remain unchanged, and the "new" +configuration files will be appended with a {\bf .new}. Generally if you have +previously installed and run Bacula you will want to discard or ignore the +configuration files with the appended {\bf .new}. + +\section{Building a File Daemon or Client} +\index[general]{Client!Building a File Daemon or} +\index[general]{Building a File Daemon or Client} + +If you run the Director and the Storage daemon on one machine and you wish to +back up another machine, you must have a copy of the File daemon for that +machine. If the machine and the Operating System are identical, you can simply +copy the Bacula File daemon binary file {\bf bacula-fd} as well as its +configuration file {\bf bacula-fd.conf} then modify the name and password in +the conf file to be unique. Be sure to make corresponding additions to the +Director's configuration file ({\bf bacula-dir.conf}). + +If the architecture or the OS level are different, you will need to build a +File daemon on the Client machine. To do so, you can use the same {\bf +./configure} command as you did for your main program, starting either from a +fresh copy of the source tree, or using {\bf make\ distclean} before the {\bf +./configure}. + +Since the File daemon does not access the Catalog database, you can remove +the {\bf \verb:--:with-mysql} or {\bf \verb:--:with-sqlite} options, then +add {\bf \verb:--:enable-client-only}. 
This will compile only the +necessary libraries and the client programs and thus avoids the necessity +of installing one or another of those database programs to build the File +daemon. With the above option, you simply enter {\bf make} and just the +client will be built. + +\label{autostart} +\section{Auto Starting the Daemons} +\index[general]{Daemons!Auto Starting the} +\index[general]{Auto Starting the Daemons} + +If you wish the daemons to be automatically started and stopped when your +system is booted (a good idea), one more step is necessary. First, the +./configure process must recognize your system -- that is it must be a +supported platform and not {\bf unknown}, then you must install the platform +dependent files by doing: + +\footnotesize +\begin{verbatim} +(become root) +make install-autostart +\end{verbatim} +\normalsize + +Please note, that the auto-start feature is implemented only on systems +that we officially support (currently, FreeBSD, Red Hat/Fedora Linux, and +Solaris), and has only been fully tested on Fedora Linux. + +The {\bf make install-autostart} will cause the appropriate startup scripts +to be installed with the necessary symbolic links. On Red Hat/Fedora Linux +systems, these scripts reside in {\bf /etc/rc.d/init.d/bacula-dir} {\bf +/etc/rc.d/init.d/bacula-fd}, and {\bf /etc/rc.d/init.d/bacula-sd}. However +the exact location depends on what operating system you are using. 
+
+If you only wish to install the File daemon, you may do so with:
+
+\footnotesize
+\begin{verbatim}
+make install-autostart-fd
+\end{verbatim}
+\normalsize
+
+\section{Other Make Notes}
+\index[general]{Notes!Other Make}
+\index[general]{Other Make Notes}
+
+To simply build a new executable in any directory, enter:
+
+\footnotesize
+\begin{verbatim}
+make
+\end{verbatim}
+\normalsize
+
+To clean out all the objects and binaries (including the files named 1, 2, or
+3, which are development temporary files), enter:
+
+\footnotesize
+\begin{verbatim}
+make clean
+\end{verbatim}
+\normalsize
+
+To really clean out everything for distribution, enter:
+
+\footnotesize
+\begin{verbatim}
+make distclean
+\end{verbatim}
+\normalsize
+
+Note, this cleans out the Makefiles and is normally done from the top level
+directory to prepare for distribution of the source. To recover from this
+state, you must redo the {\bf ./configure} in the top level directory, since
+all the Makefiles will be deleted.
+
+To add a new file in a subdirectory, edit the Makefile.in in that directory,
+then simply do a {\bf make}. In most cases, the make will rebuild the Makefile
+from the new Makefile.in. In some cases, you may need to issue the {\bf make} a
+second time. In extreme cases, cd to the top level directory and enter: {\bf
+make Makefiles}.
+
+To add dependencies:
+
+\footnotesize
+\begin{verbatim}
+make depend
+\end{verbatim}
+\normalsize
+
+The {\bf make depend} appends the header file dependencies for each of the
+object files to Makefile and Makefile.in. This command should be done in each
+directory where you change the dependencies. Normally, it only needs to be run
+when you add or delete source or header files. {\bf make depend} is normally
+automatically invoked during the configuration process.
+
+To install:
+
+\footnotesize
+\begin{verbatim}
+make install
+\end{verbatim}
+\normalsize
+
+This is not normally done if you are developing Bacula, but is used if you are
+going to run it to backup your system.
+
+After doing a {\bf make install} the following files will be installed on your
+system (more or less). The exact files and location (directory) for each file
+depends on your {\bf ./configure} command (e.g. bgnome-console and
+bgnome-console.conf are not installed if you do not configure GNOME. Also, if
+you are using SQLite instead of MySQL, some of the files will be different).
+
+NOTE: it is quite probable that this list is out of date. But it is a
+starting point.
+
+\footnotesize
+\begin{verbatim}
+bacula
+bacula-dir
+bacula-dir.conf
+bacula-fd
+bacula-fd.conf
+bacula-sd
+bacula-sd.conf
+bacula-tray-monitor
+tray-monitor.conf
+bextract
+bls
+bscan
+btape
+btraceback
+btraceback.gdb
+bconsole
+bconsole.conf
+create_mysql_database
+dbcheck
+delete_catalog_backup
+drop_bacula_tables
+drop_mysql_tables
+bgnome-console
+bgnome-console.conf
+make_bacula_tables
+make_catalog_backup
+make_mysql_tables
+mtx-changer
+query.sql
+bsmtp
+startmysql
+stopmysql
+bwx-console
+bwx-console.conf
+9 man pages
+\end{verbatim}
+\normalsize
+
+\label{monitor}
+
+\section{Installing Tray Monitor}
+\index[general]{Monitor!Installing Tray}
+\index[general]{Installing Tray Monitor}
+
+The Tray Monitor is already installed if you used the {\bf
+\verb:--:enable-tray-monitor} configure option and ran {\bf make install}.
+
+As you don't run your graphical environment as root (if you do, you should
+change that bad habit), don't forget to allow your user to read {\bf
+tray-monitor.conf}, and to execute {\bf bacula-tray-monitor} (this is not a
+security issue).
+
+Then log into your graphical environment (KDE, GNOME or something else), run
+{\bf bacula-tray-monitor} as your user, and see if a cassette icon appears
+somewhere on the screen, usually on the task bar.
+If it doesn't, follow the instructions below related to your environment or +window manager. + +\subsection{GNOME} +\index[general]{GNOME} + +System tray, or notification area if you use the GNOME terminology, has been +supported in GNOME since version 2.2. To activate it, right-click on one of +your panels, open the menu {\bf Add to this Panel}, then {\bf Utility} and +finally click on {\bf Notification Area}. + +\subsection{KDE} +\index[general]{KDE} + +System tray has been supported in KDE since version 3.1. To activate it, +right-click on one of your panels, open the menu {\bf Add}, then {\bf Applet} +and finally click on {\bf System Tray}. + +\subsection{Other window managers} +\index[general]{Managers!Other window} +\index[general]{Other window managers} + +Read the documentation to know if the Freedesktop system tray standard is +supported by your window manager, and if applicable, how to activate it. + +\section{Modifying the Bacula Configuration Files} +\index[general]{Modifying the Bacula Configuration Files} +\index[general]{Files!Modifying the Bacula Configuration} + +See the chapter +\ilink{Configuring Bacula}{ConfigureChapter} in this manual for +instructions on how to set Bacula configuration files. diff --git a/docs/manuals/en/install/latex2html-init.pl b/docs/manuals/en/install/latex2html-init.pl new file mode 100644 index 00000000..14b5c319 --- /dev/null +++ b/docs/manuals/en/install/latex2html-init.pl @@ -0,0 +1,10 @@ +# This file serves as a place to put initialization code and constants to +# affect the behavior of latex2html for generating the bacula manuals. + +# $LINKPOINT specifies what filename to use to link to when creating +# index.html. Not that this is a hard link. +$LINKPOINT='"$OVERALL_TITLE"'; + + +# The following must be the last line of this file. 
+1;
diff --git a/docs/manuals/en/install/messagesres.tex b/docs/manuals/en/install/messagesres.tex
new file mode 100644
index 00000000..e6002d9d
--- /dev/null
+++ b/docs/manuals/en/install/messagesres.tex
@@ -0,0 +1,372 @@
+%%
+%%
+
+\chapter{Messages Resource}
+\label{MessagesChapter}
+\index[general]{Resource!Messages}
+\index[general]{Messages Resource}
+
+The Messages resource defines how messages are to be handled and destinations
+to which they should be sent.
+
+Even though each daemon has a full message handler, within the File daemon and
+the Storage daemon, you will normally choose to send all the appropriate
+messages back to the Director. This permits all the messages associated with a
+single Job to be combined in the Director and sent as a single email message
+to the user, or logged together in a single file.
+
+Each message that Bacula generates (i.e. that each daemon generates) has an
+associated type such as INFO, WARNING, ERROR, FATAL, etc. Using the message
+resource, you can specify which message types you wish to see and where they
+should be sent. In addition, a message may be sent to multiple destinations.
+For example, you may want all error messages both logged as well as sent to
+you in an email. By defining multiple messages resources, you can have
+different message handling for each type of Job (e.g. Full backups versus
+Incremental backups).
+
+In general, messages are attached to a Job and are included in the Job report.
+There are some rare cases, where this is not possible, e.g. when no job is
+running, or if a communications error occurs between a daemon and the
+director. In those cases, the message may remain in the system, and should be
+flushed at the end of the next Job. However, since such messages are not
+attached to a Job, any that are mailed will be sent to {\bf
+/usr/lib/sendmail}. On some systems, such as FreeBSD, if your sendmail is in a
+different place, you may want to link it to the above location.
+
+The records contained in a Messages resource consist of a {\bf destination}
+specification followed by a list of {\bf message-types} in the format:
+
+\begin{description}
+
+\item [destination = message-type1, message-type2, message-type3, ... ]
+\index[dir]{destination}
+\end{description}
+
+or for those destinations that need an address specification (e.g. email):
+
+\begin{description}
+
+\item [destination = address = message-type1, message-type2,
+ message-type3, ... ]
+\index[dir]{destination}
+
+ Where {\bf destination} is one of a predefined set of keywords that define
+ where the message is to be sent ({\bf stdout}, {\bf file}, ...), {\bf
+ message-type} is one of a predefined set of keywords that define the type of
+ message generated by {\bf Bacula} ({\bf ERROR}, {\bf WARNING}, {\bf FATAL},
+ ...), and {\bf address} varies according to the {\bf destination} keyword, but
+ is typically an email address or a filename.
+\end{description}
+
+The following is the list of the possible record definitions that can be used
+in a message resource.
+
+\begin{description}
+
+\item [Messages]
+\index[dir]{Messages}
+ Start of the Messages records.
+
+\item [Name = \lt{}name\gt{}]
+\index[dir]{Name}
+ The name of the Messages resource. The name you specify here will be used to
+ tie this Messages resource to a Job and/or to the daemon.
+
+\label{mailcommand}
+\item [MailCommand = \lt{}command\gt{}]
+\index[dir]{MailCommand}
+ In the absence of this resource, Bacula will send all mail using the
+ following command:
+
+{\bf mail -s "Bacula Message" \lt{}recipients\gt{}}
+
+In many cases, depending on your machine, this command may not work. Using
+the {\bf MailCommand}, you can specify exactly how to send the mail.
During +the processing of the {\bf command}, normally specified as a quoted string, +the following substitutions will be used: + +\begin{itemize} +\item \%\% = \% +\item \%c = Client's name +\item \%d = Director's name +\item \%e = Job Exit code (OK, Error, ...) +\item \%i = Job Id +\item \%j = Unique Job name +\item \%l = Job level +\item \%n = Job name +\item \%r = Recipients +\item \%t = Job type (e.g. Backup, ...) + \end{itemize} + +The following is the command I (Kern) use. Note, the whole command should +appear on a single line in the configuration file rather than split as is +done here for presentation: + +{\bf mailcommand = "/home/kern/bacula/bin/bsmtp -h mail.example.com -f +\textbackslash{}"\textbackslash{}(Bacula\textbackslash{}) +\%r\textbackslash{}" -s \textbackslash{}"Bacula: \%t \%e of \%c +\%l\textbackslash{}" \%r"} + +Note, the {\bf bsmtp} program is provided as part of {\bf Bacula}. For +additional details, please see the +\ilink{ bsmtp -- Customizing Your Email Messages}{bsmtp} section of +the Bacula Utility Programs chapter of this manual. Please test any {\bf +mailcommand} that you use to ensure that your bsmtp gateway accepts the +addressing form that you use. Certain programs such as Exim can be very +selective as to what forms are permitted particularly in the from part. + +\item [OperatorCommand = \lt{}command\gt{}] +\index[fd]{OperatorCommand} + This resource specification is similar to the {\bf MailCommand} except that + it is used for Operator messages. The substitutions performed for the {\bf + MailCommand} are also done for this command. Normally, you will set this + command to the same value as specified for the {\bf MailCommand}. + +\item [\lt{}destination\gt{} = \lt{}message-type1\gt{}, + \lt{}message-type2\gt{}, ...] + \index[fd]{\lt{}destination\gt{}} + +Where {\bf destination} may be one of the following: + +\begin{description} + +\item [stdout] + \index[fd]{stdout} + Send the message to standard output. 
+ +\item [stderr] + \index[fd]{stderr} + Send the message to standard error. + +\item [console] + \index[console]{console} + Send the message to the console (Bacula Console). These messages are held +until the console program connects to the Director. +\end{description} + +\item {\bf \lt{}destination\gt{} = \lt{}address\gt{} = + \lt{}message-type1\gt{}, \lt{}message-type2\gt{}, ...} + \index[console]{\lt{}destination\gt{}} + +Where {\bf address} depends on the {\bf destination}. + +The {\bf destination} may be one of the following: + +\begin{description} + +\item [director] + \index[dir]{director} + \index[general]{director} + Send the message to the Director whose name is given in the {\bf address} + field. Note, in the current implementation, the Director Name is ignored, and + the message is sent to the Director that started the Job. + +\item [file] +\index[dir]{file} +\index[general]{file} + Send the message to the filename given in the {\bf address} field. If the + file already exists, it will be overwritten. + +\item [append] +\index[dir]{append} +\index[general]{append} + Append the message to the filename given in the {\bf address} field. If the + file already exists, it will be appended to. If the file does not exist, it + will be created. + +\item [syslog] +\index[general]{syslog} + Send the message to the system log (syslog) using the facility specified in + the {\bf address} field. Note, for the moment, the {\bf address} field is + ignored and the message is always sent to the LOG\_DAEMON facility with + level LOG\_ERR. See {\bf man 3 syslog} for more details. Example: +\begin{verbatim} + syslog = all, !skipped +\end{verbatim} + +\item [mail] + \index[general]{mail} + Send the message to the email addresses that are given as a comma + separated list in the {\bf address} field. Mail messages are grouped + together during a job and then sent as a single email message when the + job terminates. 
The advantage of this destination is that you are
+ notified about every Job that runs. However, if you backup five or ten
+ machines every night, the volume of email messages can be considerable.
+ Some users use filter programs such as {\bf procmail} to automatically
+ file this email based on the Job termination code (see {\bf
+ mailcommand}).
+
+\item [mail on error]
+ \index[general]{mail on error}
+ Send the message to the email addresses that are given as a comma
+ separated list in the {\bf address} field if the Job terminates with an
+ error condition. MailOnError messages are grouped together during a job
+ and then sent as a single email message when the job terminates. This
+ destination differs from the {\bf mail} destination in that if the Job
+ terminates normally, the message is totally discarded (for this
+ destination). If the Job terminates in error, it is emailed. By using
+ other destinations such as {\bf append} you can ensure that even if the
+ Job terminates normally, the output information is saved.
+
+\item [mail on success]
+ \index[general]{mail on success}
+ Send the message to the email addresses that are given as a comma
+ separated list in the {\bf address} field if the Job terminates
+ normally (no error condition). MailOnSuccess messages are grouped
+ together during a job and then sent as a single email message when the
+ job terminates. This destination differs from the {\bf mail}
+ destination in that if the Job terminates abnormally, the message is
+ totally discarded (for this destination). If the Job terminates
+ normally, it is emailed.
+
+\item [operator]
+ \index[general]{operator}
+ Send the message to the email addresses that are specified as a comma
+ separated list in the {\bf address} field. This is similar to {\bf
+ mail} above, except that each message is sent as received. Thus there
+ is one email per message. This is most useful for {\bf mount} messages
+ (see below).
+ +\item [console] + \index[general]{console} + Send the message to the Bacula console. + +\item [stdout] + \index[general]{stdout} + Send the message to the standard output (normally not used). + +\item [stderr] + \index[general]{stderr} + Send the message to the standard error output (normally not used). + +\item [catalog] + \index[general]{catalog} + Send the message to the Catalog database. The message will be + written to the table named {\bf Log} and a timestamp field will + also be added. This permits Job Reports and other messages to + be recorded in the Catalog so that they can be accessed by + reporting software. Bacula will prune the Log records associated + with a Job when the Job records are pruned. Otherwise, Bacula + never uses these records internally, so this destination is only + used for special purpose programs (e.g. {\bf bweb}). + +\end{description} + + For any destination, the {\bf message-type} field is a comma separated + list of the following types or classes of messages: + +\begin{description} + +\item [info] + \index[general]{info} + General information messages. + +\item [warning] + \index[general]{warning} + Warning messages. Generally this is some unusual condition but not expected + to be serious. + +\item [error] + \index[general]{error} + Non-fatal error messages. The job continues running. Any error message should + be investigated as it means that something went wrong. + +\item [fatal] + \index[general]{fatal} + Fatal error messages. Fatal errors cause the job to terminate. + +\item [terminate] + \index[general]{terminate} + Message generated when the daemon shuts down. + +\item [notsaved] + \index[fd]{notsaved} + \index[general]{notsaved} + Files not saved because of some error. Usually because the file cannot be + accessed (i.e. it does not exist or is not mounted). 
+
+\item [skipped]
+ \index[fd]{skipped}
+ \index[general]{skipped}
+ Files that were skipped because of a user supplied option such as an
+ incremental backup or a file that matches an exclusion pattern. This is
+ not considered an error condition such as the files listed for the {\bf
+ notsaved} type because the configuration file explicitly requests these
+ types of files to be skipped. For example, any unchanged file during an
+ incremental backup, or any subdirectory if the no recursion option is
+ specified.
+
+\item [mount]
+ \index[dir]{mount}
+ \index[general]{mount}
+ Volume mount or intervention requests from the Storage daemon. These
+ requests require a specific operator intervention for the job to
+ continue.
+
+\item [restored]
+ \index[fd]{restored}
+ \index[general]{restored}
+ The {\bf ls} style listing generated for each file restored is sent to
+ this message class.
+
+\item [all]
+ \index[general]{all}
+ All message types.
+
+\item [security]
+ \index[general]{security}
+ Security info/warning messages principally from unauthorized
+ connection attempts.
+
+\item [alert]
+ \index[general]{alert}
+ Alert messages. These are messages generated by tape alerts.
+
+\item [volmgmt]
+ \index[general]{volmgmt}
+ Volume management messages. Currently there are no volume management
+ messages generated.
+\end{description}
+
+\end{description}
+
+The following is an example of a valid Messages resource definition, where
+all messages except files explicitly skipped or daemon termination messages
+are sent by email to enforcement@sec.com. In addition all mount messages
+are sent to the operator (i.e. emailed to enforcement@sec.com).
Finally +all messages other than explicitly skipped files and files saved are sent +to the console: + +\footnotesize +\begin{verbatim} +Messages { + Name = Standard + mail = enforcement@sec.com = all, !skipped, !terminate + operator = enforcement@sec.com = mount + console = all, !skipped, !saved +} +\end{verbatim} +\normalsize + +With the exception of the email address (changed to avoid junk mail from +robot's), an example Director's Messages resource is as follows. Note, the {\bf +mailcommand} and {\bf operatorcommand} are on a single line -- they had to be +split for this manual: + +\footnotesize +\begin{verbatim} +Messages { + Name = Standard + mailcommand = "bacula/bin/bsmtp -h mail.example.com \ + -f \"\(Bacula\) %r\" -s \"Bacula: %t %e of %c %l\" %r" + operatorcommand = "bacula/bin/bsmtp -h mail.example.com \ + -f \"\(Bacula\) %r\" -s \"Bacula: Intervention needed \ + for %j\" %r" + MailOnError = security@example.com = all, !skipped, \ + !terminate + append = "bacula/bin/log" = all, !skipped, !terminate + operator = security@example.com = mount + console = all, !skipped, !saved +} +\end{verbatim} +\normalsize diff --git a/docs/manuals/en/install/monitorconf.tex b/docs/manuals/en/install/monitorconf.tex new file mode 100644 index 00000000..20c70b9d --- /dev/null +++ b/docs/manuals/en/install/monitorconf.tex @@ -0,0 +1,341 @@ +%% +%% + +\chapter{Monitor Configuration} +\label{_MonitorChapter} +\index[general]{Monitor Configuration } +\index[general]{Configuration!Monitor } + +The Monitor configuration file is a stripped down version of the Director +configuration file, mixed with a Console configuration file. It simply +contains the information necessary to contact Directors, Clients, and Storage +daemons you want to monitor. + +For a general discussion of configuration file and resources including the +data types recognized by {\bf Bacula}, please see the +\ilink{Configuration}{ConfigureChapter} chapter of this manual. 
+ +The following Monitor Resource definition must be defined: + +\begin{itemize} +\item + \ilink{Monitor}{MonitorResource} -- to define the Monitor's + name used to connect to all the daemons and the password used to connect to +the Directors. Note, you must not define more than one Monitor resource in +the Monitor configuration file. +\item At least one + \ilink{Client}{ClientResource1}, + \ilink{Storage}{StorageResource1} or +\ilink{Director}{DirectorResource2} resource, to define the +daemons to monitor. +\end{itemize} + +\section{The Monitor Resource} +\label{MonitorResource} +\index[general]{Monitor Resource } +\index[general]{Resource!Monitor } + +The Monitor resource defines the attributes of the Monitor running on the +network. The parameters you define here must be configured as a Director +resource in Clients and Storages configuration files, and as a Console +resource in Directors configuration files. + +\begin{description} + +\item [Monitor] + \index[fd]{Monitor } + Start of the Monitor records. + +\item [Name = \lt{}name\gt{}] + \index[fd]{Name } + Specify the Director name used to connect to Client and Storage, and the +Console name used to connect to Director. This record is required. + +\item [Password = \lt{}password\gt{}] + \index[fd]{Password } + Where the password is the password needed for Directors to accept the Console +connection. This password must be identical to the {\bf Password} specified +in the {\bf Console} resource of the +\ilink{Director's configuration}{DirectorChapter} file. This +record is required if you wish to monitor Directors. + +\item [Refresh Interval = \lt{}time\gt{}] + \index[fd]{Refresh Interval } + Specifies the time to wait between status requests to each daemon. It can't +be set to less than 1 second, or more than 10 minutes, and the default value +is 5 seconds. +% TODO: what is format of the time? +% TODO: should the digits in this definition be spelled out? 
should +% TODO: this say "time-period-specification" above??) +\end{description} + +\section{The Director Resource} +\label{DirectorResource2} +\index[general]{Director Resource } +\index[general]{Resource!Director } + +The Director resource defines the attributes of the Directors that are +monitored by this Monitor. + +As you are not permitted to define a Password in this resource, to avoid +obtaining full Director privileges, you must create a Console resource in the +\ilink{Director's configuration}{DirectorChapter} file, using the +Console Name and Password defined in the Monitor resource. To avoid security +problems, you should configure this Console resource to allow access to no +other daemons, and permit the use of only two commands: {\bf status} and {\bf +.status} (see below for an example). + +You may have multiple Director resource specifications in a single Monitor +configuration file. + +\begin{description} + +\item [Director] + \index[fd]{Director } + Start of the Director records. + +\item [Name = \lt{}name\gt{}] + \index[fd]{Name } + The Director name used to identify the Director in the list of monitored +daemons. It is not required to be the same as the one defined in the Director's +configuration file. This record is required. + +\item [DIRPort = \lt{}port-number\gt{}] + \index[fd]{DIRPort } + Specify the port to use to connect to the Director. This value will most +likely already be set to the value you specified on the {\bf +\verb:--:with-base-port} option of the {\bf ./configure} command. This port must be +identical to the {\bf DIRport} specified in the {\bf Director} resource of +the +\ilink{Director's configuration}{DirectorChapter} file. The +default is 9101 so this record is not normally specified. + +\item [Address = \lt{}address\gt{}] + \index[fd]{Address } + Where the address is a host name, a fully qualified domain name, or a network +address used to connect to the Director. This record is required. 
+\end{description}
+
+\section{The Client Resource}
+\label{ClientResource1}
+\index[general]{Resource!Client }
+\index[general]{Client Resource }
+
+The Client resource defines the attributes of the Clients that are monitored
+by this Monitor.
+
+You must create a Director resource in the
+\ilink{Client's configuration}{FiledConfChapter} file, using the
+Director Name defined in the Monitor resource. To avoid security problems, you
+should set the {\bf Monitor} directive to {\bf Yes} in this Director resource.
+
+
+You may have multiple Director resource specifications in a single Monitor
+configuration file.
+
+\begin{description}
+
+\item [Client (or FileDaemon)]
+ \index[fd]{Client (or FileDaemon) }
+ Start of the Client records.
+
+\item [Name = \lt{}name\gt{}]
+ \index[fd]{Name }
+ The Client name used to identify the Client in the list of monitored
+daemons. It is not required to be the same as the one defined in the Client's
+configuration file. This record is required.
+
+\item [Address = \lt{}address\gt{}]
+ \index[fd]{Address }
+ Where the address is a host name, a fully qualified domain name, or a network
+address in dotted quad notation for a Bacula File daemon. This record is
+required.
+
+\item [FD Port = \lt{}port-number\gt{}]
+ \index[fd]{FD Port }
+ Where the port is a port number at which the Bacula File daemon can be
+contacted. The default is 9102.
+
+\item [Password = \lt{}password\gt{}]
+ \index[fd]{Password }
+ This is the password to be used when establishing a connection with the File
+services, so the Client configuration file on the machine to be backed up
+must have the same password defined for this Director. This record is
+required.
+\end{description}
+
+\section{The Storage Resource}
+\label{StorageResource1}
+\index[general]{Resource!Storage }
+\index[general]{Storage Resource }
+
+The Storage resource defines the attributes of the Storages that are monitored
+by this Monitor.
+
+You must create a Director resource in the
+\ilink{Storage's configuration}{StoredConfChapter} file, using the
+Director Name defined in the Monitor resource. To avoid security problems, you
+should set the {\bf Monitor} directive to {\bf Yes} in this Director resource.
+
+
+You may have multiple Director resource specifications in a single Monitor
+configuration file.
+
+\begin{description}
+
+\item [Storage]
+ \index[fd]{Storage }
+ Start of the Storage records.
+
+\item [Name = \lt{}name\gt{}]
+ \index[fd]{Name }
+ The Storage name used to identify the Storage in the list of monitored
+daemons. It is not required to be the same as the one defined in the Storage's
+configuration file. This record is required.
+
+\item [Address = \lt{}address\gt{}]
+ \index[fd]{Address }
+ Where the address is a host name, a fully qualified domain name, or a network
+address in dotted quad notation for a Bacula Storage daemon. This record is
+required.
+
+\item [SD Port = \lt{}port\gt{}]
+ \index[fd]{SD Port }
+ Where port is the port to use to contact the storage daemon for information
+and to start jobs. This same port number must appear in the Storage resource
+of the Storage daemon's configuration file. The default is 9103.
+
+\item [Password = \lt{}password\gt{}]
+ \index[sd]{Password }
+ This is the password to be used when establishing a connection with the
+Storage services. This same password also must appear in the Director
+resource of the Storage daemon's configuration file. This record is required.
+
+\end{description}
+
+\section{Tray Monitor Security}
+\index[general]{Tray Monitor Security}
+
+There is no security problem in relaxing the permissions on
+tray-monitor.conf as long as FD, SD and DIR are configured properly, so
+the passwords contained in this file only give access to the status of
+the daemons. It could be a security problem if you consider the status
+information as potentially dangerous (I don't think it is the case).
+ +Concerning Director's configuration: \\ +In tray-monitor.conf, the password in the Monitor resource must point to +a restricted console in bacula-dir.conf (see the documentation). So, if +you use this password with bconsole, you'll only have access to the +status of the director (commands status and .status). +It could be a security problem if there is a bug in the ACL code of the +director. + +Concerning File and Storage Daemons' configuration:\\ +In tray-monitor.conf, the Name in the Monitor resource must point to a +Director resource in bacula-fd/sd.conf, with the Monitor directive set +to Yes (once again, see the documentation). +It could be a security problem if there is a bug in the code which check +if a command is valid for a Monitor (this is very unlikely as the code +is pretty simple). + + +\section{Sample Tray Monitor configuration} +\label{SampleConfiguration1} +\index[general]{Sample Tray Monitor configuration} + +An example Tray Monitor configuration file might be the following: + +\footnotesize +\begin{verbatim} +# +# Bacula Tray Monitor Configuration File +# +Monitor { + Name = rufus-mon # password for Directors + Password = "GN0uRo7PTUmlMbqrJ2Gr1p0fk0HQJTxwnFyE4WSST3MWZseR" + RefreshInterval = 10 seconds +} + +Client { + Name = rufus-fd + Address = rufus + FDPort = 9102 # password for FileDaemon + Password = "FYpq4yyI1y562EMS35bA0J0QC0M2L3t5cZObxT3XQxgxppTn" +} +Storage { + Name = rufus-sd + Address = rufus + SDPort = 9103 # password for StorageDaemon + Password = "9usxgc307dMbe7jbD16v0PXlhD64UVasIDD0DH2WAujcDsc6" +} +Director { + Name = rufus-dir + DIRport = 9101 + address = rufus +} +\end{verbatim} +\normalsize + +\subsection{Sample File daemon's Director record.} +\index[general]{Sample File daemon's Director record. 
} +\index[general]{Record!Sample File daemon's Director } + +Click +\ilink{here to see the full example.}{SampleClientConfiguration} + + +\footnotesize +\begin{verbatim} +# +# Restricted Director, used by tray-monitor to get the +# status of the file daemon +# +Director { + Name = rufus-mon + Password = "FYpq4yyI1y562EMS35bA0J0QC0M2L3t5cZObxT3XQxgxppTn" + Monitor = yes +} +\end{verbatim} +\normalsize + +\subsection{Sample Storage daemon's Director record.} +\index[general]{Record!Sample Storage daemon's Director } +\index[general]{Sample Storage daemon's Director record. } + +Click +\ilink{here to see the full example.}{SampleConfiguration} + +\footnotesize +\begin{verbatim} +# +# Restricted Director, used by tray-monitor to get the +# status of the storage daemon +# +Director { + Name = rufus-mon + Password = "9usxgc307dMbe7jbD16v0PXlhD64UVasIDD0DH2WAujcDsc6" + Monitor = yes +} +\end{verbatim} +\normalsize + +\subsection{Sample Director's Console record.} +\index[general]{Record!Sample Director's Console } +\index[general]{Sample Director's Console record. } + +Click +\ilink{here to see the full +example.}{SampleDirectorConfiguration} + +\footnotesize +\begin{verbatim} +# +# Restricted console used by tray-monitor to get the status of the director +# +Console { + Name = Monitor + Password = "GN0uRo7PTUmlMbqrJ2Gr1p0fk0HQJTxwnFyE4WSST3MWZseR" + CommandACL = status, .status +} +\end{verbatim} +\normalsize diff --git a/docs/manuals/en/install/quickstart.tex b/docs/manuals/en/install/quickstart.tex new file mode 100644 index 00000000..153a17d7 --- /dev/null +++ b/docs/manuals/en/install/quickstart.tex @@ -0,0 +1,389 @@ +%% +%% + +\chapter{Getting Started with Bacula} +\label{QuickStartChapter} +\index[general]{Getting Started with Bacula } + +If you are like me, you want to get Bacula running immediately to get a feel +for it, then later you want to go back and read about all the details. 
This +chapter attempts to accomplish just that: get you going quickly without all +the details. If you want to skip the section on Pools, Volumes and Labels, you +can always come back to it, but please read to the end of this chapter, and in +particular follow the instructions for testing your tape drive. + +We assume that you have managed to build and install Bacula, if not, you might +want to first look at the +\ilink{System Requirements}{SysReqs} then at the +\ilink{Compiling and Installing Bacula}{InstallChapter} chapter of +this manual. + +\label{JobsandSchedules} +\section{Understanding Jobs and Schedules} +\index[general]{Jobs!Understanding} +\index[general]{Schedules!Understanding} + +In order to make Bacula as flexible as possible, the directions given +to Bacula are specified in several pieces. The main instruction is the +job resource, which defines a job. A backup job generally consists of a +FileSet, a Client, a Schedule for one or several levels or times of backups, +a Pool, as well as additional instructions. Another way of looking +at it is the FileSet is what to backup; the Client is who to backup; the +Schedule defines when, and the Pool defines where (i.e. what Volume). + +Typically one FileSet/Client combination will have one corresponding job. +Most of the directives, such as FileSets, Pools, Schedules, can be mixed +and matched among the jobs. So you might have two different Job +definitions (resources) backing up different servers using the same +Schedule, the same Fileset (backing up the same directories on two machines) +and maybe even the same Pools. The Schedule will define what type of +backup will run when (e.g. Full on Monday, incremental the rest of the +week), and when more than one job uses the same schedule, the job priority +determines which actually runs first. 
If you have a lot of jobs, you might +want to use JobDefs, where you can set defaults for the jobs, which can +then be changed in the job resource, but this saves rewriting the +identical parameters for each job. In addition to the FileSets you want to +back up, you should also have a job that backs up your catalog. + +Finally, be aware that in addition to the backup jobs there are +restore, verify, and admin jobs, which have different requirements. + +\label{PoolsVolsLabels} +\section{Understanding Pools, Volumes and Labels} +\index[general]{Labels!Understanding Pools Volumes and } +\index[general]{Understanding Pools, Volumes and Labels } + +If you have been using a program such as {\bf tar} to backup your system, +Pools, Volumes, and labeling may be a bit confusing at first. A Volume is a +single physical tape (or possibly a single file) on which Bacula will write +your backup data. Pools group together Volumes so that a backup is not +restricted to the length of a single Volume (tape). Consequently, rather than +explicitly naming Volumes in your Job, you specify a Pool, and Bacula will +select the next appendable Volume from the Pool and request you to mount it. +% TODO: can't it mount it itself if already available? + +Although the basic Pool options are specified in the Director's Pool resource, +the {\bf real} Pool is maintained in the Bacula Catalog. It contains +information taken from the Pool resource (bacula-dir.conf) as well as +information on all the Volumes that have been added to the Pool. Adding +Volumes to a Pool is usually done manually with the Console program using the +{\bf label} command. + +For each Volume, Bacula maintains a fair amount of catalog information such as +the first write date/time, the last write date/time, the number of files on +the Volume, the number of bytes on the Volume, the number of Mounts, etc. 
+ +Before Bacula will read or write a Volume, the physical Volume must have a +Bacula software label so that Bacula can be sure the correct Volume is +mounted. This is usually done using the {\bf label} command in the Console +program. + +The steps for creating a Pool, adding Volumes to it, and writing software +labels to the Volumes, may seem tedious at first, but in fact, they are quite +simple to do, and they allow you to use multiple Volumes (rather than being +limited to the size of a single tape). Pools also give you significant +flexibility in your backup process. For example, you can have a "Daily" Pool +of Volumes for Incremental backups and a "Weekly" Pool of Volumes for Full +backups. By specifying the appropriate Pool in the daily and weekly backup +Jobs, you thereby insure that no daily Job ever writes to a Volume in the +Weekly Pool and vice versa, and Bacula will tell you what tape is needed and +when. + +For more on Pools, see the +\ilink{Pool Resource}{PoolResource} section of the Director +Configuration chapter, or simply read on, and we will come back to this +subject later. + +\section{Setting Up Bacula Configuration Files} +\label{config} +\index[general]{Setting Up Bacula Configuration Files } +\index[general]{Files!Setting Up Bacula Configuration } + +% TODO: this assumes installation from source: +After running the appropriate {\bf ./configure} command and doing +a {\bf make}, and a {\bf make install}, if this is the first time +you are running Bacula, you must create valid configuration files +for the Director, the File daemon, the Storage daemon, and the +Console programs. If you have followed our recommendations, +default configuration files as well as the daemon binaries will +be located in your installation directory. 
In any case, the
+binaries are found in the directory you specified on the {\bf
+\verb:--:sbindir} option to the {\bf ./configure} command, and
+the configuration files are found in the directory you specified
+on the {\bf \verb:--:sysconfdir} option.
+
+When initially setting up Bacula you will need to invest a bit of time in
+modifying the default configuration files to suit your environment. This may
+entail starting and stopping Bacula a number of times until you get everything
+right. Please do not despair. Once you have created your configuration files,
+you will rarely need to change them nor will you stop and start Bacula very
+often. Most of the work will simply be in changing the tape when it is full.
+
+\subsection{Configuring the Console Program}
+\index[general]{Configuring the Console Program }
+\index[general]{Program!Configuring the Console }
+
+The Console program is used by the administrator to interact with the Director
+and to manually start/stop Jobs or to obtain Job status information.
+
+The Console configuration file is found in the directory specified on the
+{\bf \verb:--:sysconfdir} option that you specified on the {\bf
+./configure} command and by default is named {\bf bconsole.conf}.
+
+If you choose to build the GNOME console with the {\bf
+\verb:--:enable-gnome} option, you will also find a default configuration file
+for it, named {\bf bgnome-console.conf}.
+
+The same applies to the wxWidgets console, which is built with the {\bf
+\verb:--:enable-bwx-console} option, and the name of the default
+configuration file is, in this case, {\bf bwx-console.conf}.
+
+Normally, for first time users, no change is needed to these files. Reasonable
+defaults are set.
+
+Further details are in the
+\ilink{Console configuration}{ConsoleConfChapter} chapter.
+
+\subsection{Configuring the Monitor Program}
+\index[general]{Program!Configuring the Monitor }
+\index[general]{Configuring the Monitor Program }
+
+The Monitor program is typically an icon in the system tray. However, once the
+icon is expanded into a full window, the administrator or user can obtain
+status information about the Director or the backup status on the local
+workstation or any other Bacula daemon that is configured.
+
+\addcontentsline{lof}{figure}{Bacula Tray Monitor}
+\includegraphics{./Bacula-tray-monitor.eps}
+
+% TODO: image may be too wide for 6" wide printed page.
+The image shows a tray-monitor configured for three daemons. By clicking on
+the radio buttons in the upper left corner of the image, you can see the
+status for each of the daemons. The image shows the status for the Storage
+daemon (MainSD) that is currently selected.
+
+The Monitor configuration file is found in the directory specified on the {\bf
+\verb:--:sysconfdir} option that you specified on the {\bf ./configure} command
+and
+by default is named {\bf tray-monitor.conf}. Normally, for first time users,
+you just need to change the permission of this file to allow non-root users to
+run the Monitor, as this application must run as the same user as the
+graphical environment (don't forget to allow non-root users to execute {\bf
+bacula-tray-monitor}). This is not a security problem as long as you use the
+default settings.
+
+More information is in the
+\ilink{Monitor configuration}{_MonitorChapter} chapter.
+
+\subsection{Configuring the File daemon}
+\index[general]{Daemon!Configuring the File }
+\index[general]{Configuring the File daemon }
+
+The File daemon is a program that runs on each (Client) machine. At the
+request of the Director, it finds the files to be backed up and sends them (their
+data) to the Storage daemon.
+ +The File daemon configuration file is found in the directory specified on +the {\bf \verb:--:sysconfdir} option that you specified on the {\bf ./configure} +command. By default, the File daemon's configuration file is named {\bf +bacula-fd.conf}. Normally, for first time users, no change is needed to this +file. Reasonable defaults are set. However, if you are going to back up more +than one machine, you will need to install the File daemon with a unique +configuration file on each machine to be backed up. The information about each +File daemon must appear in the Director's configuration file. +% TODO: point to section about how to install just the File daemon +% TODO: and creating the unique configuration file. + +Further details are in the +\ilink{File daemon configuration}{FiledConfChapter} chapter. + +\subsection{Configuring the Director} +\index[general]{Director!Configuring the } +\index[general]{Configuring the Director } + +The Director is the central control program for all the other daemons. It +schedules and monitors all jobs to be backed up. + +The Director configuration file is found in the directory specified on the +{\bf \verb:--:sysconfdir} option that you specified on the {\bf ./configure} +command. Normally the Director's configuration file is named {\bf bacula-dir.conf}. + +In general, the only change you must make is modify the FileSet resource so +that the {\bf Include} configuration directive contains at least one line with +a valid name of a directory (or file) to be saved. + +% TODO: is DLT still the default config? +If you do not have a DLT tape drive, you will probably want to edit the +Storage resource to contain names that are more representative of your actual +storage device. You can always use the existing names as you are free to +arbitrarily assign them, but they must agree with the corresponding names in +the Storage daemon's configuration file. 
+ +You may also want to change the email address for notification from the +default {\bf root} to your email address. + +Finally, if you have multiple systems to be backed up, you will need a +separate File daemon or Client specification for each system, specifying its +% TODO: I don't see any example "File" configuraton in the default +% TODO: bacula-dir.conf; I do see FileDaemon config in the default +% TODO: bacula-fd.conf. Be more clear about this or point to explanation +% TODO: about this. +name, address, and password. We have found that giving your daemons the same +% TODO: what passwords should I use? I have different ones in the +% TODO: different configs on different systems. Point to explanation of +% this. +name as your system but post fixed with {\bf -fd} helps a lot in debugging. +That is, if your system name is {\bf foobaz}, you would give the File daemon +the name {\bf foobaz-fd}. For the Director, you should use {\bf foobaz-dir}, +and for the storage daemon, you might use {\bf foobaz-sd}. +Each of your Bacula components {\bf must} have a unique name. If you +make them all the same, aside from the fact that you will not +know what daemon is sending what message, if they share the same +working directory, the daemons temporary file names will not +be unique, and you will get many strange failures. +% TODO: why not check for that and not allow sharing working directory? + +More information is in the +\ilink{Director configuration}{DirectorChapter} chapter. + +\subsection{Configuring the Storage daemon} +\index[general]{Daemon!Configuring the Storage } +\index[general]{Configuring the Storage daemon } + +The Storage daemon is responsible, at the Director's request, for accepting +data from a File daemon and placing it on Storage media, or in the case of a +restore request, to find the data and send it to the File daemon. 
+ +The Storage daemon's configuration file is found in the directory specified on +the {\bf \verb:--:sysconfdir} option that you specified on the {\bf ./configure} +command. By default, the Storage daemon's file is named {\bf bacula-sd.conf}. +Edit this file to contain the correct Archive device names for any tape +devices that you have. If the configuration process properly detected your +system, they will already be correctly set. These Storage resource name and +Media Type must be the same as the corresponding ones in the Director's +configuration file {\bf bacula-dir.conf}. If you want to backup to a file +instead of a tape, the Archive device must point to a directory in which the +Volumes will be created as files when you label the Volume. +\label{ConfigTesting} + +Further information is in the +\ilink{Storage daemon configuration}{StoredConfChapter} chapter. + +\section{Testing your Configuration Files} +\index[general]{Testing your Configuration Files } +\index[general]{Files!Testing your Configuration } + +You can test if your configuration file is syntactically correct by running +the appropriate daemon with the {\bf -t} option. The daemon will process the +configuration file and print any error messages then terminate. For example, +assuming you have installed your binaries and configuration files in the same +directory. +% TODO: why assume that? common default install has the executable +% TODO: is in ./sbin and the configs are in ./etc. So maybe just have +% TODO: example correct or change default install to be same. + +\footnotesize +\begin{verbatim} +cd +./bacula-dir -t -c bacula-dir.conf +./bacula-fd -t -c bacula-fd.conf +./bacula-sd -t -c bacula-sd.conf +./bconsole -t -c bconsole.conf +./bgnome-console -t -c bgnome-console.conf +./bwx-console -t -c bwx-console.conf +./bat -t -c bat.conf +su -c "./bacula-tray-monitor -t -c tray-monitor.conf" +\end{verbatim} +\normalsize + +will test the configuration files of each of the main programs. 
If the
+configuration file is OK, the program will terminate without printing
+anything. Please note that, depending on the configure options you choose,
+some, or even all, of the last three commands will not be available on your
+system. If you have installed the binaries in traditional Unix locations
+rather than a single file, you will need to modify the above commands
+appropriately (no ./ in front of the command name, and a path in front of the
+conf file name).
+\label{TapeTesting}
+
+\section{Testing Compatibility with Your Tape Drive}
+\index[general]{Drive!Testing Bacula Compatibility with Your Tape}
+\index[general]{Testing Bacula Compatibility with Your Tape Drive}
+
+Before spending a lot of time on Bacula only to find that it doesn't work
+with your tape drive, please read the {\bf Testing Your Tape
+Drive} chapter of this manual. If you have a modern
+standard SCSI tape drive on a Linux or Solaris, most likely it will work,
+but better test than be sorry. For FreeBSD (and probably other xBSD
+flavors), reading the above mentioned tape testing chapter is a must.
+Also, for FreeBSD, please see \elink{The FreeBSD
+Diary}{\url{http://www.freebsddiary.org/bacula.php}} for a detailed description
+%TODO: fix elink so it shows URL in PDF
+on how to make Bacula work on your system. In addition, users of FreeBSD
+prior to 4.9-STABLE dated Mon Dec 29 15:18:01 2003 UTC who plan to use tape
+devices, please see the file {\bf platforms/freebsd/pthreads-fix.txt} in
+the main Bacula directory for important information concerning
+compatibility of Bacula and your system. \label{notls}
+
+\section{Get Rid of the /lib/tls Directory}
+\index[general]{Directory!Get Rid of the /lib/tls }
+\index[general]{Get Rid of the /lib/tls Directory }
+The new pthreads library {\bf /lib/tls} installed by default on recent Red
+Hat systems running Linux kernel 2.4.x is defective.
You must remove it or +rename it, then reboot your system before running Bacula otherwise after a +week or so of running, Bacula will either block for long periods or +deadlock entirely. You may want to use the loader environment variable +override rather than removing /lib/tls. Please see \ilink{ Supported +Operating Systems}{SupportedOSes} for more information on this problem. + +This problem does not occur on systems running Linux 2.6.x kernels. + +\label{Running1} + +\section{Running Bacula} +\index[general]{Bacula!Running } +\index[general]{Running Bacula } + +Probably the most important part of running Bacula is being able to restore +files. If you haven't tried recovering files at least once, when you actually +have to do it, you will be under a lot more pressure, and prone to make +errors, than if you had already tried it once. + +To get a good idea how to use Bacula in a short time, we {\bf strongly} +recommend that you follow the example in the +\ilink{Running Bacula Chapter}{TutorialChapter} of this manual where +you will get detailed instructions on how to run Bacula. + +\section{Log Rotation} +\index[general]{Rotation!Log } +\index[general]{Log Rotation } +If you use the default {\bf bacula-dir.conf} or some variation of it, you will +note that it logs all the Bacula output to a file. To avoid that this file +grows without limit, we recommend that you copy the file {\bf logrotate} from +the {\bf scripts/logrotate} to {\bf /etc/logrotate.d/bacula}. This will cause +the log file to be rotated once a month and kept for a maximum of five months. +You may want to edit this file to change the default log rotation preferences. + +\section{Log Watch} +\index[general]{Watch!Log} +\index[general]{Log Watch} +Some systems such as Red Hat and Fedora run the logwatch program +every night, which does an analysis of your log file and sends an +email report. 
If you wish to include the output from your Bacula +jobs in that report, please look in the {\bf scripts/logwatch} +directory. The {\bf README} file in that directory gives a brief +explanation on how to install it and what kind of output to expect. + + +\section{Disaster Recovery} +\index[general]{Recovery!Disaster } +\index[general]{Disaster Recovery } + +If you intend to use Bacula as a disaster recovery tool rather than simply a +program to restore lost or damaged files, you will want to read the +\ilink{Disaster Recovery Using Bacula Chapter}{RescueChapter} of +this manual. + +In any case, you are strongly urged to carefully test restoring some files +that you have saved rather than wait until disaster strikes. This way, you +will be prepared. diff --git a/docs/manuals/en/install/security.tex b/docs/manuals/en/install/security.tex new file mode 100644 index 00000000..7866410a --- /dev/null +++ b/docs/manuals/en/install/security.tex @@ -0,0 +1,332 @@ +%% +%% + +\chapter{Bacula Security Issues} +\label{SecurityChapter} +\index[general]{Bacula Security Issues} +\index[general]{Security} +\index[general]{Issues!Bacula Security} + +\begin{itemize} +\item Security means being able to restore your files, so read the + \ilink{Critical Items Chapter}{Critical} of this manual. +\item The Clients ({\bf bacula-fd}) must run as root to be able to access all + the system files. +\item It is not necessary to run the Director as root. +\item It is not necessary to run the Storage daemon as root, but you must + ensure that it can open the tape drives, which are often restricted to root + access by default. In addition, if you do not run the Storage daemon as root, + it will not be able to automatically set your tape drive parameters on most + OSes since these functions, unfortunately require root access. +\item You should restrict access to the Bacula configuration files, so that + the passwords are not world-readable. 
The {\bf Bacula} daemons are password + protected using CRAM-MD5 (i.e. the password is not sent across the network). + This will ensure that not everyone can access the daemons. It is a reasonably + good protection, but can be cracked by experts. +\item If you are using the recommended ports 9101, 9102, and 9103, you will + probably want to protect these ports from external access using a firewall + and/or using tcp wrappers ({\bf etc/hosts.allow}). +\item By default, all data that is sent across the network is unencrypted. + However, Bacula does support TLS (transport layer security) and can + encrypt transmitted data. Please read the + \ilink{TLS (SSL) Communications Encryption}{CommEncryption} + section of this manual. +\item You should ensure that the Bacula working directories are readable and + writable only by the Bacula daemons. +\item If you are using {\bf MySQL} it is not necessary for it to run with + {\bf root} permission. +\item The default Bacula {\bf grant-mysql-permissions} script grants all + permissions to use the MySQL database without a password. If you want + security, please tighten this up! +\item Don't forget that Bacula is a network program, so anyone anywhere on + the network with the console program and the Director's password can access + Bacula and the backed up data. +\item You can restrict what IP addresses Bacula will bind to by using the + appropriate {\bf DirAddress}, {\bf FDAddress}, or {\bf SDAddress} records in + the respective daemon configuration files. +\item Be aware that if you are backing up your database using the default + script, if you have a password on your database, it will be passed as + a command line option to that script, and any user will be able to see + this information. If you want it to be secure, you will need to pass it + by an environment variable or a secure file. 
+ + See also \ilink{Backing Up Your Bacula + Database - Security Considerations }{BackingUpBaculaSecurityConsiderations} + for more information. +\end{itemize} + + +\section{Backward Compatibility} +\index[general]{Backward Compatibility} +One of the major goals of Bacula is to ensure that you can restore +tapes (I'll use the word tape to include disk Volumes) that you wrote years +ago. This means that each new version of Bacula should be able to read old +format tapes. The first problem you will have is to ensure that the +hardware is still working some years down the road, and the second +problem will be to ensure that the media will still be good, then +your OS must be able to interface to the device, and finally Bacula +must be able to recognize old formats. All the problems except the +last are ones that we cannot solve, but by careful planning you can. + +Since the very beginning of Bacula (January 2000) until today (December +2005), there have been two major Bacula tape formats. The second format +was introduced in version 1.27 in November of 2002, and it has not +changed since then. In principle, Bacula can still read the original +format, but I haven't tried it lately so who knows ... + +Though the tape format is fixed, the kinds of data that we can put on the +tapes are extensible, and that is how we added new features +such as ACLs, Win32 data, encrypted data, ... Obviously, an older +version of Bacula would not know how to read these newer data streams, +but each newer version of Bacula should know how to read all the +older streams. + +If you want to be 100% sure that you can read old tapes, you +should: + +1. Try reading old tapes from time to time -- e.g. at least once +a year. + +2. Keep statically linked copies of every version of Bacula that you use +in production then if for some reason, we botch up old tape compatibility, you +can always pull out an old copy of Bacula ... 
+ +The second point is probably overkill but if you want to be sure, it may +save you someday. + + + +\label{wrappers} +\section{Configuring and Testing TCP Wrappers} +\index[general]{Configuring and Testing TCP Wrappers} +\index[general]{TCP Wrappers} +\index[general]{Wrappers!TCP} +\index[general]{libwrappers} + +TCP Wrappers are implemented if you turn them on when configuring +({\bf ./configure \verb:--:with-tcp-wrappers}). +With this code enabled, you may control who may access your +daemons. This control is done by modifying the file: {\bf +/etc/hosts.allow}. The program name that {\bf Bacula} uses when +applying these access restrictions is the name you specify in the +daemon configuration file (see below for examples). +You must not use the {\bf twist} option in your {\bf +/etc/hosts.allow} or it will terminate the Bacula daemon when a +connection is refused. + +The exact name of the package you need loaded to build with TCP wrappers +depends on the system. For example, +on SuSE, the TCP wrappers libraries needed to link Bacula are +contained in the tcpd-devel package. On Red Hat, the package is named +tcp\_wrappers. + +Dan Langille has provided the following information on configuring and +testing TCP wrappers with Bacula. + +If you read hosts\_options(5), you will see an option called twist. This +option replaces the current process by an instance of the specified shell +command. Typically, something like this is used: + +\footnotesize +\begin{verbatim} +ALL : ALL \ + : severity auth.info \ + : twist /bin/echo "You are not welcome to use %d from %h." +\end{verbatim} +\normalsize + +The libwrap code tries to avoid {\bf twist} if it runs in a resident process, +but that test will not protect the first hosts\_access() call. This will +result in the process (e.g. bacula-fd, bacula-sd, bacula-dir) being terminated +if the first connection to their port results in the twist option being +invoked. 
The potential, and I stress potential, exists for an attacker to +prevent the daemons from running. This situation is eliminated if your +/etc/hosts.allow file contains an appropriate rule set. The following example +is sufficient: + +\footnotesize +\begin{verbatim} +undef-fd : localhost : allow +undef-sd : localhost : allow +undef-dir : localhost : allow +undef-fd : ALL : deny +undef-sd : ALL : deny +undef-dir : ALL : deny +\end{verbatim} +\normalsize + +You must adjust the names to be the same as the Name directives found +in each of the daemon configuration files. They are, in general, not the +same as the binary daemon names. It is not possible to use the +daemon names because multiple daemons may be running on the same machine +but with different configurations. + +In these examples, the Director is undef-dir, the +Storage Daemon is undef-sd, and the File Daemon is undef-fd. Adjust to suit +your situation. The above example rules assume that the SD, FD, and DIR all +reside on the same box. If you have a remote FD client, then the following +rule set on the remote client will suffice: + +\footnotesize +\begin{verbatim} +undef-fd : director.example.org : allow +undef-fd : ALL : deny +\end{verbatim} +\normalsize + +where director.example.org is the host which will be contacting the client +(ie. the box on which the Bacula Director daemon runs). The use of "ALL : +deny" ensures that the twist option (if present) is not invoked. To properly +test your configuration, start the daemon(s), then attempt to connect from an +IP address which should be able to connect. You should see something like +this: + +\footnotesize +\begin{verbatim} +$ telnet undef 9103 +Trying 192.168.0.56... +Connected to undef.example.org. +Escape character is '^]'. +Connection closed by foreign host. +$ +\end{verbatim} +\normalsize + +This is the correct response. If you see this: + +\footnotesize +\begin{verbatim} +$ telnet undef 9103 +Trying 192.168.0.56... +Connected to undef.example.org. 
+
+Escape character is '^]'.
+You are not welcome to use undef-sd from xeon.example.org.
+Connection closed by foreign host.
+$
+\end{verbatim}
+\normalsize
+
+then twist has been invoked and your configuration is not correct and you need
+to add the deny statement. It is important to note that your testing must
+include restarting the daemons after each connection attempt. You can also
+use tcpdchk(8) and tcpdmatch(8) to validate your /etc/hosts.allow rules. Here
+is a simple test using tcpdmatch:
+
+\footnotesize
+\begin{verbatim}
+$ tcpdmatch undef-dir xeon.example.org
+warning: undef-dir: no such process name in /etc/inetd.conf
+client: hostname xeon.example.org
+client: address 192.168.0.18
+server: process undef-dir
+matched: /etc/hosts.allow line 40
+option: allow
+access: granted
+\end{verbatim}
+\normalsize
+
+If you are running Bacula as a standalone daemon, the warning above can be
+safely ignored. Here is an example which indicates that your rules are missing
+a deny statement and the twist option has been invoked.
+
+\footnotesize
+\begin{verbatim}
+$ tcpdmatch undef-dir 10.0.0.1
+warning: undef-dir: no such process name in /etc/inetd.conf
+client: address 10.0.0.1
+server: process undef-dir
+matched: /etc/hosts.allow line 91
+option: severity auth.info
+option: twist /bin/echo "You are not welcome to use
+ undef-dir from 10.0.0.1."
+access: delegated
+\end{verbatim}
+\normalsize
+
+\section{Running as non-root}
+\index[general]{Running as non-root }
+
+Security advice from Dan Langille:
+% TODO: don't use specific name
+
+% TODO: don't be too specific on operating system
+
+% TODO: maybe remove personalization?
+
+It is a good idea to run daemons with the lowest possible privileges. In
+other words, if you can, don't run applications as root which do not have to
+be root. The Storage Daemon and the Director Daemon do not need to be root.
+The File Daemon needs to be root in order to access all files on your system. 
+
+In order to run as non-root, you need to create a user and a group. Choosing
+{\tt bacula} as both the user name and the group name sounds like a good idea
+to me.
+
+The FreeBSD port creates this user and group for you.
+Here is what those entries looked like on my FreeBSD laptop:
+
+\footnotesize
+\begin{verbatim}
+bacula:*:1002:1002::0:0:Bacula Daemon:/var/db/bacula:/sbin/nologin
+\end{verbatim}
+\normalsize
+
+I used vipw to create this entry. I selected a User ID and Group ID of 1002
+as they were unused on my system.
+
+I also created a group in /etc/group:
+
+\footnotesize
+\begin{verbatim}
+bacula:*:1002:
+\end{verbatim}
+\normalsize
+
+The bacula user (as opposed to the Bacula daemon) will have a home directory
+of {\tt /var/db/bacula} which is the default location for the Bacula
+database.
+
+Now that you have both a bacula user and a bacula group, you can secure the
+bacula home directory by issuing this command:
+
+\footnotesize
+\begin{verbatim}
+chown -R bacula:bacula /var/db/bacula/
+\end{verbatim}
+\normalsize
+
+This ensures that only the bacula user can access this directory. It also
+means that if we run the Director and the Storage daemon as bacula, those
+daemons also have restricted access. This would not be the case if they were
+running as root.
+
+It is important to note that the storage daemon actually needs to be in the
+operator group for normal access to tape drives etc (at least on a FreeBSD
+system, that's how things are set up by default). Such devices are normally
+chown root:operator. It is easier and less error prone to make Bacula a
+member of that group than it is to play around with system permissions. 
+ +Starting the Bacula daemons + +To start the bacula daemons on a FreeBSD system, issue the following command: + +\footnotesize +\begin{verbatim} +/usr/local/etc/rc.d/bacula-dir start +/usr/local/etc/rc.d/bacula-sd start +/usr/local/etc/rc.d/bacula-fd start +\end{verbatim} +\normalsize + +To confirm they are all running: + +\footnotesize +\begin{verbatim} +$ ps auwx | grep bacula +root 63418 0.0 0.3 1856 1036 ?? Ss 4:09PM 0:00.00 + /usr/local/sbin/bacula-fd -v -c /usr/local/etc/bacula-fd.conf +bacula 63416 0.0 0.3 2040 1172 ?? Ss 4:09PM 0:00.01 + /usr/local/sbin/bacula-sd -v -c /usr/local/etc/bacula-sd.conf +bacula 63422 0.0 0.4 2360 1440 ?? Ss 4:09PM 0:00.00 + /usr/local/sbin/bacula-dir -v -c /usr/local/etc/bacula-dir.conf +\end{verbatim} +\normalsize diff --git a/docs/manuals/en/install/setup.sm b/docs/manuals/en/install/setup.sm new file mode 100644 index 00000000..7c88dc61 --- /dev/null +++ b/docs/manuals/en/install/setup.sm @@ -0,0 +1,23 @@ +/* + * html2latex + */ + +available { + sun4_sunos.4 + sun4_solaris.2 + rs_aix.3 + rs_aix.4 + sgi_irix +} + +description { + From Jeffrey Schaefer, Geometry Center. 
Translates HTML document to LaTeX +} + +install { + bin/html2latex /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex + bin/html2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.tag + bin/html2latex-local.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex-local.tag + bin/webtex2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/webtex2latex.tag + man/man1/html2latex.1 /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.1 +} diff --git a/docs/manuals/en/install/storedconf.tex b/docs/manuals/en/install/storedconf.tex new file mode 100644 index 00000000..34133bf9 --- /dev/null +++ b/docs/manuals/en/install/storedconf.tex @@ -0,0 +1,1374 @@ +%% +%% + +\chapter{Storage Daemon Configuration} +\label{StoredConfChapter} +\index[general]{Storage Daemon Configuration} +\index[general]{Configuration!Storage Daemon} + +The Storage Daemon configuration file has relatively few resource definitions. +However, due to the great variation in backup media and system capabilities, +the storage daemon must be highly configurable. As a consequence, there are +quite a large number of directives in the Device Resource definition that +allow you to define all the characteristics of your Storage device (normally a +tape drive). Fortunately, with modern storage devices, the defaults are +sufficient, and very few directives are actually needed. + +Examples of {\bf Device} resource directives that are known to work for a +number of common tape drives can be found in the {\bf +\lt{}bacula-src\gt{}/examples/devices} directory, and most will also be listed +here. + +For a general discussion of configuration file and resources including the +data types recognized by {\bf Bacula}, please see the +\ilink{Configuration}{ConfigureChapter} chapter of this manual. The +following Storage Resource definitions must be defined: + +\begin{itemize} +\item + \ilink{Storage}{StorageResource} -- to define the name of the + Storage daemon. 
+
+\item
+ \ilink{Director}{DirectorResource1} -- to define the Director's
+ name and his access password.
+\item
+ \ilink{Device}{DeviceResource} -- to define the
+ characteristics of your storage device (tape drive).
+\item
+ \ilink{Messages}{MessagesChapter} -- to define where error and
+ information messages are to be sent.
+\end{itemize}
+
+\section{Storage Resource}
+\label{StorageResource}
+\index[general]{Resource!Storage}
+\index[general]{Storage Resource}
+
+In general, the properties specified under the Storage resource define global
+properties of the Storage daemon. Each Storage daemon configuration file must
+have one and only one Storage resource definition.
+
+\begin{description}
+
+\item [Name = \lt{}Storage-Daemon-Name\gt{}]
+ \index[sd]{Name}
+ \index[sd]{Directive!Name}
+ Specifies the Name of the Storage daemon. This directive is required.
+
+\item [Working Directory = \lt{}Directory\gt{}]
+ \index[sd]{Working Directory}
+ \index[sd]{Directive!Working Directory}
+ This directive is mandatory and specifies a directory in which the Storage
+ daemon may put its status files. This directory should be used only by {\bf
+ Bacula}, but may be shared by other Bacula daemons provided the names
+ given to each daemon are unique. This directive is
+ required.
+
+\item [Pid Directory = \lt{}Directory\gt{}]
+ \index[sd]{Pid Directory}
+ \index[sd]{Directive!Pid Directory}
+ This directive is mandatory and specifies a directory in which the Director
+ may put its process Id files. The process Id file is used to shutdown
+ Bacula and to prevent multiple copies of Bacula from running simultaneously.
+ This directive is required. Standard shell expansion of the {\bf Directory}
+ is done when the configuration file is read so that values such as {\bf
+ \$HOME} will be properly expanded.
+
+ Typically on Linux systems, you will set this to: {\bf /var/run}. 
If you are
+ not installing Bacula in the system directories, you can use the {\bf Working
+ Directory} as defined above.
+
+\item [Heartbeat Interval = \lt{}time-interval\gt{}]
+ \index[sd]{Heartbeat Interval}
+ \index[sd]{Directive!Heartbeat Interval}
+ \index[general]{Heartbeat Interval}
+ \index[general]{Broken pipe}
+ This directive defines an interval of time in seconds. When
+ the Storage daemon is waiting for the operator to mount a
+ tape, each time interval, it will send a heartbeat signal to
+ the File daemon. The default interval is zero which disables
+ the heartbeat. This feature is particularly useful if you
+ have a router such as 3Com that does not follow Internet
+ standards and times out a valid connection after a short
+ duration despite the fact that keepalive is set. This usually
+ results in a broken pipe error message.
+
+\item [Client Connect Wait = \lt{}time-interval\gt{}]
+ \index[sd]{Connect Wait}
+ \index[sd]{Directive!Connect Wait}
+ \index[general]{Client Connect Wait}
+ This directive defines an interval of time in seconds that
+ the Storage daemon will wait for a Client (the File daemon)
+ to connect. The default is 30 seconds. Be aware that the
+ longer the Storage daemon waits for a Client, the more
+ resources will be tied up.
+
+\item [Maximum Concurrent Jobs = \lt{}number\gt{}]
+ \index[sd]{Maximum Concurrent Jobs}
+ \index[sd]{Directive!Maximum Concurrent Jobs}
+ where \lt{}number\gt{} is the maximum number of Jobs that should run
+ concurrently. The default is set to 10, but you may set it to a larger
+ number. Each contact from the Director (e.g. status request, job start
+ request) is considered as a Job, so if you want to be able to do a {\bf
+ status} request in the console at the same time as a Job is running, you
+ will need to set this value greater than 1. To run simultaneous Jobs,
+ you will need to set a number of other directives in the Director's
+ configuration file. 
Which ones you set depend on what you want, but you + will almost certainly need to set the {\bf Maximum Concurrent Jobs} in + the Storage resource in the Director's configuration file and possibly + those in the Job and Client resources. + +\item [SDAddresses = \lt{}IP-address-specification\gt{}] + \index[sd]{SDAddresses} + \index[sd]{Directive!SDAddresses} + Specify the ports and addresses on which the Storage daemon will listen + for Director connections. Normally, the default is sufficient and you + do not need to specify this directive. Probably the simplest way to + explain how this directive works is to show an example: + +\footnotesize +\begin{verbatim} + SDAddresses = { ip = { + addr = 1.2.3.4; port = 1205; } + ipv4 = { + addr = 1.2.3.4; port = http; } + ipv6 = { + addr = 1.2.3.4; + port = 1205; + } + ip = { + addr = 1.2.3.4 + port = 1205 + } + ip = { + addr = 1.2.3.4 + } + ip = { + addr = 201:220:222::2 + } + ip = { + addr = bluedot.thun.net + } +} +\end{verbatim} +\normalsize + +where ip, ip4, ip6, addr, and port are all keywords. Note, that the address +can be specified as either a dotted quadruple, or IPv6 colon notation, or as +a symbolic name (only in the ip specification). Also, port can be specified +as a number or as the mnemonic value from the /etc/services file. If a port +is not specified, the default will be used. If an ip section is specified, +the resolution can be made either by IPv4 or IPv6. If ip4 is specified, then +only IPv4 resolutions will be permitted, and likewise with ip6. + +Using this directive, you can replace both the SDPort and SDAddress +directives shown below. + +\item [SDPort = \lt{}port-number\gt{}] + \index[sd]{SDPort} + \index[sd]{Directive!SDPort} + Specifies port number on which the Storage daemon listens for Director + connections. The default is 9103. 
+ +\item [SDAddress = \lt{}IP-Address\gt{}] + \index[sd]{SDAddress} + \index[sd]{Directive!SDAddress} + This directive is optional, and if it is specified, it will cause the + Storage daemon server (for Director and File daemon connections) to bind + to the specified {\bf IP-Address}, which is either a domain name or an + IP address specified as a dotted quadruple. If this directive is not + specified, the Storage daemon will bind to any available address (the + default). + +\end{description} + +The following is a typical Storage daemon Storage definition. + +\footnotesize +\begin{verbatim} +# +# "Global" Storage daemon configuration specifications appear +# under the Storage resource. +# +Storage { + Name = "Storage daemon" + Address = localhost + WorkingDirectory = "~/bacula/working" + Pid Directory = "~/bacula/working" +} +\end{verbatim} +\normalsize + +\section{Director Resource} +\label{DirectorResource1} +\index[general]{Director Resource} +\index[general]{Resource!Director} + +The Director resource specifies the Name of the Director which is permitted +to use the services of the Storage daemon. There may be multiple Director +resources. The Director Name and Password must match the corresponding +values in the Director's configuration file. + +\begin{description} + +\item [Name = \lt{}Director-Name\gt{}] + \index[sd]{Name} + \index[sd]{Directive!Name} + Specifies the Name of the Director allowed to connect to the Storage daemon. + This directive is required. + +\item [Password = \lt{}Director-password\gt{}] + \index[sd]{Password} + \index[sd]{Directive!Password} + Specifies the password that must be supplied by the above named Director. + This directive is required. + +\item [Monitor = \lt{}yes|no\gt{}] + \index[sd]{Monitor} + \index[sd]{Directive!Monitor} + If Monitor is set to {\bf no} (default), this director will have full + access to this Storage daemon. 
If Monitor is set to {\bf yes}, this
+ director will only be able to fetch the current status of this Storage
+ daemon.
+
+ Please note that if this director is being used by a Monitor, we highly
+ recommend to set this directive to {\bf yes} to avoid serious security
+ problems.
+
+\end{description}
+
+The following is an example of a valid Director resource definition:
+
+\footnotesize
+\begin{verbatim}
+Director {
+ Name = MainDirector
+ Password = my_secret_password
+}
+\end{verbatim}
+\normalsize
+
+\label{DeviceResource}
+\section{Device Resource}
+\index[general]{Resource!Device}
+\index[general]{Device Resource}
+
+The Device Resource specifies the details of each device (normally a tape
+drive) that can be used by the Storage daemon. There may be multiple
+Device resources for a single Storage daemon. In general, the properties
+specified within the Device resource are specific to the Device.
+
+\begin{description}
+
+\item [Name = {\it Device-Name}]
+ \index[sd]{Name}
+ \index[sd]{Directive!Name}
+ Specifies the Name that the Director will use when asking to backup or
+ restore to or from this device. This is the logical Device name, and may
+ be any string up to 127 characters in length. It is generally a good idea to
+ make it correspond to the English name of the backup device. The physical
+ name of the device is specified on the {\bf Archive Device} directive
+ described below. The name you specify here is also used in your Director's
+ conf file on the
+ \ilink{Device directive}{StorageResource2} in its Storage
+ resource.
+
+\item [Archive Device = {\it name-string}]
+ \index[sd]{Archive Device}
+ \index[sd]{Directive!Archive Device}
+ The specified {\bf name-string} gives the system file name of the storage
+ device managed by this storage daemon. This will usually be the device file
+ name of a removable storage device (tape drive), for example "{\bf
+ /dev/nst0}" or "{\bf /dev/rmt/0mbn}". 
For a DVD-writer, it will be for + example {\bf /dev/hdc}. It may also be a directory name if you are archiving + to disk storage. In this case, you must supply the full absolute path to the + directory. When specifying a tape device, it is preferable that the + "non-rewind" variant of the device file name be given. In addition, on + systems such as Sun, which have multiple tape access methods, you must be + sure to specify to use Berkeley I/O conventions with the device. The {\bf b} + in the Solaris (Sun) archive specification {\bf /dev/rmt/0mbn} is what is + needed in this case. Bacula does not support SysV tape drive behavior. + + As noted above, normally the Archive Device is the name of a tape drive, but + you may also specify an absolute path to an existing directory. If the Device + is a directory Bacula will write to file storage in the specified directory, + and the filename used will be the Volume name as specified in the Catalog. + If you want to write into more than one directory (i.e. to spread the load to + different disk drives), you will need to define two Device resources, each + containing an Archive Device with a different directory. + \label{SetupFifo} + In addition to a tape device name or a directory name, Bacula will accept the + name of a FIFO. A FIFO is a special kind of file that connects two programs + via kernel memory. If a FIFO device is specified for a backup operation, you + must have a program that reads what Bacula writes into the FIFO. When the + Storage daemon starts the job, it will wait for {\bf MaximumOpenWait} seconds + for the read program to start reading, and then time it out and terminate + the job. As a consequence, it is best to start the read program at the + beginning of the job perhaps with the {\bf RunBeforeJob} directive. For this + kind of device, you never want to specify {\bf AlwaysOpen}, because you want + the Storage daemon to open it only when a job starts, so you must explicitly + set it to {\bf No}. 
Since a FIFO is a one way device, Bacula will not attempt
+ to read a label of a FIFO device, but will simply write on it. To create a
+ FIFO Volume in the catalog, use the {\bf add} command rather than the {\bf
+ label} command to avoid attempting to write a label.
+
+\footnotesize
+\begin{verbatim}
+Device {
+ Name = FifoStorage
+ Media Type = Fifo
+ Device Type = Fifo
+ Archive Device = /tmp/fifo
+ LabelMedia = yes
+ Random Access = no
+ AutomaticMount = no
+ RemovableMedia = no
+ MaximumOpenWait = 60
+ AlwaysOpen = no
+}
+\end{verbatim}
+\normalsize
+
+ During a restore operation, if the Archive Device is a FIFO, Bacula will
+ attempt to read from the FIFO, so you must have an external program that
+ writes into the FIFO. Bacula will wait {\bf MaximumOpenWait} seconds for the
+ program to begin writing and will then time it out and terminate the job. As
+ noted above, you may use the {\bf RunBeforeJob} to start the writer program
+ at the beginning of the job.
+
+ The Archive Device directive is required.
+
+\item [Device Type = {\it type-specification}]
+ \index[sd]{Device Type}
+ \index[sd]{Directive!Device Type}
+ The Device Type specification allows you to explicitly tell Bacula
+ what kind of device you are defining. The {\it type-specification}
+ may be one of the following:
+ \begin{description}
+ \item [File]
+ Tells Bacula that the device is a file. It may either be a
+ file defined on fixed medium or a removable filesystem such as
+ USB. All files must be random access devices.
+ \item [Tape]
+ The device is a tape device and thus is sequential access. Tape devices
+ are controlled using ioctl() calls.
+ \item [Fifo]
+ The device is a first-in-first out sequential access read-only
+ or write-only device.
+ \item [DVD]
+ The device is a DVD. DVDs are sequential access for writing, but
+ random access for reading. 
+ \end{description} + + The Device Type directive is not required, and if not specified, Bacula + will attempt to guess what kind of device has been specified using the + Archive Device specification supplied. There are several advantages to + explicitly specifying the Device Type. First, on some systems, block and + character devices have the same type, which means that on those systems, + Bacula is unlikely to be able to correctly guess that a device is a DVD. + Secondly, if you explicitly specify the Device Type, the mount point + need not be defined until the device is opened. This is the case with + most removable devices such as USB that are mounted by the HAL daemon. + If the Device Type is not explicitly specified, then the mount point + must exist when the Storage daemon starts. + + This directive was implemented in Bacula version 1.38.6. + + +\item [Media Type = {\it name-string}] + \index[sd]{Media Type} + \index[sd]{Directive!Media Type} + The specified {\bf name-string} names the type of media supported by this + device, for example, "DLT7000". Media type names are arbitrary in that you + set them to anything you want, but they must be known to the volume + database to keep track of which storage daemons can read which volumes. In + general, each different storage type should have a unique Media Type + associated with it. The same {\bf name-string} must appear in the + appropriate Storage resource definition in the Director's configuration + file. + + Even though the names you assign are arbitrary (i.e. you choose the name + you want), you should take care in specifying them because the Media Type + is used to determine which storage device Bacula will select during + restore. Thus you should probably use the same Media Type specification + for all drives where the Media can be freely interchanged. This is not + generally an issue if you have a single Storage daemon, but it is with + multiple Storage daemons, especially if they have incompatible media. 
+ + For example, if you specify a Media Type of "DDS-4" then during the + restore, Bacula will be able to choose any Storage Daemon that handles + "DDS-4". If you have an autochanger, you might want to name the Media Type + in a way that is unique to the autochanger, unless you wish to possibly use + the Volumes in other drives. You should also ensure to have unique Media + Type names if the Media is not compatible between drives. This + specification is required for all devices. + + In addition, if you are using disk storage, each Device resource will + generally have a different mount point or directory. In order for + Bacula to select the correct Device resource, each one must have a + unique Media Type. + +\label{Autochanger} +\item [Autochanger = {\it Yes|No}] + \index[sd]{Autochanger} + \index[sd]{Directive!Autochanger} + If {\bf Yes}, this device belongs to an automatic tape changer, and you + must specify an {\bf Autochanger} resource that points to the {\bf + Device} resources. You must also specify a + {\bf Changer Device}. If the Autochanger directive is set to {\bf + No} (default), the volume must be manually changed. You should also + have an identical directive to the + \ilink{Storage resource}{Autochanger1} in the Director's + configuration file so that when labeling tapes you are prompted for the slot. + +\item [Changer Device = {\it name-string}] + \index[sd]{Changer Device} + \index[sd]{Directive!Changer Device} + The specified {\bf name-string} must be the {\bf generic SCSI} device + name of the autochanger that corresponds to the normal read/write + {\bf Archive Device} specified in the Device resource. This + generic SCSI device name should be specified if you have an autochanger + or if you have a standard tape drive and want to use the + {\bf Alert Command} (see below). For example, on Linux systems, for + an Archive Device name of {\bf /dev/nst0}, you would specify {\bf + /dev/sg0} for the Changer Device name. 
Depending on your exact + configuration, and the number of autochangers or the type of + autochanger, what you specify here can vary. This directive is + optional. See the \ilink{ Using Autochangers}{AutochangersChapter} chapter + of this manual for more details of using this and the following + autochanger directives. + +\item [Changer Command = {\it name-string}] + \index[sd]{Changer Command} + \index[sd]{Directive!Changer Command} + The {\bf name-string} specifies an external program to be called that will + automatically change volumes as required by {\bf Bacula}. Normally, + this directive will be specified only in the {\bf AutoChanger} resource, + which is then used for all devices. However, you may also specify + the different {\bf Changer Command} in each Device resource. + Most frequently, + you will specify the Bacula supplied {\bf mtx-changer} script as follows: + +\footnotesize +\begin{verbatim} +Changer Command = "/path/mtx-changer %c %o %S %a %d" +\end{verbatim} +\normalsize + + and you will install the {\bf mtx} on your system (found in the {\bf depkgs} + release). An example of this command is in the default bacula-sd.conf file. + For more details on the substitution characters that may be specified to + configure your autochanger please see the + \ilink{Autochangers}{AutochangersChapter} chapter of this manual. + For FreeBSD users, you might want to see one of the several {\bf chio} + scripts in {\bf examples/autochangers}. + +\item [Alert Command = {\it name-string}] + \index[sd]{Alert Command} + The {\bf name-string} specifies an external program to be called at the + completion of each Job after the device is released. The purpose of this + command is to check for Tape Alerts, which are present when something is + wrong with your tape drive (at least for most modern tape drives). The same + substitution characters that may be specified in the Changer Command may also + be used in this string. 
For more information, please see the + \ilink{Autochangers}{AutochangersChapter} chapter of this manual. + + + Note, it is not necessary to have an autochanger to use this command. The + example below uses the {\bf tapeinfo} program that comes with the {\bf mtx} + package, but it can be used on any tape drive. However, you will need to + specify a {\bf Changer Device} directive in your Device resource (see above) + so that the generic SCSI device name can be edited into the command (with the + \%c). + + An example of the use of this command to print Tape Alerts in the Job report + is: + +\footnotesize +\begin{verbatim} +Alert Command = "sh -c 'tapeinfo -f %c | grep TapeAlert'" + +\end{verbatim} +\normalsize + +and an example output when there is a problem could be: + +\footnotesize +\begin{verbatim} +bacula-sd Alert: TapeAlert[32]: Interface: Problem with SCSI interface + between tape drive and initiator. + +\end{verbatim} +\normalsize + +\item [Drive Index = {\it number}] + \index[sd]{Drive Index} + \index[sd]{Directive!Drive Index} + The {\bf Drive Index} that you specify is passed to the {\bf + mtx-changer} script and is thus passed to the {\bf mtx} program. By + default, the Drive Index is zero, so if you have only one drive in your + autochanger, everything will work normally. However, if you have + multiple drives, you must specify multiple Bacula Device resources (one + for each drive). The first Device should have the Drive Index set to 0, + and the second Device Resource should contain a Drive Index set to 1, + and so on. This will then permit you to use two or more drives in your + autochanger. As of Bacula version 1.38.0, using the {\bf Autochanger} + resource, Bacula will automatically ensure that only one drive at a time + uses the autochanger script, so you no longer need locking scripts as in + the past -- the default mtx-changer script works for any number of + drives. 
+
+\item [Autoselect = {\it Yes|No}]
+ \index[sd]{Autoselect}
+ \index[sd]{Directive!Autoselect}
+ If this directive is set to {\bf yes} (default), and the Device
+ belongs to an autochanger, then when the Autochanger is referenced
+ by the Director, this device can automatically be selected. If this
+ directive is set to {\bf no}, then the Device can only be referenced
+ by directly using the Device name in the Director. This is useful
+ for reserving a drive for something special such as a high priority
+ backup or restore operations.
+
+\item [Maximum Changer Wait = {\it time}]
+ \index[sd]{Maximum Changer Wait}
+ \index[sd]{Directive!Maximum Changer Wait}
+ This directive specifies the maximum time in seconds for Bacula to wait
+ for an autochanger to change the volume. If this time is exceeded,
+ Bacula will invalidate the Volume slot number stored in the catalog and
+ try again. If no additional changer volumes exist, Bacula will ask the
+ operator to intervene. The default is 5 minutes.
+% TODO: if this is the format, then maybe "5 minutes" should be in
+% TODO: quotes? define style. see others.
+
+\item [Maximum Rewind Wait = {\it time}]
+ \index[sd]{Maximum Rewind Wait}
+ \index[sd]{Directive!Maximum Rewind Wait}
+ This directive specifies the maximum time in seconds for Bacula to wait
+ for a rewind before timing out. If this time is exceeded,
+ Bacula will cancel the job. The default is 5 minutes.
+
+\item [Maximum Open Wait = {\it time}]
+ \index[sd]{Maximum Open Wait}
+ \index[sd]{Directive!Maximum Open Wait}
+ This directive specifies the maximum time in seconds for Bacula to wait
+ for an open before timing out. If this time is exceeded,
+ Bacula will cancel the job. The default is 5 minutes.
+
+\item [Always Open = {\it Yes|No}]
+ \index[sd]{Always Open}
+ \index[sd]{Directive!Always Open}
+ If {\bf Yes} (default), Bacula will always keep the device open unless
+ specifically {\bf unmounted} by the Console program. 
This permits + Bacula to ensure that the tape drive is always available, and properly + positioned. If you set + {\bf AlwaysOpen} to {\bf no} {\bf Bacula} will only open the drive when + necessary, and at the end of the Job if no other Jobs are using the + drive, it will be freed. The next time Bacula wants to append to a tape + on a drive that was freed, Bacula will rewind the tape and position it to + the end. To avoid unnecessary tape positioning and to minimize + unnecessary operator intervention, it is highly recommended that {\bf + Always Open = yes}. This also ensures that the drive is available when + Bacula needs it. + + If you have {\bf Always Open = yes} (recommended) and you want to use the + drive for something else, simply use the {\bf unmount} command in the Console + program to release the drive. However, don't forget to remount the drive with + {\bf mount} when the drive is available or the next Bacula job will block. + + For File storage, this directive is ignored. For a FIFO storage device, you + must set this to {\bf No}. + + Please note that if you set this directive to {\bf No} Bacula will release + the tape drive between each job, and thus the next job will rewind the tape + and position it to the end of the data. This can be a very time consuming + operation. In addition, with this directive set to no, certain multiple + drive autochanger operations will fail. We strongly recommend to keep + {\bf Always Open} set to {\bf Yes} + +\item [Volume Poll Interval = {\it time}] + \index[sd]{Volume Poll Interval} + \index[sd]{Directive!Volume Poll Interval} + If the time specified on this directive is non-zero, after asking the + operator to mount a new volume Bacula will periodically poll (or read) the + drive at the specified interval to see if a new volume has been mounted. If + the time interval is zero (the default), no polling will occur. This + directive can be useful if you want to avoid operator intervention via the + console. 
Instead, the operator can simply remove the old volume and insert
+ the requested one, and Bacula on the next poll will recognize the new tape
+ and continue. Please be aware that if you set this interval too small, you
+ may excessively wear your tape drive if the old tape remains in the drive,
+ since Bacula will read it on each poll. This can be avoided by ejecting the
+ tape using the {\bf Offline On Unmount} and the {\bf Close on Poll}
+ directives.
+ However, if you are using a Linux 2.6 kernel or other OSes
+ such as FreeBSD or Solaris, the Offline On Unmount will leave the drive
+ with no tape, and Bacula will not be able to properly open the drive and
+ may fail the job. For more information on this problem, please see the
+ \ilink{description of Offline On Unmount}{NoTapeInDrive} in the Tape
+ Testing chapter.
+
+\item [Close on Poll= {\it Yes|No}]
+ \index[sd]{Close on Poll}
+ \index[sd]{Directive!Close on Poll}
+ If {\bf Yes}, Bacula closes the device (equivalent to an unmount except no
+ mount is required) and reopen it at each poll. Normally this is not too
+ useful unless you have the {\bf Offline on Unmount} directive set, in which
+ case the drive will be taken offline preventing wear on the tape during any
+ future polling. Once the operator inserts a new tape, Bacula will recognize
+ the drive on the next poll and automatically continue with the backup.
+ Please see above for more details.
+
+\item [Maximum Open Wait = {\it time}]
+ \index[sd]{Maximum Open Wait}
+ \index[sd]{Directive!Maximum Open Wait}
+ This directive specifies the maximum amount of time in seconds that
+ Bacula will wait for a device that is busy. The default is 5 minutes.
+ If the device cannot be obtained, the current Job will be terminated in
+ error. Bacula will re-attempt to open the drive the next time a Job
+ starts that needs the drive. 
+ +\label{removablemedia} +\item [Removable media = {\it Yes|No}] + \index[sd]{Removable media} + \index[sd]{Directive!Removable media} + If {\bf Yes}, this device supports removable media (for example, tapes + or CDs). If {\bf No}, media cannot be removed (for example, an + intermediate backup area on a hard disk). If {\bf Removable media} is + enabled on a File device (as opposed to a tape) the Storage daemon will + assume that device may be something like a USB device that can be + removed or a simply a removable harddisk. When attempting to open + such a device, if the Volume is not found (for File devices, the Volume + name is the same as the Filename), then the Storage daemon will search + the entire device looking for likely Volume names, and for each one + found, it will ask the Director if the Volume can be used. If so, + the Storage daemon will use the first such Volume found. Thus it + acts somewhat like a tape drive -- if the correct Volume is not found, + it looks at what actually is found, and if it is an appendable Volume, + it will use it. + + If the removable medium is not automatically mounted (e.g. udev), then + you might consider using additional Storage daemon device directives + such as {\bf Requires Mount}, {\bf Mount Point}, {\bf Mount Command}, + and {\bf Unmount Command}, all of which can be used in conjunction with + {\bf Removable Media}. + + +\item [Random access = {\it Yes|No}] + \index[sd]{Random access} + \index[sd]{Directive!Random access} + If {\bf Yes}, the archive device is assumed to be a random access medium + which supports the {\bf lseek} (or {\bf lseek64} if Largefile is enabled + during configuration) facility. This should be set to {\bf Yes} for all + file systems such as DVD, USB, and fixed files. It should be set to + {\bf No} for non-random access devices such as tapes and named pipes. 
+ + +\item [Requires Mount = {\it Yes|No}] + \index[sd]{Requires Mount } + When this directive is enabled, the Storage daemon will submit + a {\bf Mount Command} before attempting to open the device. + You must set this directive to {\bf yes} for DVD-writers and removable + file systems such as USB devices that are not automatically mounted + by the operating system when plugged in or opened by Bacula. + It should be set to {\bf no} for + all other devices such as tapes and fixed filesystems. It should also + be set to {\bf no} for any removable device that is automatically + mounted by the operating system when opened (e.g. USB devices mounted + by udev or hotplug). This directive + indicates if the device requires to be mounted using the {\bf Mount + Command}. To be able to write a DVD, the following directives must also + be defined: {\bf Mount Point}, {\bf Mount Command}, {\bf Unmount + Command} and {\bf Write Part Command}. + +\item [Mount Point = {\it directory}] + \index[sd]{Mount Point} + Directory where the device can be mounted. + This directive is used only + for devices that have {\bf Requires Mount} enabled such as DVD or + USB file devices. + +\item [Mount Command = {\it name-string}] + \index[sd]{Mount Command} + This directive specifies the command that must be executed to mount + devices such as DVDs and many USB devices. For DVDs, the + device is written directly, but the mount command is necessary in + order to determine the free space left on the DVD. Before the command is + executed, \%a is replaced with the Archive Device, and \%m with the Mount + Point. 
+ + Most frequently, for a DVD, you will define it as follows: + +\footnotesize +\begin{verbatim} + Mount Command = "/bin/mount -t iso9660 -o ro %a %m" +\end{verbatim} +\normalsize + +However, if you have defined a mount point in /etc/fstab, you might be +able to use a mount command such as: + +\footnotesize +\begin{verbatim} + Mount Command = "/bin/mount /media/dvd" +\end{verbatim} +\normalsize + +See the \ilink {Edit Codes}{mountcodes} section below for more details of +the editing codes that can be used in this directive. + + +\item [Unmount Command = {\it name-string}] + \index[sd]{Unmount Command} + This directive specifies the command that must be executed to unmount + devices such as DVDs and many USB devices. Before the command is + executed, \%a is replaced with the Archive Device, and \%m with the Mount + Point. + + Most frequently, you will define it as follows: + +\footnotesize +\begin{verbatim} + Unmount Command = "/bin/umount %m" +\end{verbatim} +\normalsize + +See the \ilink {Edit Codes}{mountcodes} section below for more details of +the editing codes that can be used in this directive. + + +\item [Minimum block size = {\it size-in-bytes}] + \index[sd]{Minimum block size} + \index[sd]{Directive!Minimum block size} + On most modern tape drives, you will not need or want to specify this + directive, and if you do so, it will be to make Bacula use fixed block + sizes. This statement applies only to non-random access devices (e.g. + tape drives). Blocks written by the storage daemon to a non-random + archive device will never be smaller than the given {\bf size-in-bytes}. + The Storage daemon will attempt to efficiently fill blocks with data + received from active sessions but will, if necessary, add padding to a + block to achieve the required minimum size. 
+ + To force the block size to be fixed, as is the case for some non-random + access devices (tape drives), set the {\bf Minimum block size} and the + {\bf Maximum block size} to the same value (zero included). The default + is that both the minimum and maximum block size are zero and the default + block size is 64,512 bytes. + + For example, suppose you want a fixed block size of 100K bytes, then you + would specify: + +\footnotesize +\begin{verbatim} + + Minimum block size = 100K + Maximum block size = 100K + +\end{verbatim} +\normalsize + + Please note that if you specify a fixed block size as shown above, the tape + drive must either be in variable block size mode, or if it is in fixed block + size mode, the block size (generally defined by {\bf mt}) {\bf must} be + identical to the size specified in Bacula -- otherwise when you attempt to + re-read your Volumes, you will get an error. + + If you want the block size to be variable but with a 64K minimum and 200K + maximum (and default as well), you would specify: + +\footnotesize +\begin{verbatim} + + Minimum block size = 64K + Maximum blocksize = 200K + +\end{verbatim} +\normalsize + +\item [Maximum block size = {\it size-in-bytes}] + \index[sd]{Maximum block size} + \index[sd]{Directive!Maximum block size} + On most modern tape drives, you will not need to specify this directive. + If you do so, it will most likely be to use fixed block sizes (see + Minimum block size above). The Storage daemon will always attempt to + write blocks of the specified {\bf size-in-bytes} to the archive device. + As a consequence, this statement specifies both the default block size + and the maximum block size. The size written never exceed the given + {\bf size-in-bytes}. If adding data to a block would cause it to exceed + the given maximum size, the block will be written to the archive device, + and the new data will begin a new block. 
+ + If no value is specified or zero is specified, the Storage daemon will + use a default block size of 64,512 bytes (126 * 512). + +\item [Hardware End of Medium = {\it Yes|No}] + \index[sd]{Hardware End of Medium} + \index[sd]{Directive!Hardware End of Medium} + If {\bf No}, the archive device is not required to support end of medium + ioctl request, and the storage daemon will use the forward space file + function to find the end of the recorded data. If {\bf Yes}, the archive + device must support the {\tt ioctl} {\tt MTEOM} call, which will position the + tape to the end of the recorded data. In addition, your SCSI driver must keep + track of the file number on the tape and report it back correctly by the + {\bf MTIOCGET} ioctl. Note, some SCSI drivers will correctly forward space to + the end of the recorded data, but they do not keep track of the file number. + On Linux machines, the SCSI driver has a {\bf fast-eod} option, which if set + will cause the driver to lose track of the file number. You should ensure + that this option is always turned off using the {\bf mt} program. + + Default setting for Hardware End of Medium is {\bf Yes}. This function is + used before appending to a tape to ensure that no previously written data is + lost. We recommend if you have a non-standard or unusual tape drive that you + use the {\bf btape} program to test your drive to see whether or not it + supports this function. All modern (after 1998) tape drives support this + feature. + +\item [Fast Forward Space File = {\it Yes|No}] + \index[sd]{Fast Forward Space File} + \index[sd]{Directive!Fast Forward Space File} + If {\bf No}, the archive device is not required to support keeping track of + the file number ({\bf MTIOCGET} ioctl) during forward space file. 
If {\bf
+ Yes}, the archive device must support the {\tt ioctl} {\tt MTFSF} call, which
+ virtually all drivers support, but in addition, your SCSI driver must keep
+ track of the file number on the tape and report it back correctly by the
+ {\bf MTIOCGET} ioctl. Note, some SCSI drivers will correctly forward space,
+ but they do not keep track of the file number or more seriously, they do not
+ report end of medium.
+
+ Default setting for Fast Forward Space File is {\bf Yes}.
+
+\item [Use MTIOCGET = {\it Yes|No}]
+ \index[sd]{Use MTIOCGET}
+ \index[sd]{Directive!Use MTIOCGET}
+ If {\bf No}, the operating system is not required to support keeping track of
+ the file number and reporting it in the ({\bf MTIOCGET} ioctl). The default
+ is {\bf Yes}. If you must set this to No, Bacula will do the proper file
+ position determination, but it is very unfortunate because it means that
+ tape movement is very inefficient.
+ Fortunately, this operating system deficiency seems to be the case only
+ on a few *BSD systems. Operating systems known to work correctly are
+ Solaris, Linux and FreeBSD.
+
+\item [BSF at EOM = {\it Yes|No}]
+ \index[sd]{BSF at EOM}
+ \index[sd]{Directive!BSF at EOM}
+ If {\bf No}, the default, no special action is taken by Bacula when the End
+ of Medium (end of tape) is reached because the tape will be positioned after
+ the last EOF tape mark, and Bacula can append to the tape as desired.
+ However, on some systems, such as FreeBSD, when Bacula reads the End of
+ Medium (end of tape), the tape will be positioned after the second EOF tape
+ mark (two successive EOF marks indicate End of Medium). If Bacula appends
+ from that point, all the appended data will be lost. The solution for such
+ systems is to specify {\bf BSF at EOM} which causes Bacula to backspace over
+ the second EOF mark. Determination of whether or not you need this directive
+ is done using the {\bf test} command in the {\bf btape} program. 
+ +\item [TWO EOF = {\it Yes|No}] + \index[sd]{TWO EOF} + \index[sd]{Directive!TWO EOF} + If {\bf Yes}, Bacula will write two end of file marks when terminating a tape +-- i.e. after the last job or at the end of the medium. If {\bf No}, the +default, Bacula will only write one end of file to terminate the tape. + +\item [Backward Space Record = {\it Yes|No}] + \index[sd]{Backward Space Record} + \index[sd]{Directive!Backward Space Record} + If {\it Yes}, the archive device supports the {\tt MTBSR ioctl} to backspace + records. If {\it No}, this call is not used and the device must be rewound + and advanced forward to the desired position. Default is {\bf Yes} for non + random-access devices. This function if enabled is used at the end of a + Volume after writing the end of file and any ANSI/IBM labels to determine whether + or not the last block was written correctly. If you turn this function off, + the test will not be done. This causes no harm as the re-read process is + precautionary rather than required. + +\item [Backward Space File = {\it Yes|No}] + \index[sd]{Backward Space File} + \index[sd]{Directive!Backward Space File} + If {\it Yes}, the archive device supports the {\bf MTBSF} and {\bf MTBSF + ioctl}s to backspace over an end of file mark and to the start of a file. If + {\it No}, these calls are not used and the device must be rewound and + advanced forward to the desired position. Default is {\bf Yes} for non + random-access devices. + +\item [Forward Space Record = {\it Yes|No}] + \index[sd]{Forward Space Record} + \index[sd]{Directive!Forward Space Record} + If {\it Yes}, the archive device must support the {\bf MTFSR ioctl} to + forward space over records. If {\bf No}, data must be read in order to + advance the position on the device. Default is {\bf Yes} for non + random-access devices. 
+ +\item [Forward Space File = {\it Yes|No}] + \index[sd]{Forward Space File} + \index[sd]{Directive!Forward Space File} + If {\bf Yes}, the archive device must support the {\tt MTFSF ioctl} to + forward space by file marks. If {\it No}, data must be read to advance the + position on the device. Default is {\bf Yes} for non random-access devices. + +\item [Offline On Unmount = {\it Yes|No}] + \index[sd]{Offline On Unmount} + \index[sd]{Directive!Offline On Unmount} + The default for this directive is {\bf No}. If {\bf Yes} the archive device + must support the {\tt MTOFFL ioctl} to rewind and take the volume offline. In + this case, Bacula will issue the offline (eject) request before closing the + device during the {\bf unmount} command. If {\bf No} Bacula will not attempt + to offline the device before unmounting it. After an offline is issued, the + cassette will be ejected thus {\bf requiring operator intervention} to + continue, and on some systems require an explicit load command to be issued + ({\bf mt -f /dev/xxx load}) before the system will recognize the tape. If you + are using an autochanger, some devices require an offline to be issued prior + to changing the volume. However, most devices do not and may get very + confused. + + If you are using a Linux 2.6 kernel or other OSes + such as FreeBSD or Solaris, the Offline On Unmount will leave the drive + with no tape, and Bacula will not be able to properly open the drive and + may fail the job. For more information on this problem, please see the + \ilink{description of Offline On Unmount}{NoTapeInDrive} in the Tape + Testing chapter. + + +\item [Maximum Volume Size = {\it size}] + \index[sd]{Maximum Volume Size} + \index[sd]{Directive!Maximum Volume Size} + No more than {\bf size} bytes will be written onto a given volume on the + archive device. This directive is used mainly in testing Bacula to + simulate a small Volume. 
It can also be useful if you wish to limit the
+ size of a File Volume to say less than 2GB of data, or in some rare cases
+ of really antiquated tape drives that do not properly indicate when the
+ end of a tape is reached during writing (though I have read about such
+ drives, I have never personally encountered one). Please note, this
+ directive is deprecated (being phased out) in favor of the {\bf Maximum
+ Volume Bytes} defined in the Director's configuration file.
+
+\item [Maximum File Size = {\it size}]
+ \index[sd]{Maximum File Size}
+ \index[sd]{Directive!Maximum File Size}
+ No more than {\bf size} bytes will be written into a given logical file
+ on the volume. Once this size is reached, an end of file mark is
+ written on the volume and subsequent data are written into the next
+ file. Breaking long sequences of data blocks with file marks permits
+ quicker positioning to the start of a given stream of data and can
+ improve recovery from read errors on the volume. The default is one
+ Gigabyte. This directive creates EOF marks only on tape media.
+ However, regardless of the medium type (tape, disk, DVD, ...) each time
+ the Maximum File Size is exceeded, a record is put into the catalog
+ database that permits seeking to that position on the medium for
+ restore operations. If you set this to a small value (e.g. 1MB),
+ you will generate lots of database records (JobMedia) and may
+ significantly increase CPU/disk overhead.
+
+ Note, this directive does not limit the size of Volumes that Bacula
+ will create regardless of whether they are tape or disk volumes. It
+ changes only the number of EOF marks on a tape and the number of
+ block positioning records (see below) that are generated. If you
+ want to limit the size of all Volumes for a particular device, use
+ the {\bf Maximum Volume Size} directive (above), or use the
+ {\bf Maximum Volume Bytes} directive in the Director's Pool resource,
+ which does the same thing but on a Pool (Volume) basis. 
+ +\item [Block Positioning = {\it yes|no}] + \index[sd]{Block Positioning} + \index[sd]{Directive!Block Positioning} + This directive tells Bacula not to use block positioning when doing restores. + Turning this directive off can cause Bacula to be {\bf extremely} slow + when restoring files. You might use this directive if you wrote your + tapes with Bacula in variable block mode (the default), but your drive + was in fixed block mode. The default is {\bf yes}. + +\item [Maximum Network Buffer Size = {\it bytes}] + \index[sd]{Maximum Network Buffer Size} + \index[sd]{Directive!Maximum Network Buffer Size} + where {\it bytes} specifies the initial network buffer size to use with the + File daemon. This size will be adjusted down if it is too large until + it is accepted by the OS. Please use care in setting this value since if + it is too large, it will be trimmed by 512 bytes until the OS is happy, + which may require a large number of system calls. The default value is + 32,768 bytes. + + The default size was chosen to be relatively large but not too big in + the case that you are transmitting data over Internet. It is clear that + on a high speed local network, you can increase this number and improve + performance. For example, some users have found that if you use a value + of 65,536 bytes they get five to ten times the throughput. Larger values for + most users don't seem to improve performance. If you are interested + in improving your backup speeds, this is definitely a place to + experiment. You will probably also want to make the corresponding change + in each of your File daemons conf files. + + +\item [Maximum Spool Size = {\it bytes}] + \index[sd]{Maximum Spool Size} + \index[sd]{Directive!Maximum Spool Size} + where the bytes specify the maximum spool size for all jobs that are running. + The default is no limit. 
+ +\item [Maximum Job Spool Size = {\it bytes}] + \index[sd]{Maximum Job Spool Size} + \index[sd]{Directive!Maximum Job Spool Size} + where the bytes specify the maximum spool size for any one job that is + running. The default is no limit. + This directive is implemented only in version 1.37 and later. + +\item [Spool Directory = {\it directory}] + \index[sd]{Spool Directory} + \index[sd]{Directive!Spool Directory} + specifies the name of the directory to be used to store the spool files for + this device. This directory is also used to store temporary part files when + writing to a device that requires mount (DVD). The default is to use the + working directory. + +\item [Maximum Part Size = {\it bytes}] + \index[sd]{Maximum Part Size} + \index[sd]{Directive!Maximum Part Size} + This is the maximum size of a volume part file. The default is no limit. + This directive is implemented only in version 1.37 and later. + + If the device requires mount, it is transferred to the device when this size + is reached. In this case, you must take care to have enough disk space left + in the spool directory. + + Otherwise, it is left on the hard disk. + + It is ignored for tape and FIFO devices. + + +\end{description} + +\label{mountcodes} +\section{Edit Codes for Mount and Unmount Directives} +\index[general]{Directives!Edit Codes} +\index[general]{Edit Codes for Mount and Unmount Directives } + +Before submitting the {\bf Mount Command}, {\bf Unmount Command}, +{\bf Write Part Command}, or {\bf Free Space Command} directives +to the operating system, Bacula performs character substitution of the +following characters: + +\footnotesize +\begin{verbatim} + %% = % + %a = Archive device name + %e = erase (set if cannot mount and first part) + %n = part number + %m = mount point + %v = last part name (i.e. 
filename) +\end{verbatim} +\normalsize + + +\section{Devices that require a mount (DVD)} +\index[general]{Devices that require a mount (DVD)} +\index[general]{DVD!Devices that require a mount} + +All the directives in this section are implemented only in +Bacula version 1.37 and later and hence are available in version 1.38.6. + +As of version 1.39.5, the directives +"Requires Mount", "Mount Point", "Mount Command", and "Unmount Command" +apply to removable filesystems such as USB in addition to DVD. + +\begin{description} + +\item [Requires Mount = {\it Yes|No}] + \index[sd]{Requires Mount} + \index[sd]{Directive!Requires Mount} + You must set this directive to {\bf yes} for DVD-writers, and to {\bf no} for + all other devices (tapes/files). This directive indicates if the device + requires to be mounted to be read, and if it must be written in a special way. + If it set, {\bf Mount Point}, {\bf Mount Command}, {\bf Unmount Command} and + {\bf Write Part Command} directives must also be defined. + +\item [Mount Point = {\it directory}] + \index[sd]{Mount Point} + \index[sd]{Directive!Mount Point} + Directory where the device can be mounted. + +\item [Mount Command = {\it name-string}] + \index[sd]{Mount Command} + \index[sd]{Directive!Mount Command} + Command that must be executed to mount the device. Before the command is + executed, \%a is replaced with the Archive Device, and \%m with the Mount + Point. + + Most frequently, you will define it as follows: + +\footnotesize +\begin{verbatim} + Mount Command = "/bin/mount -t iso9660 -o ro %a %m" +\end{verbatim} +\normalsize + +\item [Unmount Command = {\it name-string}] + \index[sd]{Unmount Command} + \index[sd]{Directive!Unmount Command} + Command that must be executed to unmount the device. Before the command is + executed, \%a is replaced with the Archive Device, and \%m with the Mount + Point. 
+ + Most frequently, you will define it as follows: + +\footnotesize +\begin{verbatim} + Unmount Command = "/bin/umount %m" +\end{verbatim} +\normalsize + +\item [Write Part Command = {\it name-string}] + \index[sd]{Write Part Command} + \index[sd]{Directive!Write Part Command} + Command that must be executed to write a part to the device. Before the + command is executed, \%a is replaced with the Archive Device, \%m with the + Mount Point, \%e is replaced with 1 if we are writing the first part, + and with 0 otherwise, and \%v with the current part filename. + + For a DVD, you will most frequently specify the Bacula supplied {\bf + dvd-handler} script as follows: + +\footnotesize +\begin{verbatim} + Write Part Command = "/path/dvd-handler %a write %e %v" +\end{verbatim} +\normalsize + + Where {\bf /path} is the path to your scripts install directory, and + dvd-handler is the Bacula supplied script file. + This command will already be present, but commented out, + in the default bacula-sd.conf file. To use it, simply remove + the comment (\#) symbol. + + +\item [Free Space Command = {\it name-string}] + \index[sd]{Free Space Command} + \index[sd]{Directive!Free Space Command} + Command that must be executed to check how much free space is left on the + device. Before the command is executed,\%a is replaced with the Archive + Device, \%m with the Mount Point, \%e is replaced with 1 if we are writing + the first part, and with 0 otherwise, and \%v with the current part filename. + + For a DVD, you will most frequently specify the Bacula supplied {\bf + dvd-handler} script as follows: + +\footnotesize +\begin{verbatim} + Free Space Command = "/path/dvd-handler %a free" +\end{verbatim} +\normalsize + + Where {\bf /path} is the path to your scripts install directory, and + dvd-handler is the Bacula supplied script file. + If you want to specify your own command, please look at the code of + dvd-handler to see what output Bacula expects from this command. 
+ This command will already be present, but commented out, + in the default bacula-sd.conf file. To use it, simply remove + the comment (\#) symbol. + + If you do not set it, Bacula will expect there is always free space on the + device. + +\end{description} + +%% This pulls in the Autochanger resource from another file. +\label{AutochangerRes} +\label{AutochangerResource1} +\input{autochangerres} + + + + +\section{Capabilities} +\index[general]{Capabilities} + +\begin{description} + +\item [Label media = {\it Yes|No}] + \index[sd]{Label media} + \index[sd]{Directive!Label media} + If {\bf Yes}, permits this device to automatically label blank media + without an explicit operator command. It does so by using an internal + algorithm as defined on the \ilink{Label Format}{Label} record in each + Pool resource. If this is {\bf No} as by default, Bacula will label + tapes only by specific operator command ({\bf label} in the Console) or + when the tape has been recycled. The automatic labeling feature is most + useful when writing to disk rather than tape volumes. + +\item [Automatic mount = {\it Yes|No}] + \index[sd]{Automatic mount} + \index[sd]{Directive!Automatic mount} + If {\bf Yes} (the default), permits the daemon to examine the device to + determine if it contains a Bacula labeled volume. This is done + initially when the daemon is started, and then at the beginning of each + job. This directive is particularly important if you have set + {\bf Always Open = no} because it permits Bacula to attempt to read the + device before asking the system operator to mount a tape. However, + please note that the tape must be mounted before the job begins. + +\end{description} + +\section{Messages Resource} +\label{MessagesResource1} +\index[general]{Resource!Messages} +\index[general]{Messages Resource} + +For a description of the Messages Resource, please see the +\ilink{Messages Resource}{MessagesChapter} Chapter of this +manual. 
+ +\section{Sample Storage Daemon Configuration File} +\label{SampleConfiguration} +\index[general]{File!Sample Storage Daemon Configuration} +\index[general]{Sample Storage Daemon Configuration File} + +A example Storage Daemon configuration file might be the following: + +\footnotesize +\begin{verbatim} +# +# Default Bacula Storage Daemon Configuration file +# +# For Bacula release 1.37.2 (07 July 2005) -- gentoo 1.4.16 +# +# You may need to change the name of your tape drive +# on the "Archive Device" directive in the Device +# resource. If you change the Name and/or the +# "Media Type" in the Device resource, please ensure +# that bacula-dir.conf has corresponding changes. +# +Storage { # definition of myself + Name = rufus-sd + Address = rufus + WorkingDirectory = "$HOME/bacula/bin/working" + Pid Directory = "$HOME/bacula/bin/working" + Maximum Concurrent Jobs = 20 +} +# +# List Directors who are permitted to contact Storage daemon +# +Director { + Name = rufus-dir + Password = "ZF9Ctf5PQoWCPkmR3s4atCB0usUPg+vWWyIo2VS5ti6k" +} +# +# Restricted Director, used by tray-monitor to get the +# status of the storage daemon +# +Director { + Name = rufus-mon + Password = "9usxgc307dMbe7jbD16v0PXlhD64UVasIDD0DH2WAujcDsc6" + Monitor = yes +} +# +# Devices supported by this Storage daemon +# To connect, the Director's bacula-dir.conf must have the +# same Name and MediaType. 
+# +Autochanger { + Name = Autochanger + Device = Drive-1 + Device = Drive-2 + Changer Command = "/home/kern/bacula/bin/mtx-changer %c %o %S %a %d" + Changer Device = /dev/sg0 +} + +Device { + Name = Drive-1 # + Drive Index = 0 + Media Type = DLT-8000 + Archive Device = /dev/nst0 + AutomaticMount = yes; # when device opened, read it + AlwaysOpen = yes; + RemovableMedia = yes; + RandomAccess = no; + AutoChanger = yes + Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'" +} + +Device { + Name = Drive-2 # + Drive Index = 1 + Media Type = DLT-8000 + Archive Device = /dev/nst1 + AutomaticMount = yes; # when device opened, read it + AlwaysOpen = yes; + RemovableMedia = yes; + RandomAccess = no; + AutoChanger = yes + Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'" +} + +Device { + Name = "HP DLT 80" + Media Type = DLT8000 + Archive Device = /dev/nst0 + AutomaticMount = yes; # when device opened, read it + AlwaysOpen = yes; + RemovableMedia = yes; +} +#Device { +# Name = SDT-7000 # +# Media Type = DDS-2 +# Archive Device = /dev/nst0 +# AutomaticMount = yes; # when device opened, read it +# AlwaysOpen = yes; +# RemovableMedia = yes; +#} +#Device { +# Name = Floppy +# Media Type = Floppy +# Archive Device = /mnt/floppy +# RemovableMedia = yes; +# Random Access = Yes; +# AutomaticMount = yes; # when device opened, read it +# AlwaysOpen = no; +#} +#Device { +# Name = FileStorage +# Media Type = File +# Archive Device = /tmp +# LabelMedia = yes; # lets Bacula label unlabeled media +# Random Access = Yes; +# AutomaticMount = yes; # when device opened, read it +# RemovableMedia = no; +# AlwaysOpen = no; +#} +#Device { +# Name = "NEC ND-1300A" +# Media Type = DVD +# Archive Device = /dev/hda +# LabelMedia = yes; # lets Bacula label unlabeled media +# Random Access = Yes; +# AutomaticMount = yes; # when device opened, read it +# RemovableMedia = yes; +# AlwaysOpen = no; +# MaximumPartSize = 800M; +# RequiresMount = yes; +# MountPoint = /mnt/cdrom; +# 
MountCommand = "/bin/mount -t iso9660 -o ro %a %m"; +# UnmountCommand = "/bin/umount %m"; +# SpoolDirectory = /tmp/backup; +# WritePartCommand = "/etc/bacula/dvd-handler %a write %e %v" +# FreeSpaceCommand = "/etc/bacula/dvd-handler %a free" +#} +# +# A very old Exabyte with no end of media detection +# +#Device { +# Name = "Exabyte 8mm" +# Media Type = "8mm" +# Archive Device = /dev/nst0 +# Hardware end of medium = No; +# AutomaticMount = yes; # when device opened, read it +# AlwaysOpen = Yes; +# RemovableMedia = yes; +#} +# +# Send all messages to the Director, +# mount messages also are sent to the email address +# +Messages { + Name = Standard + director = rufus-dir = all + operator = root = mount +} +\end{verbatim} +\normalsize diff --git a/docs/manuals/en/install/translate_images.pl b/docs/manuals/en/install/translate_images.pl new file mode 100755 index 00000000..c7225118 --- /dev/null +++ b/docs/manuals/en/install/translate_images.pl @@ -0,0 +1,185 @@ +#!/usr/bin/perl -w +# +use strict; + +# Used to change the names of the image files generated by latex2html from imgxx.png +# to meaningful names. Provision is made to go either from or to the meaningful names. +# The meaningful names are obtained from a file called imagename_translations, which +# is generated by extensions to latex2html in the make_image_file subroutine in +# bacula.perl. + +# Opens the file imagename_translations and reads the contents into a hash. +# The hash is creaed with the imgxx.png files as the key if processing TO +# meaningful filenames, and with the meaningful filenames as the key if +# processing FROM meaningful filenames. +# Then opens the html file(s) indicated in the command-line arguments and +# changes all image references according to the translations described in the +# above file. Finally, it renames the image files. +# +# Original creation: 3-27-05 by Karl Cunningham. +# Modified 5-21-05 to go FROM and TO meaningful filenames. 
+#
+my $TRANSFILE = "imagename_translations";
+my $path;
+
+# Loads the contents of $TRANSFILE file into the hash referenced in the first
+# argument. The hash is loaded to translate old to new if $direction is 0,
+# otherwise it is loaded to translate new to old. In this context, the
+# 'old' filename is the meaningful name, and the 'new' filename is the
+# imgxx.png filename. It is assumed that the old image is the one that
+# latex2html has used as the source to create the imgxx.png filename.
+# The filename extension is taken from the file
+sub read_transfile {
+    my ($trans,$direction) = @_;
+
+    if (!open IN,"<$path$TRANSFILE") {
+        print "WARNING: Cannot open image translation file $path$TRANSFILE for reading\n";
+        print "  Image filename translation aborted\n\n";
+        exit 0;
+    }
+
+    while (<IN>) {
+        chomp;
+        my ($new,$old) = split(/\001/);
+
+        # Old filenames will usually have a leading ./ which we don't need.
+        $old =~ s/^\.\///;
+
+        # The filename extension of the old filename must be made to match
+        # the new filename because it indicates the encoding format of the image.
+        my ($ext) = $new =~ /(\.[^\.]*)$/;
+        $old =~ s/\.[^\.]*$/$ext/;
+        if ($direction == 0) {
+            $trans->{$new} = $old;
+        } else {
+            $trans->{$old} = $new;
+        }
+    }
+    close IN;
+}
+
+# Translates the image names in the file given as the first argument, according to
+# the translations in the hash that is given as the second argument.
+# The file contents are read in entirely into a string, the string is processed, and
+# the file contents are then written. No particular care is taken to ensure that the
+# file is not lost if a system failure occurs at an inopportune time. It is assumed
+# that the html files being processed here can be recreated on demand.
+#
+# Links to other files are added to the %filelist for processing. That way,
+# all linked files will be processed (assuming they are local). 
+sub translate_html { + my ($filename,$trans,$filelist) = @_; + my ($contents,$out,$this,$img,$dest); + my $cnt = 0; + + # If the filename is an external link ignore it. And drop any file:// from + # the filename. + $filename =~ /^(http|ftp|mailto)\:/ and return 0; + $filename =~ s/^file\:\/\///; + # Load the contents of the html file. + if (!open IF,"<$path$filename") { + print "WARNING: Cannot open $path$filename for reading\n"; + print "  Image Filename Translation aborted\n\n"; + exit 0; + } + + while (<IF>) { + $contents .= $_; + } + close IF; + + # Now do the translation... + # First, search for an image filename. + while ($contents =~ /\<\s*IMG[^\>]*SRC=\"/si) { + $contents = $'; + $out .= $` . $&; + + # The next thing is an image name. Get it and translate it. + $contents =~ /^(.*?)\"/s; + $contents = $'; + $this = $&; + $img = $1; + # If the image is in our list of ones to be translated, do it + # and feed the result to the output. + $cnt += $this =~ s/$img/$trans->{$img}/ if (defined($trans->{$img})); + $out .= $this; + } + $out .= $contents; + + # Now send the translated text to the html file, overwriting what's there. + open OF,">$path$filename" or die "Cannot open $path$filename for writing\n"; + print OF $out; + close OF; + + # Now look for any links to other files and add them to the list of files to do. + while ($out =~ /\<\s*A[^\>]*HREF=\"(.*?)\"/si) { + $out = $'; + $dest = $1; + # Drop a # and anything after it. + $dest =~ s/\#.*//; + $filelist->{$dest} = '' if $dest; + } + return $cnt; +} + +# Renames the image files specified in the %translate hash. +sub rename_images { + my $translate = shift; + my ($response); + + foreach (keys(%$translate)) { + if (!
$translate->{$_}) { + print " WARNING: No destination Filename for $_\n"; + } else { + $response = `mv -f $path$_ $path$translate->{$_} 2>&1`; + $response and print "ERROR from system $response\n"; + } + } +} + +################################################# +############# MAIN ############################# +################################################ + +# %filelist starts out with keys from the @ARGV list. As files are processed, +# any links to other files are added to the %filelist. A hash of processed +# files is kept so we don't do any twice. + +# The first argument must be either --to_meaningful_names or --from_meaningful_names + +my (%translate,$search_regex,%filelist,%completed,$thisfile); +my ($cnt,$direction); + +my $arg0 = shift(@ARGV); +$arg0 =~ /^(--to_meaningful_names|--from_meaningful_names)$/ or + die "ERROR: First argument must be either \'--to_meaningful_names\' or \'--from_meaningful_names\'\n"; + +$direction = ($arg0 eq '--to_meaningful_names') ? 0 : 1; + +(@ARGV) or die "ERROR: Filename(s) to process must be given as arguments\n"; + +# Use the first argument to get the path to the file of translations. +my $tmp = $ARGV[0]; +($path) = $tmp =~ /(.*\/)/; +$path = '' unless $path; + +read_transfile(\%translate,$direction); + +foreach (@ARGV) { + # Strip the path from the filename, and use it later on. 
+ if (s/(.*\/)//) { + $path = $1; + } else { + $path = ''; + } + $filelist{$_} = ''; + + while ($thisfile = (keys(%filelist))[0]) { + $cnt += translate_html($thisfile,\%translate,\%filelist) if (!exists($completed{$thisfile})); + delete($filelist{$thisfile}); + $completed{$thisfile} = ''; + } + print "translate_images.pl: $cnt image filenames translated ",($direction)?"from":"to"," meaningful names\n"; +} + +rename_images(\%translate); diff --git a/docs/manuals/en/install/update_version b/docs/manuals/en/install/update_version new file mode 100755 index 00000000..5c2e0092 --- /dev/null +++ b/docs/manuals/en/install/update_version @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +. ./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/en/install/update_version.in b/docs/manuals/en/install/update_version.in new file mode 100644 index 00000000..2766245f --- /dev/null +++ b/docs/manuals/en/install/update_version.in @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +. 
./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/en/install/version.tex.in b/docs/manuals/en/install/version.tex.in new file mode 100644 index 00000000..ff66dfc6 --- /dev/null +++ b/docs/manuals/en/install/version.tex.in @@ -0,0 +1 @@ +@VERSION@ (@DATE@) diff --git a/docs/manuals/en/problems/Makefile.in b/docs/manuals/en/problems/Makefile.in new file mode 100644 index 00000000..55cb58c6 --- /dev/null +++ b/docs/manuals/en/problems/Makefile.in @@ -0,0 +1,136 @@ +# +# +# Makefile for LaTeX +# +# To build everything do +# make tex +# make web +# make html +# make dvipdf +# +# or simply +# +# make +# +# for rapid development do: +# make tex +# make show +# +# +# If you are having problems getting "make" to work, debugging it is +# easier if can see the output from latex, which is normally redirected +# to /dev/null. To see it, do the following: +# +# cd docs/manual +# make tex +# latex bacula.tex +# +# typically the latex command will stop indicating the error (e.g. a +# missing \ in front of a _ or a missing { or ] ... +# +# The following characters must be preceded by a backslash +# to be entered as printable characters: +# +# # $ % & ~ _ ^ \ { } +# + +IMAGES=../../../images + +DOC=problems + +first_rule: all + +all: tex web dvipdf mini-clean + +.SUFFIXES: .tex .html +.PHONY: +.DONTCARE: + + +tex: + @./update_version + @echo "Making version `cat version.tex`" + @cp -fp ${IMAGES}/hires/*.eps . + @touch ${DOC}i-dir.tex ${DOC}i-fd.tex ${DOC}i-sd.tex \ + ${DOC}i-console.tex ${DOC}i-general.tex + latex -interaction=batchmode ${DOC}.tex + makeindex ${DOC}.idx -o ${DOC}.ind 2>/dev/null + latex -interaction=batchmode ${DOC}.tex + +pdf: + @echo "Making pdfm" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdfm -p a4 ${DOC}.dvi + +dvipdf: + @echo "Making dvi to pdf" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdf ${DOC}.dvi ${DOC}.pdf + +html: + @echo " " + @echo "Making html" + @cp -fp ${IMAGES}/*.eps . 
+ @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @(if [ -f imagename_translations ] ; then \ + ./translate_images.pl --from_meaningful_names ${DOC}.html; \ + fi) + latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \ + -init_file latex2html-init.pl ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}.html + @echo "Done making html" + +web: + @echo "Making web" + @mkdir -p ${DOC} + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @cp -fp ${IMAGES}/*.eps ${DOC}/ + @cp -fp ${IMAGES}/*.eps ${IMAGES}/*.png ${DOC}/ + @rm -f ${DOC}/xp-*.png + @rm -f ${DOC}/next.eps ${DOC}/next.png ${DOC}/prev.eps ${DOC}/prev.png ${DOC}/up.eps ${DOC}/up.png + @rm -rf ${DOC}/*.html + latex2html -split 3 -local_icons -t "Bacula Problem Resolution Guide" -long_titles 4 \ + -toc_stars -contents_in_nav -init_file latex2html-init.pl -white -notransparent ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}/Bacula_Proble*.html + @echo "Done making web" +show: + xdvi ${DOC} + +texcheck: + ./check_tex.pl ${DOC}.tex + +main_configs: + pic2graph -density 100 main_configs.png + +mini-clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.gif *.jpg *.eps + @rm -f *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.backup *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd *.old *.out + @rm -f ${DOC}/*.gif ${DOC}/*.jpg ${DOC}/*.eps + @rm -f ${DOC}/*.aux ${DOC}/*.cp ${DOC}/*.fn ${DOC}/*.ky ${DOC}/*.log ${DOC}/*.pg + @rm -f ${DOC}/*.backup ${DOC}/*.ilg ${DOC}/*.lof ${DOC}/*.lot + @rm -f ${DOC}/*.cdx ${DOC}/*.cnd ${DOC}/*.ddx ${DOC}/*.ddn ${DOC}/*.fdx ${DOC}/*.fnd ${DOC}/*.ind ${DOC}/*.sdx ${DOC}/*.snd + @rm -f ${DOC}/*.dnd ${DOC}/*.old ${DOC}/*.out + @rm -f ${DOC}/WARNINGS + @rm -f ${DOC}i-*.tex + + +clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.png *.gif *.jpg *.eps + @rm -f *.pdf *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.html *.backup *.ps *.dvi *.ilg *.lof *.lot + @rm -f *.cdx 
*.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd imagename_translations + @rm -f *.old WARNINGS *.out *.toc *.idx + @rm -f ${DOC}i-*.tex + @rm -rf ${DOC} + + +distclean: clean + @rm -f images.pl labels.pl internals.pl + @rm -f Makefile version.tex diff --git a/docs/manuals/en/problems/check_tex.pl b/docs/manuals/en/problems/check_tex.pl new file mode 100755 index 00000000..e12d51be --- /dev/null +++ b/docs/manuals/en/problems/check_tex.pl @@ -0,0 +1,152 @@ +#!/usr/bin/perl -w +# Finds potential problems in tex files, and issues warnings to the console +# about what it finds. Takes a list of files as its only arguments, +# and does checks on all the files listed. The assumption is that these are +# valid (or close to valid) LaTeX files. It follows \include statements +# recursively to pick up any included tex files. +# +# +# +# Currently the following checks are made: +# +# -- Multiple hyphens not inside a verbatim environment (or \verb). These +# should be placed inside a \verb{} contruct so they will not be converted +# to single hyphen by latex and latex2html. + + +# Original creation 3-8-05 by Karl Cunningham karlc -at- keckec -dot- com +# +# + +use strict; + +# The following builds the test string to identify and change multiple +# hyphens in the tex files. Several constructs are identified but only +# multiple hyphens are changed; the others are fed to the output +# unchanged. +my $b = '\\\\begin\\*?\\s*\\{\\s*'; # \begin{ +my $e = '\\\\end\\*?\\s*\\{\\s*'; # \end{ +my $c = '\\s*\\}'; # closing curly brace + +# This captures entire verbatim environments. These are passed to the output +# file unchanged. +my $verbatimenv = $b . "verbatim" . $c . ".*?" . $e . "verbatim" . $c; + +# This captures \verb{..{ constructs. They are passed to the output unchanged. +my $verb = '\\\\verb\\*?(.).*?\\1'; + +# This captures multiple hyphens with a leading and trailing space. These are not changed. 
+my $hyphsp = '\\s\\-{2,}\\s'; + +# This identifies other multiple hyphens. +my $hyphens = '\\-{2,}'; + +# This identifies \hyperpage{..} commands, which should be ignored. +my $hyperpage = '\\\\hyperpage\\*?\\{.*?\\}'; + +# This builds the actual test string from the above strings. +#my $teststr = "$verbatimenv|$verb|$tocentry|$hyphens"; +my $teststr = "$verbatimenv|$verb|$hyphsp|$hyperpage|$hyphens"; + + +sub get_includes { + # Get a list of include files from the top-level tex file. The first + # argument is a pointer to the list of files found. The rest of the + # arguments is a list of filenames to check for includes. + my $files = shift; + my ($fileline,$includefile,$includes); + + while (my $filename = shift) { + # Get a list of all the html files in the directory. + open my $if,"<$filename" or die "Cannot open input file $filename\n"; + $fileline = 0; + $includes = 0; + while (<$if>) { + chomp; + $fileline++; + # If a file is found in an include, process it. + if (($includefile) = /\\include\s*\{(.*?)\}/) { + $includes++; + # Append .tex to the filename + $includefile .= '.tex'; + + # If the include file has already been processed, issue a warning + # and don't do it again. + my $found = 0; + foreach (@$files) { + if ($_ eq $includefile) { + $found = 1; + last; + } + } + if ($found) { + print "$includefile found at line $fileline in $filename was previously included\n"; + } else { + # The file has not been previously found. Save it and + # recursively process it. + push (@$files,$includefile); + get_includes($files,$includefile); + } + } + } + close IF; + } +} + + +sub check_hyphens { + my (@files) = @_; + my ($filedata,$this,$linecnt,$before); + + # Build the test string to check for the various environments. + # We only do the conversion if the multiple hyphens are outside of a + # verbatim environment (either \begin{verbatim}...\end{verbatim} or + # \verb{--}). Capture those environments and pass them to the output + # unchanged. 
+ + foreach my $file (@files) { + # Open the file and load the whole thing into $filedata. A bit wasteful but + # easier to deal with, and we don't have a problem with speed here. + $filedata = ""; + open IF,"<$file" or die "Cannot open input file $file"; + while (<IF>) { + $filedata .= $_; + } + close IF; + + # Set up to process the file data. + $linecnt = 1; + + # Go through the file data from beginning to end. For each match, save what + # came before it and what matched. $filedata now becomes only what came + # after the match. + # Check the match to see if it starts with a multiple-hyphen. If so + # warn the user. Keep track of line numbers so they can be output + # with the warning message. + while ($filedata =~ /$teststr/os) { + $this = $&; + $before = $`; + $filedata = $'; + $linecnt += $before =~ tr/\n/\n/; + + # Check if the multiple hyphen is present outside of one of the + # acceptable constructs. + if ($this =~ /^\-+/) { + print "Possible unwanted multiple hyphen found in line ", + "$linecnt of file $file\n"; + } + $linecnt += $this =~ tr/\n/\n/; + } + } +} +################################################################## +# MAIN #### +################################################################## + +my (@includes,$cnt); + +# Examine the file pointed to by the first argument to get a list of +# includes to test.
+get_includes(\@includes,@ARGV); + +check_hyphens(@includes); diff --git a/docs/manuals/en/problems/do_echo b/docs/manuals/en/problems/do_echo new file mode 100644 index 00000000..04b9f79a --- /dev/null +++ b/docs/manuals/en/problems/do_echo @@ -0,0 +1,6 @@ +# +# Avoid that @VERSION@ and @DATE@ are changed by configure +# This file is sourced by update_version +# +echo "s%@VERSION@%${VERSION}%g" >${out} +echo "s%@DATE@%${DATE}%g" >>${out} diff --git a/docs/manuals/en/problems/faq.css b/docs/manuals/en/problems/faq.css new file mode 100644 index 00000000..d1824aff --- /dev/null +++ b/docs/manuals/en/problems/faq.css @@ -0,0 +1,30 @@ +/* Century Schoolbook font is very similar to Computer Modern Math: cmmi */ +.MATH { font-family: "Century Schoolbook", serif; } +.MATH I { font-family: "Century Schoolbook", serif; font-style: italic } +.BOLDMATH { font-family: "Century Schoolbook", serif; font-weight: bold } + +/* implement both fixed-size and relative sizes */ +SMALL.XTINY { font-size : xx-small } +SMALL.TINY { font-size : x-small } +SMALL.SCRIPTSIZE { font-size : smaller } +SMALL.FOOTNOTESIZE { font-size : small } +SMALL.SMALL { } +BIG.LARGE { } +BIG.XLARGE { font-size : large } +BIG.XXLARGE { font-size : x-large } +BIG.HUGE { font-size : larger } +BIG.XHUGE { font-size : xx-large } + +/* heading styles */ +H1 { } +H2 { } +H3 { } +H4 { } +H5 { } + +/* mathematics styles */ +DIV.displaymath { } /* math displays */ +TD.eqno { } /* equation-number cells */ + + +/* document-specific styles come next */ diff --git a/docs/manuals/en/problems/faq.tex b/docs/manuals/en/problems/faq.tex new file mode 100644 index 00000000..df0f0554 --- /dev/null +++ b/docs/manuals/en/problems/faq.tex @@ -0,0 +1,876 @@ +%% +%% +% TODO: maybe merge all this FAQ in with the appropriate section? 
+% TODO: and use detailed indexing to help reader + +\chapter{Bacula Frequently Asked Questions} +\label{FaqChapter} +\index[general]{Questions!Bacula Frequently Asked } +\index[general]{Bacula Frequently Asked Questions } + +These are questions that have been submitted over time by the +Bacula users. The following +FAQ is very useful, but it is not always up to date +with newer information, so after reading it, if you don't find what you +want, you might try the Bacula wiki maintained by Frank Sweetser, which +contains more than just a FAQ: +\elink{http://wiki.bacula.org}{\url{http://wiki.bacula.org}} +or go directly to the FAQ at: +\elink{http://wiki.bacula.org/doku.php?id=faq} +{\url{http://wiki.bacula.org/doku.php?id=faq}}. + +Please also see +\ilink{the bugs section}{BugsChapter} of this document for a list +of known bugs and solutions. + +\begin{description} +\label{what} +\section{What is Bacula?} +\item [What is {\bf Bacula}? ] + \index[general]{What is Bacula? } + {\bf Bacula} is a network backup and restore program. + +\section{Does Bacula support Windows?} +\item [Does Bacula support Windows?] +\index[general]{Does Bacula support Windows? } + Yes, Bacula compiles and runs on Windows machines (Win98, WinMe, WinXP, + WinNT, Win2003, and Win2000). We provide a binary version of the Client + (bacula-fd), but have not tested the Director nor the Storage daemon. + Note, Win95 is no longer supported because it doesn't have the + GetFileAttributesExA API call. + + +\label{lang} +\section{What language is Bacula written in?} +\item [What language is Bacula written in?] +\index[general]{What language is Bacula written in? } + It is written in C++, but it is mostly C code using only a limited set of + the C++ extensions over C. Thus Bacula is completely compiled using the + C++ compiler. There are several modules, including the Win32 interface, that + are written using the object oriented C++ features. Over time, we are slowly + adding a larger subset of C++. 
+ +\label{run} +\section{On what machines does Bacula run?} +\item [On what machines does Bacula run? ] + \index[general]{On what machines does Bacula run? } + {\bf Bacula} builds and executes on Red Hat Linux (versions RH7.1-RHEL + 4.0, Fedora, SuSE, Gentoo, Debian, Mandriva, ...), FreeBSD, Solaris, + Alpha, SGI (client), NetBSD, OpenBSD, Mac OS X (client), and Win32. + + Bacula has been my only backup tool for over seven years backing up 8 + machines nightly (6 Linux boxes running SuSE, previously + Red Hat and Fedora, a WinXP machine, and a WinNT machine). + + +\label{stable} +\section{Is Bacula Stable?} +\item [Is Bacula Stable? ] +\index[general]{Is Bacula Stable? } + Yes, it is remarkably stable, but remember, there are still a lot of + unimplemented or partially implemented features. With a program of this + size (150,000+ lines of C++ code not including the SQL programs) there + are bound to be bugs. The current test environment (a twisted pair + local network and a HP DLT backup tape) is not exactly ideal, so + additional testing on other sites is necessary. The File daemon has + never crashed -- running months at a time with no intervention. The + Storage daemon is remarkably stable with most of the problems arising + during labeling or switching tapes. Storage daemon crashes are rare + but running multiple drives and simultaneous jobs sometimes (rarely) + problems. + The Director, given the multitude of functions it fulfills is also + relatively stable. In a production environment, it rarely if ever + crashes. Of the three daemons, the Director is the most prone to having + problems. Still, it frequently runs several months with no problems. + + There are a number of reasons for this stability. + + \begin{enumerate} + \item The program is constantly checking the chain of allocated + memory buffers to ensure that no overruns have occurred. 
\\ + \item All memory leaks (orphaned buffers) are reported each time the + program terminates.\\ + \item Any signal (segmentation fault, ...) generates a + traceback that is emailed to the developer. This permits quick + resolution of bugs even if they only show up rarely in a production + system.\\ + \item There is a reasonably comprehensive set of regression tests + that avoids re-creating the most common errors in new versions of + Bacula. + \end{enumerate} + +\label{AuthorizationErrors} +\section{I'm Getting Authorization Errors. What is Going On? } +\item [I'm Getting Authorization Errors. What is Going On? ] +\index[general]{Authorization Errors} +\index[general]{Concurrent Jobs} + For security reasons, Bacula requires that both the File daemon and the + Storage daemon know the name of the Director as well as its password. As a + consequence, if you change the Director's name or password, you must make + the corresponding change in the Storage daemon's and in the File daemon's + configuration files. + + During the authorization process, the Storage daemon and File daemon + also require that the Director authenticates itself, so both ends + require the other to have the correct name and password. + + If you have edited the conf files and modified any name or any password, + and you are getting authentication errors, then your best bet is to go + back to the original conf files generated by the Bacula installation + process. Make only the absolutely necessary modifications to these + files -- e.g. add the correct email address. Then follow the + instructions in the \ilink{ Running Bacula}{TutorialChapter} chapter of + this manual. You will run a backup to disk and a restore. Only when + that works, should you begin customization of the conf files. + + Another reason that you can get authentication errors is if you are + running Multiple Concurrent Jobs in the Director, but you have not set + them in the File daemon or the Storage daemon. 
Once you reach their + limit, they will reject the connection producing authentication (or + connection) errors. + + If you are having problems connecting to a Windows machine that + previously worked, you might try restarting the Bacula service since + Windows frequently encounters networking connection problems. + + Some users report that authentication fails if there is not a proper + reverse DNS lookup entry for the machine. This seems to be a + requirement of gethostbyname(), which is what Bacula uses to translate + names into IP addresses. If you cannot add a reverse DNS entry, or you + don't know how to do so, you can avoid the problem by specifying an IP + address rather than a machine name in the appropriate Bacula conf file. + + Here is a picture that indicates what names/passwords in which + files/Resources must match up: + + \includegraphics{./Conf-Diagram.eps} + + In the left column, you will find the Director, Storage, and Client + resources, with their names and passwords -- these are all in {\bf + bacula-dir.conf}. The right column is where the corresponding values + should be found in the Console, Storage daemon (SD), and File daemon (FD) + configuration files. + + Another thing to check is to ensure that the Bacula component you are + trying to access has {\bf Maximum Concurrent Jobs} set large enough to + handle each of the Jobs and the Console that want to connect + simultaneously. Once the maximum connections has been reached, each + Bacula component will reject all new connections. + + Finally, make sure you have no {\bf hosts.allow} or {\bf hosts.deny} + file that is not permitting access to the site trying to connect. + +\label{AccessProblems} +\section{Bacula Runs Fine but Cannot Access a Client on a Different Machine. + Why? } +\item [Bacula Runs Fine but Cannot Access a Client on a Different Machine. + Why? 
] +\index[general]{Cannot Access a Client} + There are several reasons why Bacula could not contact a client on a + different machine. They are: + +\begin{itemize} +\item It is a Windows Client, and the client died because of an improper + configuration file. Check that the Bacula icon is in the system tray and the + the menu items work. If the client has died, the icon will disappear only + when you move the mouse over the icon. +\item The Client address or port is incorrect or not resolved by DNS. See if + you can ping the client machine using the same address as in the Client + record. +\item You have a firewall, and it is blocking traffic on port 9102 between + the Director's machine and the Client's machine (or on port 9103 between the + Client and the Storage daemon machines). +\item Your password or names are not correct in both the Director and the + Client machine. Try configuring everything identical to how you run the + client on the same machine as the Director, but just change the Address. If + that works, make the other changes one step at a time until it works. +\item You may also be having problems between your File daemon and your + Storage daemon. The name you use in the Storage resource of your + Director's conf file must be known (resolvable) by the File daemon, + because it is passed symbolically to the File daemon, which then + resolves it to get an IP address used to contact the Storage daemon. +\item You may have a {\bf hosts.allow} or {\bf hosts.deny} file that is + not permitting access. +\end{itemize} + +\label{startover} +\section{My Catalog is Full of Test Runs, How Can I Start Over?} +\item [My Catalog is Full of Test Runs, How Can I Start Over? ] + \index[general]{My Catalog is Full of Test Runs, How Can I Start Over? 
} + If you are using MySQL do the following: + +\footnotesize +\begin{verbatim} + cd /src/cats + ./drop_mysql_tables + ./make_mysql_tables + +\end{verbatim} +\normalsize + +If you are using SQLite, do the following: + +\footnotesize +\begin{verbatim} + Delete bacula.db from your working directory. + cd /src/cats + ./drop_sqlite_tables + ./make_sqlite_tables + +\end{verbatim} +\normalsize + +Then write an EOF on each tape you used with {\bf Bacula} using: + +\footnotesize +\begin{verbatim} +mt -f /dev/st0 rewind +mt -f /dev/st0 weof +\end{verbatim} +\normalsize + +where you need to adjust the device name for your system. + +\label{restorehang} +\section{I Run a Restore Job and Bacula Hangs. What do I do?} +\item [I Run a Restore Job and Bacula Hangs. What do I do?] +\index[general]{I Run a Restore Job and Bacula Hangs. What do I do? } + On Bacula version 1.25 and prior, it expects you to have the correct + tape mounted prior to a restore. On Bacula version 1.26 and higher, it + will ask you for the tape, and if the wrong one is mounted, it will + inform you. + + If you have previously done an {\bf unmount} command, all Storage daemon + sessions (jobs) will be completely blocked from using the drive + unmounted, so be sure to do a {\bf mount} after your unmount. If in + doubt, do a second {\bf mount}, it won't cause any harm. + +\label{windowstart} +\section{I Cannot Get My Windows Client to Start Automatically? } +\item [I Cannot Get My Windows Client to Start Automatically? ] +\index[general]{Windows Auto Start} + You are probably having one of two problems: either the Client is dying + due to an incorrect configuration file, or you didn't do the + Installation commands necessary to install it as a Windows Service. + + For the first problem, see the next FAQ question. For the second + problem, please review the \ilink{ Windows Installation + instructions}{Win32Chapter} in this manual. 
+ +\label{windowsdie} +\section{My Windows Client Immediately Dies When I Start It} +\item [My Windows Client Immediately Dies When I Start It] +\index[general]{Windows Client Dies} +The most common problem is either that the configuration file is not where +it expects it to be, or that there is an error in the configuration file. +You must have the configuration file in {\bf +c:\textbackslash{}bacula\textbackslash{}bin\textbackslash{}bacula-fd.conf}. + +To {\bf see} what is going on when the File daemon starts on Windows, do the +following: + +\footnotesize +\begin{verbatim} + Start a DOS shell Window. + cd c:\bacula\bin + bacula-fd -d100 -c c:\bacula\bin\bacula-fd.conf + +\end{verbatim} +\normalsize + +This will cause the FD to write a file {\bf bacula.trace} in the current +directory, which you can examine and thereby determine the problem. + +\label{scroll} +\item [When I Start the Console, the Error Messages Fly By. How can I see + them? ] +\index[general]{Error Messages} + Either use a shell window with a scroll bar, or use the gnome-console. + In any case, you probably should be logging all output to a file, and + then you can simply view the file using an editor or the {\bf less} + program. To log all output, I have the following in my Director's + Message resource definition: + +\footnotesize +\begin{verbatim} + append = "/home/kern/bacula/bin/log" = all, !skipped + +\end{verbatim} +\normalsize + +Obviously you will want to change the filename to be appropriate for your +system. + +\label{nobackup} +\section{My backups are not working on my Windows + Client. What should I do?} +\item [I didn't realize that the backups were not working on my Windows + Client. What should I do? ] +\index[general]{Backups Failing} +You should be sending yourself an email message for each job. This will avoid +the possibility of not knowing about a failed backup. 
To do so put something +like: + +\footnotesize +\begin{verbatim} + Mail = yourname@yourdomain = all, !skipped + +\end{verbatim} +\normalsize + +in your Director's message resource. You should then receive one email for +each Job that ran. When you are comfortable with what is going on (it took +me 9 months), you might change that to: + +\footnotesize +\begin{verbatim} + MailOnError = yourname@yourdomain = all, !skipped + +\end{verbatim} +\normalsize + +then you only get email messages when a Job errors as is the case for your +Windows machine. + +You should also be logging the Director's messages, please see the previous +FAQ for how to do so. + +\label{sched} +\section{All my Jobs are scheduled for the same time. Will this cause + problems?} +\item [All my Jobs are scheduled for the same time. Will this cause + problems? ] +\index[general]{Schedule problems} + No, not at all. Bacula will schedule all the Jobs at the same time, but + will run them one after another unless you have increased the number of + simultaneous jobs in the configuration files for the Director, the File + daemon, and the Storage daemon. The appropriate configuration record is + {\bf Maximum Concurrent Jobs = nn}. At the current time, we recommend + that you leave this set to {\bf 1} for the Director. + +\label{disk} +\section{Can Bacula Backup My System To Files instead of Tape?} +\item [Can Bacula Backup My System To Files instead of Tape? ] +\index[general]{Backup to Disk} + Yes, in principle, Bacula can backup to any storage medium as long as + you have correctly defined that medium in the Storage daemon's Device + resource. For an example of how to backup to files, please see the + \ilink{Pruning Example}{PruningExample} in the Recycling chapter of this + manual. Also, there is a whole chapter devoted to \ilink{Basic Volume + Management}{DiskChapter}. This chapter was originally written to + explain how to write to disk, but was expanded to include volume + management. 
It is, however, still quite a good chapter to read. + +\label{testbackup} +\section{Can I use a dummy device to test the backup?} + Yes, to have a {\sl Virtual} device which just consumes data, you can use a + FIFO device (see \ilink{Stored configuration}{SetupFifo}). + It's useful to test a backup. +\footnotesize +\begin{verbatim} +Device { + Name = NULL + Media Type = NULL + Device Type = Fifo + Archive Device = /dev/null + LabelMedia = yes + Random Access = no + AutomaticMount = no + RemovableMedia = no + MaximumOpenWait = 60 + AlwaysOpen = no +} +\end{verbatim} +\normalsize + +\label{bigfiles} +\section{Can Bacula Backup and Restore Files Bigger than 2 Gigabytes?} +\item [Can Bacula Backup and Restore Files Bigger than 2 Gigabytes?] +\index[general]{Large file support} +If your operating system permits it, and you are running Bacula version +1.26 or later, the answer is yes. To the best of our knowledge all client +system supported by Bacula can handle files bigger 2 Gigabytes. + +\label{cancel} +\section{I want to stop a job.} +%% Is there a better way than "./bacula stop" to stop it?} +\item [I Started A Job then Decided I Really Did Not Want to Run It. Is + there a better way than {\bf ./bacula stop} to stop it?] +\index[general]{Cancelling jobs} + Yes, you normally should use the Console command {\bf cancel} to cancel + a Job that is either scheduled or running. If the Job is scheduled, it + will be marked for cancellation and will be canceled when it is + scheduled to start. If it is running, it will normally terminate after + a few minutes. If the Job is waiting on a tape mount, you may need to + do a {\bf mount} command before it will be canceled. + +\label{trademark} +\section{Why have You Trademarked the Name Bacula?} +\item [Why have You Trademarked the Name + Bacula\raisebox{.6ex}{{\footnotesize \textsuperscript{\textregistered}}}?] 
+\index[general]{Bacula Trademark} +We have trademarked the name Bacula to ensure that all media written by any +program named Bacula will always be compatible. Anyone may use the name +Bacula, even in a derivative product as long as it remains totally compatible +in all respects with the program defined here. + +\label{docversion} +\section{Why is the Online Document for Version 1.39 but the Released Version is 1.38?} +\item [Why is the Online Document for Version 1.39 of Bacula when the + Current Version is 1.38?] +\index[general]{Multiple manuals} +As Bacula is being developed, the document is also being enhanced, more +often than not it has clarifications of existing features that can be very +useful to our users, so we publish the very latest document. Fortunately +it is rare that there are confusions with new features. + +If you want to read a document that pertains only to a specific version, +please use the one distributed in the source code. The web site also has +online versions of both the released manual and the current development +manual. + +\label{sure} +\section{Does Bacula really save and restore all files?} +\item [How Can I Be Sure that Bacula Really Saves and Restores All Files? ] +\index[general]{Checking Restores} + It is really quite simple, but took me a while to figure + out how to "prove" it. First make a Bacula Rescue disk, see the + \ilink{Disaster Recovery Using Bacula}{RescueChapter} chapter + of this manual. + Second, you run a full backup of all your files on all partitions. + Third, you run an Verify InitCatalog Job on the same FileSet, which + effectively makes a record of all the files on your system. Fourth, you + run a Verify Catalog job and assure yourself that nothing has changed + (well, between an InitCatalog and Catalog one doesn't expect anything). + Then do the unthinkable, write zeros on your MBR (master boot record) + wiping out your hard disk. 
Now, restore your whole system using your + Bacula Rescue disk and the Full backup you made, and finally re-run the + Verify Catalog job. You will see that with the exception of the + directory modification and access dates and the files changed during the + boot, your system is identical to what it was before you wiped your hard + disk. + Alternatively you could do the wiping and restoring to another computer + of the same type. + +\label{upgrade} +\section{I want an Incremental but Bacula runs it as a Full backup. Why?} +\item [I did a Full backup last week, but now in running an Incremental, + Bacula says it did not find a FULL backup, so it did a FULL backup. Why?] +\index[general]{FULL backup not found} + Before doing an Incremental or a Differential + backup, Bacula checks to see if there was a prior Full backup of the + same Job that terminated successfully. If so, it uses the date that + full backup started as the time for comparing if files have changed. If + Bacula does not find a successful full backup, it proceeds to do one. + Perhaps you canceled the full backup, or it terminated in error. In + such cases, the full backup will not be successful. You can check by + entering {\bf list jobs} and look to see if there is a prior Job with + the same Name that has Level F and JobStatus T (normal termination). + + Another reason why Bacula may not find a suitable Full backup is that + every time you change the FileSet, Bacula will require a new Full + backup. This is necessary to ensure that all files are properly backed + up in the case where you have added more files to the FileSet. + Beginning with version 1.31, the FileSets are also dated when they are + created, and this date is displayed with the name when you are listing + or selecting a FileSet. For more on backup levels see below. + + See also {\bf Ignore FileSet Changes} in the + \ilink{FileSet Resource definition}{FileSetResource} in the Director + chapter of this document. 
+ +\label{filenamelengths} +\section{Do you really handle unlimited path lengths?} +\item [How Can You Claim to Handle Unlimited Path and Filename Lengths + when All Other Programs Have Fixed Limits?] +\index[general]{Path and Filename Lengths} + Most of those other programs have been around for a long time, in fact + since the beginning of Unix, which means that they were designed for + rather small fixed length path and filename lengths. Over the years, + these restrictions have been relaxed allowing longer names. Bacula on + the other hand was designed in 2000, and so from the start, Path and + Filenames have been kept in buffers that start at 256 bytes in length, + but can grow as needed to handle any length. Most of the work is + carried out by lower level routines making the coding rather easy. + + Note that due to limitations Win32 path and filenames cannot exceed + 260 characters. By using Win32 Unicode functions, we will remove this + restriction in later versions of Bacula. + +\label{unique} +\section{What Is the Really Unique Feature of Bacula?} +\item [What Is the Really Unique Feature of Bacula?] +\index[general]{Unique Feature of Bacula} + Well, it is hard to come up with unique features when backup programs + for Unix machines have been around since the 1960s. That said, I + believe that Bacula is the first and only program to use a standard SQL + interface to catalog its database. Although this adds a bit of + complexity and possibly overhead, it provides an amazingly rich set of + features that are easy to program and enhance. The current code has + barely scratched the surface in this regard (version 1.38). + + The second feature, which gives a lot of power and flexibility to Bacula + is the Bootstrap record definition. + + The third unique feature, which is currently (1.30) unimplemented, and + thus can be called vaporware :-), is Base level saves. When + implemented, this will enormously reduce tape usage. 
+ +\label{sequence} +\section{How can I force one job to run after another?} +\item [If I Run Multiple Simultaneous Jobs, How Can I Force One + Particular Job to Run After Another Job? ] +\index[general]{Multiple Simultaneous Jobs} +Yes, you can set Priorities on your jobs so that they run in the order you +specify. Please see: +\ilink{the Priority record}{Priority} in the Job resource. + +\label{nomail} +\section{I Am Not Getting Email Notification, What Can I Do? } +\item [I Am Not Getting Email Notification, What Can I Do? ] +\index[general]{No Email Notification} + The most common problem is that you have not specified a fully qualified + email address and your bsmtp server is rejecting the mail. The next + most common problem is that your bsmtp server doesn't like the syntax on + the From part of the message. For more details on this and other + problems, please see the \ilink{ Getting Email Notification to + Work}{email} section of the Tips chapter of this manual. The section + \ilink{ Getting Notified of Job Completion}{notification} of the Tips + chapter may also be useful. For more information on the {\bf bsmtp} + mail program, please see \ilink{bsmtp in the Volume Utility Tools + chapter}{bsmtp} of this manual. + +\label{periods} +\section{My retention periods don't work} +\item [I Change Recycling, Retention Periods, or File Sizes in my Pool + Resource and they Still Don't Work.] +\index[general]{Recycling} +\index[general]{Retention Periods} +\index[general]{Pool changes} + The different variables associated with a Pool are defined in the Pool + Resource, but are actually read by Bacula from the Catalog database. On + Bacula versions prior to 1.30, after changing your Pool Resource, you must + manually update the corresponding values in the Catalog by using the {\bf + update pool} command in the Console program. In Bacula version 1.30, Bacula + does this for you automatically every time it starts. 
+ + When Bacula creates a Media record (Volume), it uses many default values from + the Pool record. If you subsequently change the Pool record, the new values + will be used as a default for the next Volume that is created, but if you + want the new values to apply to existing Volumes, you must manually update + the Volume Catalog entry using the {\bf update volume} command in the Console + program. + +\label{CompressionNotWorking} +\section{Why aren't my files compressed?} +\item [I Have Configured Compression On, But None of My Files Are + Compressed. Why?] +\index[general]{Compression} + There are two kinds of compression. One is tape compression. This is done by + the tape drive hardware, and you either enable or disable it with system + tools such as {\bf mt}. This compression works independently of Bacula, + and when it is enabled, you should not use the Bacula software + compression. + + Bacula also has software compression code in the File daemons, which you + normally need to enable only when backing up to file Volumes. There are + two conditions necessary to enable the Bacula software compression. + +\begin{enumerate} +\item You must have the zip development libraries loaded on your system + when building Bacula and Bacula must find this library, normally {\bf + /usr/lib/libz.a}. On Red Hat systems, this library is provided by the + {\bf zlib-devel} rpm. + + If the library is found by Bacula during the {\bf ./configure} it will + be mentioned in the {\bf config.out} line by: + +\footnotesize +\begin{verbatim} + ZLIB support: yes + +\end{verbatim} +\normalsize + +\item You must add the {\bf compression=gzip} option on your Include + statement in the Director's configuration file. +\end{enumerate} + +\label{NewTape} +\item [Bacula is Asking for a New Tape After 2 GB of Data but My Tape + holds 33 GB. Why?] +\index[general]{Tape capacity} +There are several reasons why Bacula will request a new tape. 
+
+\begin{itemize}
+\item There is an I/O error on the tape. Bacula prints an error message and
+ requests a new tape. Bacula does not attempt to continue writing after an
+ I/O error.
+\item Bacula encounters an end of medium on the tape. This is not always
+ distinguishable from an I/O error.
+\item You have specifically set some size limitation on the tape. For example
+ the {\bf Maximum Volume Bytes} or {\bf Maximum Volume Files} in the
+ Director's Pool resource, or {\bf Maximum Volume Size} in the Storage
+ daemon's Device resource.
+\end{itemize}
+
+\label{LevelChanging}
+\section{Incremental backups are not working}
+\item [Bacula is Not Doing the Right Thing When I Request an Incremental
+ Backup. Why?]
+\index[general]{Incremental backups}
+ As explained in one of the previous questions, Bacula will automatically
+ upgrade an Incremental or Differential job to a Full backup if it cannot
+ find a prior Full backup or a suitable Full backup. For the gory
+ details on how/when Bacula decides to upgrade levels please see the
+ \ilink{Level record}{Level} in the Director's configuration chapter of
+ this manual.
+
+ If after reading the above mentioned section, you believe that Bacula is not
+ correctly handling the level (Differential/Incremental), please send us the
+ following information for analysis:
+
+\begin{itemize}
+\item Your Director's configuration file.
+\item The output from {\bf list jobs} covering the period where you are
+ having the problem.
+\item The Job report output from the prior Full save (not critical).
+\item An {\bf llist jobid=nnn} where nnn is the JobId of the prior Full save.
+
+\item The Job report output from the save that is doing the wrong thing (not
+ critical).
+\item An {\bf llist jobid=nnn} where nnn is the JobId of the job that was not
+ correct.
+\item An explanation of what job went wrong and why you think it did.
+ \end{itemize} + +The above information can allow us to analyze what happened, without it, +there is not much we can do. + +\label{WaitForever} +\section{I am waiting forever for a backup of an offsite machine} +\item [I am Backing Up an Offsite Machine with an Unreliable Connection. + The Director Waits Forever for the Client to Contact the SD. What Can I + Do?] +\index[general]{Backing Up Offsite Machines} + Bacula was written on the assumption that it will have a good TCP/IP + connection between all the daemons. As a consequence, the current + Bacula doesn't deal with faulty connections very well. This situation + is slowly being corrected over time. + + There are several things you can do to improve the situation. + +\begin{itemize} +\item Upgrade to version 1.32 and use the new SDConnectTimeout record. For + example, set: + +\footnotesize +\begin{verbatim} + SD Connect Timeout = 5 min + +\end{verbatim} +\normalsize + +in the FileDaemon resource. +\item Run these kinds of jobs after all other jobs. + \end{itemize} + +\label{sshHanging} +\section{SSH hangs forever after starting Bacula} +\item [When I ssh into a machine and start Bacula then attempt to exit, + ssh hangs forever.] +\index[general]{ssh hangs} + This happens because Bacula leaves stdin, stdout, and stderr open for + debug purposes. To avoid it, the simplest thing to do is to redirect + the output of those files to {\bf /dev/null} or another file in your + startup script (the Red Hat autostart scripts do this automatically). + For example, you start the Director with: + +\footnotesize +\begin{verbatim} + bacula-dir -c bacula-dir.conf ... 0>\&1 2>\&1 >/dev/null + +\end{verbatim} +\normalsize + +and likewise for the other daemons. + +\label{RetentionPeriods} +\section{I'm confused by retention periods} +\item [I'm confused by the different Retention periods: File Retention, + Job Retention, Volume Retention. Why are there so many?] 
+\index[general]{Retention Periods} + Yes, this certainly can be confusing. The basic reason for so many is + to allow flexibility. The File records take quite a lot of space in the + catalog, so they are typically records you want to remove rather + quickly. The Job records, take very little space, and they can be + useful even without the File records to see what Jobs actually ran and + when. One must understand that if the File records are removed from the + catalog, you cannot use the {\bf restore} command to restore an + individual file since Bacula no longer knows where it is. However, as + long as the Volume Retention period has not expired, the data will still + be on the tape, and can be recovered from the tape. + + For example, I keep a 30 day retention period for my Files to keep my + catalog from getting too big, but I keep my tapes for a minimum of one + year, just in case. + +\label{MaxVolumeSize} +\section{MaxVolumeSize is ignored} +\item [Why Does Bacula Ignore the MaxVolumeSize Set in my Pool?] +\index[general]{MaxVolumeSize} + The MaxVolumeSize that Bacula uses comes from the Media record, so most + likely you changed your Pool, which is used as the default for creating + Media records, {\bf after} you created your Volume. Check what is in + the Media record by doing: + +\footnotesize +\begin{verbatim} +llist Volume=xxx +\end{verbatim} +\normalsize + +If it doesn't have the right value, you can use: + +\footnotesize +\begin{verbatim} +update Volume=xxx +\end{verbatim} +\normalsize + +to change it. + +\label{ConnectionRefused} +\section{I get a Connection refused when connecting to my Client} +\item [In connecting to my Client, I get "ERR:Connection Refused. Packet + Size too big from File daemon:192.168.1.4:9102" Why?] 
+\index[general]{ERR:Connection Refused} + This is typically a communications error resulting from one of the + following: + + +\begin{itemize} +\item Old versions of Bacula, usually a Win32 client, where two threads were + using the same I/O packet. Fixed in more recent versions. Please upgrade. +\item Some other program such as an HP Printer using the same port (9102 in + this case). +\end{itemize} + +If it is neither of the above, please submit a bug report at +\elink{bugs.bacula.org}{http://bugs.bacula.org}. + +Another solution might be to run the daemon with the debug option by: + +\footnotesize +\begin{verbatim} + Start a DOS shell Window. + cd c:\bacula\bin + bacula-fd -d100 -c c:\bacula\bin\bacula-fd.conf + +\end{verbatim} +\normalsize + +This will cause the FD to write a file {\bf bacula.trace} in the current +directory, which you can examine to determine the problem. + +\section{Long running jobs die with Pipe Error} +\item [During long running jobs my File daemon dies with Pipe Error, or + some other communications error. Why?] +\index[general]{Communications Errors} +\index[general]{Pipe Errors} +\index[general]{slow} +\index[general]{Backups!slow} + There are a number of reasons why a connection might break. + Most often, it is a router between your two computers that times out + inactive lines (not respecting the keepalive feature that Bacula uses). + In that case, you can use the {\bf Heartbeat Interval} directive in + both the Storage daemon and the File daemon. + + In at least one case, the problem has been a bad driver for a Win32 + NVidia NForce 3 ethernet card with driver (4.4.2 17/05/2004). + In this case, a good driver is (4.8.2.0 06/04/2005). 
Moral of
+ the story, make sure you have the latest ethernet drivers
+ loaded, or use the following workaround as suggested by Thomas
+ Simmons for Win32 machines:
+
+ Browse to:
+ Start \gt{} Control Panel \gt{} Network Connections
+
+ Right click the connection for the nvidia adapter and select properties.
+ Under the General tab, click "Configure...". Under the Advanced tab set
+ "Checksum Offload" to disabled and click OK to save the change.
+
+ Lack of communications, or communications that get interrupted can
+ also be caused by Linux firewalls where you have a rule that throttles
+ connections or traffic. For example, if you have:
+
+\footnotesize
+\begin{verbatim}
+iptables -t filter -A INPUT -m limit --limit 3/second --limit-burst 3 -j DROP
+\end{verbatim}
+\normalsize
+
+ you will want to add the following rules {\bf before} the above rule:
+\footnotesize
+\begin{verbatim}
+iptables -t filter -A INPUT -p tcp --dport 9101 -j ACCEPT
+iptables -t filter -A INPUT -p tcp --dport 9102 -j ACCEPT
+iptables -t filter -A INPUT -p tcp --dport 9103 -j ACCEPT
+\end{verbatim}
+\normalsize
+ This will ensure that any Bacula traffic will not get terminated because
+ of high usage rates.
+
+\section{How do I tell the Job which Volume to use?}
+\item[I can't figure out how to tell the job which volume to use]
+ \index[general]{What tape to mount}
+ This is an interesting statement. I now see that a number of people new to
+ Bacula have the same problem as you, probably from using programs like tar.
+
+ In fact, you do not tell Bacula what tapes to use. It is the inverse. Bacula
+ tells you what tapes it wants. You put tapes at its disposition and it
+ chooses.
+
+ Now, if you *really* want to be tricky and try to tell Bacula what to do, it
+ will be reasonable if for example you mount a valid tape that it can use on a
+ drive, it will most likely go ahead and use it. It also has a documented
+ algorithm for choosing tapes -- but you are asking for problems ...
+
+ So, the trick is to invert your concept of things and put Bacula in charge of
+ handling the tapes. Once you do that, you will be fine. If you want to
+ anticipate what it is going to do, you can generally figure it out correctly
+ and get what you want.
+
+ If you start with the idea that you are going to force or tell Bacula to use
+ particular tapes or you insist on trying to run in that kind of mode, you will
+ probably not be too happy.
+
+ I don't want to worry about what tape has what data. That is what Bacula is
+ designed for.
+
+ If you have an application where you *really* need to remove a tape each day
+ and insert a new one, it can be done; the directives exist to accomplish that.
+ In such a case, one little "trick" to knowing what tape Bacula will want at
+ 2am while you are asleep is to run a tiny job at 4pm while you are still at
+ work that backs up say one directory, or even one file. You will quickly find
+ out what tape it wants, and you can mount it before you go home ...
+
+\label{Password generation}
+\section{Password generation}
+\item [How do I generate a password?]
+\index[general]{Password generation}
+
+ Each daemon needs a password. This password occurs in the configuration
+ file for that daemon and in the bacula-dir.conf file. These passwords are
+ plain text. There is no special generation procedure. Most people just
+ use random text.
+
+ Passwords are never sent over the wire in plain text. They are always
+ encrypted.
+
+ Security surrounding these passwords is best left to your
+ operating system. Passwords are not encrypted within Bacula
+ configuration files.
+ +\end{description} + \ No newline at end of file diff --git a/docs/manuals/en/problems/fdl.tex b/docs/manuals/en/problems/fdl.tex new file mode 100644 index 00000000..b46cd990 --- /dev/null +++ b/docs/manuals/en/problems/fdl.tex @@ -0,0 +1,485 @@ +% TODO: maybe get rid of centering + +\chapter{GNU Free Documentation License} +\index[general]{GNU Free Documentation License} +\index[general]{License!GNU Free Documentation} + +\label{label_fdl} + + \begin{center} + + Version 1.2, November 2002 + + + Copyright \copyright 2000,2001,2002 Free Software Foundation, Inc. + + \bigskip + + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + \bigskip + + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. +\end{center} + + +\begin{center} +{\bf\large Preamble} +\end{center} + +The purpose of this License is to make a manual, textbook, or other +functional and useful document "free" in the sense of freedom: to +assure everyone the effective freedom to copy and redistribute it, +with or without modifying it, either commercially or noncommercially. +Secondarily, this License preserves for the author and publisher a way +to get credit for their work, while not being considered responsible +for modifications made by others. + +This License is a kind of "copyleft", which means that derivative +works of the document must themselves be free in the same sense. It +complements the GNU General Public License, which is a copyleft +license designed for free software. + +We have designed this License in order to use it for manuals for free +software, because free software needs free documentation: a free +program should come with manuals providing the same freedoms that the +software does. But this License is not limited to software manuals; +it can be used for any textual work, regardless of subject matter or +whether it is published as a printed book. 
We recommend this License +principally for works whose purpose is instruction or reference. + + +\begin{center} +{\Large\bf 1. APPLICABILITY AND DEFINITIONS} +\end{center} + +This License applies to any manual or other work, in any medium, that +contains a notice placed by the copyright holder saying it can be +distributed under the terms of this License. Such a notice grants a +world-wide, royalty-free license, unlimited in duration, to use that +work under the conditions stated herein. The \textbf{"Document"}, below, +refers to any such manual or work. Any member of the public is a +licensee, and is addressed as \textbf{"you"}. You accept the license if you +copy, modify or distribute the work in a way requiring permission +under copyright law. + +A \textbf{"Modified Version"} of the Document means any work containing the +Document or a portion of it, either copied verbatim, or with +modifications and/or translated into another language. + +A \textbf{"Secondary Section"} is a named appendix or a front-matter section of +the Document that deals exclusively with the relationship of the +publishers or authors of the Document to the Document's overall subject +(or to related matters) and contains nothing that could fall directly +within that overall subject. (Thus, if the Document is in part a +textbook of mathematics, a Secondary Section may not explain any +mathematics.) The relationship could be a matter of historical +connection with the subject or with related matters, or of legal, +commercial, philosophical, ethical or political position regarding +them. + +The \textbf{"Invariant Sections"} are certain Secondary Sections whose titles +are designated, as being those of Invariant Sections, in the notice +that says that the Document is released under this License. If a +section does not fit the above definition of Secondary then it is not +allowed to be designated as Invariant. The Document may contain zero +Invariant Sections. 
If the Document does not identify any Invariant +Sections then there are none. + +The \textbf{"Cover Texts"} are certain short passages of text that are listed, +as Front-Cover Texts or Back-Cover Texts, in the notice that says that +the Document is released under this License. A Front-Cover Text may +be at most 5 words, and a Back-Cover Text may be at most 25 words. + +A \textbf{"Transparent"} copy of the Document means a machine-readable copy, +represented in a format whose specification is available to the +general public, that is suitable for revising the document +straightforwardly with generic text editors or (for images composed of +pixels) generic paint programs or (for drawings) some widely available +drawing editor, and that is suitable for input to text formatters or +for automatic translation to a variety of formats suitable for input +to text formatters. A copy made in an otherwise Transparent file +format whose markup, or absence of markup, has been arranged to thwart +or discourage subsequent modification by readers is not Transparent. +An image format is not Transparent if used for any substantial amount +of text. A copy that is not "Transparent" is called \textbf{"Opaque"}. + +Examples of suitable formats for Transparent copies include plain +ASCII without markup, Texinfo input format, LaTeX input format, SGML +or XML using a publicly available DTD, and standard-conforming simple +HTML, PostScript or PDF designed for human modification. Examples of +transparent image formats include PNG, XCF and JPG. Opaque formats +include proprietary formats that can be read and edited only by +proprietary word processors, SGML or XML for which the DTD and/or +processing tools are not generally available, and the +machine-generated HTML, PostScript or PDF produced by some word +processors for output purposes only. 
+ +The \textbf{"Title Page"} means, for a printed book, the title page itself, +plus such following pages as are needed to hold, legibly, the material +this License requires to appear in the title page. For works in +formats which do not have any title page as such, "Title Page" means +the text near the most prominent appearance of the work's title, +preceding the beginning of the body of the text. + +A section \textbf{"Entitled XYZ"} means a named subunit of the Document whose +title either is precisely XYZ or contains XYZ in parentheses following +text that translates XYZ in another language. (Here XYZ stands for a +specific section name mentioned below, such as \textbf{"Acknowledgements"}, +\textbf{"Dedications"}, \textbf{"Endorsements"}, or \textbf{"History"}.) +To \textbf{"Preserve the Title"} +of such a section when you modify the Document means that it remains a +section "Entitled XYZ" according to this definition. + +The Document may include Warranty Disclaimers next to the notice which +states that this License applies to the Document. These Warranty +Disclaimers are considered to be included by reference in this +License, but only as regards disclaiming warranties: any other +implication that these Warranty Disclaimers may have is void and has +no effect on the meaning of this License. + + +\begin{center} +{\Large\bf 2. VERBATIM COPYING} +\end{center} + +You may copy and distribute the Document in any medium, either +commercially or noncommercially, provided that this License, the +copyright notices, and the license notice saying this License applies +to the Document are reproduced in all copies, and that you add no other +conditions whatsoever to those of this License. You may not use +technical measures to obstruct or control the reading or further +copying of the copies you make or distribute. However, you may accept +compensation in exchange for copies. 
If you distribute a large enough +number of copies you must also follow the conditions in section 3. + +You may also lend copies, under the same conditions stated above, and +you may publicly display copies. + + +\begin{center} +{\Large\bf 3. COPYING IN QUANTITY} +\end{center} + + +If you publish printed copies (or copies in media that commonly have +printed covers) of the Document, numbering more than 100, and the +Document's license notice requires Cover Texts, you must enclose the +copies in covers that carry, clearly and legibly, all these Cover +Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on +the back cover. Both covers must also clearly and legibly identify +you as the publisher of these copies. The front cover must present +the full title with all words of the title equally prominent and +visible. You may add other material on the covers in addition. +Copying with changes limited to the covers, as long as they preserve +the title of the Document and satisfy these conditions, can be treated +as verbatim copying in other respects. + +If the required texts for either cover are too voluminous to fit +legibly, you should put the first ones listed (as many as fit +reasonably) on the actual cover, and continue the rest onto adjacent +pages. + +If you publish or distribute Opaque copies of the Document numbering +more than 100, you must either include a machine-readable Transparent +copy along with each Opaque copy, or state in or with each Opaque copy +a computer-network location from which the general network-using +public has access to download using public-standard network protocols +a complete Transparent copy of the Document, free of added material. 
+If you use the latter option, you must take reasonably prudent steps, +when you begin distribution of Opaque copies in quantity, to ensure +that this Transparent copy will remain thus accessible at the stated +location until at least one year after the last time you distribute an +Opaque copy (directly or through your agents or retailers) of that +edition to the public. + +It is requested, but not required, that you contact the authors of the +Document well before redistributing any large number of copies, to give +them a chance to provide you with an updated version of the Document. + + +\begin{center} +{\Large\bf 4. MODIFICATIONS} +\end{center} + +You may copy and distribute a Modified Version of the Document under +the conditions of sections 2 and 3 above, provided that you release +the Modified Version under precisely this License, with the Modified +Version filling the role of the Document, thus licensing distribution +and modification of the Modified Version to whoever possesses a copy +of it. In addition, you must do these things in the Modified Version: + +\begin{itemize} +\item[A.] + Use in the Title Page (and on the covers, if any) a title distinct + from that of the Document, and from those of previous versions + (which should, if there were any, be listed in the History section + of the Document). You may use the same title as a previous version + if the original publisher of that version gives permission. + +\item[B.] + List on the Title Page, as authors, one or more persons or entities + responsible for authorship of the modifications in the Modified + Version, together with at least five of the principal authors of the + Document (all of its principal authors, if it has fewer than five), + unless they release you from this requirement. + +\item[C.] + State on the Title page the name of the publisher of the + Modified Version, as the publisher. + +\item[D.] + Preserve all the copyright notices of the Document. + +\item[E.] 
+ Add an appropriate copyright notice for your modifications + adjacent to the other copyright notices. + +\item[F.] + Include, immediately after the copyright notices, a license notice + giving the public permission to use the Modified Version under the + terms of this License, in the form shown in the Addendum below. + +\item[G.] + Preserve in that license notice the full lists of Invariant Sections + and required Cover Texts given in the Document's license notice. + +\item[H.] + Include an unaltered copy of this License. + +\item[I.] + Preserve the section Entitled "History", Preserve its Title, and add + to it an item stating at least the title, year, new authors, and + publisher of the Modified Version as given on the Title Page. If + there is no section Entitled "History" in the Document, create one + stating the title, year, authors, and publisher of the Document as + given on its Title Page, then add an item describing the Modified + Version as stated in the previous sentence. + +\item[J.] + Preserve the network location, if any, given in the Document for + public access to a Transparent copy of the Document, and likewise + the network locations given in the Document for previous versions + it was based on. These may be placed in the "History" section. + You may omit a network location for a work that was published at + least four years before the Document itself, or if the original + publisher of the version it refers to gives permission. + +\item[K.] + For any section Entitled "Acknowledgements" or "Dedications", + Preserve the Title of the section, and preserve in the section all + the substance and tone of each of the contributor acknowledgements + and/or dedications given therein. + +\item[L.] + Preserve all the Invariant Sections of the Document, + unaltered in their text and in their titles. Section numbers + or the equivalent are not considered part of the section titles. + +\item[M.] + Delete any section Entitled "Endorsements". 
Such a section + may not be included in the Modified Version. + +\item[N.] + Do not retitle any existing section to be Entitled "Endorsements" + or to conflict in title with any Invariant Section. + +\item[O.] + Preserve any Warranty Disclaimers. +\end{itemize} + +If the Modified Version includes new front-matter sections or +appendices that qualify as Secondary Sections and contain no material +copied from the Document, you may at your option designate some or all +of these sections as invariant. To do this, add their titles to the +list of Invariant Sections in the Modified Version's license notice. +These titles must be distinct from any other section titles. + +You may add a section Entitled "Endorsements", provided it contains +nothing but endorsements of your Modified Version by various +parties--for example, statements of peer review or that the text has +been approved by an organization as the authoritative definition of a +standard. + +You may add a passage of up to five words as a Front-Cover Text, and a +passage of up to 25 words as a Back-Cover Text, to the end of the list +of Cover Texts in the Modified Version. Only one passage of +Front-Cover Text and one of Back-Cover Text may be added by (or +through arrangements made by) any one entity. If the Document already +includes a cover text for the same cover, previously added by you or +by arrangement made by the same entity you are acting on behalf of, +you may not add another; but you may replace the old one, on explicit +permission from the previous publisher that added the old one. + +The author(s) and publisher(s) of the Document do not by this License +give permission to use their names for publicity for or to assert or +imply endorsement of any Modified Version. + + +\begin{center} +{\Large\bf 5. 
COMBINING DOCUMENTS} +\end{center} + + +You may combine the Document with other documents released under this +License, under the terms defined in section 4 above for modified +versions, provided that you include in the combination all of the +Invariant Sections of all of the original documents, unmodified, and +list them all as Invariant Sections of your combined work in its +license notice, and that you preserve all their Warranty Disclaimers. + +The combined work need only contain one copy of this License, and +multiple identical Invariant Sections may be replaced with a single +copy. If there are multiple Invariant Sections with the same name but +different contents, make the title of each such section unique by +adding at the end of it, in parentheses, the name of the original +author or publisher of that section if known, or else a unique number. +Make the same adjustment to the section titles in the list of +Invariant Sections in the license notice of the combined work. + +In the combination, you must combine any sections Entitled "History" +in the various original documents, forming one section Entitled +"History"; likewise combine any sections Entitled "Acknowledgements", +and any sections Entitled "Dedications". You must delete all sections +Entitled "Endorsements". + +\begin{center} +{\Large\bf 6. COLLECTIONS OF DOCUMENTS} +\end{center} + +You may make a collection consisting of the Document and other documents +released under this License, and replace the individual copies of this +License in the various documents with a single copy that is included in +the collection, provided that you follow the rules of this License for +verbatim copying of each of the documents in all other respects. 
+ +You may extract a single document from such a collection, and distribute +it individually under this License, provided you insert a copy of this +License into the extracted document, and follow this License in all +other respects regarding verbatim copying of that document. + + +\begin{center} +{\Large\bf 7. AGGREGATION WITH INDEPENDENT WORKS} +\end{center} + + +A compilation of the Document or its derivatives with other separate +and independent documents or works, in or on a volume of a storage or +distribution medium, is called an "aggregate" if the copyright +resulting from the compilation is not used to limit the legal rights +of the compilation's users beyond what the individual works permit. +When the Document is included in an aggregate, this License does not +apply to the other works in the aggregate which are not themselves +derivative works of the Document. + +If the Cover Text requirement of section 3 is applicable to these +copies of the Document, then if the Document is less than one half of +the entire aggregate, the Document's Cover Texts may be placed on +covers that bracket the Document within the aggregate, or the +electronic equivalent of covers if the Document is in electronic form. +Otherwise they must appear on printed covers that bracket the whole +aggregate. + + +\begin{center} +{\Large\bf 8. TRANSLATION} +\end{center} + + +Translation is considered a kind of modification, so you may +distribute translations of the Document under the terms of section 4. +Replacing Invariant Sections with translations requires special +permission from their copyright holders, but you may include +translations of some or all Invariant Sections in addition to the +original versions of these Invariant Sections. 
You may include a +translation of this License, and all the license notices in the +Document, and any Warranty Disclaimers, provided that you also include +the original English version of this License and the original versions +of those notices and disclaimers. In case of a disagreement between +the translation and the original version of this License or a notice +or disclaimer, the original version will prevail. + +If a section in the Document is Entitled "Acknowledgements", +"Dedications", or "History", the requirement (section 4) to Preserve +its Title (section 1) will typically require changing the actual +title. + + +\begin{center} +{\Large\bf 9. TERMINATION} +\end{center} + + +You may not copy, modify, sublicense, or distribute the Document except +as expressly provided for under this License. Any other attempt to +copy, modify, sublicense or distribute the Document is void, and will +automatically terminate your rights under this License. However, +parties who have received copies, or rights, from you under this +License will not have their licenses terminated so long as such +parties remain in full compliance. + + +\begin{center} +{\Large\bf 10. FUTURE REVISIONS OF THIS LICENSE} +\end{center} + + +The Free Software Foundation may publish new, revised versions +of the GNU Free Documentation License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. See +http://www.gnu.org/copyleft/. + +Each version of the License is given a distinguishing version number. +If the Document specifies that a particular numbered version of this +License "or any later version" applies to it, you have the option of +following the terms and conditions either of that specified version or +of any later version that has been published (not as a draft) by the +Free Software Foundation. 
If the Document does not specify a version +number of this License, you may choose any version ever published (not +as a draft) by the Free Software Foundation. + + +\begin{center} +{\Large\bf ADDENDUM: How to use this License for your documents} +% TODO: this is too long for table of contents +\end{center} + +To use this License in a document you have written, include a copy of +the License in the document and put the following copyright and +license notices just after the title page: + +\bigskip +\begin{quote} + Copyright \copyright YEAR YOUR NAME. + Permission is granted to copy, distribute and/or modify this document + under the terms of the GNU Free Documentation License, Version 1.2 + or any later version published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU + Free Documentation License". +\end{quote} +\bigskip + +If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts, +replace the "with...Texts." line with this: + +\bigskip +\begin{quote} + with the Invariant Sections being LIST THEIR TITLES, with the + Front-Cover Texts being LIST, and with the Back-Cover Texts being LIST. +\end{quote} +\bigskip + +If you have Invariant Sections without Cover Texts, or some other +combination of the three, merge those two alternatives to suit the +situation. + +If your document contains nontrivial examples of program code, we +recommend releasing these examples in parallel under your choice of +free software license, such as the GNU General Public License, +to permit their use in free software. 
+ +%--------------------------------------------------------------------- diff --git a/docs/manuals/en/problems/firewalls.tex b/docs/manuals/en/problems/firewalls.tex new file mode 100644 index 00000000..1e93c04e --- /dev/null +++ b/docs/manuals/en/problems/firewalls.tex @@ -0,0 +1,373 @@ +%% +%% + +\chapter{Dealing with Firewalls} +\label{FirewallsChapter} +\index[general]{Dealing with Firewalls } +\index[general]{Firewalls!Dealing with } + +If you have a firewall or a DMZ installed on your computer, you may experience +difficulties contacting one or more of the Clients to back them up. This is +especially true if you are trying to backup a Client across the Internet. + +\section{Technical Details} +\index[general]{Technical Details } +\index[general]{Details!Technical } + +If you are attempting to do this, the sequence of network events in Bacula to +do a backup are the following: + +\footnotesize +\begin{verbatim} +Console -> DIR:9101 +DIR -> SD:9103 +DIR -> FD:9102 +FD -> SD:9103 +\end{verbatim} +\normalsize + +Where hopefully it is obvious that DIR represents the Director, FD the File +daemon or client, and SD the Storage daemon. The numbers that follow those +names are the standard ports used by Bacula, and the -\gt{} represents the +left side making a connection to the right side (i.e. the right side is the +"server" or is listening on the specified port), and the left side is the +"client" that initiates the conversation. + +Note, port 9103 serves both the Director and the File daemon, each having its +own independent connection. + +If you are running {\bf iptables}, you might add something like: + +\footnotesize +\begin{verbatim} +-A FW-1-INPUT -m state --state NEW -m tcp -p tcp --dport 9101:9103 -j ACCEPT +\end{verbatim} +\normalsize + +on your server, and + +\footnotesize +\begin{verbatim} +-A FW-1-INPUT -m state --state NEW -m tcp -p tcp --dport 9102 -j ACCEPT +\end{verbatim} +\normalsize + +on your client. 
In both cases, I assume that the machine is allowed to +initiate connections on any port. If not, you will need to allow outgoing +connections on ports 9102 and 9103 on your server and 9103 on your client. +Thanks to Raymond Norton for this tip. + +\section{A Concrete Example} +\index[general]{Example!Concrete } +\index[general]{Concrete Example } + +The following discussion was originally written by +Jesse Guardiani because he has 'internal' and 'external' requiring the +Director and the Client to use different IP addresses. His original +solution was to define two different Storage resources in the Director's +conf file each pointing to the same Storage daemon but with different +IP addresses. In Bacula 1.38.x this no longer works, because Bacula makes +a one-to-one association between a Storage daemon resource and a Device (such +as an Autochanger). As a consequence, I have modified his original +text to a method that I believe will work, but is as of yet untested +(KES - July 2006). + +My bacula server is on the 192.168.1.0/24 network at IP address 192.168.1.52. +For the sake of discussion we will refer to this network as the 'internal' +network because it connects to the internet through a NAT'd firewall. We will +call the network on the public (internet) side of the NAT'd firewall the +'external' network. Also, for the sake of discussion we will call my bacula +server: + +\footnotesize +\begin{verbatim} + server.int.mydomain.tld +\end{verbatim} +\normalsize + +when a fully qualified domain name is required, or simply: + +\footnotesize +\begin{verbatim} + server +\end{verbatim} +\normalsize + +if a hostname is adequate. We will call the various bacula daemons running on +the server.int.mydomain.tld machine: + +\footnotesize +\begin{verbatim} + server-fd + server-sd + server-dir +\end{verbatim} +\normalsize + +In addition, I have two clients that I want to back up with Bacula. The first +client is on the internal network. 
Its fully qualified domain name is: + +\footnotesize +\begin{verbatim} + private1.int.mydomain.tld +\end{verbatim} +\normalsize + +And its hostname is: + +\footnotesize +\begin{verbatim} + private1 +\end{verbatim} +\normalsize + +This machine is a client and therefore runs just one bacula daemon: + +\footnotesize +\begin{verbatim} + private1-fd +\end{verbatim} +\normalsize + +The second client is on the external network. Its fully qualified domain name +is: + +\footnotesize +\begin{verbatim} + public1.mydomain.tld +\end{verbatim} +\normalsize + +And its hostname is: + +\footnotesize +\begin{verbatim} + public1 +\end{verbatim} +\normalsize + +This machine also runs just one bacula daemon: + +\footnotesize +\begin{verbatim} + public1-fd +\end{verbatim} +\normalsize + +Finally, I have a NAT firewall/gateway with two network interfaces. The first +interface is on the internal network and serves as a gateway to the internet +for all the machines attached to the internal network (For example, +server.int.mydomain.tld and private1.int.mydomain.tld). The second interface +is on the external (internet) network. The external interface has been +assigned the name: + +\footnotesize +\begin{verbatim} + firewall.mydomain.tld +\end{verbatim} +\normalsize + +Remember: + +\footnotesize +\begin{verbatim} + *.int.mydomain.tld = internal network + *.mydomain.tld = external network +\end{verbatim} +\normalsize + +\subsection{The Bacula Configuration Files for the Above} +\index[general]{Above!Bacula Configuration Files for the } +\index[general]{Bacula Configuration Files for the Above } + +server-sd manages a 4 tape AIT autoloader. All of my backups are written to +server-sd. 
I have just *one* Device resource in my server-sd.conf file:
+
+\footnotesize
+\begin{verbatim}
+Autochanger {
+ Name = "autochanger1";\
+ Device = Drive0
+ Changer Device = /dev/ch0;
+ Changer Command = "/usr/local/sbin/chio-bacula %c %o %S %a";
+}
+Device {
+ Name = Drive0
+ DriveIndex = 0
+ Media Type = AIT-1;
+ Archive Device = /dev/nrsa1;
+ Label Media = yes;
+ AutoChanger = yes;
+ AutomaticMount = yes; # when device opened, read it
+ AlwaysOpen = yes;
+ Hardware End of Medium = No
+ Fast Forward Space File = No
+ BSF at EOM = yes
+}
+\end{verbatim}
+\normalsize
+
+(note, please see
+\ilink{the Tape Testing}{FreeBSDTapes} chapter of this manual
+for important FreeBSD information.) However, unlike previously, there
+is only one Storage definition in my server-dir.conf file:
+
+\footnotesize
+\begin{verbatim}
+Storage {
+ Name = "autochanger1" # Storage device for backing up
+ Address = Storage-server
+ SDPort = 9103
+ Password = "mysecretpassword"
+ Device = "autochanger1"
+ Media Type = AIT-1
+ Autochanger = yes
+}
+\end{verbatim}
+\normalsize
+
+Note that the Storage resource uses neither of the two addresses to
+the Storage daemon -- neither server.int.mydomain.tld nor
+firewall.mydomain.tld, but instead uses the address Storage-server.
+
+What is key is that in the internal net, Storage-server is resolved
+to server.int.mydomain.tld, either with an entry in /etc/hosts, or by
+creating an appropriate DNS entry, and on the external net (the Client
+machine), Storage-server is resolved to firewall.mydomain.tld. 
+
+
+In addition to the above, I have two Client resources defined in
+server-dir.conf:
+
+\footnotesize
+\begin{verbatim}
+Client {
+ Name = private1-fd
+ Address = private1.int.mydomain.tld
+ FDPort = 9102
+ Catalog = MyCatalog
+ Password = "mysecretpassword" # password for FileDaemon
+}
+Client {
+ Name = public1-fd
+ Address = public1.mydomain.tld
+ FDPort = 9102
+ Catalog = MyCatalog
+ Password = "mysecretpassword" # password for FileDaemon
+}
+\end{verbatim}
+\normalsize
+
+And finally, to tie it all together, I have two Job resources defined in
+server-dir.conf:
+
+\footnotesize
+\begin{verbatim}
+Job {
+ Name = "Private1-Backup"
+ Type = Backup
+ Client = private1-fd
+ FileSet = "Private1"
+ Schedule = "WeeklyCycle"
+ Storage = "autochanger1"
+ Messages = Standard
+ Pool = "Weekly"
+ Write Bootstrap = "/var/db/bacula/Private1-Backup.bsr"
+ Priority = 12
+}
+Job {
+ Name = "Public1-Backup"
+ Type = Backup
+ Client = public1-fd
+ FileSet = "Public1"
+ Schedule = "WeeklyCycle"
+ Storage = "autochanger1"
+ Messages = Standard
+ Pool = "Weekly"
+ Write Bootstrap = "/var/db/bacula/Public1-Backup.bsr"
+ Priority = 13
+}
+\end{verbatim}
+\normalsize
+
+It is important to notice that because the 'Private1-Backup' Job is intended
+to back up a machine on the internal network, it resolves Storage-server
+to contact the Storage daemon via the internal net.
+On the other hand, the 'Public1-Backup' Job is intended to
+back up a machine on the external network, so it resolves Storage-server
+to contact the Storage daemon via the external net.
+
+I have left the Pool, Catalog, Messages, FileSet, Schedule, and Director
+resources out of the above server-dir.conf examples because they are not
+pertinent to the discussion.
+
+\subsection{How Does It Work?}
+\index[general]{How Does It Work? 
} +\index[general]{Work!How Does It } + +If I want to run a backup of private1.int.mydomain.tld and store that backup +using server-sd then my understanding of the order of events is this: + +\begin{enumerate} +\item I execute my Bacula 'console' command on server.int.mydomain.tld. +\item console connects to server-dir. +\item I tell console to 'run' backup Job 'Private1-Backup'. +\item console relays this command to server-dir. +\item server-dir connects to private1-fd at private1.int.mydomain.tld:9102 +\item server-dir tells private1-fd to start sending the files defined in the + 'Private1-Backup' Job's FileSet resource to the Storage resource + 'autochanger1', which we have defined in server-dir.conf as having the +address:port of Storage-server, which is mapped by DNS to server.int.mydomain.tld. +\item private1-fd connects to server.int.mydomain.tld:9103 and begins sending + files. + \end{enumerate} + +Alternatively, if I want to run a backup of public1.mydomain.tld and store +that backup using server-sd then my understanding of the order of events is +this: + +\begin{enumerate} +\item I execute my Bacula 'console' command on server.int.mydomain.tld. +\item console connects to server-dir. +\item I tell console to 'run' backup Job 'Public1-Backup'. +\item console relays this command to server-dir. +\item server-dir connects, through the NAT'd firewall, to public1-fd at + public1.mydomain.tld:9102 +\item server-dir tells public1-fd to start sending the files defined in the + 'Public1-Backup' Job's FileSet resource to the Storage resource + 'autochanger1', which we have defined in server-dir.conf as having the + same address:port as above of Storage-server, but which on this machine + is resolved to firewall.mydomain.tld:9103. +\item public1-fd connects to firewall.mydomain.tld:9103 and begins sending + files. 
+ \end{enumerate} + +\subsection{Important Note} +\index[general]{Important Note } +\index[general]{Note!Important } + +In order for the above 'Public1-Backup' Job to succeed, +firewall.mydomain.tld:9103 MUST be forwarded using the firewall's +configuration software to server.int.mydomain.tld:9103. Some firewalls call +this 'Server Publication'. Others may call it 'Port Forwarding'. + +\subsection{Firewall Problems} +\index[general]{Firewall Problems} +\index[general]{Problems!Firewalls} +Either a firewall or a router may decide to timeout and terminate +open connections if they are not active for a short time. By Internet +standards the period should be two hours, and should be indefinitely +extended if KEEPALIVE is set as is the case by Bacula. If your firewall +or router does not respect these rules, you may find Bacula connections +terminated. In that case, the first thing to try is turning on the +{\bf Heart Beat Interval} both in the File daemon and the Storage daemon +and set an interval of say five minutes. + +Also, if you have denial of service rate limiting in your firewall, this +too can cause Bacula disconnects since Bacula can at times use very high +access rates. To avoid this, you should implement default accept +rules for the Bacula ports involved before the rate limiting rules. + +Finally, if you have a Windows machine, it will most likely by default +disallow connections to the Bacula Windows File daemon. See the +Windows chapter of this manual for additional details. diff --git a/docs/manuals/en/problems/fix_tex.pl b/docs/manuals/en/problems/fix_tex.pl new file mode 100755 index 00000000..98657576 --- /dev/null +++ b/docs/manuals/en/problems/fix_tex.pl @@ -0,0 +1,184 @@ +#!/usr/bin/perl -w +# Fixes various things within tex files. + +use strict; + +my %args; + + +sub get_includes { + # Get a list of include files from the top-level tex file. 
+ my (@list,$file); + + foreach my $filename (@_) { + $filename or next; + # Start with the top-level latex file so it gets checked too. + push (@list,$filename); + + # Get a list of all the html files in the directory. + open IF,"<$filename" or die "Cannot open input file $filename"; + while () { + chomp; + push @list,"$1.tex" if (/\\include\{(.*?)\}/); + } + + close IF; + } + return @list; +} + +sub convert_files { + my (@files) = @_; + my ($linecnt,$filedata,$output,$itemcnt,$indentcnt,$cnt); + + $cnt = 0; + foreach my $file (@files) { + # Open the file and load the whole thing into $filedata. A bit wasteful but + # easier to deal with, and we don't have a problem with speed here. + $filedata = ""; + open IF,"<$file" or die "Cannot open input file $file"; + while () { + $filedata .= $_; + } + close IF; + + # We look for a line that starts with \item, and indent the two next lines (if not blank) + # by three spaces. + my $linecnt = 3; + $indentcnt = 0; + $output = ""; + # Process a line at a time. + foreach (split(/\n/,$filedata)) { + $_ .= "\n"; # Put back the return. + # If this line is less than the third line past the \item command, + # and the line isn't blank and doesn't start with whitespace + # add three spaces to the start of the line. Keep track of the number + # of lines changed. + if ($linecnt < 3 and !/^\\item/) { + if (/^[^\n\s]/) { + $output .= " " . $_; + $indentcnt++; + } else { + $output .= $_; + } + $linecnt++; + } else { + $linecnt = 3; + $output .= $_; + } + /^\\item / and $linecnt = 1; + } + + + # This is an item line. We need to process it too. If inside a \begin{description} environment, convert + # \item {\bf xxx} to \item [xxx] or \item [{xxx}] (if xxx contains '[' or ']'. 
+ $itemcnt = 0; + $filedata = $output; + $output = ""; + my ($before,$descrip,$this,$between); + + # Find any \begin{description} environment + while ($filedata =~ /(\\begin[\s\n]*\{[\s\n]*description[\s\n]*\})(.*?)(\\end[\s\n]*\{[\s\n]*description[\s\n]*\})/s) { + $output .= $` . $1; + $filedata = $3 . $'; + $descrip = $2; + + # Search for \item {\bf xxx} + while ($descrip =~ /\\item[\s\n]*\{[\s\n]*\\bf[\s\n]*/s) { + $descrip = $'; + $output .= $`; + ($between,$descrip) = find_matching_brace($descrip); + if (!$descrip) { + $linecnt = $output =~ tr/\n/\n/; + print STDERR "Missing matching curly brace at line $linecnt in $file\n" if (!$descrip); + } + + # Now do the replacement. + $between = '{' . $between . '}' if ($between =~ /\[|\]/); + $output .= "\\item \[$between\]"; + $itemcnt++; + } + $output .= $descrip; + } + $output .= $filedata; + + # If any hyphens or \item commnads were converted, save the file. + if ($indentcnt or $itemcnt) { + open OF,">$file" or die "Cannot open output file $file"; + print OF $output; + close OF; + print "$indentcnt indent", ($indentcnt == 1) ? "" : "s"," added in $file\n"; + print "$itemcnt item", ($itemcnt == 1) ? "" : "s"," Changed in $file\n"; + } + + $cnt += $indentcnt + $itemcnt; + } + return $cnt; +} + +sub find_matching_brace { + # Finds text up to the next matching brace. Assumes that the input text doesn't contain + # the opening brace, but we want to find text up to a matching closing one. + # Returns the text between the matching braces, followed by the rest of the text following + # (which does not include the matching brace). + # + my $str = shift; + my ($this,$temp); + my $cnt = 1; + + while ($cnt) { + # Ignore verbatim constructs involving curly braces, or if the character preceding + # the curly brace is a backslash. 
+ if ($str =~ /\\verb\*?\{.*?\{|\\verb\*?\}.*?\}|\{|\}/s) { + $this .= $`; + $str = $'; + $temp = $&; + + if ((substr($this,-1,1) eq '\\') or + $temp =~ /^\\verb/) { + $this .= $temp; + next; + } + + $cnt += ($temp eq '{') ? 1 : -1; + # If this isn't the matching curly brace ($cnt > 0), include the brace. + $this .= $temp if ($cnt); + } else { + # No matching curly brace found. + return ($this . $str,''); + } + } + return ($this,$str); +} + +sub check_arguments { + # Checks command-line arguments for ones starting with -- puts them into + # a hash called %args and removes them from @ARGV. + my $args = shift; + my $i; + + for ($i = 0; $i < $#ARGV; $i++) { + $ARGV[$i] =~ /^\-+/ or next; + $ARGV[$i] =~ s/^\-+//; + $args{$ARGV[$i]} = ""; + delete ($ARGV[$i]); + + } +} + +################################################################## +# MAIN #### +################################################################## + +my @includes; +my $cnt; + +check_arguments(\%args); +die "No Files given to Check\n" if ($#ARGV < 0); + +# Examine the file pointed to by the first argument to get a list of +# includes to test. +@includes = get_includes(@ARGV); + +$cnt = convert_files(@includes); +print "No lines changed\n" unless $cnt; diff --git a/docs/manuals/en/problems/index.perl b/docs/manuals/en/problems/index.perl new file mode 100644 index 00000000..bc4e1b60 --- /dev/null +++ b/docs/manuals/en/problems/index.perl @@ -0,0 +1,564 @@ +# This module does multiple indices, supporting the style of the LaTex 'index' +# package. + +# Version Information: +# 16-Feb-2005 -- Original Creation. Karl E. Cunningham +# 14-Mar-2005 -- Clarified and Consolodated some of the code. +# Changed to smoothly handle single and multiple indices. + +# Two LaTeX index formats are supported... +# --- SINGLE INDEX --- +# \usepackage{makeidx} +# \makeindex +# \index{entry1} +# \index{entry2} +# \index{entry3} +# ... 
+# \printindex +# +# --- MULTIPLE INDICES --- +# +# \usepackage{makeidx} +# \usepackage{index} +# \makeindex -- latex2html doesn't care but LaTeX does. +# \newindex{ref1}{ext1}{ext2}{title1} +# \newindex{ref2}{ext1}{ext2}{title2} +# \newindex{ref3}{ext1}{ext2}{title3} +# \index[ref1]{entry1} +# \index[ref1]{entry2} +# \index[ref3]{entry3} +# \index[ref2]{entry4} +# \index{entry5} +# \index[ref3]{entry6} +# ... +# \printindex[ref1] +# \printindex[ref2] +# \printindex[ref3] +# \printindex +# ___________________ +# +# For the multiple-index style, each index is identified by the ref argument to \newindex, \index, +# and \printindex. A default index is allowed, which is indicated by omitting the optional +# argument. The default index does not require a \newindex command. As \index commands +# are encountered, their entries are stored according +# to the ref argument. When the \printindex command is encountered, the stored index +# entries for that argument are retrieved and printed. The title for each index is taken +# from the last argument in the \newindex command. +# While processing \index and \printindex commands, if no argument is given the index entries +# are built into a default index. The title of the default index is simply "Index". +# This makes the difference between single- and multiple-index processing trivial. +# +# Another method can be used by omitting the \printindex command and just using \include to +# pull in index files created by the makeindex program. These files will start with +# \begin{theindex}. This command is used to determine where to print the index. Using this +# approach, the indices will be output in the same order as the newindex commands were +# originally found (see below). Using a combination of \printindex and \include{indexfile} has not +# been tested and may produce undesireable results. +# +# The index data are stored in a hash for later sorting and output. 
As \printindex +# commands are handled, the order in which they were found in the tex filea is saved, +# associated with the ref argument to \printindex. +# +# We use the original %index hash to store the index data into. We append a \002 followed by the +# name of the index to isolate the entries in different indices from each other. This is necessary +# so that different indices can have entries with the same name. For the default index, the \002 is +# appended without the name. +# +# Since the index order in the output cannot be determined if the \include{indexfile} +# command is used, the order will be assumed from the order in which the \newindex +# commands were originally seen in the TeX files. This order is saved as well as the +# order determined from any printindex{ref} commands. If \printindex commnads are used +# to specify the index output, that order will be used. If the \include{idxfile} command +# is used, the order of the original newindex commands will be used. In this case the +# default index will be printed last since it doesn't have a corresponding \newindex +# command and its order cannot be determined. Mixing \printindex and \include{idxfile} +# commands in the same file is likely to produce less than satisfactory results. +# +# +# The hash containing index data is named %indices. It contains the following data: +#{ +# 'title' => { +# $ref1 => $indextitle , +# $ref2 => $indextitle , +# ... +# }, +# 'newcmdorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index. +# 'printindorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index. +#} + + +# Globals to handle multiple indices. +my %indices; + +# This tells the system to use up to 7 words in index entries. +$WORDS_IN_INDEX = 10; + +# KEC 2-18-05 +# Handles the \newindex command. This is called if the \newindex command is +# encountered in the LaTex source. Gets the index ref and title from the arguments. 
+# Saves the index ref and title. +# Note that we are called once to handle multiple \newindex commands that are +# newline-separated. +sub do_cmd_newindex { + my $data = shift; + # The data is sent to us as fields delimited by their ID #'s. We extract the + # fields. + foreach my $line (split("\n",$data)) { + my @fields = split (/(?:\<\#\d+?\#\>)+/,$line); + + # The index name and title are the second and fourth fields in the data. + if ($line =~ /^ \001 + # @ -> \002 + # | -> \003 + $* = 1; $str =~ s/\n\s*/ /g; $* = 0; # remove any newlines + # protect \001 occurring with images + $str =~ s/\001/\016/g; # 0x1 to 0xF + $str =~ s/\\\\/\011/g; # Double backslash -> 0xB + $str =~ s/\\;SPMquot;/\012/g; # \;SPMquot; -> 0xC + $str =~ s/;SPMquot;!/\013/g; # ;SPMquot; -> 0xD + $str =~ s/!/\001/g; # Exclamation point -> 0x1 + $str =~ s/\013/!/g; # 0xD -> Exclaimation point + $str =~ s/;SPMquot;@/\015/g; # ;SPMquot;@ to 0xF + $str =~ s/@/\002/g; # At sign -> 0x2 + $str =~ s/\015/@/g; # 0xF to At sign + $str =~ s/;SPMquot;\|/\017/g; # ;SMPquot;| to 0x11 + $str =~ s/\|/\003/g; # Vertical line to 0x3 + $str =~ s/\017/|/g; # 0x11 to vertical line + $str =~ s/;SPMquot;(.)/\1/g; # ;SPMquot; -> whatever the next character is + $str =~ s/\012/;SPMquot;/g; # 0x12 to ;SPMquot; + $str =~ s/\011/\\\\/g; # 0x11 to double backslash + local($key_part, $pageref) = split("\003", $str, 2); + + # For any keys of the form: blablabla!blablabla, which want to be split at the + # exclamation point, replace the ! with a comma and a space. We don't do it + # that way for this index. + $key_part =~ s/\001/, /g; + local(@keys) = split("\001", $key_part); + # If TITLE is not yet available use $before. 
+ $TITLE = $saved_title if (($saved_title)&&(!($TITLE)||($TITLE eq $default_title))); + $TITLE = $before unless $TITLE; + # Save the reference + local($words) = ''; + if ($SHOW_SECTION_NUMBERS) { $words = &make_idxnum; } + elsif ($SHORT_INDEX) { $words = &make_shortidxname; } + else { $words = &make_idxname; } + local($super_key) = ''; + local($sort_key, $printable_key, $cur_key); + foreach $key (@keys) { + $key =~ s/\016/\001/g; # revert protected \001s + ($sort_key, $printable_key) = split("\002", $key); + # + # RRM: 16 May 1996 + # any \label in the printable-key will have already + # created a label where the \index occurred. + # This has to be removed, so that the desired label + # will be found on the Index page instead. + # + if ($printable_key =~ /tex2html_anchor_mark/ ) { + $printable_key =~ s/><\/A>$cross_ref_mark/ + $printable_key =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/ + do { ($label,$id) = ($1,$2); + $ref_label = $external_labels{$label} unless + ($ref_label = $ref_files{$label}); + '"' . "$ref_label#$label" . '">' . + &get_ref_mark($label,$id)} + /geo; + } + $printable_key =~ s/<\#[^\#>]*\#>//go; + #RRM + # recognise \char combinations, for a \backslash + # + $printable_key =~ s/\&\#;\'134/\\/g; # restore \\s + $printable_key =~ s/\&\#;\`
/\\/g; # ditto + $printable_key =~ s/\&\#;*SPMquot;92/\\/g; # ditto + # + # $sort_key .= "@$printable_key" if !($printable_key); # RRM + $sort_key .= "@$printable_key" if !($sort_key); # RRM + $sort_key =~ tr/A-Z/a-z/; + if ($super_key) { + $cur_key = $super_key . "\001" . $sort_key; + $sub_index{$super_key} .= $cur_key . "\004"; + } else { + $cur_key = $sort_key; + } + + # Append the $index_name to the current key with a \002 delimiter. This will + # allow the same index entry to appear in more than one index. + $index_key = $cur_key . "\002$index_name"; + + $index{$index_key} .= ""; + + # + # RRM, 15 June 1996 + # if there is no printable key, but one is known from + # a previous index-entry, then use it. + # + if (!($printable_key) && ($printable_key{$index_key})) + { $printable_key = $printable_key{$index_key}; } +# if (!($printable_key) && ($printable_key{$cur_key})) +# { $printable_key = $printable_key{$cur_key}; } + # + # do not overwrite the printable_key if it contains an anchor + # + if (!($printable_key{$index_key} =~ /tex2html_anchor_mark/ )) + { $printable_key{$index_key} = $printable_key || $key; } +# if (!($printable_key{$cur_key} =~ /tex2html_anchor_mark/ )) +# { $printable_key{$cur_key} = $printable_key || $key; } + + $super_key = $cur_key; + } + # + # RRM + # page-ranges, from |( and |) and |see + # + if ($pageref) { + if ($pageref eq "\(" ) { + $pageref = ''; + $next .= " from "; + } elsif ($pageref eq "\)" ) { + $pageref = ''; + local($next) = $index{$index_key}; +# local($next) = $index{$cur_key}; + # $next =~ s/[\|] *$//; + $next =~ s/(\n )?\| $//; + $index{$index_key} = "$next to "; +# $index{$cur_key} = "$next to "; + } + } + + if ($pageref) { + $pageref =~ s/\s*$//g; # remove trailing spaces + if (!$pageref) { $pageref = ' ' } + $pageref =~ s/see/see <\/i> /g; + # + # RRM: 27 Dec 1996 + # check if $pageref corresponds to a style command. + # If so, apply it to the $words. 
+ # + local($tmp) = "do_cmd_$pageref"; + if (defined &$tmp) { + $words = &$tmp("<#0#>$words<#0#>"); + $words =~ s/<\#[^\#]*\#>//go; + $pageref = ''; + } + } + # + # RRM: 25 May 1996 + # any \label in the pageref section will have already + # created a label where the \index occurred. + # This has to be removed, so that the desired label + # will be found on the Index page instead. + # + if ($pageref) { + if ($pageref =~ /tex2html_anchor_mark/ ) { + $pageref =~ s/><\/A>
$cross_ref_mark/ + $pageref =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/ + do { ($label,$id) = ($1,$2); + $ref_files{$label} = ''; # ???? RRM + if ($index_labels{$label}) { $ref_label = ''; } + else { $ref_label = $external_labels{$label} + unless ($ref_label = $ref_files{$label}); + } + '"' . "$ref_label#$label" . '">' . &get_ref_mark($label,$id)}/geo; + } + $pageref =~ s/<\#[^\#>]*\#>//go; + + if ($pageref eq ' ') { $index{$index_key}='@'; } + else { $index{$index_key} .= $pageref . "\n | "; } + } else { + local($thisref) = &make_named_href('',"$CURRENT_FILE#$br_id",$words); + $thisref =~ s/\n//g; + $index{$index_key} .= $thisref."\n | "; + } + #print "\nREF: $sort_key : $index_key :$index{$index_key}"; + + #join('',"$anchor_invisible_mark<\/A>",$_); + + "$anchor_invisible_mark<\/A>"; +} + + +# KEC. -- Copied from makeidx.perl, then modified to do multiple indices. +# Feeds the index entries to the output. This is called for each index to be built. +# +# Generates a list of lookup keys for index entries, from both %printable_keys +# and %index keys. +# Sorts the keys according to index-sorting rules. +# Removes keys with a 0x01 token. (duplicates?) +# Builds a string to go to the index file. +# Adds the index entries to the string if they belong in this index. +# Keeps track of which index is being worked on, so only the proper entries +# are included. +# Places the index just built in to the output at the proper place. +{ my $index_number = 0; +sub add_real_idx { + print "\nDoing the index ... Index Number $index_number\n"; + local($key, @keys, $next, $index, $old_key, $old_html); + my ($idx_ref,$keyref); + # RRM, 15.6.96: index constructed from %printable_key, not %index + @keys = keys %printable_key; + + while (/$idx_mark/) { + # Get the index reference from what follows the $idx_mark and + # remove it from the string. 
+ s/$idxmark\002(.*?)\002/$idxmark/; + $idx_ref = $1; + $index = ''; + # include non- makeidx index-entries + foreach $key (keys %index) { + next if $printable_key{$key}; + $old_key = $key; + if ($key =~ s/###(.*)$//) { + next if $printable_key{$key}; + push (@keys, $key); + $printable_key{$key} = $key; + if ($index{$old_key} =~ /HREF="([^"]*)"/i) { + $old_html = $1; + $old_html =~ /$dd?([^#\Q$dd\E]*)#/; + $old_html = $1; + } else { $old_html = '' } + $index{$key} = $index{$old_key} . $old_html."\n | "; + }; + } + @keys = sort makeidx_keysort @keys; + @keys = grep(!/\001/, @keys); + my $cnt = 0; + foreach $key (@keys) { + my ($keyref) = $key =~ /.*\002(.*)/; + next unless ($idx_ref eq $keyref); # KEC. + $index .= &add_idx_key($key); + $cnt++; + } + print "$cnt Index Entries Added\n"; + $index = '
'.$index unless ($index =~ /^\s*/); + $index_number++; # KEC. + if ($SHORT_INDEX) { + print "(compact version with Legend)"; + local($num) = ( $index =~ s/\ 50 ) { + s/$idx_mark/$preindex
\n$index\n<\/DL>$preindex/o; + } else { + s/$idx_mark/$preindex
\n$index\n<\/DL>/o; + } + } else { + s/$idx_mark/
\n$index\n<\/DL>/o; } + } +} +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# The bibliography and the index should be treated as separate sections +# in their own HTML files. The \bibliography{} command acts as a sectioning command +# that has the desired effect. But when the bibliography is constructed +# manually using the thebibliography environment, or when using the +# theindex environment it is not possible to use the normal sectioning +# mechanism. This subroutine inserts a \bibliography{} or a dummy +# \textohtmlindex command just before the appropriate environments +# to force sectioning. +sub add_bbl_and_idx_dummy_commands { + local($id) = $global{'max_id'}; + + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$bbl_cnt++; $1/eg; + ## if ($bbl_cnt == 1) { + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$id++; "\\bibliography$O$id$C$O$id$C $1"/geo; + #} + $global{'max_id'} = $id; + # KEC. Modified to global substitution to place multiple index tokens. + s/[\\]begin\s*($O\d+$C)\s*theindex/\\textohtmlindex$1/go; + # KEC. Modified to pick up the optional argument to \printindex + s/[\\]printindex\s*(\[.*?\])?/ + do { (defined $1) ? "\\textohtmlindex $1" : "\\textohtmlindex []"; } /ego; + &lib_add_bbl_and_idx_dummy_commands() if defined(&lib_add_bbl_and_idx_dummy_commands); +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# For each textohtmlindex mark found, determine the index titles and headers. +# We place the index ref in the header so the proper index can be generated later. +# For the default index, the index ref is blank. +# +# One problem is that this routine is called twice.. Once for processing the +# command as originally seen, and once for processing the command when +# doing the name for the index file. We can detect that by looking at the +# id numbers (or ref) surrounding the \theindex command, and not incrementing +# index_number unless a new id (or ref) is seen. 
This has the side effect of +# having to unconventionally start the index_number at -1. But it works. +# +# Gets the title from the list of indices. +# If this is the first index, save the title in $first_idx_file. This is what's referenced +# in the navigation buttons. +# Increment the index_number for next time. +# If the indexname command is defined or a newcommand defined for indexname, do it. +# Save the index TITLE in the toc +# Save the first_idx_file into the idxfile. This goes into the nav buttons. +# Build index_labels if needed. +# Create the index headings and put them in the output stream. + +{ my $index_number = 0; # Will be incremented before use. + my $first_idx_file; # Static + my $no_increment = 0; + +sub do_cmd_textohtmlindex { + local($_) = @_; + my ($idxref,$idxnum,$index_name); + + # We get called from make_name with the first argument = "\001noincrement". This is a sign + # to not increment $index_number the next time we are called. We get called twice, once + # my make_name and once by process_command. Unfortunately, make_name calls us just to set the name + # but doesn't use the result so we get called a second time by process_command. This works fine + # except for cases where there are multiple indices except if they aren't named, which is the case + # when the index is inserted by an include command in latex. In these cases we are only able to use + # the index number to decide which index to draw from, and we don't know how to increment that index + # number if we get called a variable number of times for the same index, as is the case between + # making html (one output file) and web (multiple output files) output formats. + if (/\001noincrement/) { + $no_increment = 1; + return; + } + + # Remove (but save) the index reference + s/^\s*\[(.*?)\]/{$idxref = $1; "";}/e; + + # If we have an $idxref, the index name was specified. In this case, we have all the + # information we need to carry on. 
Otherwise, we need to get the idxref + # from the $index_number and set the name to "Index". + if ($idxref) { + $index_name = $indices{'title'}{$idxref}; + } else { + if (defined ($idxref = $indices{'newcmdorder'}->[$index_number])) { + $index_name = $indices{'title'}{$idxref}; + } else { + $idxref = ''; + $index_name = "Index"; + } + } + + $idx_title = "Index"; # The name displayed in the nav bar text. + + # Only set $idxfile if we are at the first index. This will point the + # navigation panel to the first index file rather than the last. + $first_idx_file = $CURRENT_FILE if ($index_number == 0); + $idxfile = $first_idx_file; # Pointer for the Index button in the nav bar. + $toc_sec_title = $index_name; # Index link text in the toc. + $TITLE = $toc_sec_title; # Title for this index, from which its filename is built. + if (%index_labels) { &make_index_labels(); } + if (($SHORT_INDEX) && (%index_segment)) { &make_preindex(); } + else { $preindex = ''; } + local $idx_head = $section_headings{'textohtmlindex'}; + local($heading) = join('' + , &make_section_heading($TITLE, $idx_head) + , $idx_mark, "\002", $idxref, "\002" ); + local($pre,$post) = &minimize_open_tags($heading); + $index_number++ unless ($no_increment); + $no_increment = 0; + join('',"
\n" , $pre, $_); +} +} + +# Returns an index key, given the key passed as the first argument. +# Not modified for multiple indices. +sub add_idx_key { + local($key) = @_; + local($index, $next); + if (($index{$key} eq '@' )&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { $index .= "

\n
".&print_key."\n
"; } + else { $index .= "

\n
".&print_key."\n
"; } + } elsif (($index{$key})&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { + $next = "
".&print_key."\n : ". &print_idx_links; + } else { + $next = "
".&print_key."\n
". &print_idx_links; + } + $index .= $next."\n"; + $index_printed{$key} = 1; + } + + if ($sub_index{$key}) { + local($subkey, @subkeys, $subnext, $subindex); + @subkeys = sort(split("\004", $sub_index{$key})); + if ($SHORT_INDEX) { + $index .= "
".&print_key unless $index_printed{$key}; + $index .= "
\n"; + } else { + $index .= "
".&print_key."\n
" unless $index_printed{$key}; + $index .= "
\n"; + } + foreach $subkey (@subkeys) { + $index .= &add_sub_idx_key($subkey) unless ($index_printed{$subkey}); + } + $index .= "
\n"; + } + return $index; +} + +1; # Must be present as the last line. diff --git a/docs/manuals/en/problems/kaboom.tex b/docs/manuals/en/problems/kaboom.tex new file mode 100644 index 00000000..a4e5bc57 --- /dev/null +++ b/docs/manuals/en/problems/kaboom.tex @@ -0,0 +1,233 @@ +%% +%% + +\chapter{What To Do When Bacula Crashes (Kaboom)} +\label{KaboomChapter} +\index[general]{Kaboom!What To Do When Bacula Crashes } +\index[general]{What To Do When Bacula Crashes (Kaboom) } + +If you are running on a Linux system, and you have a set of working +configuration files, it is very unlikely that {\bf Bacula} will crash. As with +all software, however, it is inevitable that someday, it may crash, +particularly if you are running on another operating system or using a new or +unusual feature. + +This chapter explains what you should do if one of the three {\bf Bacula} +daemons (Director, File, Storage) crashes. When we speak of crashing, we +mean that the daemon terminates abnormally because of an error. There are +many cases where Bacula detects errors (such as PIPE errors) and will fail +a job. These are not considered crashes. In addition, under certain +conditions, Bacula will detect a fatal in the configuration, such as +lack of permission to read/write the working directory. In that case, +Bacula will force itself to crash with a SEGFAULT. However, before +crashing, Bacula will normally display a message indicating why. +For more details, please read on. + + +\section{Traceback} +\index[general]{Traceback} + +Each of the three Bacula daemons has a built-in exception handler which, in +case of an error, will attempt to produce a traceback. If successful the +traceback will be emailed to you. + +For this to work, you need to ensure that a few things are setup correctly on +your system: + +\begin{enumerate} +\item You must have a version of Bacula built with debug information turned + on and not stripped of debugging symbols. 
+ +\item You must have an installed copy of {\bf gdb} (the GNU debugger), and it + must be on {\bf Bacula's} path. On some systems such as Solaris, {\bf + gdb} may be replaced by {\bf dbx}. + +\item The Bacula installed script file {\bf btraceback} must be in the same + directory as the daemon which dies, and it must be marked as executable. + +\item The script file {\bf btraceback.gdb} must have the correct path to it + specified in the {\bf btraceback} file. + +\item You must have a {\bf mail} program which is on {\bf Bacula's} path. + By default, this {\bf mail} program is set to {\bf bsmtp}, so it must + be correctly configured. + +\item If you run either the Director or Storage daemon under a non-root + userid, you will most likely need to modify the {\bf btraceback} file + to do something like {\bf sudo} (raise to root priority) for the + call to {\bf gdb} so that it has the proper permissions to debug + Bacula. +\end{enumerate} + +If all the above conditions are met, the daemon that crashes will produce a +traceback report and email it to you. If the above conditions are not true, +you can either run the debugger by hand as described below, or you may be able +to correct the problems by editing the {\bf btraceback} file. I recommend not +spending too much time on trying to get the traceback to work as it can be +very difficult. + +The changes that might be needed are to add a correct path to the {\bf gdb} +program, correct the path to the {\bf btraceback.gdb} file, change the {\bf +mail} program or its path, or change your email address. The key line in the +{\bf btraceback} file is: + +\footnotesize +\begin{verbatim} +gdb -quiet -batch -x /home/kern/bacula/bin/btraceback.gdb \ + $1 $2 2>\&1 | bsmtp -s "Bacula traceback" your-address@xxx.com +\end{verbatim} +\normalsize + +Since each daemon has the same traceback code, a single btraceback file is +sufficient if you are running more than one daemon on a machine. 
+ +\section{Testing The Traceback} +\index[general]{Traceback!Testing The } +\index[general]{Testing The Traceback } + +To "manually" test the traceback feature, you simply start {\bf Bacula} then +obtain the {\bf PID} of the main daemon thread (there are multiple threads). +The output produced here will look different depending on what OS and what +version of the kernel you are running. +Unfortunately, the output had to be split to fit on this page: + +\footnotesize +\begin{verbatim} +[kern@rufus kern]$ ps fax --columns 132 | grep bacula-dir + 2103 ? S 0:00 /home/kern/bacula/k/src/dird/bacula-dir -c + /home/kern/bacula/k/src/dird/dird.conf + 2104 ? S 0:00 \_ /home/kern/bacula/k/src/dird/bacula-dir -c + /home/kern/bacula/k/src/dird/dird.conf + 2106 ? S 0:00 \_ /home/kern/bacula/k/src/dird/bacula-dir -c + /home/kern/bacula/k/src/dird/dird.conf + 2105 ? S 0:00 \_ /home/kern/bacula/k/src/dird/bacula-dir -c + /home/kern/bacula/k/src/dird/dird.conf +\end{verbatim} +\normalsize + +which in this case is 2103. Then while Bacula is running, you call the program +giving it the path to the Bacula executable and the {\bf PID}. In this case, +it is: + +\footnotesize +\begin{verbatim} +./btraceback /home/kern/bacula/k/src/dird 2103 +\end{verbatim} +\normalsize + +It should produce an email showing you the current state of the daemon (in +this case the Director), and then exit leaving {\bf Bacula} running as if +nothing happened. If this is not the case, you will need to correct the +problem by modifying the {\bf btraceback} script. + +Typical problems might be that {\bf gdb} or {\bf dbx} for Solaris is not on +the default path. Fix this by specifying the full path to it in the {\bf +btraceback} file. Another common problem is that you haven't modified the +script so that the {\bf bsmtp} program has an appropriate smtp server or +the proper syntax for your smtp server. If you use the {\bf mail} program +and it is not on the default path, it will also fail. 
On some systems, it +is preferable to use {\bf Mail} rather than {\bf mail}. + +\section{Getting A Traceback On Other Systems} +\index[general]{Getting A Traceback On Other Systems} +\index[general]{Systems!Getting A Traceback On Other} + +It should be possible to produce a similar traceback on systems other than +Linux, either using {\bf gdb} or some other debugger. Solaris with {\bf dbx} +loaded works quite fine. On other systems, you will need to modify the {\bf +btraceback} program to invoke the correct debugger, and possibly correct the +{\bf btraceback.gdb} script to have appropriate commands for your debugger. If +anyone succeeds in making this work with another debugger, please send us a +copy of what you modified. Please keep in mind that for any debugger to +work, it will most likely need to run as root, so you may need to modify +the {\bf btraceback} script accordingly. + +\label{ManuallyDebugging} +\section{Manually Running Bacula Under The Debugger} +\index[general]{Manually Running Bacula Under The Debugger} +\index[general]{Debugger!Manually Running Bacula Under The} + +If for some reason you cannot get the automatic traceback, or if you want to +interactively examine the variable contents after a crash, you can run Bacula +under the debugger. Assuming you want to run the Storage daemon under the +debugger (the technique is the same for the other daemons, only the name +changes), you would do the following: + +\begin{enumerate} +\item Start the Director and the File daemon. If the Storage daemon also + starts, you will need to find its PID as shown above (ps fax | grep + bacula-sd) and kill it with a command like the following: + +\footnotesize +\begin{verbatim} + kill -15 PID +\end{verbatim} +\normalsize + +where you replace {\bf PID} by the actual value. + +\item At this point, the Director and the File daemon should be running but + the Storage daemon should not. 
+ +\item cd to the directory containing the Storage daemon + +\item Start the Storage daemon under the debugger: + + \footnotesize +\begin{verbatim} + gdb ./bacula-sd +\end{verbatim} +\normalsize + +\item Run the Storage daemon: + + \footnotesize +\begin{verbatim} + run -s -f -c ./bacula-sd.conf +\end{verbatim} +\normalsize + +You may replace the {\bf ./bacula-sd.conf} with the full path to the Storage +daemon's configuration file. + +\item At this point, Bacula will be fully operational. + +\item In another shell command window, start the Console program and do what + is necessary to cause Bacula to die. + +\item When Bacula crashes, the {\bf gdb} shell window will become active and + {\bf gdb} will show you the error that occurred. + +\item To get a general traceback of all threads, issue the following command: + + +\footnotesize +\begin{verbatim} + thread apply all bt +\end{verbatim} +\normalsize + +After that you can issue any debugging command. +\end{enumerate} + +\section{Getting Debug Output from Bacula} +\index[general]{Getting Debug Output from Bacula } +Each of the daemons normally has debug compiled into the program, but +disabled. There are two ways to enable the debug output. One is to add the +{\bf -d nnn} option on the command line when starting the debugger. The {\bf +nnn} is the debug level, and generally anything between 50 and 200 is +reasonable. The higher the number, the more output is produced. The output is +written to standard output. + +The second way of getting debug output is to dynamically turn it on using the +Console using the {\bf setdebug} command. The full syntax of the command is: + +\footnotesize +\begin{verbatim} + setdebug level=nnn client=client-name storage=storage-name dir +\end{verbatim} +\normalsize + +If none of the options are given, the command will prompt you. You can +selectively turn on/off debugging in any or all the daemons (i.e. it is not +necessary to specify all the components of the above command). 
diff --git a/docs/manuals/en/problems/latex2html-init.pl b/docs/manuals/en/problems/latex2html-init.pl new file mode 100644 index 00000000..14b5c319 --- /dev/null +++ b/docs/manuals/en/problems/latex2html-init.pl @@ -0,0 +1,10 @@ +# This file serves as a place to put initialization code and constants to +# affect the behavior of latex2html for generating the bacula manuals. + +# $LINKPOINT specifies what filename to use to link to when creating +# index.html. Not that this is a hard link. +$LINKPOINT='"$OVERALL_TITLE"'; + + +# The following must be the last line of this file. +1; diff --git a/docs/manuals/en/problems/problems.tex b/docs/manuals/en/problems/problems.tex new file mode 100644 index 00000000..b6a1d5ba --- /dev/null +++ b/docs/manuals/en/problems/problems.tex @@ -0,0 +1,81 @@ +%% +%% +%% The following characters must be preceded by a backslash +%% to be entered as printable characters: +%% +%% # $ % & ~ _ ^ \ { } +%% + +\documentclass[11pt,a4paper]{book} +\usepackage{html} +\usepackage{float} +\usepackage{graphicx} +\usepackage{bacula} +\usepackage{longtable} +\usepackage{makeidx} +\usepackage{index} +\usepackage{setspace} +\usepackage{hyperref} +\usepackage{url} + + +\makeindex +\newindex{general}{idx}{ind}{General Index} + +\sloppy + +\begin{document} +\sloppy + +\newfont{\bighead}{cmr17 at 36pt} +\parskip 10pt +\parindent 0pt + +\title{\includegraphics{./bacula-logo.eps} \\ \bigskip + \Huge{Bacula Problem Resolution Guide} + \begin{center} + \large{It comes in the night and sucks + the essence from your computers. } + \end{center} +} + + +\author{Kern Sibbald} +\date{\vspace{1.0in}\today \\ + This manual documents Bacula version \input{version} \\ + \vspace{0.2in} + Copyright \copyright 1999-2007, Free Software Foundation Europe + e.V. 
\\ + \vspace{0.2in} + Permission is granted to copy, distribute and/or modify this document under the terms of the + GNU Free Documentation License, Version 1.2 published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU Free Documentation License". +} + +\maketitle + +\clearpage +\tableofcontents +\clearpage +\listoffigures +\clearpage +\listoftables +\clearpage + +\include{faq} +\include{tips} +\include{tapetesting} +\include{firewalls} +\include{kaboom} +\include{fdl} + + +% The following line tells link_resolver.pl to not include these files: +% nolinks developersi baculai-dir baculai-fd baculai-sd baculai-console baculai-main + +% pull in the index +\clearpage +\printindex[general] + +\end{document} diff --git a/docs/manuals/en/problems/rpm-faq.tex b/docs/manuals/en/problems/rpm-faq.tex new file mode 100644 index 00000000..1e37cc59 --- /dev/null +++ b/docs/manuals/en/problems/rpm-faq.tex @@ -0,0 +1,394 @@ +%% +%% + +\chapter{Bacula RPM Packaging FAQ} +\label{RpmFaqChapter} +\index[general]{FAQ!Bacula\textsuperscript{\textregistered} - RPM Packaging } +\index[general]{Bacula\textsuperscript{\textregistered} - RPM Packaging FAQ } + +\begin{enumerate} +\item + \ilink{How do I build Bacula for platform xxx?}{faq1} +\item + \ilink{How do I control which database support gets built?}{faq2} + +\item + \ilink{What other defines are used?}{faq3} +\item + \ilink{I'm getting errors about not having permission when I try to build the + packages. Do I need to be root?}{faq4} +\item + \ilink{I'm building my own rpms but on all platforms and compiles I get an + unresolved dependency for something called + /usr/afsws/bin/pagsh.}{faq5} +\item + \ilink{I'm building my own rpms because you don't publish for my platform. 
+ Can I get my packages released to sourceforge for other people to use?}{faq6} +\item + \ilink{Is there an easier way than sorting out all these command line options?}{faq7} +\item + \ilink{I just upgraded from 1.36.x to 1.38.x and now my director daemon won't start. It appears to start but dies silently and I get a "connection refused" error when starting the console. What is wrong?}{faq8} +\item + \ilink{There are a lot of rpm packages. Which packages do I need for what?}{faq9} +\end{enumerate} + +\section{Answers} +\index[general]{Answers } + +\begin{enumerate} +\item + \label{faq1} + {\bf How do I build Bacula for platform xxx?} + The bacula spec file contains defines to build for several platforms: + Red Hat 7.x (rh7), Red Hat 8.0 (rh8), Red Hat 9 (rh9), Fedora Core (fc1, + fc3, fc4, fc5, fc6, fc7), Whitebox Enterprise Linux 3.0 (wb3), Red Hat Enterprise Linux + (rhel3, rhel4, rhel5), Mandrake 10.x (mdk), Mandriva 2006.x (mdv) CentOS (centos3, centos4, centos5) + Scientific Linux (sl3, sl4, sl5) and SuSE (su9, su10, su102, su103). The package build is controlled by a mandatory define set at the beginning of the file. These defines basically just control the dependency information that gets coded into the finished rpm package as well + as any special configure options required. The platform define may be edited + in the spec file directly (by default all defines are set to 0 or "not set"). 
+ For example, to build the Red Hat 7.x package find the line in the spec file + which reads + +\footnotesize +\begin{verbatim} + %define rh7 0 + +\end{verbatim} +\normalsize + +and edit it to read + +\footnotesize +\begin{verbatim} + %define rh7 1 + +\end{verbatim} +\normalsize + +Alternately you may pass the define on the command line when calling rpmbuild: + + +\footnotesize +\begin{verbatim} + rpmbuild -ba --define "build_rh7 1" bacula.spec + rpmbuild --rebuild --define "build_rh7 1" bacula-x.x.x-x.src.rpm + +\end{verbatim} +\normalsize + +\item + \label{faq2} + {\bf How do I control which database support gets built?} + Another mandatory build define controls which database support is compiled, + one of build\_sqlite, build\_mysql or build\_postgresql. To get the MySQL + package and support either set the + +\footnotesize +\begin{verbatim} + %define mysql 0 + OR + %define mysql4 0 + OR + %define mysql5 0 + +\end{verbatim} +\normalsize + +to + +\footnotesize +\begin{verbatim} + %define mysql 1 + OR + %define mysql4 1 + OR + %define mysql5 1 + +\end{verbatim} +\normalsize + +in the spec file directly or pass it to rpmbuild on the command line: + +\footnotesize +\begin{verbatim} + rpmbuild -ba --define "build_rh7 1" --define "build_mysql 1" bacula.spec + rpmbuild -ba --define "build_rh7 1" --define "build_mysql4 1" bacula.spec + rpmbuild -ba --define "build_rh7 1" --define "build_mysql5 1" bacula.spec + +\end{verbatim} +\normalsize + +\item + \label{faq3} + {\bf What other defines are used?} + Three other building defines of note are the depkgs\_version, docs\_version and + \_rescuever identifiers. These defines are set with each release and must + match the version of those sources that are being used to build the packages. + You would not ordinarily need to edit these. See also the Build Options section + below for other build time options that can be passed on the command line.
+\item + \label{faq4} + {\bf I'm getting errors about not having permission when I try to build the + packages. Do I need to be root?} + No, you do not need to be root and, in fact, it is better practice to + build rpm packages as a non-root user. Bacula packages are designed to + be built by a regular user but you must make a few changes on your + system to do this. If you are building on your own system then the + simplest method is to add write permissions for all to the build + directory (/usr/src/redhat/, /usr/src/RPM or /usr/src/packages). + To accomplish this, execute the following command as root: + +\footnotesize +\begin{verbatim} + chmod -R 777 /usr/src/redhat + chmod -R 777 /usr/src/RPM + chmod -R 777 /usr/src/packages + +\end{verbatim} +\normalsize + +If you are working on a shared system where you can not use the method +above then you need to recreate the appropriate above directory tree with all +of its subdirectories inside your home directory. Then create a file named + +{\tt .rpmmacros} + +in your home directory (or edit the file if it already exists) +and add the following line: + +\footnotesize +\begin{verbatim} + %_topdir /home/myuser/redhat + +\end{verbatim} +\normalsize + +Another handy directive for the .rpmmacros file if you wish to suppress the +creation of debug rpm packages is: + +\footnotesize +\begin{verbatim} + %debug_package %{nil} + +\end{verbatim} + +\normalsize + +\item + \label{faq5} + {\bf I'm building my own rpms but on all platforms and compiles I get an + unresolved dependency for something called /usr/afsws/bin/pagsh.} This + is a shell from the OpenAFS (Andrew File System). If you are seeing + this then you chose to include the docs/examples directory in your + package. One of the example scripts in this directory is a pagsh + script. Rpmbuild, when scanning for dependencies, looks at the shebang + line of all packaged scripts in addition to checking shared libraries. + To avoid this do not package the examples directory. 
If you are seeing this + problem you are building a very old bacula package as the examples have been + removed from the doc packaging. + +\item + \label{faq6} + {\bf I'm building my own rpms because you don't publish for my platform. + Can I get my packages released to sourceforge for other people to use?} Yes, + contributions from users are accepted and appreciated. Please examine the + directory platforms/contrib-rpm in the source code for further information. + +\item + \label{faq7} + {\bf Is there an easier way than sorting out all these command line options?} Yes, + there is a gui wizard shell script which you can use to rebuild the src rpm package. + Look in the source archive for platforms/contrib-rpm/rpm\_wizard.sh. This script will + allow you to specify build options using GNOME dialog screens. It requires zenity. + +\item + \label{faq8} + {\bf I just upgraded from 1.36.x to 1.38.x and now my director daemon +won't start. It appears to start but dies silently and I get a "connection +refused" error when starting the console. What is wrong?} Beginning with +1.38 the rpm packages are configured to run the director and storage +daemons as a non-root user. The file daemon runs as user root and group +bacula, the storage daemon as user bacula and group disk, and the director +as user bacula and group bacula. If you are upgrading you will need to +change some file permissions for things to work. Execute the following +commands as root: + +\footnotesize +\begin{verbatim} + chown bacula.bacula /var/bacula/* + chown root.bacula /var/bacula/bacula-fd.9102.state + chown bacula.disk /var/bacula/bacula-sd.9103.state + +\end{verbatim} +\normalsize + +Further, if you are using File storage volumes rather than tapes those +files will also need to have ownership set to user bacula and group bacula. + +\item + \label{faq9} + {\bf There are a lot of rpm packages. 
Which packages do I need for +what?} For a bacula server you need to select the package based upon your +preferred catalog database: one of bacula-mysql, bacula-postgresql or +bacula-sqlite. If your system does not provide an mtx package you also +need bacula-mtx to satisfy that dependency. For a client machine you need +only install bacula-client. Optionally, for either server or client +machines, you may install a graphical console bacula-gconsole and/or +bacula-wxconsole. The Bacula Administration Tool is installed with the +bacula-bat package. One last package, bacula-updatedb is required only when +upgrading a server more than one database revision level. + + + +\item {\bf Support for RHEL3/4/5, CentOS 3/4/5, Scientific Linux 3/4/5 and x86\_64} + The examples below show + explicit build support for RHEL4 and CentOS 4. Build support + for x86\_64 has also been added. +\end{enumerate} + +\footnotesize +\begin{verbatim} +Build with one of these 3 commands: + +rpmbuild --rebuild \ + --define "build_rhel4 1" \ + --define "build_sqlite 1" \ + bacula-1.38.3-1.src.rpm + +rpmbuild --rebuild \ + --define "build_rhel4 1" \ + --define "build_postgresql 1" \ + bacula-1.38.3-1.src.rpm + +rpmbuild --rebuild \ + --define "build_rhel4 1" \ + --define "build_mysql4 1" \ + bacula-1.38.3-1.src.rpm + +For CentOS substitute '--define "build_centos4 1"' in place of rhel4. +For Scientific Linux substitute '--define "build_sl4 1"' in place of rhel4. 
+ +For 64 bit support add '--define "build_x86_64 1"' +\end{verbatim} +\normalsize + +\section{Build Options} +\index[general]{Build Options} +The spec file currently supports building on the following platforms: +\footnotesize +\begin{verbatim} +Red Hat builds +--define "build_rh7 1" +--define "build_rh8 1" +--define "build_rh9 1" + +Fedora Core build +--define "build_fc1 1" +--define "build_fc3 1" +--define "build_fc4 1" +--define "build_fc5 1" +--define "build_fc6 1" +--define "build_fc7 1" + +Whitebox Enterprise build +--define "build_wb3 1" + +Red Hat Enterprise builds +--define "build_rhel3 1" +--define "build_rhel4 1" +--define "build_rhel5 1" + +CentOS build +--define "build_centos3 1" +--define "build_centos4 1" +--define "build_centos5 1" + +Scientific Linux build +--define "build_sl3 1" +--define "build_sl4 1" +--define "build_sl5 1" + +SuSE build +--define "build_su9 1" +--define "build_su10 1" +--define "build_su102 1" +--define "build_su103 1" + +Mandrake 10.x build +--define "build_mdk 1" + +Mandriva build +--define "build_mdv 1" + +MySQL support: +for mysql 3.23.x support define this +--define "build_mysql 1" +if using mysql 4.x define this, +currently: Mandrake 10.x, Mandriva 2006.0, SuSE 9.x & 10.0, FC4 & RHEL4 +--define "build_mysql4 1" +if using mysql 5.x define this, +currently: SuSE 10.1 & FC5 +--define "build_mysql5 1" + +PostgreSQL support: +--define "build_postgresql 1" + +Sqlite support: +--define "build_sqlite 1" + +Build the client rpm only in place of one of the above database full builds: +--define "build_client_only 1" + +X86-64 support: +--define "build_x86_64 1" + +Supress build of bgnome-console: +--define "nobuild_gconsole 1" + +Build the WXWindows console: +requires wxGTK >= 2.6 +--define "build_wxconsole 1" + +Build the Bacula Administration Tool: +requires QT >= 4.2 +--define "build_bat 1" + +Build python scripting support: +--define "build_python 1" + +Modify the Packager tag for third party packages: +--define 
"contrib_packager Your Name " + +\end{verbatim} +\normalsize + +\section{RPM Install Problems} +\index[general]{RPM Install Problems} +In general the RPMs, once properly built should install correctly. +However, when attempting to run the daemons, a number of problems +can occur: +\begin{itemize} +\item [Wrong /var/bacula Permissions] + By default, the Director and Storage daemon do not run with + root permission. If the /var/bacula is owned by root, then it + is possible that the Director and the Storage daemon will not + be able to access this directory, which is used as the Working + Directory. To fix this, the easiest thing to do is: +\begin{verbatim} + chown bacula:bacula /var/bacula +\end{verbatim} + Note: as of 1.38.8 /var/bacula is installed root:bacula with + permissions 770. +\item [The Storage daemon cannot Access the Tape drive] + This can happen in some older RPM releases where the Storage + daemon ran under userid bacula, group bacula. There are two + ways of fixing this: the best is to modify the /etc/init.d/bacula-sd + file so that it starts the Storage daemon with group "disk". + The second way to fix the problem is to change the permissions + of your tape drive (usually /dev/nst0) so that Bacula can access it. + You will probably need to change the permissions of the SCSI control + device as well, which is usually /dev/sg0. The exact names depend + on your configuration, please see the Tape Testing chapter for + more information on devices. +\end{itemize} + diff --git a/docs/manuals/en/problems/setup.sm b/docs/manuals/en/problems/setup.sm new file mode 100644 index 00000000..7c88dc61 --- /dev/null +++ b/docs/manuals/en/problems/setup.sm @@ -0,0 +1,23 @@ +/* + * html2latex + */ + +available { + sun4_sunos.4 + sun4_solaris.2 + rs_aix.3 + rs_aix.4 + sgi_irix +} + +description { + From Jeffrey Schaefer, Geometry Center. 
Translates HTML document to LaTeX +} + +install { + bin/html2latex /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex + bin/html2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.tag + bin/html2latex-local.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex-local.tag + bin/webtex2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/webtex2latex.tag + man/man1/html2latex.1 /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.1 +} diff --git a/docs/manuals/en/problems/tapetesting.tex b/docs/manuals/en/problems/tapetesting.tex new file mode 100644 index 00000000..7281f34e --- /dev/null +++ b/docs/manuals/en/problems/tapetesting.tex @@ -0,0 +1,1293 @@ +%% +%% + +\chapter{Testing Your Tape Drive With Bacula} +\label{TapeTestingChapter} +\index[general]{Testing Your Tape Drive With Bacula} + +This chapter is concerned with testing and configuring your tape drive to make +sure that it will work properly with Bacula using the {\bf btape} program. +\label{summary} + +\section{Get Your Tape Drive Working} + +In general, you should follow the following steps to get your tape drive to +work with Bacula. Start with a tape mounted in your drive. If you have an +autochanger, load a tape into the drive. We use {\bf /dev/nst0} as the tape +drive name, you will need to adapt it according to your system. + +Do not proceed to the next item until you have succeeded with the previous +one. + +\begin{enumerate} +\item Make sure that Bacula (the Storage daemon) is not running + or that you have {\bf unmount}ed the drive you will use + for testing. + +\item Use tar to write to, then read from your drive: + + \footnotesize +\begin{verbatim} + mt -f /dev/nst0 rewind + tar cvf /dev/nst0 . + mt -f /dev/nst0 rewind + tar tvf /dev/nst0 + +\end{verbatim} +\normalsize + +\item Make sure you have a valid and correct Device resource corresponding + to your drive. For Linux users, generally, the default one works. 
For + FreeBSD users, there are two possible Device configurations (see below). + For other drives and/or OSes, you will need to first ensure that your + system tape modes are properly setup (see below), then possibly modify + you Device resource depending on the output from the btape program (next + item). When doing this, you should consult the \ilink{Storage Daemon + Configuration}{StoredConfChapter} of this manual. + +\item If you are using a Fibre Channel to connect your tape drive to + Bacula, please be sure to disable any caching in the NSR (network + storage router, which is a Fibre Channel to SCSI converter). + +\item Run the btape {\bf test} command: + + \footnotesize +\begin{verbatim} + ./btape -c bacula-sd.conf /dev/nst0 + test + +\end{verbatim} +\normalsize + + It isn't necessary to run the autochanger part of the test at this time, + but do not go past this point until the basic test succeeds. If you do + have an autochanger, please be sure to read the \ilink{Autochanger + chapter}{AutochangersChapter} of this manual. + +\item Run the btape {\bf fill} command, preferably with two volumes. This + can take a long time. If you have an autochanger and it is configured, Bacula + will automatically use it. If you do not have it configured, you can manually + issue the appropriate {\bf mtx} command, or press the autochanger buttons to + change the tape when requested to do so. + +\item FreeBSD users, if you have a pre-5.0 system run the {\bf tapetest} + program, and make sure your system is patched if necessary. The tapetest + program can be found in the platform/freebsd directory. The instructions + for its use are at the top of the file. + +\item Run Bacula, and backup a reasonably small directory, say 60 + Megabytes. Do three successive backups of this directory. + +\item Stop Bacula, then restart it. Do another full backup of the same + directory. Then stop and restart Bacula. 
+ +\item Do a restore of the directory backed up, by entering the following + restore command, being careful to restore it to an alternate location: + + +\footnotesize +\begin{verbatim} + restore select all done + yes + +\end{verbatim} +\normalsize + + Do a {\bf diff} on the restored directory to ensure it is identical to the + original directory. If you are going to backup multiple different systems + (Linux, Windows, Mac, Solaris, FreeBSD, ...), be sure you test the restore + on each system type. + +\item If you have an autochanger, you should now go back to the btape program + and run the autochanger test: + +\footnotesize +\begin{verbatim} + ./btape -c bacula-sd.conf /dev/nst0 + auto + +\end{verbatim} +\normalsize + + Adjust your autochanger as necessary to ensure that it works correctly. See + the Autochanger chapter of this manual for a complete discussion of testing + your autochanger. + +\item We strongly recommend that you use a dedicated SCSI + controller for your tape drives. Scanners are known to induce + serious problems with the SCSI bus, causing it to reset. If the + SCSI bus is reset while Bacula has the tape drive open, it will + most likely be fatal to your tape since the drive will rewind. + These kinds of problems show up in the system log. For example, + the following was most likely caused by a scanner: + +\footnotesize +\begin{verbatim} +Feb 14 17:29:55 epohost kernel: (scsi0:A:2:0): No or incomplete CDB sent to device. +Feb 14 17:29:55 epohost kernel: scsi0: Issued Channel A Bus Reset. 1 SCBs aborted +\end{verbatim} +\normalsize + +\end{enumerate} + +If you have reached this point, you stand a good chance of having everything +work. If you get into trouble at any point, {\bf carefully} read the +documentation given below. If you cannot get past some point, ask the {\bf +bacula-users} email list, but specify which of the steps you have successfully +completed. 
In particular, you may want to look at the +\ilink{ Tips for Resolving Problems}{problems1} section below. + + +\label{NoTapeInDrive} +\subsection{Problems When no Tape in Drive} +\index[general]{Problems When no Tape in Drive} +When Bacula was first written the Linux 2.4 kernel permitted opening the +drive whether or not there was a tape in the drive. Thus the Bacula code is +based on the concept that if the drive cannot be opened, there is a serious +problem, and the job is failed. + +With version 2.6 of the Linux kernel, if there is no tape in the drive, the +OS will wait two minutes (default) and then return a failure, and consequently, +Bacula version 1.36 and below will fail the job. This is important to keep +in mind, because if you use an option such as {\bf Offline on Unmount = +yes}, there will be a point when there is no tape in the drive, and if +another job starts or if Bacula asks the operator to mount a tape, when +Bacula attempts to open the drive (about a 20 minute delay), it will fail +and Bacula will fail the job. + +In version 1.38.x, the Bacula code partially gets around this problem -- at +least in the initial open of the drive. However, functions like Polling +the drive do not work correctly if there is no tape in the drive. +Providing you do not use {\bf Offline on Unmount = yes}, you should not +experience job failures as mentioned above. If you do experience such +failures, you can also increase the {\bf Maximum Open Wait} time interval, +which will give you more time to mount the next tape before the job is +failed. + +\subsection{Specifying the Configuration File} +\index[general]{File!Specifying the Configuration} +\index[general]{Specifying the Configuration File} + +Starting with version 1.27, each of the tape utility programs including the +{\bf btape} program requires a valid Storage daemon configuration file +(actually, the only part of the configuration file that {\bf btape} needs is +the {\bf Device} resource definitions). 
This permits {\bf btape} to find the +configuration parameters for your archive device (generally a tape drive). +Without those parameters, the testing and utility programs do not know how to +properly read and write your drive. By default, they use {\bf bacula-sd.conf} +in the current directory, but you may specify a different configuration file +using the {\bf -c} option. + +\subsection{Specifying a Device Name For a Tape} +\index[general]{Tape!Specifying a Device Name For a} +\index[general]{Specifying a Device Name For a Tape} + +{\bf btape} {\bf device-name} where the Volume can be found. In the case of a +tape, this is the physical device name such as {\bf /dev/nst0} or {\bf +/dev/rmt/0ubn} depending on your system that you specify on the Archive Device +directive. For the program to work, it must find the identical name in the +Device resource of the configuration file. If the name is not found in the +list of physical names, the utility program will compare the name you entered +to the Device names (rather than the Archive device names). + +When specifying a tape device, it is preferable that the "non-rewind" +variant of the device file name be given. In addition, on systems such as +Sun, which have multiple tape access methods, you must be sure to specify +to use Berkeley I/O conventions with the device. The +{\bf b} in the Solaris (Sun) archive specification {\bf /dev/rmt/0mbn} is +what is needed in this case. Bacula does not support SysV tape drive +behavior. + +See below for specifying Volume names. + +\subsection{Specifying a Device Name For a File} +\index[general]{File!Specifying a Device Name For a} +\index[general]{Specifying a Device Name For a File} + +If you are attempting to read or write an archive file rather than a tape, the +{\bf device-name} should be the full path to the archive location including +the filename. 
The filename (last part of the specification) will be stripped +and used as the Volume name, and the path (first part before the filename) +must have the same entry in the configuration file. So, the path is equivalent +to the archive device name, and the filename is equivalent to the volume name. + + +\section{btape} +\label{btape1} +\index[general]{Btape} + +This program permits a number of elementary tape operations via a tty command +interface. The {\bf test} command, described below, can be very useful for +testing tape drive compatibility problems. Aside from initial testing of tape +drive compatibility with {\bf Bacula}, {\bf btape} will be mostly used by +developers writing new tape drivers. + +{\bf btape} can be dangerous to use with existing {\bf Bacula} tapes because +it will relabel a tape or write on the tape if so requested regardless of +whether or not the tape contains valuable data, so please be careful and use +it only on blank tapes. + +To work properly, {\bf btape} needs to read the Storage daemon's configuration +file. As a default, it will look for {\bf bacula-sd.conf} in the current +directory. If your configuration file is elsewhere, please use the {\bf -c} +option to specify where. + +The physical device name or the Device resource name must be specified on the +command line, and this same device name must be present in the Storage +daemon's configuration file read by {\bf btape} + +\footnotesize +\begin{verbatim} +Usage: btape [options] device_name + -b specify bootstrap file + -c set configuration file to file + -d set debug level to nn + -p proceed inspite of I/O errors + -s turn off signals + -v be verbose + -? print this message. 
+\end{verbatim} +\normalsize + +\subsection{Using btape to Verify your Tape Drive} +\index[general]{Using btape to Verify your Tape Drive} +\index[general]{Drive!Using btape to Verify your Tape} + +An important reason for this program is to ensure that a Storage daemon +configuration file is defined so that Bacula will correctly read and write +tapes. + +It is highly recommended that you run the {\bf test} command before running +your first Bacula job to ensure that the parameters you have defined for your +storage device (tape drive) will permit {\bf Bacula} to function properly. You +only need to mount a blank tape, enter the command, and the output should be +reasonably self explanatory. For example: + +\footnotesize +\begin{verbatim} +(ensure that Bacula is not running) +./btape -c /usr/bin/bacula/bacula-sd.conf /dev/nst0 +\end{verbatim} +\normalsize + +The output will be: + +\footnotesize +\begin{verbatim} +Tape block granularity is 1024 bytes. +btape: btape.c:376 Using device: /dev/nst0 +* +\end{verbatim} +\normalsize + +Enter the test command: + +\footnotesize +\begin{verbatim} +test +\end{verbatim} +\normalsize + +The output produced should be something similar to the following: I've cut the +listing short because it is frequently updated to have new tests. + +\footnotesize +\begin{verbatim} +=== Append files test === +This test is essential to Bacula. +I'm going to write one record in file 0, + two records in file 1, + and three records in file 2 +btape: btape.c:387 Rewound /dev/nst0 +btape: btape.c:855 Wrote one record of 64412 bytes. +btape: btape.c:857 Wrote block to device. +btape: btape.c:410 Wrote EOF to /dev/nst0 +btape: btape.c:855 Wrote one record of 64412 bytes. +btape: btape.c:857 Wrote block to device. +btape: btape.c:855 Wrote one record of 64412 bytes. +btape: btape.c:857 Wrote block to device. +btape: btape.c:410 Wrote EOF to /dev/nst0 +btape: btape.c:855 Wrote one record of 64412 bytes. +btape: btape.c:857 Wrote block to device. 
+btape: btape.c:855 Wrote one record of 64412 bytes. +btape: btape.c:857 Wrote block to device. +btape: btape.c:855 Wrote one record of 64412 bytes. +btape: btape.c:857 Wrote block to device. +btape: btape.c:410 Wrote EOF to /dev/nst0 +btape: btape.c:387 Rewound /dev/nst0 +btape: btape.c:693 Now moving to end of media. +btape: btape.c:427 Moved to end of media +We should be in file 3. I am at file 3. This is correct! +Now the important part, I am going to attempt to append to the tape. +... +=== End Append files test === +\end{verbatim} +\normalsize + +If you do not successfully complete the above test, please resolve the +problem(s) before attempting to use {\bf Bacula}. Depending on your tape +drive, the test may recommend that you add certain records to your +configuration. We strongly recommend that you do so and then re-run the above +test to ensure it works the first time. + +Some of the suggestions it provides for resolving the problems may or may not +be useful. If at all possible avoid using fixed blocking. If the test suddenly +starts to print a long series of: + +\footnotesize +\begin{verbatim} +Got EOF on tape. +Got EOF on tape. +... +\end{verbatim} +\normalsize + +then almost certainly, you are running your drive in fixed block mode rather +than variable block mode. See below for more help on resolving fixed +versus variable block problems. + +It is also possible that you have your drive +set in SysV tape drive mode. The drive must use BSD tape conventions. +See the section above on setting your {\bf Archive device} correctly. + +For FreeBSD users, please see the notes below for doing further testing of +your tape drive. 
+ +\label{SCSITricks} +\subsection{Linux SCSI Tricks} +\index[general]{Tricks!Linux SCSI} +\index[general]{Linux SCSI Tricks} + +You can find out what SCSI devices you have by doing: + +\footnotesize +\begin{verbatim} +lsscsi +\end{verbatim} +\normalsize + +Typical output is: + +\footnotesize +\begin{verbatim} +[0:0:0:0] disk ATA ST3160812AS 3.AD /dev/sda +[2:0:4:0] tape HP Ultrium 2-SCSI F6CH /dev/st0 +[2:0:5:0] tape HP Ultrium 2-SCSI F6CH /dev/st1 +[2:0:6:0] mediumx OVERLAND LXB 0107 - +[2:0:9:0] tape HP Ultrium 1-SCSI E50H /dev/st2 +[2:0:10:0] mediumx OVERLAND LXB 0107 - +\end{verbatim} +\normalsize + +There are two drives in one autochanger: /dev/st0 and /dev/st1 +and a third tape drive at /dev/st2. For using them with Bacula, one +would normally reference them as /dev/nst0 ... /dev/nst2. Note also, +there are two different autochangers identified as "mediumx OVERLAND LXB". +They can be addressed via their /dev/sgN designation, which can be +obtained by counting from the beginning as 0 to each changer. In the +above case, the two changers are located on /dev/sg3 and /dev/sg5. The one +at /dev/sg3, controls drives /dev/nst0 and /dev/nst1; and the one at +/dev/sg5 controls drive /dev/nst2. 
+ +If you do not have the {\bf lsscsi} command, you can obtain the same +information as follows: + +\footnotesize +\begin{verbatim} +cat /proc/scsi/scsi +\end{verbatim} +\normalsize + +For the above example with the three drives and two autochangers, +I get: + +\footnotesize +\begin{verbatim} +Attached devices: +Host: scsi0 Channel: 00 Id: 00 Lun: 00 + Vendor: ATA Model: ST3160812AS Rev: 3.AD + Type: Direct-Access ANSI SCSI revision: 05 +Host: scsi2 Channel: 00 Id: 04 Lun: 00 + Vendor: HP Model: Ultrium 2-SCSI Rev: F6CH + Type: Sequential-Access ANSI SCSI revision: 03 +Host: scsi2 Channel: 00 Id: 05 Lun: 00 + Vendor: HP Model: Ultrium 2-SCSI Rev: F6CH + Type: Sequential-Access ANSI SCSI revision: 03 +Host: scsi2 Channel: 00 Id: 06 Lun: 00 + Vendor: OVERLAND Model: LXB Rev: 0107 + Type: Medium Changer ANSI SCSI revision: 02 +Host: scsi2 Channel: 00 Id: 09 Lun: 00 + Vendor: HP Model: Ultrium 1-SCSI Rev: E50H + Type: Sequential-Access ANSI SCSI revision: 03 +Host: scsi2 Channel: 00 Id: 10 Lun: 00 + Vendor: OVERLAND Model: LXB Rev: 0107 + Type: Medium Changer ANSI SCSI revision: 02 +\end{verbatim} +\normalsize + + +As an additional example, I get the following (on a different machine from the +above example): + +\footnotesize +\begin{verbatim} +Attached devices: +Host: scsi2 Channel: 00 Id: 01 Lun: 00 + Vendor: HP Model: C5713A Rev: H107 + Type: Sequential-Access ANSI SCSI revision: 02 +Host: scsi2 Channel: 00 Id: 04 Lun: 00 + Vendor: SONY Model: SDT-10000 Rev: 0110 + Type: Sequential-Access ANSI SCSI revision: 02 +\end{verbatim} +\normalsize + +The above represents first an autochanger and second a simple +tape drive. 
The HP changer (the first entry) uses the same SCSI channel +for data and for control, so in Bacula, you would use: +\footnotesize +\begin{verbatim} +Archive Device = /dev/nst0 +Changer Device = /dev/sg0 +\end{verbatim} +\normalsize + +If you want to remove the SDT-10000 device, you can do so as root with: + +\footnotesize +\begin{verbatim} +echo "scsi remove-single-device 2 0 4 0">/proc/scsi/scsi +\end{verbatim} +\normalsize + +and you can add it back with: + +\footnotesize +\begin{verbatim} +echo "scsi add-single-device 2 0 4 0">/proc/scsi/scsi +\end{verbatim} +\normalsize + +where the 2 0 4 0 are the Host, Channel, Id, and Lun as seen on the output +from {\bf cat /proc/scsi/scsi}. Note, the Channel must be specified as +numeric. + +Below is a slightly more complicated output, which is a single autochanger +with two drives, and which operates the changer on a different channel +from the drives: + +\footnotesize +\begin{verbatim} +Attached devices: +Host: scsi0 Channel: 00 Id: 00 Lun: 00 + Vendor: ATA Model: WDC WD1600JD-75H Rev: 08.0 + Type: Direct-Access ANSI SCSI revision: 05 +Host: scsi2 Channel: 00 Id: 04 Lun: 00 + Vendor: HP Model: Ultrium 2-SCSI Rev: F6CH + Type: Sequential-Access ANSI SCSI revision: 03 +Host: scsi2 Channel: 00 Id: 05 Lun: 00 + Vendor: HP Model: Ultrium 2-SCSI Rev: F6CH + Type: Sequential-Access ANSI SCSI revision: 03 +Host: scsi2 Channel: 00 Id: 06 Lun: 00 + Vendor: OVERLAND Model: LXB Rev: 0106 + Type: Medium Changer ANSI SCSI revision: 02 +\end{verbatim} +\normalsize + +The above tape drives are accessed on /dev/nst0 and /dev/nst1, while +the control channel for those two drives is /dev/sg3. 
+ + + +\label{problems1} +\section{Tips for Resolving Problems} +\index[general]{Problems!Tips for Resolving} +\index[general]{Tips for Resolving Problems} + +\label{CannotRestore} +\subsection{Bacula Saves But Cannot Restore Files} +\index[general]{Files!Bacula Saves But Cannot Restore} +\index[general]{Bacula Saves But Cannot Restore Files} + +If you are getting error messages such as: + +\footnotesize +\begin{verbatim} +Volume data error at 0:1! Wanted block-id: "BB02", got "". Buffer discarded +\end{verbatim} +\normalsize + +It is very likely that Bacula has tried to do block positioning and ended up +at an invalid block. This can happen if your tape drive is in fixed block mode +while Bacula's default is variable blocks. Note that in such cases, Bacula is +perfectly able to write to your Volumes (tapes), but cannot position to read +them. + +There are two possible solutions. + +\begin{enumerate} +\item The first and best is to always ensure that your drive is in variable + block mode. Note, it can switch back to fixed block mode on a reboot or if + another program uses the drive. So on such systems you need to modify the + Bacula startup files to explicitly set: + +\footnotesize +\begin{verbatim} +mt -f /dev/nst0 defblksize 0 +\end{verbatim} +\normalsize + +or whatever is appropriate on your system. Note, if you are running a Linux +system, and the above command does not work, it is most likely because you +have not loaded the appropriate {\bf mt} package, which is often called +{\bf mt\_st}, but may differ according to your distribution. + +\item The second possibility, especially, if Bacula wrote while the drive was + in fixed block mode, is to turn off block positioning in Bacula. This is done + by adding: + +\footnotesize +\begin{verbatim} +Block Positioning = no +\end{verbatim} +\normalsize + +to the Device resource. This is not the recommended procedure because it can +enormously slow down recovery of files, but it may help where all else +fails. 
This directive is available in version 1.35.5 or later (and not yet +tested). +\end{enumerate} + +If you are getting error messages such as: +\footnotesize +\begin{verbatim} +Volume data error at 0:0! +Block checksum mismatch in block=0 len=32625 calc=345678 blk=123456 +\end{verbatim} +\normalsize + +You are getting tape read errors, and this is most likely due to +one of the following things: +\begin{enumerate} +\item An old or bad tape. +\item A dirty drive that needs cleaning (particularly for DDS drives). +\item A loose SCSI cable. +\item Old firmware in your drive. Make sure you have the latest firmware + loaded. +\item Computer memory errors. +\item Over-clocking your CPU. +\item A bad SCSI card. +\end{enumerate} + + +\label{opendevice} +\subsection{Bacula Cannot Open the Device} +\index[general]{Device!Bacula Cannot Open the} +\index[general]{Bacula Cannot Open the Device} + +If you get an error message such as: + +\footnotesize +\begin{verbatim} +dev open failed: dev.c:265 stored: unable to open +device /dev/nst0:> ERR=No such device or address +\end{verbatim} +\normalsize + +the first time you run a job, it is most likely due to the fact that you +specified the incorrect device name on your {\bf Archive Device}. + +If Bacula works fine with your drive, then all of a sudden you get error +messages similar to the one shown above, it is quite possible that your driver +module is being removed because the kernel deems it idle. This is done via +{\bf crontab} with the use of {\bf rmmod -a}. To fix the problem, you can +remove this entry from {\bf crontab}, or you can manually {\bf modprobe} your +driver module (or add it to the local startup script). Thanks to Alan Brown +for this tip. +\label{IncorrectFiles} + +\subsection{Incorrect File Number} +\index[general]{Number!Incorrect File} +\index[general]{Incorrect File Number} + +When Bacula moves to the end of the medium, it normally uses the {\bf +ioctl(MTEOM)} function. 
Then Bacula uses the {\bf ioctl(MTIOCGET)} function to +retrieve the current file position from the {\bf mt\_fileno} field. Some SCSI +tape drivers will use a fast means of seeking to the end of the medium and in +doing so, they will not know the current file position and hence return a {\bf +-1}. As a consequence, if you get {\bf "This is NOT correct!"} in the +positioning tests, this may be the cause. You must correct this condition in +order for Bacula to work. + +There are two possible solutions to the above problem of incorrect file +number: + +\begin{itemize} +\item Figure out how to configure your SCSI driver to keep track of the file + position during the MTEOM request. This is the preferred solution. +\item Modify the {\bf Device} resource of your {\bf bacula-sd.conf} file to + include: + +\footnotesize +\begin{verbatim} +Hardware End of File = no +\end{verbatim} +\normalsize + +This will cause Bacula to use the MTFSF request to seek to the end of the +medium, and Bacula will keep track of the file number itself. +\end{itemize} + +\label{IncorrectBlocks} +\subsection{Incorrect Number of Blocks or Positioning Errors} +\index[general]{Testing!Incorrect Number of Blocks or Positioning Errors} +\index[general]{Incorrect Number of Blocks or Positioning Errors} + +{\bf Bacula's} preferred method of working with tape drives (sequential +devices) is to run in variable block mode, and this is what is set by default. +You should first ensure that your tape drive is set for variable block mode +(see below). + +If your tape drive is in fixed block mode and you have told Bacula to use +different fixed block sizes or variable block sizes (default), you will get +errors when Bacula attempts to forward space to the correct block (the kernel +driver's idea of tape blocks will not correspond to Bacula's). 
+ +All modern tape drives support variable tape blocks, but some older drives (in +particular the QIC drives) as well as the ATAPI ide-scsi driver run only in +fixed block mode. The Travan tape drives also apparently must run in fixed +block mode (to be confirmed). + +Even in variable block mode, with the exception of the first record on the +second or subsequent volume of a multi-volume backup, Bacula will write blocks +of a fixed size. However, in reading a tape, Bacula will assume that for each +read request, exactly one block from the tape will be transferred. This is the +most common way that tape drives work and is well supported by {\bf Bacula}. + +Drives that run in fixed block mode can cause serious problems for Bacula if +the drive's block size does not correspond exactly to {\bf Bacula's} block +size. In fixed block size mode, drivers may transmit a partial block or +multiple blocks for a single read request. From {\bf Bacula's} point of view, +this destroys the concept of tape blocks. It is much better to run in variable +block mode, and almost all modern drives (the OnStream is an exception) run in +variable block mode. In order for Bacula to run in fixed block mode, you must +include the following records in the Storage daemon's Device resource +definition: + +\footnotesize +\begin{verbatim} +Minimum Block Size = nnn +Maximum Block Size = nnn +\end{verbatim} +\normalsize + +where {\bf nnn} must be the same for both records and must be identical to the +driver's fixed block size. + +We recommend that you avoid this configuration if at all possible by using +variable block sizes. + +If you must run with fixed size blocks, make sure they are not 512 bytes. This +is too small and the overhead that Bacula has with each record will become +excessive. If at all possible set any fixed block size to something like +64,512 bytes or possibly 32,768 if 64,512 is too large for your drive. 
See +below for the details on checking and setting the default drive block size. + +To recover files from tapes written in fixed block mode, see below. + +\label{TapeModes} +\subsection{Ensuring that the Tape Modes Are Properly Set -- {\bf Linux +Only}} +\index[general]{Ensuring that the Tape Modes Are Properly Set -- Linux Only} + +If you have a modern SCSI tape drive and you are having problems with the {\bf +test} command as noted above, it may be that some program has set one or more +of your SCSI driver's options to non-default values. For example, if your +driver is set to work in SysV manner, Bacula will not work correctly because +it expects BSD behavior. To reset your tape drive to the default values, you +can try the following, but {\bf ONLY} if you have a SCSI tape drive on a {\bf +Linux} system: + +\footnotesize +\begin{verbatim} +become super user +mt -f /dev/nst0 rewind +mt -f /dev/nst0 stoptions buffer-writes async-writes read-ahead +\end{verbatim} +\normalsize + +The above commands will clear all options and then set those specified. None +of the specified options are required by Bacula, but a number of other options +such as SysV behavior must not be set. Bacula does not support SysV tape +behavior. On systems other than Linux, you will need to consult your {\bf mt} +man pages or documentation to figure out how to do the same thing. This should +not really be necessary though -- for example, on both Linux and Solaris +systems, the default tape driver options are compatible with Bacula. +On Solaris systems, you must take care to specify the correct device +name on the {\bf Archive device} directive. See above for more details. 
+ +You may also want to ensure that no prior program has set the default block +size, as happened to one user, by explicitly turning it off with: + +\footnotesize +\begin{verbatim} +mt -f /dev/nst0 defblksize 0 +\end{verbatim} +\normalsize + +If you are running a Linux +system, and the above command does not work, it is most likely because you +have not loaded the appropriate {\bf mt} package, which is often called +{\bf mt\_st}, but may differ according to your distribution. + +If you would like to know what options you have set before making any of the +changes noted above, you can now view them on Linux systems, thanks to a tip +provided by Willem Riede. Do the following: + +\footnotesize +\begin{verbatim} +become super user +mt -f /dev/nst0 stsetoptions 0 +grep st0 /var/log/messages +\end{verbatim} +\normalsize + +and you will get output that looks something like the following: + +\footnotesize +\begin{verbatim} +kernel: st0: Mode 0 options: buffer writes: 1, async writes: 1, read ahead: 1 +kernel: st0: can bsr: 0, two FMs: 0, fast mteom: 0, auto lock: 0, +kernel: st0: defs for wr: 0, no block limits: 0, partitions: 0, s2 log: 0 +kernel: st0: sysv: 0 nowait: 0 +\end{verbatim} +\normalsize + +Note, I have chopped off the beginning of the line with the date and machine +name for presentation purposes. + +Some people find that the above settings only last until the next reboot, so +please check this otherwise you may have unexpected problems. + +Beginning with Bacula version 1.35.8, if Bacula detects that you are running +in variable block mode, it will attempt to set your drive appropriately. All +OSes permit setting variable block mode, but some OSes do not permit setting +the other modes that Bacula needs to function properly. 
+ +\label{compression} +\subsection{Tape Hardware Compression and Blocking Size} +\index[general]{Tape Hardware Compression and Blocking Size} +\index[general]{Size!Tape Hardware Compression and Blocking Size} + +As far as I can tell, there is no way with the {\bf mt} program to check if +your tape hardware compression is turned on or off. You can, however, turn it +on by using (on Linux): + +\footnotesize +\begin{verbatim} +become super user +mt -f /dev/nst0 defcompression 1 +\end{verbatim} +\normalsize + +and of course, if you use a zero instead of the one at the end, you will turn +it off. + +If you have built the {\bf mtx} program in the {\bf depkgs} package, you can +use tapeinfo to get quite a bit of information about your tape drive even if +it is not an autochanger. This program is called using the SCSI control +device. On Linux for tape drive /dev/nst0, this is usually /dev/sg0, while on +FreeBSD for /dev/nsa0, the control device is often /dev/pass2. For example on +my DDS-4 drive (/dev/nst0), I get the following: + +\footnotesize +\begin{verbatim} +tapeinfo -f /dev/sg0 +Product Type: Tape Drive +Vendor ID: 'HP ' +Product ID: 'C5713A ' +Revision: 'H107' +Attached Changer: No +MinBlock:1 +MaxBlock:16777215 +SCSI ID: 5 +SCSI LUN: 0 +Ready: yes +BufferedMode: yes +Medium Type: Not Loaded +Density Code: 0x26 +BlockSize: 0 +\end{verbatim} +\normalsize + +where the {\bf DataCompEnabled: yes} means that tape hardware compression is +turned on. You can turn it on and off (yes|no) by using the {\bf mt} +commands given above. Also, this output will tell you if the {\bf BlockSize} +is non-zero and hence set for a particular block size. Bacula is not likely to +work in such a situation because it will normally attempt to write blocks of +64,512 bytes, except the last block of the job which will generally be +shorter. The first thing to try is setting the default block size to zero +using the {\bf mt -f /dev/nst0 defblksize 0} command as shown above. 
+On FreeBSD, this would be something like: {\bf mt -f /dev/nsa0 blocksize 0}. + +On some operating systems with some tape drives, the amount of data that +can be written to the tape and whether or not compression is enabled is +determined by the density, usually set with the {\bf mt -f /dev/nst0 setdensity xxx} command. +Often {\bf mt -f /dev/nst0 status} will print out the current +density code that is used with the drive. Most systems, but unfortunately +not all, set the density to the maximum by default. On some systems, you +can also get a list of all available density codes with: +{\bf mt -f /dev/nst0 densities} or a similar {\bf mt} command. +Note, for DLT and SDLT devices, no-compression versus compression is very +often controlled by the density code. On FreeBSD systems, the compression +mode is set using {\bf mt -f /dev/nsa0 comp xxx} where xxx is the +mode you want. In general, see {\bf man mt} for the options available on +your system. + +Note, some of the above {\bf mt} commands may not be persistent depending +on your system configuration. That is, they may be reset if a program +other than Bacula uses the drive or, as is frequently the case, on reboot +of your system. + +If your tape drive requires fixed block sizes (very unusual), you can use the +following records: + +\footnotesize +\begin{verbatim} +Minimum Block Size = nnn +Maximum Block Size = nnn +\end{verbatim} +\normalsize + +in your Storage daemon's Device resource to force Bacula to write fixed size +blocks (where you set nnn to be the same for both of the above records). This +should be done only if your drive does not support variable block sizes, or +you have some other strong reasons for using fixed block sizes. As mentioned +above, a small fixed block size of 512 or 1024 bytes will be very inefficient. +Try to set any fixed block size to something like 64,512 bytes or larger if +your drive will support it.
+ +Also, note that the {\bf Medium Type} field of the output of {\bf tapeinfo} +reports {\bf Not Loaded}, which is not correct. As a consequence, you should +ignore that field as well as the {\bf Attached Changer} field. + +To recover files from tapes written in fixed block mode, see below. +\label{FreeBSDTapes} + +\subsection{Tape Modes on FreeBSD} +\index[general]{FreeBSD!Tape Modes on} +\index[general]{Tape Modes on FreeBSD} + +On most FreeBSD systems such as 4.9 and most tape drives, Bacula should run +with: + +\footnotesize +\begin{verbatim} +mt -f /dev/nsa0 seteotmodel 2 +mt -f /dev/nsa0 blocksize 0 +mt -f /dev/nsa0 comp enable +\end{verbatim} +\normalsize + +You might want to put those commands in a startup script to make sure your +tape driver is properly initialized before running Bacula, because +depending on your system configuration, these modes may be reset if a +program other than Bacula uses the drive or when your system is rebooted. + +Then according to what the {\bf btape test} command returns, you will probably +need to set the following (see below for an alternative): + +\footnotesize +\begin{verbatim} + Hardware End of Medium = no + BSF at EOM = yes + Backward Space Record = no + Backward Space File = no + Fast Forward Space File = no + TWO EOF = yes +\end{verbatim} +\normalsize + +Then be sure to run some append tests with Bacula where you start and stop +Bacula between appending to the tape, or use {\bf btape} version 1.35.1 or +greater, which includes simulation of stopping/restarting Bacula. + +Please see the file {\bf platforms/freebsd/pthreads-fix.txt} in the main +Bacula directory concerning {\bf important} information concerning +compatibility of Bacula and your system. A much more optimal Device +configuration is shown below, but does not work with all tape drives. Please +test carefully before putting either into production. 
+ +Note, for FreeBSD 4.10-RELEASE, using a Sony TSL11000 L100 DDS4 with an +autochanger set to variable block size and DCLZ compression, Brian McDonald +reports that to get Bacula to append correctly between Bacula executions, +the correct values to use are: + +\footnotesize +\begin{verbatim} +mt -f /dev/nsa0 seteotmodel 1 +mt -f /dev/nsa0 blocksize 0 +mt -f /dev/nsa0 comp enable +\end{verbatim} +\normalsize + +and + +\footnotesize +\begin{verbatim} + Hardware End of Medium = no + BSF at EOM = no + Backward Space Record = no + Backward Space File = no + Fast Forward Space File = yes + TWO EOF = no +\end{verbatim} +\normalsize + +This has been confirmed by several other people using different hardware. This +configuration is the preferred one because it uses one EOF and no backspacing +at the end of the tape, which works much more efficiently and reliably with +modern tape drives. + +Finally, here is a Device configuration that Danny Butroyd reports to work +correctly with the Overland Powerloader tape library using LT0-2 and +FreeBSD 5.4-Stable: + +\footnotesize +\begin{verbatim} +# Overland Powerloader LT02 - 17 slots single drive +Device { + Name = Powerloader + Media Type = LT0-2 + Archive Device = /dev/nsa0 + AutomaticMount = yes; + AlwaysOpen = yes; + RemovableMedia = yes; + RandomAccess = no; + Changer Command = "/usr/local/sbin/mtx-changer %c %o %S %a %d" + Changer Device = /dev/pass2 + AutoChanger = yes + Alert Command = "sh -c 'tapeinfo -f %c |grep TapeAlert|cat'" + + # FreeBSD Specific Settings + Offline On Unmount = no + Hardware End of Medium = no + BSF at EOM = yes + Backward Space Record = no + Fast Forward Space File = no + TWO EOF = yes +} +\end{verbatim} +\normalsize + +The following Device resource works fine with Dell PowerVault 110T and +120T devices on both FreeBSD 5.3 and on NetBSD 3.0. It also works +with Sony AIT-2 drives on FreeBSD. +\footnotesize +\begin{verbatim} +Device { + ...
+ # FreeBSD/NetBSD Specific Settings + Hardware End of Medium = no + BSF at EOM = yes + Backward Space Record = no + Fast Forward Space File = yes + TWO EOF = yes +} +\end{verbatim} +\normalsize + +On FreeBSD version 6.0, it is reported that you can even set +Backward Space Record = yes. + + + +\subsection{Finding your Tape Drives and Autochangers on FreeBSD} +\index[general]{FreeBSD!Finding Tape Drives and Autochangers} +\index[general]{Finding Tape Drives and Autochangers on FreeBSD} + +On FreeBSD, you can do a {\bf camcontrol devlist} as root to determine what +drives and autochangers you have. For example, + +\footnotesize +\begin{verbatim} +undef# camcontrol devlist + at scbus0 target 2 lun 0 (pass0,sa0) + at scbus0 target 4 lun 0 (pass1,sa1) + at scbus0 target 4 lun 1 (pass2) +\end{verbatim} +\normalsize + +from the above, you can determine that there is a tape drive on {\bf /dev/sa0} +and another on {\bf /dev/sa1} in addition since there is a second line for the +drive on {\bf /dev/sa1}, you know can assume that it is the control device for +the autochanger (i.e. {\bf /dev/pass2}). It is also the control device name to +use when invoking the tapeinfo program. E.g. + +\footnotesize +\begin{verbatim} +tapeinfo -f /dev/pass2 +\end{verbatim} +\normalsize + +\label{onstream} + +\subsection{Using the OnStream driver on Linux Systems} +\index[general]{Using the OnStream driver on Linux Systems} +\index[general]{Systems!Using the OnStream driver on Linux} + +Bacula version 1.33 (not 1.32x) is now working and ready for testing with the +OnStream kernel osst driver version 0.9.14 or above. Osst is available from: +\elink{http://sourceforge.net/projects/osst/} +{http://sourceforge.net/projects/osst/}. 
+ +To make Bacula work you must first load the new driver then, as root, do: + +\footnotesize +\begin{verbatim} + mt -f /dev/nosst0 defblksize 32768 +\end{verbatim} +\normalsize + +Also you must add the following to your Device resource in your Storage +daemon's conf file: + +\footnotesize +\begin{verbatim} + Minimum Block Size = 32768 + Maximum Block Size = 32768 +\end{verbatim} +\normalsize + +Here is a Device specification provided by Michel Meyers that is known to +work: + +\footnotesize +\begin{verbatim} +Device { + Name = "Onstream DI-30" + Media Type = "ADR-30" + Archive Device = /dev/nosst0 + Minimum Block Size = 32768 + Maximum Block Size = 32768 + Hardware End of Medium = yes + BSF at EOM = no + Backward Space File = yes + Fast Forward Space File = yes + Two EOF = no + AutomaticMount = yes + AlwaysOpen = yes + Removable Media = yes +} +\end{verbatim} +\normalsize + +\section{Hardware Compression on EXB-8900} +\index[general]{Hardware Compression on EXB-8900} +\index[general]{EXB-8900!Hardware Compression} + +To activate, check, or disable the hardware compression feature +on an EXB-8900, use the exabyte MammothTool. You can get it here: +\elink{http://www.exabyte.com/support/online/downloads/index.cfm} +{http://www.exabyte.com/support/online/downloads/index.cfm}. +There is a Solaris version of this tool. With option -C 0 or 1 you +can disable or activate compression. Start this tool without any +options for a small reference. + +\label{fill} +\subsection{Using btape to Simulate Filling a Tape} +\index[general]{Using btape to Simulate Filling a Tape} +\index[general]{Tape!Using btape to Simulate Filling} + +Because there are often problems with certain tape drives or systems when end +of tape conditions occur, {\bf btape} has a special command {\bf fill} that +causes it to write random data to a tape until the tape fills. It then writes +at least one more Bacula block to a second tape.
Finally, it reads back both +tapes to ensure that the data has been written in a way that Bacula can +recover it. Note, there is also a single tape option as noted below, which you +should use rather than the two tape test. See below for more details. + +This can be an extremely time consuming process (here it is about 6 hours) to +fill a full tape. Note, that btape writes random data to the tape when it is +filling it. This has two consequences: 1. it takes a bit longer to generate +the data, especially on slow CPUs. 2. the total amount of data is +approximately the real physical capacity of your tape, regardless of whether +or not the tape drive compression is on or off. This is because random data +does not compress very much. + +To begin this test, you enter the {\bf fill} command and follow the +instructions. There are two options: the simple single tape option and the +multiple tape option. Please use only the simple single tape option because +the multiple tape option still doesn't work totally correctly. If the single +tape option does not succeed, you should correct the problem before using +Bacula. +\label{RecoveringFiles} + +\section{Recovering Files Written With Fixed Block Sizes} +\index[general]{Recovering Files Written With Fixed Block Sizes} + +If you have been previously running your tape drive in fixed block mode +(default 512) and Bacula with variable blocks (default), then in version +1.32f-x and 1.34 and above, Bacula will fail to recover files because it does +block spacing, and because the block sizes don't agree between your tape drive +and Bacula it will not work. + +The long term solution is to run your drive in variable block mode as +described above. However, if you have written tapes using fixed block sizes, +this can be a bit of a pain. The solution to the problem is: while you are +doing a restore command using a tape written in fixed block size, ensure that +your drive is set to the fixed block size used while the tape was written. 
+Then when doing the {\bf restore} command in the Console program, do not +answer the prompt {\bf yes/mod/no}. Instead, edit the bootstrap file (the +location is listed in the prompt) using any ASCII editor. Remove all {\bf +VolBlock} lines in the file. When the file is re-written, answer the question, +and Bacula will run without using block positioning, and it should recover +your files. + +\label{BlockModes} +\section{Tape Blocking Modes} +\index[general]{Modes!Tape Blocking} +\index[general]{Tape Blocking Modes} + +SCSI tapes may either be written in {\bf variable} or {\bf fixed} block sizes. +Newer drives support both modes, but some drives such as the QIC devices +always use fixed block sizes. Bacula attempts to fill and write complete +blocks (default 65K), so that in normal mode (variable block size), Bacula +will always write blocks of the same size except the last block of a Job. If +Bacula is configured to write fixed block sizes, it will pad the last block of +the Job to the correct size. Bacula expects variable tape block size drives to +behave as follows: Each write to the drive results in a single record being +written to the tape. Each read returns a single record. If you request less +bytes than are in the record, only those number of bytes will be returned, but +the entire logical record will have been read (the next read will retrieve the +next record). Thus data from a single write is always returned in a single +read, and sequentially written records are returned by sequential reads. + +Bacula expects fixed block size tape drives to behave as follows: If a write +length is greater than the physical block size of the drive, the write will be +written as two blocks each of the fixed physical size. This single write may +become multiple physical records on the tape. (This is not a good situation). 
+ +According to the documentation, one may never write an amount of data that is +not the exact multiple of the blocksize (it is not specified if an error +occurs or if the last record is padded). When reading, it is my +understanding that each read request reads one physical record from the tape. +Due to the complications of fixed block size tape drives, you should avoid +them if possible with Bacula, or you must be ABSOLUTELY certain that you use +fixed block sizes within Bacula that correspond to the physical block size of +the tape drive. This will ensure that Bacula has a one to one correspondence +between what it writes and the physical record on the tape. + +Please note that Bacula will not function correctly if it writes a block and +that block is split into two or more physical records on the tape. Bacula +assumes that each write causes a single record to be written, and that it can +sequentially recover each of the blocks it has written by using the same +number of sequential reads as it had written. + +\section{Details of Tape Modes} +\index[general]{Modes!Details} +\index[general]{Details of Tape Modes} +Rudolf Cejka has provided the following information concerning +certain tape modes and MTEOM. + +\begin{description} +\item[Tape level] + It is always possible to position filemarks or blocks, whereas + positioning to the end-of-data is only optional feature, however it is + implemented very often. SCSI specification also talks about optional + sequential filemarks, setmarks and sequential setmarks, but these are not + implemented so often. Modern tape drives keep track of file positions in + built-in chip (AIT, LTO) or at the beginning of the tape (SDLT), so there + is not any speed difference, if end-of-data or filemarks is used (I have + heard, that LTO-1 from all 3 manufacturers do not use its chip for file + locations, but a tape as in SDLT case, and I'm not sure about LTO-2 and + LTO-3 case).
However there is a big difference, that end-of-data ignores + file position, whereas filemarks returns the real number of skipped + files, so OS can track current file number just in filemarks case. + +\item[OS level] + Solaris does use just SCSI SPACE Filemarks, it does not support SCSI + SPACE End-of-data. When MTEOM is called, Solaris does use SCSI SPACE + Filemarks with count = 1048576 for fast mode, and combination of SCSI + SPACE Filemarks with count = 1 with SCSI SPACE Blocks with count = 1 for + slow mode, so EOD mark on the tape on some older tape drives is not + skipped. File number is always tracked for MTEOM. + + Linux does support both SCSI SPACE Filemarks and End-of-data: When MTEOM + is called in MT\_ST\_FAST\_MTEOM mode, SCSI SPACE End-of-data is used. + In the other case, SCSI SPACE Filemarks with count = + 8388607 is used. + There is no real slow mode like in Solaris - I just expect, that for + older tape drives Filemarks may be slower than End-of-data, but not so + much as in Solaris slow mode. File number is tracked for MTEOM just + without MT\_ST\_FAST\_MTEOM - when MT\_ST\_FAST\_MTEOM is used, it is not. + + FreeBSD does support both SCSI SPACE Filemarks and End-of-data, but when + MTEOD (MTEOM) is called, SCSI SPACE End-of-data is always used. FreeBSD + never use SCSI SPACE Filemarks for MTEOD. File number is never tracked + for MTEOD. + +\item[Bacula level] + When {\bf Hardware End of Medium = Yes} is used, MTEOM is called, but it + does not mean, that hardware End-of-data must be used. When Hardware End + of Medium = No, if Fast Forward Space File = Yes, MTFSF with count = + 32767 is used, else Block Read with count = 1 with Forward Space File + with count = 1 is used, which is really very slow. + +\item [Hardware End of Medium = Yes|No] + The name of this option is misleading and is the source of confusion, + because it is not the hardware EOM, what is really switched here. 
+ + + If I use Yes, OS must not use SCSI SPACE End-of-data, because Bacula + expects, that there is tracked file number, which is not supported by + SCSI specification. Instead, the OS have to use SCSI SPACE Filemarks. + + If I use No, an action depends on Fast Forward Space File. + + When I set {\bf Hardware End of Medium = no} + and {\bf Fast Forward Space File = no} + file positioning was very slow + on my LTO-3 (about ten to 100 minutes), but + + with {\bf Hardware End of Medium = no} and +{\bf Fast Forward Space File = yes}, the time is ten to +100 times faster (about one to two minutes). + +\end{description} + +\section{Autochanger Errors} +\index[general]{Errors!Autochanger} +\index[general]{Autochanger Errors} + +If you are getting errors such as: + +\footnotesize +\begin{verbatim} +3992 Bad autochanger "load slot 1, drive 1": ERR=Child exited with code 1. +\end{verbatim} +\normalsize + +and you are running your Storage daemon as non-root, then most likely +you are having permissions problems with the control channel. Running +as root, set permissions on /dev/sgX so that the userid and group of +your Storage daemon can access the device. You need to ensure that you +allow access to the proper control device, and if you don't have any +SCSI disk drives (including SATA drives), you might want to change +the permissions on /dev/sg*. + +\section{Syslog Errors} +\index[general]{Errors!Syslog} +\index[general]{Syslog Errors} + +If you are getting errors such as: + +\footnotesize +\begin{verbatim} +: kernel: st0: MTSETDRVBUFFER only allowed for root +\end{verbatim} +\normalsize + +you are most likely running your Storage daemon as non-root, and +Bacula is attempting to set the correct OS buffering to correspond +to your Device resource. Most OSes allow only root to issue this +ioctl command. In general, the message can be ignored providing +you are sure that your OS parameters are properly configured as +described earlier in this manual.
If you are running your Storage daemon +as root, you should not be getting these system log messages, and if +you are, something is probably wrong. diff --git a/docs/manuals/en/problems/tips.tex b/docs/manuals/en/problems/tips.tex new file mode 100644 index 00000000..d0e77f03 --- /dev/null +++ b/docs/manuals/en/problems/tips.tex @@ -0,0 +1,1045 @@ +%% +%% + +\chapter{Tips and Suggestions} +\label{TipsChapter} +\index[general]{Tips and Suggestions } +\index[general]{Suggestions!Tips and } +\label{examples} +\index[general]{Examples } + +There are a number of example scripts for various things that can be found in +the {\bf example} subdirectory and its subdirectories of the Bacula source +distribution. + +For additional tips, please see the \elink{Bacula +wiki}{\url{http://wiki.bacula.org}}. + +\section{Upgrading Bacula Versions} +\label{upgrading} +\index[general]{Upgrading Bacula Versions } +\index[general]{Versions!Upgrading Bacula } +\index[general]{Upgrading} + +The first thing to do before upgrading from one version to another is to +ensure that you don't overwrite or delete your production (current) version +of Bacula until you have tested that the new version works. + +If you have installed Bacula into a single directory, this is simple: simply +make a copy of your Bacula directory. + +If you have done a more typical Unix installation where the binaries are +placed in one directory and the configuration files are placed in another, +then the simplest way is to configure your new Bacula to go into a single +file. Alternatively, make copies of all your binaries and especially your +conf files. + +Whatever your situation may be (one of the two just described), you should +probably start with the {\bf defaultconf} script that can be found in the {\bf +examples} subdirectory. 
Copy this script to the main Bacula directory, modify +it as necessary (there should not need to be many modifications), configure +Bacula, build it, install it, then stop your production Bacula, copy all the +{\bf *.conf} files from your production Bacula directory to the test Bacula +directory, start the test version, and run a few test backups. If all seems +good, then you can proceed to install the new Bacula in place of or possibly +over the old Bacula. + +When installing a new Bacula you need not worry about losing the changes you +made to your configuration files as the installation process will not +overwrite them providing that you do not do a {\bf make uninstall}. + +If the new version of Bacula requires an upgrade to the database, +you can upgrade it with the script {\bf update\_bacula\_tables}, which +will be installed in your scripts directory (default {\bf /etc/bacula}), +or alternatively, you can find it in the +{\bf \lt{}bacula-source\gt{}/src/cats} directory. + +\section{Getting Notified of Job Completion} +\label{notification} +\index[general]{Getting Notified of Job Completion } +\index[general]{Completion!Getting Notified of Job } + +One of the first things you should do is to ensure that you are being properly +notified of the status of each Job run by Bacula, or at a minimum of each Job +that terminates with an error. + +Until you are completely comfortable with {\bf Bacula}, we recommend that you +send an email to yourself for each Job that is run. This is most easily +accomplished by adding an email notification address in the {\bf Messages} +resource of your Director's configuration file. An email is automatically +configured in the default configuration files, but you must ensure that the +default {\bf root} address is replaced by your email address. + +For additional examples of how to configure a Bacula, please take a look at the +{\bf .conf} files found in the {\bf examples} sub-directory. 
We recommend the +following configuration (where you change the paths and email address to +correspond to your setup). Note, the {\bf mailcommand} and {\bf +operatorcommand} should be on a single line. They were split here for +presentation: + +\footnotesize +\begin{verbatim} +Messages { + Name = Standard + mailcommand = "/home/bacula/bin/bsmtp -h localhost + -f \"\(Bacula\) %r\" + -s \"Bacula: %t %e of %c %l\" %r" + operatorcommand = "/home/bacula/bin/bsmtp -h localhost + -f \"\(Bacula\) %r\" + -s \"Bacula: Intervention needed for %j\" %r" + Mail = your-email-address = all, !skipped, !terminate + append = "/home/bacula/bin/log" = all, !skipped, !terminate + operator = your-email-address = mount + console = all, !skipped, !saved +} +\end{verbatim} +\normalsize + +You will need to ensure that the {\bf /home/bacula/bin} path on the {\bf +mailcommand} and the {\bf operatorcommand} lines point to your {\bf Bacula} +binary directory where the {\bf bsmtp} program will be installed. You will +also want to ensure that the {\bf your-email-address} is replaced by your +email address, and finally, you will also need to ensure that the {\bf +/home/bacula/bin/log} points to the file where you want to log all messages. + +With the above Messages resource, you will be notified by email of every Job +that ran, all the output will be appended to the {\bf log} file you specify, +all output will be directed to the console program, and all mount messages +will be emailed to you. Note, some messages will be sent to multiple +destinations. + +The form of the mailcommand is a bit complicated, but it allows you to +distinguish whether the Job terminated in error or terminated normally. Please +see the +\ilink{Mail Command}{mailcommand} section of the Messages +Resource chapter of this manual for the details of the substitution characters +used above. 
+ +Once you are totally comfortable with Bacula as I am, or if you have a large +number of nightly Jobs as I do (eight), you will probably want to change the +{\bf Mail} command to {\bf Mail On Error} which will generate an email message +only if the Job terminates in error. If the Job terminates normally, no email +message will be sent, but the output will still be appended to the log file as +well as sent to the Console program. + +\section{Getting Email Notification to Work} +\label{email} +\index[general]{Work!Getting Email Notification to } +\index[general]{Getting Email Notification to Work } + +The section above describes how to get email notification of job status. +Occasionally, however, users have problems receiving any email at all. In that +case, the things to check are the following: + +\begin{itemize} +\item Ensure that you have a valid email address specified on your {\bf Mail} + record in the Director's Messages resource. The email address should be fully + qualified. Simply using {\bf root} generally will not work, rather you should +use {\bf root@localhost} or better yet your full domain. +\item Ensure that you do not have a {\bf Mail} record in the Storage daemon's + or File daemon's configuration files. The only record you should have is {\bf + director}: + +\footnotesize +\begin{verbatim} + director = director-name = all + +\end{verbatim} +\normalsize + +\item If all else fails, try replacing the {\bf mailcommand} with + + \footnotesize +\begin{verbatim} +mailcommand = "mail -s test your@domain.com" +\end{verbatim} +\normalsize + +\item Once the above is working, assuming you want to use {\bf bsmtp}, submit + the desired bsmtp command by hand and ensure that the email is delivered, + then put that command into {\bf Bacula}. Small differences in things such as +the parenthesis around the word Bacula can make a big difference to some +bsmtp programs. 
For example, you might start simply by using: + +\footnotesize +\begin{verbatim} +mailcommand = "/home/bacula/bin/bsmtp -f \"root@localhost\" %r" +\end{verbatim} +\normalsize + +\end{itemize} + +\section{Getting Notified that Bacula is Running} +\label{JobNotification} +\index[general]{Running!Getting Notified that Bacula is } +\index[general]{Getting Notified that Bacula is Running } + +If like me, you have setup Bacula so that email is sent only when a Job has +errors, as described in the previous section of this chapter, inevitably, one +day, something will go wrong and {\bf Bacula} can stall. This could be because +Bacula crashes, which is very rare, or more likely the network has caused {\bf +Bacula} to {\bf hang} for some unknown reason. + +To avoid this, you can use the {\bf RunAfterJob} command in the Job resource +to schedule a Job nightly, or weekly that simply emails you a message saying +that Bacula is still running. For example, I have setup the following Job in +my Director's configuration file: + +\footnotesize +\begin{verbatim} +Schedule { + Name = "Watchdog" + Run = Level=Full sun-sat at 6:05 +} +Job { + Name = "Watchdog" + Type = Admin + Client=Watchdog + FileSet="Verify Set" + Messages = Standard + Storage = DLTDrive + Pool = Default + Schedule = "Watchdog" + RunAfterJob = "/home/kern/bacula/bin/watchdog %c %d" +} +Client { + Name = Watchdog + Address = rufus + FDPort = 9102 + Catalog = Verify + Password = "" + File Retention = 1day + Job Retention = 1 month + AutoPrune = yes +} +\end{verbatim} +\normalsize + +Where I established a schedule to run the Job nightly. The Job itself is type +{\bf Admin} which means that it doesn't actually do anything, and I've defined +a FileSet, Pool, Storage, and Client, all of which are not really used (and +probably don't need to be specified). 
The key aspect of this Job is the +command: + +\footnotesize +\begin{verbatim} + RunAfterJob = "/home/kern/bacula/bin/watchdog %c %d" +\end{verbatim} +\normalsize + +which runs my "watchdog" script. As an example, I have added the Job codes +\%c and \%d which will cause the Client name and the Director's name to be +passed to the script. For example, if the Client's name is {\bf Watchdog} and +the Director's name is {\bf main-dir} then referencing \$1 in the script would +get {\bf Watchdog} and referencing \$2 would get {\bf main-dir}. In this case, +having the script know the Client and Director's name is not really useful, +but in other situations it may be. + +You can put anything in the watchdog script. In my case, I like to monitor the +size of my catalog to be sure that {\bf Bacula} is really pruning it. The +following is my watchdog script: + +\footnotesize +\begin{verbatim} +#!/bin/sh +cd /home/kern/mysql/var/bacula +du . * | +/home/kern/bacula/bin/bsmtp \ + -f "\(Bacula\) abuse@whitehouse.com" -h mail.yyyy.com \ + -s "Bacula running" abuse@whitehouse.com +\end{verbatim} +\normalsize + +If you just wish to send yourself a message, you can do it with: + +\footnotesize +\begin{verbatim} +#!/bin/sh +cd /home/kern/mysql/var/bacula +/home/kern/bacula/bin/bsmtp \ + -f "\(Bacula\) abuse@whitehouse.com" -h mail.yyyy.com \ + -s "Bacula running" abuse@whitehouse.com </volume-list + exit 0 +\end{verbatim} +\normalsize + +so that the whole case looks like: + +\footnotesize +\begin{verbatim} + list) +# +# commented out lines + cat /volume-list + exit 0 + ;; +\end{verbatim} +\normalsize + +where you replace \lt{}absolute-path\gt{} with the full path to the +volume-list file. Then using the console, you enter the following command: + +\footnotesize +\begin{verbatim} + label barcodes +\end{verbatim} +\normalsize + +and Bacula will proceed to mount the autochanger Volumes in the list and label +them with the Volume names you have supplied. 
Bacula will think that the list +was provided by the autochanger barcodes, but in reality, it was you who +supplied the \lt{}barcodes\gt{}. + +If it seems to work, when it finishes, enter: + +\footnotesize +\begin{verbatim} + list volumes +\end{verbatim} +\normalsize + +and you should see all the volumes nicely created. + +\section{Backing Up Portables Using DHCP} +\label{DNS} +\index[general]{DHCP!Backing Up Portables Using } +\index[general]{Backing Up Portables Using DHCP } + +You may want to backup laptops or portables that are not always connected to +the network. If you are using DHCP to assign an IP address to those machines +when they connect, you will need to use the Dynamic Update capability of DNS +to assign a name to those machines that can be used in the Address field of +the Client resource in the Director's conf file. + +\section{Going on Vacation} +\label{Vacation} +\index[general]{Vacation!Going on } +\index[general]{Going on Vacation } + +At some point, you may want to be absent for a week or two and you want to +make sure Bacula has enough tape left so that the backups will complete. 
You +start by doing a {\bf list volumes} in the Console program: + +\footnotesize +\begin{verbatim} +list volumes + +Using default Catalog name=BackupDB DB=bacula +Pool: Default ++---------+---------------+-----------+-----------+----------------+- +| MediaId | VolumeName | MediaType | VolStatus | VolBytes | ++---------+---------------+-----------+-----------+----------------+- +| 23 | DLT-30Nov02 | DLT8000 | Full | 54,739,278,128 | +| 24 | DLT-21Dec02 | DLT8000 | Full | 56,331,524,629 | +| 25 | DLT-11Jan03 | DLT8000 | Full | 67,863,514,895 | +| 26 | DLT-02Feb03 | DLT8000 | Full | 63,439,314,216 | +| 27 | DLT-03Mar03 | DLT8000 | Full | 66,022,754,598 | +| 28 | DLT-04Apr03 | DLT8000 | Full | 60,792,559,924 | +| 29 | DLT-28Apr03 | DLT8000 | Full | 62,072,494,063 | +| 30 | DLT-17May03 | DLT8000 | Full | 65,901,767,839 | +| 31 | DLT-07Jun03 | DLT8000 | Used | 56,558,490,015 | +| 32 | DLT-28Jun03 | DLT8000 | Full | 64,274,871,265 | +| 33 | DLT-19Jul03 | DLT8000 | Full | 64,648,749,480 | +| 34 | DLT-08Aug03 | DLT8000 | Full | 64,293,941,255 | +| 35 | DLT-24Aug03 | DLT8000 | Append | 9,999,216,782 | ++---------+---------------+-----------+-----------+----------------+ +\end{verbatim} +\normalsize + +Note, I have truncated the output for presentation purposes. What is +significant, is that I can see that my current tape has almost 10 Gbytes of +data, and that the average amount of data I get on my tapes is about 60 +Gbytes. So if I go on vacation now, I don't need to worry about tape capacity +(at least not for short absences). + +Equally significant is the fact that I did go on vacation the 28th of June +2003, and when I did the {\bf list volumes} command, my current tape at that +time, DLT-07Jun03 MediaId 31, had 56.5 Gbytes written. I could see that the +tape would fill shortly. 
Consequently, I manually marked it as {\bf Used} and +replaced it with a fresh tape that I labeled as DLT-28Jun03, thus assuring +myself that the backups would all complete without my intervention. + +\section{Exclude Files on Windows Regardless of Case} +\label{Case} +\index[general]{Exclude Files on Windows Regardless of Case} +% TODO: should this be put in the win32 chapter? +% TODO: should all these tips be placed in other chapters? + +This tip was submitted by Marc Brueckner who wasn't sure of the case of some +of his files on Win32, which is case insensitive. The problem is that Bacula +thinks that {\bf /UNIMPORTANT FILES} is different from {\bf /Unimportant +Files}. Marc was aware that the file exclusion permits wild-cards. So, he +specified: + +\footnotesize +\begin{verbatim} +"/[Uu][Nn][Ii][Mm][Pp][Oo][Rr][Tt][Aa][Nn][Tt] [Ff][Ii][Ll][Ee][Ss]" +\end{verbatim} +\normalsize + +As a consequence, the above exclude works for files of any case. + +Please note that this works only in Bacula Exclude statement and not in +Include. + +\section{Executing Scripts on a Remote Machine} +\label{RemoteExecution} +\index[general]{Machine!Executing Scripts on a Remote } +\index[general]{Executing Scripts on a Remote Machine } + +This tip also comes from Marc Brueckner. (Note, this tip is probably outdated +by the addition of {\bf ClientRunBeforeJob} and {\bf ClientRunAfterJob} Job +records, but the technique still could be useful.) First I thought the "Run +Before Job" statement in the Job-resource is for executing a script on the +remote machine (the machine to be backed up). (Note, this is possible as mentioned +above by using {\bf ClientRunBeforeJob} and {\bf ClientRunAfterJob}). +It could be useful to execute +scripts on the remote machine e.g. for stopping databases or other services +while doing the backup. 
(Of course I have to start the services again when the +backup has finished) I found the following solution: Bacula could execute +scripts on the remote machine by using ssh. The authentication is done +automatically using a private key. First you have to generate a keypair. I've +done this by: + +\footnotesize +\begin{verbatim} +ssh-keygen -b 4096 -t dsa -f Bacula_key +\end{verbatim} +\normalsize + +This statement may take a little time to run. It creates a public/private key +pair with no passphrase. You could save the keys in /etc/bacula. Now you have +two new files : Bacula\_key which contains the private key and Bacula\_key.pub +which contains the public key. + +Now you have to append the Bacula\_key.pub file to the file authorized\_keys +in the \textbackslash{}root\textbackslash{}.ssh directory of the remote +machine. Then you have to add (or uncomment) the line + +\footnotesize +\begin{verbatim} +AuthorizedKeysFile %h/.ssh/authorized_keys +\end{verbatim} +\normalsize + +to the sshd\_config file on the remote machine. Where the \%h stands for the +home-directory of the user (root in this case). + +Assuming that your sshd is already running on the remote machine, you can now +enter the following on the machine where Bacula runs: + +\footnotesize +\begin{verbatim} +ssh -i Bacula_key -l root "ls -la" +\end{verbatim} +\normalsize + +This should execute the "ls -la" command on the remote machine. + +Now you could add lines like the following to your Director's conf file: + +\footnotesize +\begin{verbatim} +... +Run Before Job = ssh -i /etc/bacula/Bacula_key 192.168.1.1 \ + "/etc/init.d/database stop" +Run After Job = ssh -i /etc/bacula/Bacula_key 192.168.1.1 \ + "/etc/init.d/database start" +... +\end{verbatim} +\normalsize + +Even though Bacula version 1.32 and later has a ClientRunBeforeJob, the ssh method still +could be useful for updating all the Bacula clients on several remote machines +in a single script. 
+ +\section{Recycling All Your Volumes} +\label{recycle} +\index[general]{Recycling All Your Volumes } +\index[general]{Volumes!Recycling All Your } + +This tip comes from Phil Stracchino. + +If you decide to blow away your catalog and start over, the simplest way to +re-add all your prelabeled tapes with a minimum of fuss (provided you don't +care about the data on the tapes) is to add the tape labels using the console +{\bf add} command, then go into the catalog and manually set the VolStatus of +every tape to {\bf Recycle}. + +The SQL command to do this is very simple, either use your vendor's +command line interface (mysql, postgres, sqlite, ...) or use the sql +command in the Bacula console: + +\footnotesize +\begin{verbatim} +update Media set VolStatus='Recycle'; +\end{verbatim} +\normalsize + +Bacula will then ignore the data already stored on the tapes and just re-use +each tape without further objection. + +\section{Backing up ACLs on ext3 or XFS filesystems} +\label{ACLs} +\index[general]{Filesystems!Backing up ACLs on ext3 or XFS } +\index[general]{Backing up ACLs on ext3 or XFS filesystems } + +This tip comes from Volker Sauer. + +Note, this tip was given prior to implementation of ACLs in Bacula (version +1.34.5). It is left here because dumping/displaying ACLs can still be useful +in testing/verifying that Bacula is backing up and restoring your ACLs +properly. Please see the +\ilink{aclsupport}{ACLSupport} FileSet option in the +configuration chapter of this manual. + +For example, you could dump the ACLs to a file with a script similar to the +following: + +\footnotesize +\begin{verbatim} +#!/bin/sh +BACKUP_DIRS="/foo /bar" +STORE_ACL=/root/acl-backup +umask 077 +for i in $BACKUP_DIRS; do + cd $i /usr/bin/getfacl -R --skip-base .>$STORE_ACL/${i//\//_} +done +\end{verbatim} +\normalsize + +Then use Bacula to backup {\bf /root/acl-backup}. 
+ +The ACLs could be restored using Bacula to the {\bf /root/acl-backup} file, +then restored to your system using: + +\footnotesize +\begin{verbatim} +setfacl --restore/root/acl-backup +\end{verbatim} +\normalsize + +\section{Total Automation of Bacula Tape Handling} +\label{automate} +\index[general]{Handling!Total Automation of Bacula Tape } +\index[general]{Total Automation of Bacula Tape Handling } + +This tip was provided by Alexander Kuehn. + +\elink{Bacula}{\url{http://www.bacula.org/}} is a really nice backup program except +that the manual tape changing requires user interaction with the bacula +console. + +Fortunately I can fix this. +NOTE!!! This suggestion applies for people who do *NOT* have tape autochangers +and must change tapes manually.!!!!! + +Bacula supports a variety of tape changers through the use of mtx-changer +scripts/programs. This highly flexible approach allowed me to create +\elink{this shell script}{\url{http://www.bacula.org/rel-manual/mtx-changer.txt}} which does the following: +% TODO: We need to include this in book appendix and point to it. +% TODO: +Whenever a new tape is required it sends a mail to the operator to insert the +new tape. Then it waits until a tape has been inserted, sends a mail again to +say thank you and let's bacula continue its backup. +So you can schedule and run backups without ever having to log on or see the +console. +To make the whole thing work you need to create a Device resource which looks +something like this ("Archive Device", "Maximum Changer Wait", "Media +Type" and "Label media" may have different values): + +\footnotesize +\begin{verbatim} +Device { + Name=DDS3 + Archive Device = # use yours not mine! ;)/dev/nsa0 + Changer Device = # not really required/dev/nsa0 + Changer Command = "# use this (maybe change the path)! 
+ /usr/local/bin/mtx-changer %o %a %S" + Maximum Changer Wait = 3d # 3 days in seconds + AutomaticMount = yes; # mount on start + AlwaysOpen = yes; # keep device locked + Media Type = DDS3 # it's just a name + RemovableMedia = yes; # + Offline On Unmount = Yes; # keep this too + Label media = Yes; # +} +\end{verbatim} +\normalsize + +As the script has to emulate the complete wisdom of a mtx-changer it has an +internal "database" containing where which tape is stored, you can see this on +the following line: + +\footnotesize +\begin{verbatim} +labels="VOL-0001 VOL-0002 VOL-0003 VOL-0004 VOL-0005 VOL-0006 +VOL-0007 VOL-0008 VOL-0009 VOL-0010 VOL-0011 VOL-0012" +\end{verbatim} +\normalsize + +The above should be all on one line, and it effectively tells Bacula that +volume "VOL-0001" is located in slot 1 (which is our lowest slot), that +volume "VOL-0002" is located in slot 2 and so on.. +The script also maintains a logfile (/var/log/mtx.log) where you can monitor +its operation. + +\section{Running Concurrent Jobs} +\label{ConcurrentJobs} +\index[general]{Jobs!Running Concurrent} +\index[general]{Running Concurrent Jobs} +\index[general]{Concurrent Jobs} + +Bacula can run multiple concurrent jobs, but the default configuration files +do not enable it. Using the {\bf Maximum Concurrent Jobs} directive, you +can configure how many and which jobs can be run simultaneously. +The Director's default value for {\bf Maximum Concurrent Jobs} is "1". + +To initially setup concurrent jobs you need to define {\bf Maximum Concurrent Jobs} in +the Director's configuration file (bacula-dir.conf) in the +Director, Job, Client, and Storage resources. + +Additionally the File daemon, and the Storage daemon each have their own +{\bf Maximum Concurrent Jobs} directive that sets the overall maximum +number of concurrent jobs the daemon will run. The default for both the +File daemon and the Storage daemon is "20". 
+ +For example, if you want two different jobs to run simultaneously backing up +the same Client to the same Storage device, they will run concurrently only if +you have set {\bf Maximum Concurrent Jobs} greater than one in the Director +resource, the Client resource, and the Storage resource in bacula-dir.conf. + +We recommend that you read the \ilink{Data +Spooling}{SpoolingChapter} of this manual first, then test your multiple +concurrent backup including restore testing before you put it into +production. + +Below is a super stripped down bacula-dir.conf file showing you the four +places where the the file must be modified to allow the same job {\bf +NightlySave} to run up to four times concurrently. The change to the Job +resource is not necessary if you want different Jobs to run at the same time, +which is the normal case. + +\footnotesize +\begin{verbatim} +# +# Bacula Director Configuration file -- bacula-dir.conf +# +Director { + Name = rufus-dir + Maximum Concurrent Jobs = 4 + ... +} +Job { + Name = "NightlySave" + Maximum Concurrent Jobs = 4 + Client = rufus-fd + Storage = File + ... +} +Client { + Name = rufus-fd + Maximum Concurrent Jobs = 4 + ... +} +Storage { + Name = File + Maximum Concurrent Jobs = 4 + ... +} +\end{verbatim} +\normalsize diff --git a/docs/manuals/en/problems/translate_images.pl b/docs/manuals/en/problems/translate_images.pl new file mode 100755 index 00000000..c7225118 --- /dev/null +++ b/docs/manuals/en/problems/translate_images.pl @@ -0,0 +1,185 @@ +#!/usr/bin/perl -w +# +use strict; + +# Used to change the names of the image files generated by latex2html from imgxx.png +# to meaningful names. Provision is made to go either from or to the meaningful names. +# The meaningful names are obtained from a file called imagename_translations, which +# is generated by extensions to latex2html in the make_image_file subroutine in +# bacula.perl. + +# Opens the file imagename_translations and reads the contents into a hash. 
+ +# The hash is created with the imgxx.png files as the key if processing TO +# meaningful filenames, and with the meaningful filenames as the key if +# processing FROM meaningful filenames. +# Then opens the html file(s) indicated in the command-line arguments and +# changes all image references according to the translations described in the +# above file. Finally, it renames the image files. +# +# Original creation: 3-27-05 by Karl Cunningham. +# Modified 5-21-05 to go FROM and TO meaningful filenames. +# +my $TRANSFILE = "imagename_translations"; +my $path; + +# Loads the contents of $TRANSFILE file into the hash referenced in the first +# argument. The hash is loaded to translate old to new if $direction is 0, +# otherwise it is loaded to translate new to old. In this context, the +# 'old' filename is the meaningful name, and the 'new' filename is the +# imgxx.png filename. It is assumed that the old image is the one that +# latex2html has used as the source to create the imgxx.png filename. +# The filename extension is taken from the file +sub read_transfile { + my ($trans,$direction) = @_; + + if (!open IN,"<$path$TRANSFILE") { + print "WARNING: Cannot open image translation file $path$TRANSFILE for reading\n"; + print " Image filename translation aborted\n\n"; + exit 0; + } + + while () { + chomp; + my ($new,$old) = split(/\001/); + + # Old filenames will usually have a leading ./ which we don't need. + $old =~ s/^\.\///; + + # The filename extension of the old filename must be made to match + # the new filename because it indicates the encoding format of the image. + my ($ext) = $new =~ /(\.[^\.]*)$/; + $old =~ s/\.[^\.]*$/$ext/; + if ($direction == 0) { + $trans->{$new} = $old; + } else { + $trans->{$old} = $new; + } + } + close IN; +} + +# Translates the image names in the file given as the first argument, according to +# the translations in the hash that is given as the second argument. 
+# The file contents are read in entirely into a string, the string is processed, and +# the file contents are then written. No particular care is taken to ensure that the +# file is not lost if a system failure occurs at an inopportune time. It is assumed +# that the html files being processed here can be recreated on demand. +# +# Links to other files are added to the %filelist for processing. That way, +# all linked files will be processed (assuming they are local). +sub translate_html { + my ($filename,$trans,$filelist) = @_; + my ($contents,$out,$this,$img,$dest); + my $cnt = 0; + + # If the filename is an external link ignore it. And drop any file:// from + # the filename. + $filename =~ /^(http|ftp|mailto)\:/ and return 0; + $filename =~ s/^file\:\/\///; + # Load the contents of the html file. + if (!open IF,"<$path$filename") { + print "WARNING: Cannot open $path$filename for reading\n"; + print " Image Filename Translation aborted\n\n"; + exit 0; + } + + while () { + $contents .= $_; + } + close IF; + + # Now do the translation... + # First, search for an image filename. + while ($contents =~ /\<\s*IMG[^\>]*SRC=\"/si) { + $contents = $'; + $out .= $` . $&; + + # The next thing is an image name. Get it and translate it. + $contents =~ /^(.*?)\"/s; + $contents = $'; + $this = $&; + $img = $1; + # If the image is in our list of ones to be translated, do it + # and feed the result to the output. + $cnt += $this =~ s/$img/$trans->{$img}/ if (defined($trans->{$img})); + $out .= $this; + } + $out .= $contents; + + # Now send the translated text to the html file, overwriting what's there. + open OF,">$path$filename" or die "Cannot open $path$filename for writing\n"; + print OF $out; + close OF; + + # Now look for any links to other files and add them to the list of files to do. + while ($out =~ /\<\s*A[^\>]*HREF=\"(.*?)\"/si) { + $out = $'; + $dest = $1; + # Drop an # and anything after it. 
+ + $dest =~ s/\#.*//; + $filelist->{$dest} = '' if $dest; + } + return $cnt; +} + +# Renames the image files specified in the %translate hash. +sub rename_images { + my $translate = shift; + my ($response); + + foreach (keys(%$translate)) { + if (! $translate->{$_}) { + print " WARNING: No destination Filename for $_\n"; + } else { + $response = `mv -f $path$_ $path$translate->{$_} 2>&1`; + $response and print "ERROR from system $response\n"; + } + } +} + +################################################# +############# MAIN ############################# +################################################ + +# %filelist starts out with keys from the @ARGV list. As files are processed, +# any links to other files are added to the %filelist. A hash of processed +# files is kept so we don't do any twice. + +# The first argument must be either --to_meaningful_names or --from_meaningful_names + +my (%translate,$search_regex,%filelist,%completed,$thisfile); +my ($cnt,$direction); + +my $arg0 = shift(@ARGV); +$arg0 =~ /^(--to_meaningful_names|--from_meaningful_names)$/ or + die "ERROR: First argument must be either \'--to_meaningful_names\' or \'--from_meaningful_names\'\n"; + +$direction = ($arg0 eq '--to_meaningful_names') ? 0 : 1; + +(@ARGV) or die "ERROR: Filename(s) to process must be given as arguments\n"; + +# Use the first argument to get the path to the file of translations. +my $tmp = $ARGV[0]; +($path) = $tmp =~ /(.*\/)/; +$path = '' unless $path; + +read_transfile(\%translate,$direction); + +foreach (@ARGV) { + # Strip the path from the filename, and use it later on. 
+ if (s/(.*\/)//) { + $path = $1; + } else { + $path = ''; + } + $filelist{$_} = ''; + + while ($thisfile = (keys(%filelist))[0]) { + $cnt += translate_html($thisfile,\%translate,\%filelist) if (!exists($completed{$thisfile})); + delete($filelist{$thisfile}); + $completed{$thisfile} = ''; + } + print "translate_images.pl: $cnt image filenames translated ",($direction)?"from":"to"," meaningful names\n"; +} + +rename_images(\%translate); diff --git a/docs/manuals/en/problems/update_version b/docs/manuals/en/problems/update_version new file mode 100755 index 00000000..5c2e0092 --- /dev/null +++ b/docs/manuals/en/problems/update_version @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +. ./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/en/problems/update_version.in b/docs/manuals/en/problems/update_version.in new file mode 100644 index 00000000..2766245f --- /dev/null +++ b/docs/manuals/en/problems/update_version.in @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +. 
./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/en/problems/version.tex.in b/docs/manuals/en/problems/version.tex.in new file mode 100644 index 00000000..ff66dfc6 --- /dev/null +++ b/docs/manuals/en/problems/version.tex.in @@ -0,0 +1 @@ +@VERSION@ (@DATE@) diff --git a/docs/manuals/en/utility/Makefile.in b/docs/manuals/en/utility/Makefile.in new file mode 100644 index 00000000..7136d1b6 --- /dev/null +++ b/docs/manuals/en/utility/Makefile.in @@ -0,0 +1,135 @@ +# +# +# Makefile for LaTeX +# +# To build everything do +# make tex +# make web +# make html +# make dvipdf +# +# or simply +# +# make +# +# for rapid development do: +# make tex +# make show +# +# +# If you are having problems getting "make" to work, debugging it is +# easier if can see the output from latex, which is normally redirected +# to /dev/null. To see it, do the following: +# +# cd docs/manual +# make tex +# latex bacula.tex +# +# typically the latex command will stop indicating the error (e.g. a +# missing \ in front of a _ or a missing { or ] ... +# +# The following characters must be preceded by a backslash +# to be entered as printable characters: +# +# # $ % & ~ _ ^ \ { } +# + +IMAGES=../../../images + +DOC=utility + +first_rule: all + +all: tex web dvipdf mini-clean + +.SUFFIXES: .tex .html +.PHONY: +.DONTCARE: + + +tex: + @./update_version + @echo "Making version `cat version.tex`" + @cp -fp ${IMAGES}/hires/*.eps . + @touch ${DOC}i-dir.tex ${DOC}i-fd.tex ${DOC}i-sd.tex \ + ${DOC}i-console.tex ${DOC}i-general.tex + latex -interaction=batchmode ${DOC}.tex + makeindex ${DOC}.idx -o ${DOC}.ind 2>/dev/null + latex -interaction=batchmode ${DOC}.tex + +pdf: + @echo "Making pdfm" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdfm -p a4 ${DOC}.dvi + +dvipdf: + @echo "Making dvi to pdf" + @cp -fp ${IMAGES}/hires/*.eps . + dvipdf ${DOC}.dvi ${DOC}.pdf + +html: + @echo " " + @echo "Making html" + @cp -fp ${IMAGES}/*.eps . 
+ @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @(if [ -f imagename_translations ] ; then \ + ./translate_images.pl --from_meaningful_names ${DOC}.html; \ + fi) + latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \ + -init_file latex2html-init.pl ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}.html + @echo "Done making html" + +web: + @echo "Making web" + @mkdir -p ${DOC} + @cp -fp ${IMAGES}/*.eps . + @rm -f next.eps next.png prev.eps prev.png up.eps up.png + @cp -fp ${IMAGES}/*.eps ${DOC}/ + @cp -fp ${IMAGES}/*.eps ${IMAGES}/*.png ${DOC}/ + @rm -f ${DOC}/xp-*.png + @rm -f ${DOC}/next.eps ${DOC}/next.png ${DOC}/prev.eps ${DOC}/prev.png ${DOC}/up.eps ${DOC}/up.png + @rm -rf ${DOC}/*.html + latex2html -split 3 -local_icons -t "Bacula Utility Programs" -long_titles 4 \ + -toc_stars -contents_in_nav -init_file latex2html-init.pl -white -notransparent ${DOC} >tex.out 2>&1 + ./translate_images.pl --to_meaningful_names ${DOC}/Bacula_Utilit*.html + @echo "Done making web" +show: + xdvi ${DOC} + +texcheck: + ./check_tex.pl ${DOC}.tex + +main_configs: + pic2graph -density 100 main_configs.png + +mini-clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.gif *.jpg *.eps + @rm -f *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.backup *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd + @rm -f *.dnd *.old *.out + @rm -f ${DOC}/*.gif ${DOC}/*.jpg ${DOC}/*.eps + @rm -f ${DOC}/*.aux ${DOC}/*.cp ${DOC}/*.fn ${DOC}/*.ky ${DOC}/*.log ${DOC}/*.pg + @rm -f ${DOC}/*.backup ${DOC}/*.ilg ${DOC}/*.lof ${DOC}/*.lot + @rm -f ${DOC}/*.cdx ${DOC}/*.cnd ${DOC}/*.ddx ${DOC}/*.ddn ${DOC}/*.fdx ${DOC}/*.fnd ${DOC}/*.ind ${DOC}/*.sdx ${DOC}/*.snd + @rm -f ${DOC}/*.dnd ${DOC}/*.old ${DOC}/*.out + @rm -f ${DOC}/WARNINGS + + +clean: + @rm -f 1 2 3 *.tex~ + @rm -f *.png *.gif *.jpg *.eps + @rm -f *.pdf *.aux *.cp *.fn *.ky *.log *.pg + @rm -f *.html *.backup *.ps *.dvi *.ilg *.lof *.lot + @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd 
*.ind *.sdx *.snd + @rm -f *.dnd imagename_translations + @rm -f *.old WARNINGS *.out *.toc *.idx + @rm -f ${DOC}i-*.tex + @rm -rf ${DOC} + + +distclean: clean + @rm -f images.pl labels.pl internals.pl + @rm -f Makefile version.tex diff --git a/docs/manuals/en/utility/bimagemgr-chapter.tex b/docs/manuals/en/utility/bimagemgr-chapter.tex new file mode 100644 index 00000000..01157f84 --- /dev/null +++ b/docs/manuals/en/utility/bimagemgr-chapter.tex @@ -0,0 +1,155 @@ +%% +%% +%% The following characters must be preceded by a backslash +%% to be entered as printable characters: +%% +%% # $ % & ~ _ ^ \ { } +%% + +\section{bimagemgr} +\label{bimagemgr} +\index[general]{Bimagemgr } + +{\bf bimagemgr} is a utility for those who backup to disk volumes in order to +commit them to CDR disk, rather than tapes. It is a web based interface +written in Perl and is used to monitor when a volume file needs to be burned to +disk. It requires: + +\begin{itemize} +\item A web server running on the bacula server +\item A CD recorder installed and configured on the bacula server +\item The cdrtools package installed on the bacula server. +\item perl, perl-DBI module, and either DBD-MySQL DBD-SQLite or DBD-PostgreSQL modules + \end{itemize} + +DVD burning is not supported by {\bf bimagemgr} at this +time, but both are planned for future releases. + +\subsection{bimagemgr installation} +\index[general]{bimagemgr!Installation } +\index[general]{bimagemgr Installation } + +Installation from tarball: +% TODO: use itemized list for this? +1. Examine the Makefile and adjust it to your configuration if needed. +2. Edit config.pm to fit your configuration. +3. Do 'make install' as root. +4. Edit httpd.conf and change the Timeout value. The web server must not time +out and close the connection before the burn process is finished. The exact +value needed may vary depending upon your cd recorder speed and whether you are +burning on the bacula server on on another machine across your network. 
In my +case I set it to 1000 seconds. Restart httpd. +5. Make sure that cdrecord is setuid root. +% TODO: I am pretty sure cdrecord can be used without setuid root +% TODO: as long as devices are setup correctly + +Installation from rpm package: +% TODO: use itemized list for this? +1. Install the rpm package for your platform. +2. Edit /cgi-bin/config.pm to fit your configuration. +3. Edit httpd.conf and change the Timeout value. The web server must not time +out and close the connection before the burn process is finished. The exact +value needed may vary depending upon your cd recorder speed and whether you are +burning on the bacula server on on another machine across your network. In my +case I set it to 1000 seconds. Restart httpd. +4. Make sure that cdrecord is setuid root. + +For bacula systems less than 1.36: +% TODO: use itemized list for this? +1. Edit the configuration section of config.pm to fit your configuration. +2. Run /etc/bacula/create\_cdimage\_table.pl from a console on your bacula +server (as root) to add the CDImage table to your bacula database. + +Accessing the Volume files: +The Volume files by default have permissions 640 and can only be read by root. +The recommended approach to this is as follows (and only works if bimagemgr and +apache are running on the same host as bacula. + +For bacula-1.34 or 1.36 installed from tarball - +% TODO: use itemized list for this? +1. Create a new user group bacula and add the user apache to the group for +Red Hat or Mandrake systems. For SuSE systems add the user wwwrun to the +bacula group. +2. Change ownership of all of your Volume files to root.bacula +3. Edit the /etc/bacula/bacula startup script and set SD\_USER=root and +SD\_GROUP=bacula. Restart bacula. + +Note: step 3 should also be done in /etc/init.d/bacula-sd but released versions +of this file prior to 1.36 do not support it. In that case it would be necessary after +a reboot of the server to execute '/etc/bacula/bacula restart'. 
+ +For bacula-1.38 installed from tarball - +% TODO: use itemized list for this? +1. Your configure statement should include: +% TODO: fix formatting here + --with-dir-user=bacula + --with-dir-group=bacula + --with-sd-user=bacula + --with-sd-group=disk + --with-fd-user=root + --with-fd-group=bacula +2. Add the user apache to the bacula group for Red Hat or Mandrake systems. +For SuSE systems add the user wwwrun to the bacula group. +3. Check/change ownership of all of your Volume files to root.bacula + +For bacula-1.36 or bacula-1.38 installed from rpm - +% TODO: use itemized list for this? +1. Add the user apache to the group bacula for Red Hat or Mandrake systems. +For SuSE systems add the user wwwrun to the bacula group. +2. Check/change ownership of all of your Volume files to root.bacula + +bimagemgr installed from rpm > 1.38.9 will add the web server user to the +bacula group in a post install script. Be sure to edit the configuration +information in config.pm after installation of rpm package. + +bimagemgr will now be able to read the Volume files but they are still not +world readable. + +If you are running bimagemgr on another host (not recommended) then you will +need to change the permissions on all of your backup volume files to 644 in +order to access them via nfs share or other means. This approach should only +be taken if you are sure of the security of your environment as it exposes +the backup Volume files to world read. + +\subsection{bimagemgr usage} +\index[general]{bimagemgr!Usage } +\index[general]{bimagemgr Usage } + +Calling the program in your web browser, e.g. {\tt +http://localhost/cgi-bin/bimagemgr.pl} will produce a display as shown below +% TODO: use tex to say figure number +in Figure 1. The program will query the bacula database and display all volume +files with the date last written and the date last burned to disk. 
If a volume +needs to be burned (last written is newer than last burn date) a "Burn" +button will be displayed in the rightmost column. + +\addcontentsline{lof}{figure}{Bacula CD Image Manager} +\includegraphics{./bimagemgr1.eps} \\Figure 1 +% TODO: use tex to say figure number + +Place a blank CDR disk in your recorder and click the "Burn" button. This will +cause a pop up window as shown in Figure 2 to display the burn progress. +% TODO: use tex to say figure number + +\addcontentsline{lof}{figure}{Bacula CD Image Burn Progress Window} +\includegraphics{./bimagemgr2.eps} \\Figure 2 +% TODO: use tex to say figure number + +When the burn finishes the pop up window will display the results of cdrecord +% TODO: use tex to say figure number +as shown in Figure 3. Close the pop up window and refresh the main window. The +last burn date will be updated and the "Burn" button for that volume will +disappear. Should you have a failed burn you can reset the last burn date of +that volume by clicking its "Reset" link. + +\addcontentsline{lof}{figure}{Bacula CD Image Burn Results} +\includegraphics{./bimagemgr3.eps} \\Figure 3 +% TODO: use tex to say figure number + +In the bottom row of the main display window are two more buttons labeled +"Burn Catalog" and "Blank CDRW". "Burn Catalog" will place a copy of +your bacula catalog on a disk. If you use CDRW disks rather than CDR then +"Blank CDRW" allows you to erase the disk before re-burning it. Regularly +committing your backup volume files and your catalog to disk with {\bf +bimagemgr} ensures that you can rebuild easily in the event of some disaster +on the bacula server itself. diff --git a/docs/manuals/en/utility/check_tex.pl b/docs/manuals/en/utility/check_tex.pl new file mode 100755 index 00000000..e12d51be --- /dev/null +++ b/docs/manuals/en/utility/check_tex.pl @@ -0,0 +1,152 @@ +#!/usr/bin/perl -w +# Finds potential problems in tex files, and issues warnings to the console +# about what it finds. 
Takes a list of files as its only arguments, +# and does checks on all the files listed. The assumption is that these are +# valid (or close to valid) LaTeX files. It follows \include statements +# recursively to pick up any included tex files. +# +# +# +# Currently the following checks are made: +# +# -- Multiple hyphens not inside a verbatim environment (or \verb). These +# should be placed inside a \verb{} construct so they will not be converted +# to single hyphen by latex and latex2html. + + +# Original creation 3-8-05 by Karl Cunningham karlc -at- keckec -dot- com +# +# + +use strict; + +# The following builds the test string to identify and change multiple +# hyphens in the tex files. Several constructs are identified but only +# multiple hyphens are changed; the others are fed to the output +# unchanged. +my $b = '\\\\begin\\*?\\s*\\{\\s*'; # \begin{ +my $e = '\\\\end\\*?\\s*\\{\\s*'; # \end{ +my $c = '\\s*\\}'; # closing curly brace + +# This captures entire verbatim environments. These are passed to the output +# file unchanged. +my $verbatimenv = $b . "verbatim" . $c . ".*?" . $e . "verbatim" . $c; + +# This captures \verb{..{ constructs. They are passed to the output unchanged. +my $verb = '\\\\verb\\*?(.).*?\\1'; + +# This captures multiple hyphens with a leading and trailing space. These are not changed. +my $hyphsp = '\\s\\-{2,}\\s'; + +# This identifies other multiple hyphens. +my $hyphens = '\\-{2,}'; + +# This identifies \hyperpage{..} commands, which should be ignored. +my $hyperpage = '\\\\hyperpage\\*?\\{.*?\\}'; + +# This builds the actual test string from the above strings. +#my $teststr = "$verbatimenv|$verb|$tocentry|$hyphens"; +my $teststr = "$verbatimenv|$verb|$hyphsp|$hyperpage|$hyphens"; + + +sub get_includes { + # Get a list of include files from the top-level tex file. The first + # argument is a pointer to the list of files found. The rest of the + # arguments is a list of filenames to check for includes. 
+ my $files = shift; + my ($fileline,$includefile,$includes); + + while (my $filename = shift) { + # Get a list of all the html files in the directory. + open my $if,"<$filename" or die "Cannot open input file $filename\n"; + $fileline = 0; + $includes = 0; + while (<$if>) { + chomp; + $fileline++; + # If a file is found in an include, process it. + if (($includefile) = /\\include\s*\{(.*?)\}/) { + $includes++; + # Append .tex to the filename + $includefile .= '.tex'; + + # If the include file has already been processed, issue a warning + # and don't do it again. + my $found = 0; + foreach (@$files) { + if ($_ eq $includefile) { + $found = 1; + last; + } + } + if ($found) { + print "$includefile found at line $fileline in $filename was previously included\n"; + } else { + # The file has not been previously found. Save it and + # recursively process it. + push (@$files,$includefile); + get_includes($files,$includefile); + } + } + } + close IF; + } +} + + +sub check_hyphens { + my (@files) = @_; + my ($filedata,$this,$linecnt,$before); + + # Build the test string to check for the various environments. + # We only do the conversion if the multiple hyphens are outside of a + # verbatim environment (either \begin{verbatim}...\end{verbatim} or + # \verb{--}). Capture those environments and pass them to the output + # unchanged. + + foreach my $file (@files) { + # Open the file and load the whole thing into $filedata. A bit wasteful but + # easier to deal with, and we don't have a problem with speed here. + $filedata = ""; + open IF,"<$file" or die "Cannot open input file $file"; + while () { + $filedata .= $_; + } + close IF; + + # Set up to process the file data. + $linecnt = 1; + + # Go through the file data from beginning to end. For each match, save what + # came before it and what matched. $filedata now becomes only what came + # after the match. + # Chech the match to see if it starts with a multiple-hyphen. If so + # warn the user. 
Keep track of line numbers so they can be output + # with the warning message. + while ($filedata =~ /$teststr/os) { + $this = $&; + $before = $`; + $filedata = $'; + $linecnt += $before =~ tr/\n/\n/; + + # Check if the multiple hyphen is present outside of one of the + # acceptable constructs. + if ($this =~ /^\-+/) { + print "Possible unwanted multiple hyphen found in line ", + "$linecnt of file $file\n"; + } + $linecnt += $this =~ tr/\n/\n/; + } + } +} +################################################################## +# MAIN #### +################################################################## + +my (@includes,$cnt); + +# Examine the file pointed to by the first argument to get a list of +# includes to test. +get_includes(\@includes,@ARGV); + +check_hyphens(@includes); diff --git a/docs/manuals/en/utility/do_echo b/docs/manuals/en/utility/do_echo new file mode 100644 index 00000000..04b9f79a --- /dev/null +++ b/docs/manuals/en/utility/do_echo @@ -0,0 +1,6 @@ +# +# Avoid that @VERSION@ and @DATE@ are changed by configure +# This file is sourced by update_version +# +echo "s%@VERSION@%${VERSION}%g" >${out} +echo "s%@DATE@%${DATE}%g" >>${out} diff --git a/docs/manuals/en/utility/faq.css b/docs/manuals/en/utility/faq.css new file mode 100644 index 00000000..d1824aff --- /dev/null +++ b/docs/manuals/en/utility/faq.css @@ -0,0 +1,30 @@ +/* Century Schoolbook font is very similar to Computer Modern Math: cmmi */ +.MATH { font-family: "Century Schoolbook", serif; } +.MATH I { font-family: "Century Schoolbook", serif; font-style: italic } +.BOLDMATH { font-family: "Century Schoolbook", serif; font-weight: bold } + +/* implement both fixed-size and relative sizes */ +SMALL.XTINY { font-size : xx-small } +SMALL.TINY { font-size : x-small } +SMALL.SCRIPTSIZE { font-size : smaller } +SMALL.FOOTNOTESIZE { font-size : small } +SMALL.SMALL { } +BIG.LARGE { } +BIG.XLARGE { font-size : large } +BIG.XXLARGE { font-size : x-large } +BIG.HUGE { font-size : larger } +BIG.XHUGE { 
font-size : xx-large } + +/* heading styles */ +H1 { } +H2 { } +H3 { } +H4 { } +H5 { } + +/* mathematics styles */ +DIV.displaymath { } /* math displays */ +TD.eqno { } /* equation-number cells */ + + +/* document-specific styles come next */ diff --git a/docs/manuals/en/utility/fdl.tex b/docs/manuals/en/utility/fdl.tex new file mode 100644 index 00000000..b46cd990 --- /dev/null +++ b/docs/manuals/en/utility/fdl.tex @@ -0,0 +1,485 @@ +% TODO: maybe get rid of centering + +\chapter{GNU Free Documentation License} +\index[general]{GNU Free Documentation License} +\index[general]{License!GNU Free Documentation} + +\label{label_fdl} + + \begin{center} + + Version 1.2, November 2002 + + + Copyright \copyright 2000,2001,2002 Free Software Foundation, Inc. + + \bigskip + + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + \bigskip + + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. +\end{center} + + +\begin{center} +{\bf\large Preamble} +\end{center} + +The purpose of this License is to make a manual, textbook, or other +functional and useful document "free" in the sense of freedom: to +assure everyone the effective freedom to copy and redistribute it, +with or without modifying it, either commercially or noncommercially. +Secondarily, this License preserves for the author and publisher a way +to get credit for their work, while not being considered responsible +for modifications made by others. + +This License is a kind of "copyleft", which means that derivative +works of the document must themselves be free in the same sense. It +complements the GNU General Public License, which is a copyleft +license designed for free software. + +We have designed this License in order to use it for manuals for free +software, because free software needs free documentation: a free +program should come with manuals providing the same freedoms that the +software does. 
But this License is not limited to software manuals; +it can be used for any textual work, regardless of subject matter or +whether it is published as a printed book. We recommend this License +principally for works whose purpose is instruction or reference. + + +\begin{center} +{\Large\bf 1. APPLICABILITY AND DEFINITIONS} +\end{center} + +This License applies to any manual or other work, in any medium, that +contains a notice placed by the copyright holder saying it can be +distributed under the terms of this License. Such a notice grants a +world-wide, royalty-free license, unlimited in duration, to use that +work under the conditions stated herein. The \textbf{"Document"}, below, +refers to any such manual or work. Any member of the public is a +licensee, and is addressed as \textbf{"you"}. You accept the license if you +copy, modify or distribute the work in a way requiring permission +under copyright law. + +A \textbf{"Modified Version"} of the Document means any work containing the +Document or a portion of it, either copied verbatim, or with +modifications and/or translated into another language. + +A \textbf{"Secondary Section"} is a named appendix or a front-matter section of +the Document that deals exclusively with the relationship of the +publishers or authors of the Document to the Document's overall subject +(or to related matters) and contains nothing that could fall directly +within that overall subject. (Thus, if the Document is in part a +textbook of mathematics, a Secondary Section may not explain any +mathematics.) The relationship could be a matter of historical +connection with the subject or with related matters, or of legal, +commercial, philosophical, ethical or political position regarding +them. + +The \textbf{"Invariant Sections"} are certain Secondary Sections whose titles +are designated, as being those of Invariant Sections, in the notice +that says that the Document is released under this License. 
If a +section does not fit the above definition of Secondary then it is not +allowed to be designated as Invariant. The Document may contain zero +Invariant Sections. If the Document does not identify any Invariant +Sections then there are none. + +The \textbf{"Cover Texts"} are certain short passages of text that are listed, +as Front-Cover Texts or Back-Cover Texts, in the notice that says that +the Document is released under this License. A Front-Cover Text may +be at most 5 words, and a Back-Cover Text may be at most 25 words. + +A \textbf{"Transparent"} copy of the Document means a machine-readable copy, +represented in a format whose specification is available to the +general public, that is suitable for revising the document +straightforwardly with generic text editors or (for images composed of +pixels) generic paint programs or (for drawings) some widely available +drawing editor, and that is suitable for input to text formatters or +for automatic translation to a variety of formats suitable for input +to text formatters. A copy made in an otherwise Transparent file +format whose markup, or absence of markup, has been arranged to thwart +or discourage subsequent modification by readers is not Transparent. +An image format is not Transparent if used for any substantial amount +of text. A copy that is not "Transparent" is called \textbf{"Opaque"}. + +Examples of suitable formats for Transparent copies include plain +ASCII without markup, Texinfo input format, LaTeX input format, SGML +or XML using a publicly available DTD, and standard-conforming simple +HTML, PostScript or PDF designed for human modification. Examples of +transparent image formats include PNG, XCF and JPG. 
Opaque formats +include proprietary formats that can be read and edited only by +proprietary word processors, SGML or XML for which the DTD and/or +processing tools are not generally available, and the +machine-generated HTML, PostScript or PDF produced by some word +processors for output purposes only. + +The \textbf{"Title Page"} means, for a printed book, the title page itself, +plus such following pages as are needed to hold, legibly, the material +this License requires to appear in the title page. For works in +formats which do not have any title page as such, "Title Page" means +the text near the most prominent appearance of the work's title, +preceding the beginning of the body of the text. + +A section \textbf{"Entitled XYZ"} means a named subunit of the Document whose +title either is precisely XYZ or contains XYZ in parentheses following +text that translates XYZ in another language. (Here XYZ stands for a +specific section name mentioned below, such as \textbf{"Acknowledgements"}, +\textbf{"Dedications"}, \textbf{"Endorsements"}, or \textbf{"History"}.) +To \textbf{"Preserve the Title"} +of such a section when you modify the Document means that it remains a +section "Entitled XYZ" according to this definition. + +The Document may include Warranty Disclaimers next to the notice which +states that this License applies to the Document. These Warranty +Disclaimers are considered to be included by reference in this +License, but only as regards disclaiming warranties: any other +implication that these Warranty Disclaimers may have is void and has +no effect on the meaning of this License. + + +\begin{center} +{\Large\bf 2. 
VERBATIM COPYING} +\end{center} + +You may copy and distribute the Document in any medium, either +commercially or noncommercially, provided that this License, the +copyright notices, and the license notice saying this License applies +to the Document are reproduced in all copies, and that you add no other +conditions whatsoever to those of this License. You may not use +technical measures to obstruct or control the reading or further +copying of the copies you make or distribute. However, you may accept +compensation in exchange for copies. If you distribute a large enough +number of copies you must also follow the conditions in section 3. + +You may also lend copies, under the same conditions stated above, and +you may publicly display copies. + + +\begin{center} +{\Large\bf 3. COPYING IN QUANTITY} +\end{center} + + +If you publish printed copies (or copies in media that commonly have +printed covers) of the Document, numbering more than 100, and the +Document's license notice requires Cover Texts, you must enclose the +copies in covers that carry, clearly and legibly, all these Cover +Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on +the back cover. Both covers must also clearly and legibly identify +you as the publisher of these copies. The front cover must present +the full title with all words of the title equally prominent and +visible. You may add other material on the covers in addition. +Copying with changes limited to the covers, as long as they preserve +the title of the Document and satisfy these conditions, can be treated +as verbatim copying in other respects. + +If the required texts for either cover are too voluminous to fit +legibly, you should put the first ones listed (as many as fit +reasonably) on the actual cover, and continue the rest onto adjacent +pages. 
+ +If you publish or distribute Opaque copies of the Document numbering +more than 100, you must either include a machine-readable Transparent +copy along with each Opaque copy, or state in or with each Opaque copy +a computer-network location from which the general network-using +public has access to download using public-standard network protocols +a complete Transparent copy of the Document, free of added material. +If you use the latter option, you must take reasonably prudent steps, +when you begin distribution of Opaque copies in quantity, to ensure +that this Transparent copy will remain thus accessible at the stated +location until at least one year after the last time you distribute an +Opaque copy (directly or through your agents or retailers) of that +edition to the public. + +It is requested, but not required, that you contact the authors of the +Document well before redistributing any large number of copies, to give +them a chance to provide you with an updated version of the Document. + + +\begin{center} +{\Large\bf 4. MODIFICATIONS} +\end{center} + +You may copy and distribute a Modified Version of the Document under +the conditions of sections 2 and 3 above, provided that you release +the Modified Version under precisely this License, with the Modified +Version filling the role of the Document, thus licensing distribution +and modification of the Modified Version to whoever possesses a copy +of it. In addition, you must do these things in the Modified Version: + +\begin{itemize} +\item[A.] + Use in the Title Page (and on the covers, if any) a title distinct + from that of the Document, and from those of previous versions + (which should, if there were any, be listed in the History section + of the Document). You may use the same title as a previous version + if the original publisher of that version gives permission. + +\item[B.] 
+ List on the Title Page, as authors, one or more persons or entities + responsible for authorship of the modifications in the Modified + Version, together with at least five of the principal authors of the + Document (all of its principal authors, if it has fewer than five), + unless they release you from this requirement. + +\item[C.] + State on the Title page the name of the publisher of the + Modified Version, as the publisher. + +\item[D.] + Preserve all the copyright notices of the Document. + +\item[E.] + Add an appropriate copyright notice for your modifications + adjacent to the other copyright notices. + +\item[F.] + Include, immediately after the copyright notices, a license notice + giving the public permission to use the Modified Version under the + terms of this License, in the form shown in the Addendum below. + +\item[G.] + Preserve in that license notice the full lists of Invariant Sections + and required Cover Texts given in the Document's license notice. + +\item[H.] + Include an unaltered copy of this License. + +\item[I.] + Preserve the section Entitled "History", Preserve its Title, and add + to it an item stating at least the title, year, new authors, and + publisher of the Modified Version as given on the Title Page. If + there is no section Entitled "History" in the Document, create one + stating the title, year, authors, and publisher of the Document as + given on its Title Page, then add an item describing the Modified + Version as stated in the previous sentence. + +\item[J.] + Preserve the network location, if any, given in the Document for + public access to a Transparent copy of the Document, and likewise + the network locations given in the Document for previous versions + it was based on. These may be placed in the "History" section. + You may omit a network location for a work that was published at + least four years before the Document itself, or if the original + publisher of the version it refers to gives permission. 
+ +\item[K.] + For any section Entitled "Acknowledgements" or "Dedications", + Preserve the Title of the section, and preserve in the section all + the substance and tone of each of the contributor acknowledgements + and/or dedications given therein. + +\item[L.] + Preserve all the Invariant Sections of the Document, + unaltered in their text and in their titles. Section numbers + or the equivalent are not considered part of the section titles. + +\item[M.] + Delete any section Entitled "Endorsements". Such a section + may not be included in the Modified Version. + +\item[N.] + Do not retitle any existing section to be Entitled "Endorsements" + or to conflict in title with any Invariant Section. + +\item[O.] + Preserve any Warranty Disclaimers. +\end{itemize} + +If the Modified Version includes new front-matter sections or +appendices that qualify as Secondary Sections and contain no material +copied from the Document, you may at your option designate some or all +of these sections as invariant. To do this, add their titles to the +list of Invariant Sections in the Modified Version's license notice. +These titles must be distinct from any other section titles. + +You may add a section Entitled "Endorsements", provided it contains +nothing but endorsements of your Modified Version by various +parties--for example, statements of peer review or that the text has +been approved by an organization as the authoritative definition of a +standard. + +You may add a passage of up to five words as a Front-Cover Text, and a +passage of up to 25 words as a Back-Cover Text, to the end of the list +of Cover Texts in the Modified Version. Only one passage of +Front-Cover Text and one of Back-Cover Text may be added by (or +through arrangements made by) any one entity. 
If the Document already +includes a cover text for the same cover, previously added by you or +by arrangement made by the same entity you are acting on behalf of, +you may not add another; but you may replace the old one, on explicit +permission from the previous publisher that added the old one. + +The author(s) and publisher(s) of the Document do not by this License +give permission to use their names for publicity for or to assert or +imply endorsement of any Modified Version. + + +\begin{center} +{\Large\bf 5. COMBINING DOCUMENTS} +\end{center} + + +You may combine the Document with other documents released under this +License, under the terms defined in section 4 above for modified +versions, provided that you include in the combination all of the +Invariant Sections of all of the original documents, unmodified, and +list them all as Invariant Sections of your combined work in its +license notice, and that you preserve all their Warranty Disclaimers. + +The combined work need only contain one copy of this License, and +multiple identical Invariant Sections may be replaced with a single +copy. If there are multiple Invariant Sections with the same name but +different contents, make the title of each such section unique by +adding at the end of it, in parentheses, the name of the original +author or publisher of that section if known, or else a unique number. +Make the same adjustment to the section titles in the list of +Invariant Sections in the license notice of the combined work. + +In the combination, you must combine any sections Entitled "History" +in the various original documents, forming one section Entitled +"History"; likewise combine any sections Entitled "Acknowledgements", +and any sections Entitled "Dedications". You must delete all sections +Entitled "Endorsements". + +\begin{center} +{\Large\bf 6. 
COLLECTIONS OF DOCUMENTS} +\end{center} + +You may make a collection consisting of the Document and other documents +released under this License, and replace the individual copies of this +License in the various documents with a single copy that is included in +the collection, provided that you follow the rules of this License for +verbatim copying of each of the documents in all other respects. + +You may extract a single document from such a collection, and distribute +it individually under this License, provided you insert a copy of this +License into the extracted document, and follow this License in all +other respects regarding verbatim copying of that document. + + +\begin{center} +{\Large\bf 7. AGGREGATION WITH INDEPENDENT WORKS} +\end{center} + + +A compilation of the Document or its derivatives with other separate +and independent documents or works, in or on a volume of a storage or +distribution medium, is called an "aggregate" if the copyright +resulting from the compilation is not used to limit the legal rights +of the compilation's users beyond what the individual works permit. +When the Document is included in an aggregate, this License does not +apply to the other works in the aggregate which are not themselves +derivative works of the Document. + +If the Cover Text requirement of section 3 is applicable to these +copies of the Document, then if the Document is less than one half of +the entire aggregate, the Document's Cover Texts may be placed on +covers that bracket the Document within the aggregate, or the +electronic equivalent of covers if the Document is in electronic form. +Otherwise they must appear on printed covers that bracket the whole +aggregate. + + +\begin{center} +{\Large\bf 8. TRANSLATION} +\end{center} + + +Translation is considered a kind of modification, so you may +distribute translations of the Document under the terms of section 4. 
+Replacing Invariant Sections with translations requires special +permission from their copyright holders, but you may include +translations of some or all Invariant Sections in addition to the +original versions of these Invariant Sections. You may include a +translation of this License, and all the license notices in the +Document, and any Warranty Disclaimers, provided that you also include +the original English version of this License and the original versions +of those notices and disclaimers. In case of a disagreement between +the translation and the original version of this License or a notice +or disclaimer, the original version will prevail. + +If a section in the Document is Entitled "Acknowledgements", +"Dedications", or "History", the requirement (section 4) to Preserve +its Title (section 1) will typically require changing the actual +title. + + +\begin{center} +{\Large\bf 9. TERMINATION} +\end{center} + + +You may not copy, modify, sublicense, or distribute the Document except +as expressly provided for under this License. Any other attempt to +copy, modify, sublicense or distribute the Document is void, and will +automatically terminate your rights under this License. However, +parties who have received copies, or rights, from you under this +License will not have their licenses terminated so long as such +parties remain in full compliance. + + +\begin{center} +{\Large\bf 10. FUTURE REVISIONS OF THIS LICENSE} +\end{center} + + +The Free Software Foundation may publish new, revised versions +of the GNU Free Documentation License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. See +http://www.gnu.org/copyleft/. + +Each version of the License is given a distinguishing version number. 
+If the Document specifies that a particular numbered version of this +License "or any later version" applies to it, you have the option of +following the terms and conditions either of that specified version or +of any later version that has been published (not as a draft) by the +Free Software Foundation. If the Document does not specify a version +number of this License, you may choose any version ever published (not +as a draft) by the Free Software Foundation. + + +\begin{center} +{\Large\bf ADDENDUM: How to use this License for your documents} +% TODO: this is too long for table of contents +\end{center} + +To use this License in a document you have written, include a copy of +the License in the document and put the following copyright and +license notices just after the title page: + +\bigskip +\begin{quote} + Copyright \copyright YEAR YOUR NAME. + Permission is granted to copy, distribute and/or modify this document + under the terms of the GNU Free Documentation License, Version 1.2 + or any later version published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU + Free Documentation License". +\end{quote} +\bigskip + +If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts, +replace the "with...Texts." line with this: + +\bigskip +\begin{quote} + with the Invariant Sections being LIST THEIR TITLES, with the + Front-Cover Texts being LIST, and with the Back-Cover Texts being LIST. +\end{quote} +\bigskip + +If you have Invariant Sections without Cover Texts, or some other +combination of the three, merge those two alternatives to suit the +situation. + +If your document contains nontrivial examples of program code, we +recommend releasing these examples in parallel under your choice of +free software license, such as the GNU General Public License, +to permit their use in free software. 
+ +%--------------------------------------------------------------------- diff --git a/docs/manuals/en/utility/fix_tex.pl b/docs/manuals/en/utility/fix_tex.pl new file mode 100755 index 00000000..98657576 --- /dev/null +++ b/docs/manuals/en/utility/fix_tex.pl @@ -0,0 +1,184 @@ +#!/usr/bin/perl -w +# Fixes various things within tex files. + +use strict; + +my %args; + + +sub get_includes { + # Get a list of include files from the top-level tex file. + my (@list,$file); + + foreach my $filename (@_) { + $filename or next; + # Start with the top-level latex file so it gets checked too. + push (@list,$filename); + + # Get a list of all the html files in the directory. + open IF,"<$filename" or die "Cannot open input file $filename"; + while () { + chomp; + push @list,"$1.tex" if (/\\include\{(.*?)\}/); + } + + close IF; + } + return @list; +} + +sub convert_files { + my (@files) = @_; + my ($linecnt,$filedata,$output,$itemcnt,$indentcnt,$cnt); + + $cnt = 0; + foreach my $file (@files) { + # Open the file and load the whole thing into $filedata. A bit wasteful but + # easier to deal with, and we don't have a problem with speed here. + $filedata = ""; + open IF,"<$file" or die "Cannot open input file $file"; + while () { + $filedata .= $_; + } + close IF; + + # We look for a line that starts with \item, and indent the two next lines (if not blank) + # by three spaces. + my $linecnt = 3; + $indentcnt = 0; + $output = ""; + # Process a line at a time. + foreach (split(/\n/,$filedata)) { + $_ .= "\n"; # Put back the return. + # If this line is less than the third line past the \item command, + # and the line isn't blank and doesn't start with whitespace + # add three spaces to the start of the line. Keep track of the number + # of lines changed. + if ($linecnt < 3 and !/^\\item/) { + if (/^[^\n\s]/) { + $output .= " " . 
$_; + $indentcnt++; + } else { + $output .= $_; + } + $linecnt++; + } else { + $linecnt = 3; + $output .= $_; + } + /^\\item / and $linecnt = 1; + } + + + # This is an item line. We need to process it too. If inside a \begin{description} environment, convert + # \item {\bf xxx} to \item [xxx] or \item [{xxx}] (if xxx contains '[' or ']'. + $itemcnt = 0; + $filedata = $output; + $output = ""; + my ($before,$descrip,$this,$between); + + # Find any \begin{description} environment + while ($filedata =~ /(\\begin[\s\n]*\{[\s\n]*description[\s\n]*\})(.*?)(\\end[\s\n]*\{[\s\n]*description[\s\n]*\})/s) { + $output .= $` . $1; + $filedata = $3 . $'; + $descrip = $2; + + # Search for \item {\bf xxx} + while ($descrip =~ /\\item[\s\n]*\{[\s\n]*\\bf[\s\n]*/s) { + $descrip = $'; + $output .= $`; + ($between,$descrip) = find_matching_brace($descrip); + if (!$descrip) { + $linecnt = $output =~ tr/\n/\n/; + print STDERR "Missing matching curly brace at line $linecnt in $file\n" if (!$descrip); + } + + # Now do the replacement. + $between = '{' . $between . '}' if ($between =~ /\[|\]/); + $output .= "\\item \[$between\]"; + $itemcnt++; + } + $output .= $descrip; + } + $output .= $filedata; + + # If any hyphens or \item commnads were converted, save the file. + if ($indentcnt or $itemcnt) { + open OF,">$file" or die "Cannot open output file $file"; + print OF $output; + close OF; + print "$indentcnt indent", ($indentcnt == 1) ? "" : "s"," added in $file\n"; + print "$itemcnt item", ($itemcnt == 1) ? "" : "s"," Changed in $file\n"; + } + + $cnt += $indentcnt + $itemcnt; + } + return $cnt; +} + +sub find_matching_brace { + # Finds text up to the next matching brace. Assumes that the input text doesn't contain + # the opening brace, but we want to find text up to a matching closing one. + # Returns the text between the matching braces, followed by the rest of the text following + # (which does not include the matching brace). 
+ # + my $str = shift; + my ($this,$temp); + my $cnt = 1; + + while ($cnt) { + # Ignore verbatim constructs involving curly braces, or if the character preceding + # the curly brace is a backslash. + if ($str =~ /\\verb\*?\{.*?\{|\\verb\*?\}.*?\}|\{|\}/s) { + $this .= $`; + $str = $'; + $temp = $&; + + if ((substr($this,-1,1) eq '\\') or + $temp =~ /^\\verb/) { + $this .= $temp; + next; + } + + $cnt += ($temp eq '{') ? 1 : -1; + # If this isn't the matching curly brace ($cnt > 0), include the brace. + $this .= $temp if ($cnt); + } else { + # No matching curly brace found. + return ($this . $str,''); + } + } + return ($this,$str); +} + +sub check_arguments { + # Checks command-line arguments for ones starting with -- puts them into + # a hash called %args and removes them from @ARGV. + my $args = shift; + my $i; + + for ($i = 0; $i < $#ARGV; $i++) { + $ARGV[$i] =~ /^\-+/ or next; + $ARGV[$i] =~ s/^\-+//; + $args{$ARGV[$i]} = ""; + delete ($ARGV[$i]); + + } +} + +################################################################## +# MAIN #### +################################################################## + +my @includes; +my $cnt; + +check_arguments(\%args); +die "No Files given to Check\n" if ($#ARGV < 0); + +# Examine the file pointed to by the first argument to get a list of +# includes to test. +@includes = get_includes(@ARGV); + +$cnt = convert_files(@includes); +print "No lines changed\n" unless $cnt; diff --git a/docs/manuals/en/utility/index.perl b/docs/manuals/en/utility/index.perl new file mode 100644 index 00000000..bc4e1b60 --- /dev/null +++ b/docs/manuals/en/utility/index.perl @@ -0,0 +1,564 @@ +# This module does multiple indices, supporting the style of the LaTex 'index' +# package. + +# Version Information: +# 16-Feb-2005 -- Original Creation. Karl E. Cunningham +# 14-Mar-2005 -- Clarified and Consolodated some of the code. +# Changed to smoothly handle single and multiple indices. + +# Two LaTeX index formats are supported... 
+# --- SINGLE INDEX --- +# \usepackage{makeidx} +# \makeindex +# \index{entry1} +# \index{entry2} +# \index{entry3} +# ... +# \printindex +# +# --- MULTIPLE INDICES --- +# +# \usepackage{makeidx} +# \usepackage{index} +# \makeindex -- latex2html doesn't care but LaTeX does. +# \newindex{ref1}{ext1}{ext2}{title1} +# \newindex{ref2}{ext1}{ext2}{title2} +# \newindex{ref3}{ext1}{ext2}{title3} +# \index[ref1]{entry1} +# \index[ref1]{entry2} +# \index[ref3]{entry3} +# \index[ref2]{entry4} +# \index{entry5} +# \index[ref3]{entry6} +# ... +# \printindex[ref1] +# \printindex[ref2] +# \printindex[ref3] +# \printindex +# ___________________ +# +# For the multiple-index style, each index is identified by the ref argument to \newindex, \index, +# and \printindex. A default index is allowed, which is indicated by omitting the optional +# argument. The default index does not require a \newindex command. As \index commands +# are encountered, their entries are stored according +# to the ref argument. When the \printindex command is encountered, the stored index +# entries for that argument are retrieved and printed. The title for each index is taken +# from the last argument in the \newindex command. +# While processing \index and \printindex commands, if no argument is given the index entries +# are built into a default index. The title of the default index is simply "Index". +# This makes the difference between single- and multiple-index processing trivial. +# +# Another method can be used by omitting the \printindex command and just using \include to +# pull in index files created by the makeindex program. These files will start with +# \begin{theindex}. This command is used to determine where to print the index. Using this +# approach, the indices will be output in the same order as the newindex commands were +# originally found (see below). Using a combination of \printindex and \include{indexfile} has not +# been tested and may produce undesireable results. 
+# +# The index data are stored in a hash for later sorting and output. As \printindex +# commands are handled, the order in which they were found in the tex filea is saved, +# associated with the ref argument to \printindex. +# +# We use the original %index hash to store the index data into. We append a \002 followed by the +# name of the index to isolate the entries in different indices from each other. This is necessary +# so that different indices can have entries with the same name. For the default index, the \002 is +# appended without the name. +# +# Since the index order in the output cannot be determined if the \include{indexfile} +# command is used, the order will be assumed from the order in which the \newindex +# commands were originally seen in the TeX files. This order is saved as well as the +# order determined from any printindex{ref} commands. If \printindex commnads are used +# to specify the index output, that order will be used. If the \include{idxfile} command +# is used, the order of the original newindex commands will be used. In this case the +# default index will be printed last since it doesn't have a corresponding \newindex +# command and its order cannot be determined. Mixing \printindex and \include{idxfile} +# commands in the same file is likely to produce less than satisfactory results. +# +# +# The hash containing index data is named %indices. It contains the following data: +#{ +# 'title' => { +# $ref1 => $indextitle , +# $ref2 => $indextitle , +# ... +# }, +# 'newcmdorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index. +# 'printindorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index. +#} + + +# Globals to handle multiple indices. +my %indices; + +# This tells the system to use up to 7 words in index entries. +$WORDS_IN_INDEX = 10; + +# KEC 2-18-05 +# Handles the \newindex command. This is called if the \newindex command is +# encountered in the LaTex source. 
Gets the index ref and title from the arguments. +# Saves the index ref and title. +# Note that we are called once to handle multiple \newindex commands that are +# newline-separated. +sub do_cmd_newindex { + my $data = shift; + # The data is sent to us as fields delimited by their ID #'s. We extract the + # fields. + foreach my $line (split("\n",$data)) { + my @fields = split (/(?:\<\#\d+?\#\>)+/,$line); + + # The index name and title are the second and fourth fields in the data. + if ($line =~ /^ \001 + # @ -> \002 + # | -> \003 + $* = 1; $str =~ s/\n\s*/ /g; $* = 0; # remove any newlines + # protect \001 occurring with images + $str =~ s/\001/\016/g; # 0x1 to 0xF + $str =~ s/\\\\/\011/g; # Double backslash -> 0xB + $str =~ s/\\;SPMquot;/\012/g; # \;SPMquot; -> 0xC + $str =~ s/;SPMquot;!/\013/g; # ;SPMquot; -> 0xD + $str =~ s/!/\001/g; # Exclamation point -> 0x1 + $str =~ s/\013/!/g; # 0xD -> Exclaimation point + $str =~ s/;SPMquot;@/\015/g; # ;SPMquot;@ to 0xF + $str =~ s/@/\002/g; # At sign -> 0x2 + $str =~ s/\015/@/g; # 0xF to At sign + $str =~ s/;SPMquot;\|/\017/g; # ;SMPquot;| to 0x11 + $str =~ s/\|/\003/g; # Vertical line to 0x3 + $str =~ s/\017/|/g; # 0x11 to vertical line + $str =~ s/;SPMquot;(.)/\1/g; # ;SPMquot; -> whatever the next character is + $str =~ s/\012/;SPMquot;/g; # 0x12 to ;SPMquot; + $str =~ s/\011/\\\\/g; # 0x11 to double backslash + local($key_part, $pageref) = split("\003", $str, 2); + + # For any keys of the form: blablabla!blablabla, which want to be split at the + # exclamation point, replace the ! with a comma and a space. We don't do it + # that way for this index. + $key_part =~ s/\001/, /g; + local(@keys) = split("\001", $key_part); + # If TITLE is not yet available use $before. 
+ $TITLE = $saved_title if (($saved_title)&&(!($TITLE)||($TITLE eq $default_title))); + $TITLE = $before unless $TITLE; + # Save the reference + local($words) = ''; + if ($SHOW_SECTION_NUMBERS) { $words = &make_idxnum; } + elsif ($SHORT_INDEX) { $words = &make_shortidxname; } + else { $words = &make_idxname; } + local($super_key) = ''; + local($sort_key, $printable_key, $cur_key); + foreach $key (@keys) { + $key =~ s/\016/\001/g; # revert protected \001s + ($sort_key, $printable_key) = split("\002", $key); + # + # RRM: 16 May 1996 + # any \label in the printable-key will have already + # created a label where the \index occurred. + # This has to be removed, so that the desired label + # will be found on the Index page instead. + # + if ($printable_key =~ /tex2html_anchor_mark/ ) { + $printable_key =~ s/><\/A>$cross_ref_mark/ + $printable_key =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/ + do { ($label,$id) = ($1,$2); + $ref_label = $external_labels{$label} unless + ($ref_label = $ref_files{$label}); + '"' . "$ref_label#$label" . '">' . + &get_ref_mark($label,$id)} + /geo; + } + $printable_key =~ s/<\#[^\#>]*\#>//go; + #RRM + # recognise \char combinations, for a \backslash + # + $printable_key =~ s/\&\#;\'134/\\/g; # restore \\s + $printable_key =~ s/\&\#;\`
/\\/g; # ditto + $printable_key =~ s/\&\#;*SPMquot;92/\\/g; # ditto + # + # $sort_key .= "@$printable_key" if !($printable_key); # RRM + $sort_key .= "@$printable_key" if !($sort_key); # RRM + $sort_key =~ tr/A-Z/a-z/; + if ($super_key) { + $cur_key = $super_key . "\001" . $sort_key; + $sub_index{$super_key} .= $cur_key . "\004"; + } else { + $cur_key = $sort_key; + } + + # Append the $index_name to the current key with a \002 delimiter. This will + # allow the same index entry to appear in more than one index. + $index_key = $cur_key . "\002$index_name"; + + $index{$index_key} .= ""; + + # + # RRM, 15 June 1996 + # if there is no printable key, but one is known from + # a previous index-entry, then use it. + # + if (!($printable_key) && ($printable_key{$index_key})) + { $printable_key = $printable_key{$index_key}; } +# if (!($printable_key) && ($printable_key{$cur_key})) +# { $printable_key = $printable_key{$cur_key}; } + # + # do not overwrite the printable_key if it contains an anchor + # + if (!($printable_key{$index_key} =~ /tex2html_anchor_mark/ )) + { $printable_key{$index_key} = $printable_key || $key; } +# if (!($printable_key{$cur_key} =~ /tex2html_anchor_mark/ )) +# { $printable_key{$cur_key} = $printable_key || $key; } + + $super_key = $cur_key; + } + # + # RRM + # page-ranges, from |( and |) and |see + # + if ($pageref) { + if ($pageref eq "\(" ) { + $pageref = ''; + $next .= " from "; + } elsif ($pageref eq "\)" ) { + $pageref = ''; + local($next) = $index{$index_key}; +# local($next) = $index{$cur_key}; + # $next =~ s/[\|] *$//; + $next =~ s/(\n )?\| $//; + $index{$index_key} = "$next to "; +# $index{$cur_key} = "$next to "; + } + } + + if ($pageref) { + $pageref =~ s/\s*$//g; # remove trailing spaces + if (!$pageref) { $pageref = ' ' } + $pageref =~ s/see/see <\/i> /g; + # + # RRM: 27 Dec 1996 + # check if $pageref corresponds to a style command. + # If so, apply it to the $words. 
+ # + local($tmp) = "do_cmd_$pageref"; + if (defined &$tmp) { + $words = &$tmp("<#0#>$words<#0#>"); + $words =~ s/<\#[^\#]*\#>//go; + $pageref = ''; + } + } + # + # RRM: 25 May 1996 + # any \label in the pageref section will have already + # created a label where the \index occurred. + # This has to be removed, so that the desired label + # will be found on the Index page instead. + # + if ($pageref) { + if ($pageref =~ /tex2html_anchor_mark/ ) { + $pageref =~ s/><\/A>
$cross_ref_mark/ + $pageref =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/ + do { ($label,$id) = ($1,$2); + $ref_files{$label} = ''; # ???? RRM + if ($index_labels{$label}) { $ref_label = ''; } + else { $ref_label = $external_labels{$label} + unless ($ref_label = $ref_files{$label}); + } + '"' . "$ref_label#$label" . '">' . &get_ref_mark($label,$id)}/geo; + } + $pageref =~ s/<\#[^\#>]*\#>//go; + + if ($pageref eq ' ') { $index{$index_key}='@'; } + else { $index{$index_key} .= $pageref . "\n | "; } + } else { + local($thisref) = &make_named_href('',"$CURRENT_FILE#$br_id",$words); + $thisref =~ s/\n//g; + $index{$index_key} .= $thisref."\n | "; + } + #print "\nREF: $sort_key : $index_key :$index{$index_key}"; + + #join('',"$anchor_invisible_mark<\/A>",$_); + + "$anchor_invisible_mark<\/A>"; +} + + +# KEC. -- Copied from makeidx.perl, then modified to do multiple indices. +# Feeds the index entries to the output. This is called for each index to be built. +# +# Generates a list of lookup keys for index entries, from both %printable_keys +# and %index keys. +# Sorts the keys according to index-sorting rules. +# Removes keys with a 0x01 token. (duplicates?) +# Builds a string to go to the index file. +# Adds the index entries to the string if they belong in this index. +# Keeps track of which index is being worked on, so only the proper entries +# are included. +# Places the index just built in to the output at the proper place. +{ my $index_number = 0; +sub add_real_idx { + print "\nDoing the index ... Index Number $index_number\n"; + local($key, @keys, $next, $index, $old_key, $old_html); + my ($idx_ref,$keyref); + # RRM, 15.6.96: index constructed from %printable_key, not %index + @keys = keys %printable_key; + + while (/$idx_mark/) { + # Get the index reference from what follows the $idx_mark and + # remove it from the string. 
+ s/$idxmark\002(.*?)\002/$idxmark/; + $idx_ref = $1; + $index = ''; + # include non- makeidx index-entries + foreach $key (keys %index) { + next if $printable_key{$key}; + $old_key = $key; + if ($key =~ s/###(.*)$//) { + next if $printable_key{$key}; + push (@keys, $key); + $printable_key{$key} = $key; + if ($index{$old_key} =~ /HREF="([^"]*)"/i) { + $old_html = $1; + $old_html =~ /$dd?([^#\Q$dd\E]*)#/; + $old_html = $1; + } else { $old_html = '' } + $index{$key} = $index{$old_key} . $old_html."\n | "; + }; + } + @keys = sort makeidx_keysort @keys; + @keys = grep(!/\001/, @keys); + my $cnt = 0; + foreach $key (@keys) { + my ($keyref) = $key =~ /.*\002(.*)/; + next unless ($idx_ref eq $keyref); # KEC. + $index .= &add_idx_key($key); + $cnt++; + } + print "$cnt Index Entries Added\n"; + $index = '
'.$index unless ($index =~ /^\s*/); + $index_number++; # KEC. + if ($SHORT_INDEX) { + print "(compact version with Legend)"; + local($num) = ( $index =~ s/\ 50 ) { + s/$idx_mark/$preindex
\n$index\n<\/DL>$preindex/o; + } else { + s/$idx_mark/$preindex
\n$index\n<\/DL>/o; + } + } else { + s/$idx_mark/
\n$index\n<\/DL>/o; } + } +} +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# The bibliography and the index should be treated as separate sections +# in their own HTML files. The \bibliography{} command acts as a sectioning command +# that has the desired effect. But when the bibliography is constructed +# manually using the thebibliography environment, or when using the +# theindex environment it is not possible to use the normal sectioning +# mechanism. This subroutine inserts a \bibliography{} or a dummy +# \textohtmlindex command just before the appropriate environments +# to force sectioning. +sub add_bbl_and_idx_dummy_commands { + local($id) = $global{'max_id'}; + + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$bbl_cnt++; $1/eg; + ## if ($bbl_cnt == 1) { + s/([\\]begin\s*$O\d+$C\s*thebibliography)/$id++; "\\bibliography$O$id$C$O$id$C $1"/geo; + #} + $global{'max_id'} = $id; + # KEC. Modified to global substitution to place multiple index tokens. + s/[\\]begin\s*($O\d+$C)\s*theindex/\\textohtmlindex$1/go; + # KEC. Modified to pick up the optional argument to \printindex + s/[\\]printindex\s*(\[.*?\])?/ + do { (defined $1) ? "\\textohtmlindex $1" : "\\textohtmlindex []"; } /ego; + &lib_add_bbl_and_idx_dummy_commands() if defined(&lib_add_bbl_and_idx_dummy_commands); +} + +# KEC. Copied from latex2html.pl and modified to support multiple indices. +# For each textohtmlindex mark found, determine the index titles and headers. +# We place the index ref in the header so the proper index can be generated later. +# For the default index, the index ref is blank. +# +# One problem is that this routine is called twice.. Once for processing the +# command as originally seen, and once for processing the command when +# doing the name for the index file. We can detect that by looking at the +# id numbers (or ref) surrounding the \theindex command, and not incrementing +# index_number unless a new id (or ref) is seen. 
This has the side effect of +# having to unconventionally start the index_number at -1. But it works. +# +# Gets the title from the list of indices. +# If this is the first index, save the title in $first_idx_file. This is what's referenced +# in the navigation buttons. +# Increment the index_number for next time. +# If the indexname command is defined or a newcommand defined for indexname, do it. +# Save the index TITLE in the toc +# Save the first_idx_file into the idxfile. This goes into the nav buttons. +# Build index_labels if needed. +# Create the index headings and put them in the output stream. + +{ my $index_number = 0; # Will be incremented before use. + my $first_idx_file; # Static + my $no_increment = 0; + +sub do_cmd_textohtmlindex { + local($_) = @_; + my ($idxref,$idxnum,$index_name); + + # We get called from make_name with the first argument = "\001noincrement". This is a sign + # to not increment $index_number the next time we are called. We get called twice, once + # my make_name and once by process_command. Unfortunately, make_name calls us just to set the name + # but doesn't use the result so we get called a second time by process_command. This works fine + # except for cases where there are multiple indices except if they aren't named, which is the case + # when the index is inserted by an include command in latex. In these cases we are only able to use + # the index number to decide which index to draw from, and we don't know how to increment that index + # number if we get called a variable number of times for the same index, as is the case between + # making html (one output file) and web (multiple output files) output formats. + if (/\001noincrement/) { + $no_increment = 1; + return; + } + + # Remove (but save) the index reference + s/^\s*\[(.*?)\]/{$idxref = $1; "";}/e; + + # If we have an $idxref, the index name was specified. In this case, we have all the + # information we need to carry on. 
Otherwise, we need to get the idxref + # from the $index_number and set the name to "Index". + if ($idxref) { + $index_name = $indices{'title'}{$idxref}; + } else { + if (defined ($idxref = $indices{'newcmdorder'}->[$index_number])) { + $index_name = $indices{'title'}{$idxref}; + } else { + $idxref = ''; + $index_name = "Index"; + } + } + + $idx_title = "Index"; # The name displayed in the nav bar text. + + # Only set $idxfile if we are at the first index. This will point the + # navigation panel to the first index file rather than the last. + $first_idx_file = $CURRENT_FILE if ($index_number == 0); + $idxfile = $first_idx_file; # Pointer for the Index button in the nav bar. + $toc_sec_title = $index_name; # Index link text in the toc. + $TITLE = $toc_sec_title; # Title for this index, from which its filename is built. + if (%index_labels) { &make_index_labels(); } + if (($SHORT_INDEX) && (%index_segment)) { &make_preindex(); } + else { $preindex = ''; } + local $idx_head = $section_headings{'textohtmlindex'}; + local($heading) = join('' + , &make_section_heading($TITLE, $idx_head) + , $idx_mark, "\002", $idxref, "\002" ); + local($pre,$post) = &minimize_open_tags($heading); + $index_number++ unless ($no_increment); + $no_increment = 0; + join('',"
\n" , $pre, $_); +} +} + +# Returns an index key, given the key passed as the first argument. +# Not modified for multiple indices. +sub add_idx_key { + local($key) = @_; + local($index, $next); + if (($index{$key} eq '@' )&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { $index .= "

\n
".&print_key."\n
"; } + else { $index .= "

\n
".&print_key."\n
"; } + } elsif (($index{$key})&&(!($index_printed{$key}))) { + if ($SHORT_INDEX) { + $next = "
".&print_key."\n : ". &print_idx_links; + } else { + $next = "
".&print_key."\n
". &print_idx_links; + } + $index .= $next."\n"; + $index_printed{$key} = 1; + } + + if ($sub_index{$key}) { + local($subkey, @subkeys, $subnext, $subindex); + @subkeys = sort(split("\004", $sub_index{$key})); + if ($SHORT_INDEX) { + $index .= "
".&print_key unless $index_printed{$key}; + $index .= "
\n"; + } else { + $index .= "
".&print_key."\n
" unless $index_printed{$key}; + $index .= "
\n"; + } + foreach $subkey (@subkeys) { + $index .= &add_sub_idx_key($subkey) unless ($index_printed{$subkey}); + } + $index .= "
\n"; + } + return $index; +} + +1; # Must be present as the last line. diff --git a/docs/manuals/en/utility/latex2html-init.pl b/docs/manuals/en/utility/latex2html-init.pl new file mode 100644 index 00000000..14b5c319 --- /dev/null +++ b/docs/manuals/en/utility/latex2html-init.pl @@ -0,0 +1,10 @@ +# This file serves as a place to put initialization code and constants to +# affect the behavior of latex2html for generating the bacula manuals. + +# $LINKPOINT specifies what filename to use to link to when creating +# index.html. Not that this is a hard link. +$LINKPOINT='"$OVERALL_TITLE"'; + + +# The following must be the last line of this file. +1; diff --git a/docs/manuals/en/utility/progs.tex b/docs/manuals/en/utility/progs.tex new file mode 100644 index 00000000..9187970d --- /dev/null +++ b/docs/manuals/en/utility/progs.tex @@ -0,0 +1,1332 @@ +%% +%% + +\chapter{Volume Utility Tools} +\label{_UtilityChapter} +\index[general]{Volume Utility Tools} +\index[general]{Tools!Volume Utility} + +This document describes the utility programs written to aid Bacula users and +developers in dealing with Volumes external to Bacula. + +\section{Specifying the Configuration File} +\index[general]{Specifying the Configuration File} + +Starting with version 1.27, each of the following programs requires a valid +Storage daemon configuration file (actually, the only part of the +configuration file that these programs need is the {\bf Device} resource +definitions). This permits the programs to find the configuration parameters +for your archive device (generally a tape drive). By default, they read {\bf +bacula-sd.conf} in the current directory, but you may specify a different +configuration file using the {\bf -c} option. + + +\section{Specifying a Device Name For a Tape} +\index[general]{Tape!Specifying a Device Name For a} +\index[general]{Specifying a Device Name For a Tape} + +Each of these programs require a {\bf device-name} where the Volume can be +found. 
In the case of a tape, this is the physical device name such as {\bf +/dev/nst0} or {\bf /dev/rmt/0ubn} depending on your system. For the program to +work, it must find the identical name in the Device resource of the +configuration file. See below for specifying Volume names. + +Please note that if you have Bacula running and you ant to use +one of these programs, you will either need to stop the Storage daemon, or +{\bf unmount} any tape drive you want to use, otherwise the drive +will {\bf busy} because Bacula is using it. + + +\section{Specifying a Device Name For a File} +\index[general]{File!Specifying a Device Name For a} +\index[general]{Specifying a Device Name For a File} + +If you are attempting to read or write an archive file rather than a tape, the +{\bf device-name} should be the full path to the archive location including +the filename. The filename (last part of the specification) will be stripped +and used as the Volume name, and the path (first part before the filename) +must have the same entry in the configuration file. So, the path is equivalent +to the archive device name, and the filename is equivalent to the volume name. + + +\section{Specifying Volumes} +\index[general]{Volumes!Specifying} +\index[general]{Specifying Volumes} + +In general, you must specify the Volume name to each of the programs below +(with the exception of {\bf btape}). The best method to do so is to specify a +{\bf bootstrap} file on the command line with the {\bf -b} option. As part of +the bootstrap file, you will then specify the Volume name or Volume names if +more than one volume is needed. For example, suppose you want to read tapes +{\bf tape1} and {\bf tape2}. First construct a {\bf bootstrap} file named say, +{\bf list.bsr} which contains: + +\footnotesize +\begin{verbatim} +Volume=test1|test2 +\end{verbatim} +\normalsize + +where each Volume is separated by a vertical bar. 
Then simply use: + +\footnotesize +\begin{verbatim} +./bls -b list.bsr /dev/nst0 +\end{verbatim} +\normalsize + +In the case of Bacula Volumes that are on files, you may simply append volumes +as follows: + +\footnotesize +\begin{verbatim} +./bls /tmp/test1\|test2 +\end{verbatim} +\normalsize + +where the backslash (\textbackslash{}) was necessary as a shell escape to +permit entering the vertical bar (|). + +And finally, if you feel that specifying a Volume name is a bit complicated +with a bootstrap file, you can use the {\bf -V} option (on all programs except +{\bf bcopy}) to specify one or more Volume names separated by the vertical bar +(|). For example, + +\footnotesize +\begin{verbatim} +./bls -V Vol001 /dev/nst0 +\end{verbatim} +\normalsize + +You may also specify an asterisk (*) to indicate that the program should +accept any volume. For example: + +\footnotesize +\begin{verbatim} +./bls -V* /dev/nst0 +\end{verbatim} +\normalsize + +\section{bls} +\label{bls} +\index[general]{bls} +\index[general]{program!bls} + +{\bf bls} can be used to do an {\bf ls} type listing of a {\bf Bacula} tape or +file. It is called: + +\footnotesize +\begin{verbatim} +Usage: bls [options] + -b specify a bootstrap file + -c specify a config file + -d specify debug level + -e exclude list + -i include list + -j list jobs + -k list blocks + (no j or k option) list saved files + -L dump label + -p proceed inspite of errors + -v be verbose + -V specify Volume names (separated by |) + -? 
print this message +\end{verbatim} +\normalsize + +For example, to list the contents of a tape: + +\footnotesize +\begin{verbatim} +./bls -V Volume-name /dev/nst0 +\end{verbatim} +\normalsize + +Or to list the contents of a file: + +\footnotesize +\begin{verbatim} +./bls /tmp/Volume-name +or +./bls -V Volume-name /tmp +\end{verbatim} +\normalsize + +Note that, in the case of a file, the Volume name becomes the filename, so in +the above example, you will replace the {\bf xxx} with the name of the volume +(file) you wrote. + +Normally if no options are specified, {\bf bls} will produce the equivalent +output to the {\bf ls -l} command for each file on the tape. Using other +options listed above, it is possible to display only the Job records, only the +tape blocks, etc. For example: + +\footnotesize +\begin{verbatim} + +./bls /tmp/File002 +bls: butil.c:148 Using device: /tmp +drwxrwxr-x 3 k k 4096 02-10-19 21:08 /home/kern/bacula/k/src/dird/ +drwxrwxr-x 2 k k 4096 02-10-10 18:59 /home/kern/bacula/k/src/dird/CVS/ +-rw-rw-r-- 1 k k 54 02-07-06 18:02 /home/kern/bacula/k/src/dird/CVS/Root +-rw-rw-r-- 1 k k 16 02-07-06 18:02 /home/kern/bacula/k/src/dird/CVS/Repository +-rw-rw-r-- 1 k k 1783 02-10-10 18:59 /home/kern/bacula/k/src/dird/CVS/Entries +-rw-rw-r-- 1 k k 97506 02-10-18 21:07 /home/kern/bacula/k/src/dird/Makefile +-rw-r--r-- 1 k k 3513 02-10-18 21:02 /home/kern/bacula/k/src/dird/Makefile.in +-rw-rw-r-- 1 k k 4669 02-07-06 18:02 /home/kern/bacula/k/src/dird/README-config +-rw-r--r-- 1 k k 4391 02-09-14 16:51 /home/kern/bacula/k/src/dird/authenticate.c +-rw-r--r-- 1 k k 3609 02-07-07 16:41 /home/kern/bacula/k/src/dird/autoprune.c +-rw-rw-r-- 1 k k 4418 02-10-18 21:03 /home/kern/bacula/k/src/dird/bacula-dir.conf +... +-rw-rw-r-- 1 k k 83 02-08-31 19:19 /home/kern/bacula/k/src/dird/.cvsignore +bls: Got EOF on device /tmp +84 files found. 
+\end{verbatim} +\normalsize + +\subsection{Listing Jobs} +\index[general]{Listing Jobs with bls} +\index[general]{bls!Listing Jobs} + +If you are listing a Volume to determine what Jobs to restore, normally the +{\bf -j} option provides you with most of what you will need as long as you +don't have multiple clients. For example, + +\footnotesize +\begin{verbatim} +./bls -j -V Test1 -c stored.conf DDS-4 +bls: butil.c:258 Using device: "DDS-4" for reading. +11-Jul 11:54 bls: Ready to read from volume "Test1" on device "DDS-4" (/dev/nst0). +Volume Record: File:blk=0:1 SessId=4 SessTime=1121074625 JobId=0 DataLen=165 +Begin Job Session Record: File:blk=0:2 SessId=4 SessTime=1121074625 JobId=1 Level=F Type=B +Begin Job Session Record: File:blk=0:3 SessId=5 SessTime=1121074625 JobId=5 Level=F Type=B +Begin Job Session Record: File:blk=0:6 SessId=3 SessTime=1121074625 JobId=2 Level=F Type=B +Begin Job Session Record: File:blk=0:13 SessId=2 SessTime=1121074625 JobId=4 Level=F Type=B +End Job Session Record: File:blk=0:99 SessId=3 SessTime=1121074625 JobId=2 Level=F Type=B + Files=168 Bytes=1,732,978 Errors=0 Status=T +End Job Session Record: File:blk=0:101 SessId=2 SessTime=1121074625 JobId=4 Level=F Type=B + Files=168 Bytes=1,732,978 Errors=0 Status=T +End Job Session Record: File:blk=0:108 SessId=5 SessTime=1121074625 JobId=5 Level=F Type=B + Files=168 Bytes=1,732,978 Errors=0 Status=T +End Job Session Record: File:blk=0:109 SessId=4 SessTime=1121074625 JobId=1 Level=F Type=B + Files=168 Bytes=1,732,978 Errors=0 Status=T +11-Jul 11:54 bls: End of Volume at file 1 on device "DDS-4" (/dev/nst0), Volume "Test1" +11-Jul 11:54 bls: End of all volumes. +\end{verbatim} +\normalsize + +shows a full save followed by two incremental saves. 
+ +Adding the {\bf -v} option will display virtually all information that is +available for each record: + +\subsection{Listing Blocks} +\index[general]{Listing Blocks with bls} +\index[general]{bls!Listing Blocks} + +Normally, except for debugging purposes, you will not need to list Bacula +blocks (the "primitive" unit of Bacula data on the Volume). However, you can +do so with: + +\footnotesize +\begin{verbatim} +./bls -k /tmp/File002 +bls: butil.c:148 Using device: /tmp +Block: 1 size=64512 +Block: 2 size=64512 +... +Block: 65 size=64512 +Block: 66 size=19195 +bls: Got EOF on device /tmp +End of File on device +\end{verbatim} +\normalsize + +By adding the {\bf -v} option, you can get more information, which can be +useful in knowing what sessions were written to the volume: + +\footnotesize +\begin{verbatim} +./bls -k -v /tmp/File002 +Volume Label: +Id : Bacula 0.9 mortal +VerNo : 10 +VolName : File002 +PrevVolName : +VolFile : 0 +LabelType : VOL_LABEL +LabelSize : 147 +PoolName : Default +MediaType : File +PoolType : Backup +HostName : +Date label written: 2002-10-19 at 21:16 +Block: 1 blen=64512 First rec FI=VOL_LABEL SessId=1 SessTim=1035062102 Strm=0 rlen=147 +Block: 2 blen=64512 First rec FI=6 SessId=1 SessTim=1035062102 Strm=DATA rlen=4087 +Block: 3 blen=64512 First rec FI=12 SessId=1 SessTim=1035062102 Strm=DATA rlen=5902 +Block: 4 blen=64512 First rec FI=19 SessId=1 SessTim=1035062102 Strm=DATA rlen=28382 +... +Block: 65 blen=64512 First rec FI=83 SessId=1 SessTim=1035062102 Strm=DATA rlen=1873 +Block: 66 blen=19195 First rec FI=83 SessId=1 SessTim=1035062102 Strm=DATA rlen=2973 +bls: Got EOF on device /tmp +End of File on device +\end{verbatim} +\normalsize + +Armed with the SessionId and the SessionTime, you can extract just about +anything. + +If you want to know even more, add a second {\bf -v} to the command line to +get a dump of every record in every block. 
+ +\footnotesize +\begin{verbatim} +./bls -k -v -v /tmp/File002 +bls: block.c:79 Dump block 80f8ad0: size=64512 BlkNum=1 + Hdrcksum=b1bdfd6d cksum=b1bdfd6d +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=VOL_LABEL Strm=0 len=147 p=80f8b40 +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=SOS_LABEL Strm=-7 len=122 p=80f8be7 +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=1 Strm=UATTR len=86 p=80f8c75 +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=2 Strm=UATTR len=90 p=80f8cdf +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=3 Strm=UATTR len=92 p=80f8d4d +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=3 Strm=DATA len=54 p=80f8dbd +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=3 Strm=MD5 len=16 p=80f8e07 +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=4 Strm=UATTR len=98 p=80f8e2b +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=4 Strm=DATA len=16 p=80f8ea1 +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=4 Strm=MD5 len=16 p=80f8ec5 +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=5 Strm=UATTR len=96 p=80f8ee9 +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=5 Strm=DATA len=1783 p=80f8f5d +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=5 Strm=MD5 len=16 p=80f9668 +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=6 Strm=UATTR len=95 p=80f968c +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=6 Strm=DATA len=32768 p=80f96ff +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=6 Strm=DATA len=32768 p=8101713 +bls: block.c:79 Dump block 80f8ad0: size=64512 BlkNum=2 + Hdrcksum=9acc1e7f cksum=9acc1e7f +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=6 Strm=contDATA len=4087 p=80f8b40 +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=6 Strm=DATA len=31970 p=80f9b4b +bls: block.c:92 Rec: VId=1 VT=1035062102 FI=6 Strm=MD5 len=16 p=8101841 +... +\end{verbatim} +\normalsize + +\section{bextract} +\label{bextract} +\index[general]{Bextract} +\index[general]{program!bextract} + +If you find yourself using {\bf bextract}, you probably have done +something wrong. 
For example, if you are trying to recover a file +but are having problems, please see the \ilink {Restoring When Things Go +Wrong}{database_restore} section of the Restore chapter of this manual. + +Normally, you will restore files by running a {\bf Restore} Job from the {\bf +Console} program. However, {\bf bextract} can be used to extract a single file +or a list of files from a Bacula tape or file. In fact, {\bf bextract} can be +a useful tool to restore files to an empty system assuming you are able to +boot, you have statically linked {\bf bextract} and you have an appropriate +{\bf bootstrap} file. + +Please note that some of the current limitations of bextract are: + +\begin{enumerate} +\item It cannot restore access control lists (ACL) that have been + backed up along with the file data. +\item It cannot restore Win32 non-portable streams (typically default). +\item It cannot restore encrypted files. +\item The command line length is relatively limited, + which means that you cannot enter a huge number of volumes. If you need to + enter more volumes than the command line supports, please use a bootstrap + file (see below). +\end{enumerate} + + +It is called: + +\footnotesize +\begin{verbatim} + +Usage: bextract [-d debug_level] + -b specify a bootstrap file + -dnn set debug level to nn + -e exclude list + -i include list + -p proceed inspite of I/O errors + -V specify Volume names (separated by |) + -? print this message +\end{verbatim} +\normalsize + +where {\bf device-name} is the Archive Device (raw device name or full +filename) of the device to be read, and {\bf directory-to-store-files} is a +path prefix to prepend to all the files restored. + +NOTE: On Windows systems, if you specify a prefix of say d:/tmp, any file that +would have been restored to {\bf c:/My Documents} will be restored to {\bf +d:/tmp/My Documents}. That is, the original drive specification will be +stripped. 
If no prefix is specified, the file will be restored to the original +drive. + +\subsection{Extracting with Include or Exclude Lists} +\index[general]{Lists!Extracting with Include or Exclude} +\index[general]{Extracting with Include or Exclude Lists} + +Using the {\bf -e} option, you can specify a file containing a list of files +to be excluded. Wildcards can be used in the exclusion list. This option will +normally be used in conjunction with the {\bf -i} option (see below). Both the +{\bf -e} and the {\bf -i} options may be specified at the same time as the +{\bf -b} option. The bootstrap filters will be applied first, then the include +list, then the exclude list. + +Likewise, and probably more importantly, with the {\bf -i} option, you can +specify a file that contains a list (one file per line) of files and +directories to include to be restored. The list must contain the full filename +with the path. If you specify a path name only, all files and subdirectories +of that path will be restored. If you specify a line containing only the +filename (e.g. {\bf my-file.txt}) it probably will not be extracted because +you have not specified the full path. + +For example, if the file {\bf include-list} contains: + +\footnotesize +\begin{verbatim} +/home/kern/bacula +/usr/local/bin +\end{verbatim} +\normalsize + +Then the command: + +\footnotesize +\begin{verbatim} +./bextract -i include-list -V Volume /dev/nst0 /tmp +\end{verbatim} +\normalsize + +will restore from the Bacula archive {\bf /dev/nst0} all files and directories +in the backup from {\bf /home/kern/bacula} and from {\bf /usr/local/bin}. The +restored files will be placed in a file of the original name under the +directory {\bf /tmp} (i.e. /tmp/home/kern/bacula/... and +/tmp/usr/local/bin/...). 
+ +\subsection{Extracting With a Bootstrap File} +\index[general]{File!Extracting With a Bootstrap} +\index[general]{Extracting With a Bootstrap File} + +The {\bf -b} option is used to specify a {\bf bootstrap} file containing the +information needed to restore precisely the files you want. Specifying a {\bf +bootstrap} file is optional but recommended because it gives you the most +control over which files will be restored. For more details on the {\bf +bootstrap} file, please see +\ilink{Restoring Files with the Bootstrap File}{BootstrapChapter} +chapter of this document. Note, you may also use a bootstrap file produced by +the {\bf restore} command. For example: + +\footnotesize +\begin{verbatim} +./bextract -b bootstrap-file /dev/nst0 /tmp +\end{verbatim} +\normalsize + +The bootstrap file allows detailed specification of what files you want +restored (extracted). You may specify a bootstrap file and include and/or +exclude files at the same time. The bootstrap conditions will first be +applied, and then each file record seen will be compared to the include and +exclude lists. + +\subsection{Extracting From Multiple Volumes} +\index[general]{Volumes!Extracting From Multiple} +\index[general]{Extracting From Multiple Volumes} + +If you wish to extract files that span several Volumes, you can specify the +Volume names in the bootstrap file or you may specify the Volume names on the +command line by separating them with a vertical bar. See the section above +under the {\bf bls} program entitled {\bf Listing Multiple Volumes} for more +information. The same techniques apply equally well to the {\bf bextract} +program or read the \ilink{Bootstrap}{BootstrapChapter} +chapter of this document. + +\section{bscan} +\label{bscan} +\index[general]{bscan} +\index[general]{program!bscan} + +If you find yourself using this program, you have probably done something +wrong. 
For example, the best way to recover a lost or damaged Bacula +database is to reload the database by using the bootstrap file that +was written when you saved it (default bacula-dir.conf file). + +The {\bf bscan} program can be used to re-create a database (catalog) +records from the backup information written to one or more Volumes. +This is normally +needed only if one or more Volumes have been pruned or purged from your +catalog so that the records on the Volume are no longer in the catalog, or +for Volumes that you have archived. + +With some care, it can also be used to synchronize your existing catalog with +a Volume. Although we have never seen a case of bscan damaging a +catalog, since bscan modifies your catalog, we recommend that +you do a simple ASCII backup of your database before running {\bf bscan} just +to be sure. See \ilink{Compacting Your Database}{CompactingMySQL} for +the details of making a copy of your database. + +{\bf bscan} can also be useful in a disaster recovery situation, after the +loss of a hard disk, if you do not have a valid {\bf bootstrap} file for +reloading your system, or if a Volume has been recycled but not overwritten, +you can use {\bf bscan} to re-create your database, which can then be used to +{\bf restore} your system or a file to its previous state. + +It is called: + +\footnotesize +\begin{verbatim} + +Usage: bscan [options] + -b bootstrap specify a bootstrap file + -c specify configuration file + -d set debug level to nn + -m update media info in database + -n specify the database name (default bacula) + -u specify database user name (default bacula) + -P specify database password (default none) + -h specify database host (default NULL) + -p proceed inspite of I/O errors + -r list records + -s synchronize or store in database + -v verbose + -V specify Volume names (separated by |) + -w specify working directory (default from conf file) + -? 
print this message
+\end{verbatim}
+\normalsize
+
+If you are using MySQL or PostgreSQL, there is no need to supply a working
+directory since in that case, bscan knows where the databases are. However, if
+you have provided security on your database, you may need to supply either the
+database name ({\bf -n} option), the user name ({\bf -u} option), and/or the
+password ({\bf -P}) options.
+
+NOTE: before {\bf bscan} can work, it needs at least a bare bones valid
+database. If your database exists but some records are missing because
+they were pruned, then you are all set. If your database was lost or
+destroyed, then you must first ensure that you have the SQL program running
+(MySQL or PostgreSQL), then you must create the Bacula database (normally
+named bacula), and you must create the Bacula tables using the scripts in
+the {\bf cats} directory. This is explained in the
+\ilink{Installation}{CreateDatabase} chapter of the manual. Finally, before
+scanning into an empty database, you must start and stop the Director with
+the appropriate bacula-dir.conf file so that it can create the Client and
+Storage records which are not stored on the Volumes. Without these
+records, scanning is unable to connect the Job records to the proper
+client.
+
+Forgetting for the moment the extra complications of a full rebuild of
+your catalog, let's suppose that you did a backup to Volumes "Vol001"
+and "Vol002", then sometime later all records of one or both those
+Volumes were pruned or purged from the
+database. By using {\bf bscan} you can recreate the catalog entries for
+those Volumes and then use the {\bf restore} command in the Console to restore
+whatever you want. A command something like:
+
+\footnotesize
+\begin{verbatim}
+bscan -c bacula-sd.conf -v -V Vol001\|Vol002 /dev/nst0
+\end{verbatim}
+\normalsize
+
+will give you an idea of what is going to happen without changing
+your catalog.
Of course, you may need to change the path to the Storage
+daemon's conf file, the Volume name, and your tape (or disk) device name. This
+command must read the entire tape, so if it has a lot of data, it may take a
+long time, and thus you might want to immediately use the command listed
+below. Note, if you are writing to a disk file, replace the device name with
+the path to the directory that contains the Volumes. This must correspond to
+the Archive Device in the conf file.
+
+Then to actually write or store the records in the catalog, add the {\bf -s}
+option as follows:
+
+\footnotesize
+\begin{verbatim}
+ bscan -s -m -c bacula-sd.conf -v -V Vol001\|Vol002 /dev/nst0
+\end{verbatim}
+\normalsize
+
+When writing to the database, if bscan finds existing records, it will
+generally either update them if something is wrong or leave them alone. Thus
+if the Volumes you are scanning are all or partially in the catalog already, no
+harm will be done to that existing data. Any missing data will simply be
+added.
+
+If you have multiple tapes, you should scan them with:
+
+\footnotesize
+\begin{verbatim}
+ bscan -s -m -c bacula-sd.conf -v -V Vol001\|Vol002\|Vol003 /dev/nst0
+\end{verbatim}
+\normalsize
+
+Since there is a limit on the command line length (511 bytes) accepted
+by {\bf bscan}, if you have too many Volumes, you will need to manually
+create a bootstrap file. See the \ilink{Bootstrap}{BootstrapChapter}
+chapter of this manual for more details, in particular the section
+entitled \ilink{Bootstrap for bscan}{bscanBootstrap}.
+
+You should always try to specify the tapes in the order they are written.
+However, bscan can handle scanning tapes that are not sequential. Any
+incomplete records at the end of the tape will simply be ignored in that
+case. If you are simply repairing an existing catalog, this may be OK, but
+if you are creating a new catalog from scratch, it will leave your database
+in an incorrect state.
If you do not specify all necessary Volumes on a
+single bscan command, bscan will not be able to correctly restore the
+records that span two volumes. In other words, it is much better to
+specify two or three volumes on a single bscan command rather than run
+bscan two or three times, each with a single volume.
+
+
+Note, the restoration process using bscan is not identical to the original
+creation of the catalog data. This is because certain data such as Client
+records and other non-essential data such
+as volume reads, volume mounts, etc is not stored on the Volume, and thus is
+not restored by bscan. The results of bscanning are, however, perfectly valid,
+and will permit restoration of any or all the files in the catalog using the
+normal Bacula console commands. If you are starting with an empty catalog
+and expecting bscan to reconstruct it, you may be a bit disappointed, but
+at a minimum, you must ensure that your bacula-dir.conf file is the same
+as what it previously was -- that is, it must contain all the appropriate
+Client resources so that they will be recreated in your new database {\bf
+before} running bscan. Normally when the Director starts, it will recreate
+any missing Client records in the catalog. Another problem you will have
+is that even if the Volumes (Media records) are recreated in the database,
+they will not have their autochanger status and slots properly set. As a
+result, you will need to repair that by using the {\bf update slots}
+command. There may be other considerations as well. Rather than
+bscanning, you should always attempt to recover your previous catalog
+backup.
+
+
+\subsection{Using bscan to Compare a Volume to an existing Catalog}
+\index[general]{Catalog!Using bscan to Compare a Volume to an existing}
+\index[general]{Using bscan to Compare a Volume to an existing Catalog}
+
+If you wish to compare the contents of a Volume to an existing catalog without
+changing the catalog, you can safely do so if and only if you do {\bf not}
+specify either the {\bf -m} or the {\bf -s} options. However, at this time
+(Bacula version 1.26), the comparison routines are not as good or as thorough
+as they should be, so we don't particularly recommend this mode other than for
+testing.
+
+\subsection{Using bscan to Recreate a Catalog from a Volume}
+\index[general]{Volume!Using bscan to Recreate a Catalog from a Volume}
+\index[general]{Using bscan to Recreate a Catalog from a Volume}
+
+This is the mode for which {\bf bscan} is most useful. You can either {\bf
+bscan} into a freshly created catalog, or directly into your existing catalog
+(after having made an ASCII copy as described above). Normally, you should
+start with a freshly created catalog that contains no data.
+
+Starting with a single Volume named {\bf TestVolume1}, you run a command such
+as:
+
+\footnotesize
+\begin{verbatim}
+./bscan -V TestVolume1 -v -s -m -c bacula-sd.conf /dev/nst0
+\end{verbatim}
+\normalsize
+
+If there is more than one volume, simply append it to the first one separating
+it with a vertical bar. You may need to precede the vertical bar with a
+backslash to escape it from the shell -- e.g. {\bf
+TestVolume1\textbackslash{}|TestVolume2}. The {\bf -v} option was added for
+verbose output (this can be omitted if desired). The {\bf -s} option
+tells {\bf bscan} to store information in the database. The physical device
+name {\bf /dev/nst0} is specified after all the options.
+
+For example, after having done a full backup of a directory, then two
+incrementals, I reinitialized the SQLite database as described above, and
+using the bootstrap.bsr file noted above, I entered the following command:
+
+\footnotesize
+\begin{verbatim}
+./bscan -b bootstrap.bsr -v -s -c bacula-sd.conf /dev/nst0
+\end{verbatim}
+\normalsize
+
+which produced the following output:
+
+\footnotesize
+\begin{verbatim}
+bscan: bscan.c:182 Using Database: bacula, User: bacula
+bscan: bscan.c:673 Created Pool record for Pool: Default
+bscan: bscan.c:271 Pool type "Backup" is OK.
+bscan: bscan.c:632 Created Media record for Volume: TestVolume1
+bscan: bscan.c:298 Media type "DDS-4" is OK.
+bscan: bscan.c:307 VOL_LABEL: OK for Volume: TestVolume1
+bscan: bscan.c:693 Created Client record for Client: Rufus
+bscan: bscan.c:769 Created new JobId=1 record for original JobId=2
+bscan: bscan.c:717 Created FileSet record "Kerns Files"
+bscan: bscan.c:819 Updated Job termination record for new JobId=1
+bscan: bscan.c:905 Created JobMedia record JobId 1, MediaId 1
+bscan: Got EOF on device /dev/nst0
+bscan: bscan.c:693 Created Client record for Client: Rufus
+bscan: bscan.c:769 Created new JobId=2 record for original JobId=3
+bscan: bscan.c:708 Fileset "Kerns Files" already exists.
+bscan: bscan.c:819 Updated Job termination record for new JobId=2
+bscan: bscan.c:905 Created JobMedia record JobId 2, MediaId 1
+bscan: Got EOF on device /dev/nst0
+bscan: bscan.c:693 Created Client record for Client: Rufus
+bscan: bscan.c:769 Created new JobId=3 record for original JobId=4
+bscan: bscan.c:708 Fileset "Kerns Files" already exists.
+bscan: bscan.c:819 Updated Job termination record for new JobId=3
+bscan: bscan.c:905 Created JobMedia record JobId 3, MediaId 1
+bscan: Got EOF on device /dev/nst0
+bscan: bscan.c:652 Updated Media record at end of Volume: TestVolume1
+bscan: bscan.c:428 End of Volume.
VolFiles=3 VolBlocks=57 VolBytes=10,027,437 +\end{verbatim} +\normalsize + +The key points to note are that {\bf bscan} prints a line when each major +record is created. Due to the volume of output, it does not print a line for +each file record unless you supply the {\bf -v} option twice or more on the +command line. + +In the case of a Job record, the new JobId will not normally be the same as +the original Jobid. For example, for the first JobId above, the new JobId is +1, but the original JobId is 2. This is nothing to be concerned about as it is +the normal nature of databases. {\bf bscan} will keep everything straight. + +Although {\bf bscan} claims that it created a Client record for Client: Rufus +three times, it was actually only created the first time. This is normal. + +You will also notice that it read an end of file after each Job (Got EOF on +device ...). Finally the last line gives the total statistics for the bscan. + +If you had added a second {\bf -v} option to the command line, Bacula would +have been even more verbose, dumping virtually all the details of each Job +record it encountered. + +Now if you start Bacula and enter a {\bf list jobs} command to the console +program, you will get: + +\footnotesize +\begin{verbatim} ++-------+----------+------------------+------+-----+----------+----------+---------+ +| JobId | Name | StartTime | Type | Lvl | JobFiles | JobBytes | JobStat | ++-------+----------+------------------+------+-----+----------+----------+---------+ +| 1 | kernsave | 2002-10-07 14:59 | B | F | 84 | 4180207 | T | +| 2 | kernsave | 2002-10-07 15:00 | B | I | 15 | 2170314 | T | +| 3 | kernsave | 2002-10-07 15:01 | B | I | 33 | 3662184 | T | ++-------+----------+------------------+------+-----+----------+----------+---------+ +\end{verbatim} +\normalsize + +which corresponds virtually identically with what the database contained +before it was re-initialized and restored with bscan. 
All the Jobs and Files
+found on the tape are restored including most of the Media record. The Volume
+(Media) records restored will be marked as {\bf Full} so that they cannot be
+rewritten without operator intervention.
+
+It should be noted that {\bf bscan} cannot restore a database to the exact
+condition it was in previously because a lot of the less important information
+contained in the database is not saved to the tape. Nevertheless, the
+reconstruction is sufficiently complete, that you can run {\bf restore}
+against it and get valid results.
+
+An interesting aspect of restoring a catalog backup using {\bf bscan} is
+that the backup was made while Bacula was running and writing to a tape. At
+the point the backup of the catalog is made, the tape Bacula is writing to
+will have say 10 files on it, but after the catalog backup is made, there
+will be 11 files on the tape Bacula is writing. Thus there is a difference
+between what is contained in the backed up catalog and what is actually on
+the tape. If after restoring a catalog, you attempt to write on the same
+tape that was used to backup the catalog, Bacula will detect the difference
+in the number of files registered in the catalog compared to what is on the
+tape, and will mark the tape in error.
+
+There are two solutions to this problem. The first is possibly the simplest
+and is to mark the volume as Used before doing any backups. The second is
+to manually correct the number of files listed in the Media record of the
+catalog. This procedure is documented elsewhere in the manual and involves
+using the {\bf update volume} command in {\bf bconsole}.
+
+\subsection{Using bscan to Correct the Volume File Count}
+\index[general]{Using bscan to Correct the Volume File Count}
+\index[general]{Count!Using bscan to Correct the Volume File Count}
+
+If the Storage daemon crashes during a backup Job, the catalog will not be
+properly updated for the Volume being used at the time of the crash.
This +means that the Storage daemon will have written say 20 files on the tape, but +the catalog record for the Volume indicates only 19 files. + +Bacula refuses to write on a tape that contains a different number of files +from what is in the catalog. To correct this situation, you may run a {\bf +bscan} with the {\bf -m} option (but {\bf without} the {\bf -s} option) to +update only the final Media record for the Volumes read. + +\subsection{After bscan} +\index[general]{After bscan} +\index[general]{Bscan!After} + +If you use {\bf bscan} to enter the contents of the Volume into an existing +catalog, you should be aware that the records you entered may be immediately +pruned during the next job, particularly if the Volume is very old or had been +previously purged. To avoid this, after running {\bf bscan}, you can manually +set the volume status (VolStatus) to {\bf Read-Only} by using the {\bf update} +command in the catalog. This will allow you to restore from the volume without +having it immediately purged. When you have restored and backed up the data, +you can reset the VolStatus to {\bf Used} and the Volume will be purged from +the catalog. + +\section{bcopy} +\label{bcopy} +\index[general]{Bcopy} +\index[general]{program!bcopy} + +The {\bf bcopy} program can be used to copy one {\bf Bacula} archive file to +another. For example, you may copy a tape to a file, a file to a tape, a file +to a file, or a tape to a tape. For tape to tape, you will need two tape +drives. (a later version is planned that will buffer it to disk). In the +process of making the copy, no record of the information written to the new +Volume is stored in the catalog. This means that the new Volume, though it +contains valid backup data, cannot be accessed directly from existing catalog +entries. If you wish to be able to use the Volume with the Console restore +command, for example, you must first bscan the new Volume into the catalog. 
+ +\subsection{bcopy Command Options} +\index[general]{Options!bcopy Command} +\index[general]{Bcopy Command Options} + +\footnotesize +\begin{verbatim} +Usage: bcopy [-d debug_level] + -b bootstrap specify a bootstrap file + -c specify configuration file + -dnn set debug level to nn + -i specify input Volume names (separated by |) + -o specify output Volume names (separated by |) + -p proceed inspite of I/O errors + -v verbose + -w dir specify working directory (default /tmp) + -? print this message +\end{verbatim} +\normalsize + +By using a {\bf bootstrap} file, you can copy parts of a Bacula archive file +to another archive. + +One of the objectives of this program is to be able to recover as much data as +possible from a damaged tape. However, the current version does not yet have +this feature. + +As this is a new program, any feedback on its use would be appreciated. In +addition, I only have a single tape drive, so I have never been able to test +this program with two tape drives. + +\section{btape} +\label{btape} +\index[general]{Btape} +\index[general]{program!btape} + +This program permits a number of elementary tape operations via a tty command +interface. It works only with tapes and not with other kinds of Bacula +storage media (DVD, File, ...). The {\bf test} command, described below, +can be very useful for testing older tape drive compatibility problems. +Aside from initial testing of tape drive compatibility with {\bf Bacula}, +{\bf btape} will be mostly used by developers writing new tape drivers. + +{\bf btape} can be dangerous to use with existing {\bf Bacula} tapes because +it will relabel a tape or write on the tape if so requested regardless that +the tape may contain valuable data, so please be careful and use it only on +blank tapes. + +To work properly, {\bf btape} needs to read the Storage daemon's configuration +file. As a default, it will look for {\bf bacula-sd.conf} in the current +directory. 
If your configuration file is elsewhere, please use the {\bf -c} +option to specify where. + +The physical device name must be specified on the command line, and this +same device name must be present in the Storage daemon's configuration file +read by {\bf btape} + +\footnotesize +\begin{verbatim} +Usage: btape + -b specify bootstrap file + -c set configuration file to file + -d set debug level to nn + -p proceed inspite of I/O errors + -s turn off signals + -v be verbose + -? print this message. +\end{verbatim} +\normalsize + +\subsection{Using btape to Verify your Tape Drive} +\index[general]{Using btape to Verify your Tape Drive} +\index[general]{Drive!Using btape to Verify your Tape} + +An important reason for this program is to ensure that a Storage daemon +configuration file is defined so that Bacula will correctly read and write +tapes. + +It is highly recommended that you run the {\bf test} command before running +your first Bacula job to ensure that the parameters you have defined for your +storage device (tape drive) will permit {\bf Bacula} to function properly. You +only need to mount a blank tape, enter the command, and the output should be +reasonably self explanatory. Please see the +\ilink{Tape Testing}{TapeTestingChapter} Chapter of this manual for +the details. 
+ +\subsection{btape Commands} +\index[general]{Btape Commands} +\index[general]{Commands!btape} + +The full list of commands are: + +\footnotesize +\begin{verbatim} + Command Description + ======= =========== + autochanger test autochanger + bsf backspace file + bsr backspace record + cap list device capabilities + clear clear tape errors + eod go to end of Bacula data for append + eom go to the physical end of medium + fill fill tape, write onto second volume + unfill read filled tape + fsf forward space a file + fsr forward space a record + help print this command + label write a Bacula label to the tape + load load a tape + quit quit btape + rawfill use write() to fill tape + readlabel read and print the Bacula tape label + rectest test record handling functions + rewind rewind the tape + scan read() tape block by block to EOT and report + scanblocks Bacula read block by block to EOT and report + status print tape status + test General test Bacula tape functions + weof write an EOF on the tape + wr write a single Bacula block + rr read a single record + qfill quick fill command +\end{verbatim} +\normalsize + +The most useful commands are: + +\begin{itemize} +\item test -- test writing records and EOF marks and reading them back. +\item fill -- completely fill a volume with records, then write a few records + on a second volume, and finally, both volumes will be read back. + This command writes blocks containing random data, so your drive will + not be able to compress the data, and thus it is a good test of + the real physical capacity of your tapes. +\item readlabel -- read and dump the label on a Bacula tape. +\item cap -- list the device capabilities as defined in the configuration + file and as perceived by the Storage daemon. + \end{itemize} + +The {\bf readlabel} command can be used to display the details of a Bacula +tape label. This can be useful if the physical tape label was lost or damaged. 
+
+
+In the event that you want to relabel a {\bf Bacula} tape, you can simply use the
+{\bf label} command which will write over any existing label. However, please
+note for labeling tapes, we recommend that you use the {\bf label} command in
+the {\bf Console} program since it will never overwrite a valid Bacula tape.
+
+\section{Other Programs}
+\index[general]{Programs!Other}
+\index[general]{Other Programs}
+
+The following programs are general utility programs and in general do not need
+a configuration file nor a device name.
+
+\section{bsmtp}
+\label{bsmtp}
+\index[general]{Bsmtp}
+\index[general]{program!bsmtp}
+
+{\bf bsmtp} is a simple mail transport program that permits more flexibility
+than the standard mail programs typically found on Unix systems. It can even
+be used on Windows machines.
+
+It is called:
+
+\footnotesize
+\begin{verbatim}
+Usage: bsmtp [-f from] [-h mailhost] [-s subject] [-c copy] [recipient ...]
+ -c set the Cc: field
+ -dnn set debug level to nn
+ -f set the From: field
+ -h use mailhost:port as the bsmtp server
+ -l limit the lines accepted to nn
+ -s set the Subject: field
+ -? print this message.
+\end{verbatim}
+\normalsize
+
+If the {\bf -f} option is not specified, {\bf bsmtp} will use your userid. If
+the option {\bf -h} is not specified {\bf bsmtp} will use the value in the environment
+variable {\bf SMTPSERVER} or if there is none {\bf localhost}. By default
+port 25 is used.
+
+If a line count limit is set with the {\bf -l} option, {\bf bsmtp} will
+not send an email with a body text exceeding that number of lines. This
+is especially useful for large restore job reports where the list of
+files restored might produce very long mails that your mail server would
+refuse or crash. However, be aware that you will probably suppress the
+job report and any error messages unless you check the log file written
+by the Director (see the messages resource in this manual for details).
+ + +{\bf recipients} is a space separated list of email recipients. + +The body of the email message is read from standard input. + +An example of the use of {\bf bsmtp} would be to put the following statement +in the {\bf Messages} resource of your {\bf bacula-dir.conf} file. Note, these +commands should appear on a single line each. + +\footnotesize +\begin{verbatim} + mailcommand = "/home/bacula/bin/bsmtp -h mail.domain.com -f \"\(Bacula\) %r\" + -s \"Bacula: %t %e of %c %l\" %r" + operatorcommand = "/home/bacula/bin/bsmtp -h mail.domain.com -f \"\(Bacula\) %r\" + -s \"Bacula: Intervention needed for %j\" %r" +\end{verbatim} +\normalsize + +Where you replace {\bf /home/bacula/bin} with the path to your {\bf Bacula} +binary directory, and you replace {\bf mail.domain.com} with the fully +qualified name of your bsmtp (email) server, which normally listens on port +25. For more details on the substitution characters (e.g. \%r) used in the +above line, please see the documentation of the +\ilink{ MailCommand in the Messages Resource}{mailcommand} +chapter of this manual. + +It is HIGHLY recommended that you test one or two cases by hand to make sure +that the {\bf mailhost} that you specified is correct and that it will accept +your email requests. Since {\bf bsmtp} always uses a TCP connection rather +than writing in the spool file, you may find that your {\bf from} address is +being rejected because it does not contain a valid domain, or because your +message is caught in your spam filtering rules. Generally, you should specify +a fully qualified domain name in the {\bf from} field, and depending on +whether your bsmtp gateway is Exim or Sendmail, you may need to modify the +syntax of the from part of the message. Please test. + +When running {\bf bsmtp} by hand, you will need to terminate the message by +entering a ctl-d in column 1 of the last line. +% TODO: is "column" the correct terminology for this? + +If you are getting incorrect dates (e.g. 
1970) and you are +running with a non-English language setting, you might try adding +a LANG=''en\_US'' immediately before the bsmtp call. + +\section{dbcheck} +\label{dbcheck} +\index[general]{Dbcheck} +\index[general]{program!dbcheck} +{\bf dbcheck} is a simple program that will search for logical +inconsistencies in the Bacula tables in your database, and optionally fix them. +It is a database maintenance routine, in the sense that it can +detect and remove unused rows, but it is not a database repair +routine. To repair a database, see the tools furnished by the +database vendor. Normally dbcheck should never need to be run, +but if Bacula has crashed or you have a lot of Clients, Pools, or +Jobs that you have removed, it could be useful. + +The {\bf dbcheck} program can be found in +the {\bf \lt{}bacula-source\gt{}/src/tools} directory of the source +distribution. Though it is built with the make process, it is not normally +"installed". + +It is called: + +\footnotesize +\begin{verbatim} +Usage: dbcheck [-c config] [-C catalog name] [-d debug_level] + [] + -b batch mode + -C catalog name in the director conf file + -c director conf filename + -dnn set debug level to nn + -f fix inconsistencies + -v verbose + -? print this message +\end{verbatim} +\normalsize + +If the {\bf -c} option is given with the Director's conf file, there is no +need to enter any of the command line arguments, in particular the working +directory as dbcheck will read them from the file. + +If the {\bf -f} option is specified, {\bf dbcheck} will repair ({\bf fix}) the +inconsistencies it finds. Otherwise, it will report only. + +If the {\bf -b} option is specified, {\bf dbcheck} will run in batch mode, and +it will proceed to examine and fix (if -f is set) all programmed inconsistency +checks. 
If the {\bf -b} option is not specified, {\bf dbcheck} will enter +interactive mode and prompt with the following: + +\footnotesize +\begin{verbatim} +Hello, this is the database check/correct program. +Please select the function you want to perform. + 1) Toggle modify database flag + 2) Toggle verbose flag + 3) Repair bad Filename records + 4) Repair bad Path records + 5) Eliminate duplicate Filename records + 6) Eliminate duplicate Path records + 7) Eliminate orphaned Jobmedia records + 8) Eliminate orphaned File records + 9) Eliminate orphaned Path records + 10) Eliminate orphaned Filename records + 11) Eliminate orphaned FileSet records + 12) Eliminate orphaned Client records + 13) Eliminate orphaned Job records + 14) Eliminate all Admin records + 15) Eliminate all Restore records + 16) All (3-15) + 17) Quit +Select function number: +\end{verbatim} +\normalsize + +By entering 1 or 2, you can toggle the modify database flag (-f option) and +the verbose flag (-v). It can be helpful and reassuring to turn off the modify +database flag, then select one or more of the consistency checks (items 3 +through 9) to see what will be done, then toggle the modify flag on and re-run +the check. + +The inconsistencies examined are the following: + +\begin{itemize} +\item Duplicate filename records. This can happen if you accidentally run two + copies of Bacula at the same time, and they are both adding filenames + simultaneously. It is a rare occurrence, but will create an inconsistent + database. If this is the case, you will receive error messages during Jobs + warning of duplicate database records. If you are not getting these error + messages, there is no reason to run this check. +\item Repair bad Filename records. This checks and corrects filenames that + have a trailing slash. They should not. +\item Repair bad Path records. This checks and corrects path names that do + not have a trailing slash. They should. +\item Duplicate path records. 
This can happen if you accidentally run two + copies of Bacula at the same time, and they are both adding filenames + simultaneously. It is a rare occurrence, but will create an inconsistent + database. See the item above for why this occurs and how you know it is + happening. +\item Orphaned JobMedia records. This happens when a Job record is deleted + (perhaps by a user issued SQL statement), but the corresponding JobMedia + record (one for each Volume used in the Job) was not deleted. Normally, this + should not happen, and even if it does, these records generally do not take + much space in your database. However, by running this check, you can + eliminate any such orphans. +\item Orphaned File records. This happens when a Job record is deleted + (perhaps by a user issued SQL statement), but the corresponding File record + (one for each Volume used in the Job) was not deleted. Note, searching for + these records can be {\bf very} time consuming (i.e. it may take hours) for a + large database. Normally this should not happen as Bacula takes care to + prevent it. Just the same, this check can remove any orphaned File records. + It is recommended that you run this once a year since orphaned File records + can take a large amount of space in your database. You might + want to ensure that you have indexes on JobId, FilenameId, and + PathId for the File table in your catalog before running this + command. +\item Orphaned Path records. This condition happens any time a directory is + deleted from your system and all associated Job records have been purged. + During standard purging (or pruning) of Job records, Bacula does not check + for orphaned Path records. As a consequence, over a period of time, old + unused Path records will tend to accumulate and use space in your database. + This check will eliminate them. It is recommended that you run this + check at least once a year. +\item Orphaned Filename records. 
This condition happens any time a file is
+ deleted from your system and all associated Job records have been purged.
+ This can happen quite frequently as there are quite a large number of files
+ that are created and then deleted. In addition, if you do a system update or
+ delete an entire directory, there can be a very large number of Filename
+ records that remain in the catalog but are no longer used.
+
+ During standard purging (or pruning) of Job records, Bacula does not check
+ for orphaned Filename records. As a consequence, over a period of time, old
+ unused Filename records will accumulate and use space in your database. This
+ check will eliminate them. It is strongly recommended that you run this check
+ at least once a year, and for large database (more than 200 Megabytes), it is
+ probably better to run this once every 6 months.
+\item Orphaned Client records. These records can remain in the database long
+ after you have removed a client.
+\item Orphaned Job records. If no client is defined for a job or you do not
+ run a job for a long time, you can accumulate old job records. This option
+ allows you to remove jobs that are not attached to any client (and thus
+ useless).
+\item All Admin records. This command will remove all Admin records,
+ regardless of their age.
+\item All Restore records. This command will remove all Restore records,
+ regardless of their age.
+\end{itemize}
+
+By the way, I personally run dbcheck only where I have messed up
+my database due to a bug in developing Bacula code, so normally
+you should never need to run dbcheck in spite of the
+recommendations given above, which are given so that users don't
+waste their time running dbcheck too often.
+
+\section{bregex}
+\label{bregex}
+\index[general]{bregex}
+\index[general]{program!bregex}
+
+{\bf bregex} is a simple program that will allow you to test
+regular expressions against a file of data.
This can be useful
+because the regex libraries on most systems differ, and in
+addition, regex expressions can be complicated.
+
+{\bf bregex} is found in the src/tools directory and it is
+normally installed with your system binaries. To run it, use:
+
+\begin{verbatim}
+Usage: bregex [-d debug_level] -f <data-file>
+ -f specify file of data to be matched
+ -l suppress line numbers
+ -n print lines that do not match
+ -? print this message.
+\end{verbatim}
+
+The \lt{}data-file\gt{} is a filename that contains lines
+of data to be matched (or not) against one or more patterns.
+When the program is run, it will prompt you for a regular
+expression pattern, then apply it one line at a time against
+the data in the file. Each line that matches will be printed
+preceded by its line number. You will then be prompted again
+for another pattern.
+
+Enter an empty line for a pattern to terminate the program. You
+can print only lines that do not match by using the -n option,
+and you can suppress printing of line numbers with the -l option.
+
+This program can be useful for testing regex expressions to be
+applied against a list of filenames.
+
+\section{bwild}
+\label{bwild}
+\index[general]{bwild}
+\index[general]{program!bwild}
+
+{\bf bwild} is a simple program that will allow you to test
+wild-card expressions against a file of data.
+
+{\bf bwild} is found in the src/tools directory and it is
+normally installed with your system binaries. To run it, use:
+
+\begin{verbatim}
+Usage: bwild [-d debug_level] -f <data-file>
+ -f specify file of data to be matched
+ -l suppress line numbers
+ -n print lines that do not match
+ -? print this message.
+\end{verbatim}
+
+The \lt{}data-file\gt{} is a filename that contains lines
+of data to be matched (or not) against one or more patterns.
+When the program is run, it will prompt you for a wild-card
+pattern, then apply it one line at a time against
+the data in the file. Each line that matches will be printed
+preceded by its line number.
You will then be prompted again +for another pattern. + +Enter an empty line for a pattern to terminate the program. You +can print only lines that do not match by using the -n option, +and you can suppress printing of line numbers with the -l option. + +This program can be useful for testing wild expressions to be +applied against a list of filenames. + +\section{testfind} +\label{testfind} +\index[general]{Testfind} +\index[general]{program!testfind} + +{\bf testfind} permits listing of files using the same search engine that is +used for the {\bf Include} resource in Job resources. Note, much of the +functionality of this program (listing of files to be included) is present in +the +\ilink{estimate command}{estimate} in the Console program. + +The original use of testfind was to ensure that Bacula's file search engine +was correct and to print some statistics on file name and path length. +However, you may find it useful to see what bacula would do with a given {\bf +Include} resource. The {\bf testfind} program can be found in the {\bf +\lt{}bacula-source\gt{}/src/tools} directory of the source distribution. +Though it is built with the make process, it is not normally "installed". + +It is called: + +\footnotesize +\begin{verbatim} +Usage: testfind [-d debug_level] [-] [pattern1 ...] + -a print extended attributes (Win32 debug) + -dnn set debug level to nn + - read pattern(s) from stdin + -? print this message. +Patterns are used for file inclusion -- normally directories. +Debug level>= 1 prints each file found. +Debug level>= 10 prints path/file for catalog. +Errors are always printed. +Files/paths truncated is a number with len> 255. +Truncation is only in the catalog. +\end{verbatim} +\normalsize + +Where a pattern is any filename specification that is valid within an {\bf +Include} resource definition. If none is specified, {\bf /} (the root +directory) is assumed. 
For example: + +\footnotesize +\begin{verbatim} +./testfind /bin +\end{verbatim} +\normalsize + +Would print the following: + +\footnotesize +\begin{verbatim} +Dir: /bin +Reg: /bin/bash +Lnk: /bin/bash2 -> bash +Lnk: /bin/sh -> bash +Reg: /bin/cpio +Reg: /bin/ed +Lnk: /bin/red -> ed +Reg: /bin/chgrp +... +Reg: /bin/ipcalc +Reg: /bin/usleep +Reg: /bin/aumix-minimal +Reg: /bin/mt +Lnka: /bin/gawk-3.1.0 -> /bin/gawk +Reg: /bin/pgawk +Total files : 85 +Max file length: 13 +Max path length: 5 +Files truncated: 0 +Paths truncated: 0 +\end{verbatim} +\normalsize + +Even though {\bf testfind} uses the same search engine as {\bf Bacula}, each +directory to be listed, must be entered as a separate command line entry or +entered one line at a time to standard input if the {\bf -} option was +specified. + +Specifying a debug level of one (i.e. {\bf -d1}) on the command line will +cause {\bf testfind} to print the raw filenames without showing the Bacula +internal file type, or the link (if any). Debug levels of 10 or greater cause +the filename and the path to be separated using the same algorithm that is +used when putting filenames into the Catalog database. diff --git a/docs/manuals/en/utility/rpm-faq.tex b/docs/manuals/en/utility/rpm-faq.tex new file mode 100644 index 00000000..1e37cc59 --- /dev/null +++ b/docs/manuals/en/utility/rpm-faq.tex @@ -0,0 +1,394 @@ +%% +%% + +\chapter{Bacula RPM Packaging FAQ} +\label{RpmFaqChapter} +\index[general]{FAQ!Bacula\textsuperscript{\textregistered} - RPM Packaging } +\index[general]{Bacula\textsuperscript{\textregistered} - RPM Packaging FAQ } + +\begin{enumerate} +\item + \ilink{How do I build Bacula for platform xxx?}{faq1} +\item + \ilink{How do I control which database support gets built?}{faq2} + +\item + \ilink{What other defines are used?}{faq3} +\item + \ilink{I'm getting errors about not having permission when I try to build the + packages. 
Do I need to be root?}{faq4} +\item + \ilink{I'm building my own rpms but on all platforms and compiles I get an + unresolved dependency for something called + /usr/afsws/bin/pagsh.}{faq5} +\item + \ilink{I'm building my own rpms because you don't publish for my platform. + Can I get my packages released to sourceforge for other people to use?}{faq6} +\item + \ilink{Is there an easier way than sorting out all these command line options?}{faq7} +\item + \ilink{I just upgraded from 1.36.x to 1.38.x and now my director daemon won't start. It appears to start but dies silently and I get a "connection refused" error when starting the console. What is wrong?}{faq8} +\item + \ilink{There are a lot of rpm packages. Which packages do I need for what?}{faq9} +\end{enumerate} + +\section{Answers} +\index[general]{Answers } + +\begin{enumerate} +\item + \label{faq1} + {\bf How do I build Bacula for platform xxx?} + The bacula spec file contains defines to build for several platforms: + Red Hat 7.x (rh7), Red Hat 8.0 (rh8), Red Hat 9 (rh9), Fedora Core (fc1, + fc3, fc4, fc5, fc6, fc7), Whitebox Enterprise Linux 3.0 (wb3), Red Hat Enterprise Linux + (rhel3, rhel4, rhel5), Mandrake 10.x (mdk), Mandriva 2006.x (mdv) CentOS (centos3, centos4, centos5) + Scientific Linux (sl3, sl4, sl5) and SuSE (su9, su10, su102, su103). The package build is controlled by a mandatory define set at the beginning of the file. These defines basically just control the dependency information that gets coded into the finished rpm package as well + as any special configure options required. The platform define may be edited + in the spec file directly (by default all defines are set to 0 or "not set"). 
+ For example, to build the Red Hat 7.x package find the line in the spec file
+ which reads
+
+\footnotesize
+\begin{verbatim}
+ %define rh7 0
+
+\end{verbatim}
+\normalsize
+
+and edit it to read
+
+\footnotesize
+\begin{verbatim}
+ %define rh7 1
+
+\end{verbatim}
+\normalsize
+
+Alternately you may pass the define on the command line when calling rpmbuild:
+
+
+\footnotesize
+\begin{verbatim}
+ rpmbuild -ba --define "build_rh7 1" bacula.spec
+ rpmbuild --rebuild --define "build_rh7 1" bacula-x.x.x-x.src.rpm
+
+\end{verbatim}
+\normalsize
+
+\item
+ \label{faq2}
+ {\bf How do I control which database support gets built?}
+ Another mandatory build define controls which database support is compiled,
+ one of build\_sqlite, build\_mysql or build\_postgresql. To get the MySQL
+ package and support either set the
+
+\footnotesize
+\begin{verbatim}
+ %define mysql 0
+ OR
+ %define mysql4 0
+ OR
+ %define mysql5 0
+
+\end{verbatim}
+\normalsize
+
+to
+
+\footnotesize
+\begin{verbatim}
+ %define mysql 1
+ OR
+ %define mysql4 1
+ OR
+ %define mysql5 1
+
+\end{verbatim}
+\normalsize
+
+in the spec file directly or pass it to rpmbuild on the command line:
+
+\footnotesize
+\begin{verbatim}
+ rpmbuild -ba --define "build_rh7 1" --define "build_mysql 1" bacula.spec
+ rpmbuild -ba --define "build_rh7 1" --define "build_mysql4 1" bacula.spec
+ rpmbuild -ba --define "build_rh7 1" --define "build_mysql5 1" bacula.spec
+
+\end{verbatim}
+\normalsize
+
+\item
+ \label{faq3}
+ {\bf What other defines are used?}
+ Three other building defines of note are the depkgs\_version, docs\_version and
+ \_rescuever identifiers. These three defines are set with each release and must
+ match the version of those sources that are being used to build the packages.
+ You would not ordinarily need to edit these. See also the Build Options section
+ below for other build time options that can be passed on the command line.
+\item + \label{faq4} + {\bf I'm getting errors about not having permission when I try to build the + packages. Do I need to be root?} + No, you do not need to be root and, in fact, it is better practice to + build rpm packages as a non-root user. Bacula packages are designed to + be built by a regular user but you must make a few changes on your + system to do this. If you are building on your own system then the + simplest method is to add write permissions for all to the build + directory (/usr/src/redhat/, /usr/src/RPM or /usr/src/packages). + To accomplish this, execute the following command as root: + +\footnotesize +\begin{verbatim} + chmod -R 777 /usr/src/redhat + chmod -R 777 /usr/src/RPM + chmod -R 777 /usr/src/packages + +\end{verbatim} +\normalsize + +If you are working on a shared system where you can not use the method +above then you need to recreate the appropriate above directory tree with all +of its subdirectories inside your home directory. Then create a file named + +{\tt .rpmmacros} + +in your home directory (or edit the file if it already exists) +and add the following line: + +\footnotesize +\begin{verbatim} + %_topdir /home/myuser/redhat + +\end{verbatim} +\normalsize + +Another handy directive for the .rpmmacros file if you wish to suppress the +creation of debug rpm packages is: + +\footnotesize +\begin{verbatim} + %debug_package %{nil} + +\end{verbatim} + +\normalsize + +\item + \label{faq5} + {\bf I'm building my own rpms but on all platforms and compiles I get an + unresolved dependency for something called /usr/afsws/bin/pagsh.} This + is a shell from the OpenAFS (Andrew File System). If you are seeing + this then you chose to include the docs/examples directory in your + package. One of the example scripts in this directory is a pagsh + script. Rpmbuild, when scanning for dependencies, looks at the shebang + line of all packaged scripts in addition to checking shared libraries. + To avoid this do not package the examples directory. 
If you are seeing this + problem you are building a very old bacula package as the examples have been + removed from the doc packaging. + +\item + \label{faq6} + {\bf I'm building my own rpms because you don't publish for my platform. + Can I get my packages released to sourceforge for other people to use?} Yes, + contributions from users are accepted and appreciated. Please examine the + directory platforms/contrib-rpm in the source code for further information. + +\item + \label{faq7} + {\bf Is there an easier way than sorting out all these command line options?} Yes, + there is a gui wizard shell script which you can use to rebuild the src rpm package. + Look in the source archive for platforms/contrib-rpm/rpm\_wizard.sh. This script will + allow you to specify build options using GNOME dialog screens. It requires zenity. + +\item + \label{faq8} + {\bf I just upgraded from 1.36.x to 1.38.x and now my director daemon +won't start. It appears to start but dies silently and I get a "connection +refused" error when starting the console. What is wrong?} Beginning with +1.38 the rpm packages are configured to run the director and storage +daemons as a non-root user. The file daemon runs as user root and group +bacula, the storage daemon as user bacula and group disk, and the director +as user bacula and group bacula. If you are upgrading you will need to +change some file permissions for things to work. Execute the following +commands as root: + +\footnotesize +\begin{verbatim} + chown bacula.bacula /var/bacula/* + chown root.bacula /var/bacula/bacula-fd.9102.state + chown bacula.disk /var/bacula/bacula-sd.9103.state + +\end{verbatim} +\normalsize + +Further, if you are using File storage volumes rather than tapes those +files will also need to have ownership set to user bacula and group bacula. + +\item + \label{faq9} + {\bf There are a lot of rpm packages. 
Which packages do I need for
+what?} For a bacula server you need to select the package based upon your
+preferred catalog database: one of bacula-mysql, bacula-postgresql or
+bacula-sqlite. If your system does not provide an mtx package you also
+need bacula-mtx to satisfy that dependency. For a client machine you need
+only install bacula-client. Optionally, for either server or client
+machines, you may install a graphical console bacula-gconsole and/or
+bacula-wxconsole. The Bacula Administration Tool is installed with the
+bacula-bat package. One last package, bacula-updatedb is required only when
+upgrading a server more than one database revision level.
+
+
+
+\item {\bf Support for RHEL3/4/5, CentOS 3/4/5, Scientific Linux 3/4/5 and x86\_64}
+ The examples below show
+ explicit build support for RHEL4 and CentOS 4. Build support
+ for x86\_64 has also been added.
+\end{enumerate}
+
+\footnotesize
+\begin{verbatim}
+Build with one of these 3 commands:
+
+rpmbuild --rebuild \
+ --define "build_rhel4 1" \
+ --define "build_sqlite 1" \
+ bacula-1.38.3-1.src.rpm
+
+rpmbuild --rebuild \
+ --define "build_rhel4 1" \
+ --define "build_postgresql 1" \
+ bacula-1.38.3-1.src.rpm
+
+rpmbuild --rebuild \
+ --define "build_rhel4 1" \
+ --define "build_mysql4 1" \
+ bacula-1.38.3-1.src.rpm
+
+For CentOS substitute '--define "build_centos4 1"' in place of rhel4.
+For Scientific Linux substitute '--define "build_sl4 1"' in place of rhel4.
+
+For 64 bit support add '--define "build_x86_64 1"'
+\end{verbatim}
+\normalsize
+
+\section{Build Options}
+\index[general]{Build Options}
+The spec file currently supports building on the following platforms:
+\footnotesize
+\begin{verbatim}
+Red Hat builds
+--define "build_rh7 1"
+--define "build_rh8 1"
+--define "build_rh9 1"
+
+Fedora Core build
+--define "build_fc1 1"
+--define "build_fc3 1"
+--define "build_fc4 1"
+--define "build_fc5 1"
+--define "build_fc6 1"
+--define "build_fc7 1"
+
+Whitebox Enterprise build
+--define "build_wb3 1"
+
+Red Hat Enterprise builds
+--define "build_rhel3 1"
+--define "build_rhel4 1"
+--define "build_rhel5 1"
+
+CentOS build
+--define "build_centos3 1"
+--define "build_centos4 1"
+--define "build_centos5 1"
+
+Scientific Linux build
+--define "build_sl3 1"
+--define "build_sl4 1"
+--define "build_sl5 1"
+
+SuSE build
+--define "build_su9 1"
+--define "build_su10 1"
+--define "build_su102 1"
+--define "build_su103 1"
+
+Mandrake 10.x build
+--define "build_mdk 1"
+
+Mandriva build
+--define "build_mdv 1"
+
+MySQL support:
+for mysql 3.23.x support define this
+--define "build_mysql 1"
+if using mysql 4.x define this,
+currently: Mandrake 10.x, Mandriva 2006.0, SuSE 9.x & 10.0, FC4 & RHEL4
+--define "build_mysql4 1"
+if using mysql 5.x define this,
+currently: SuSE 10.1 & FC5
+--define "build_mysql5 1"
+
+PostgreSQL support:
+--define "build_postgresql 1"
+
+Sqlite support:
+--define "build_sqlite 1"
+
+Build the client rpm only in place of one of the above database full builds:
+--define "build_client_only 1"
+
+X86-64 support:
+--define "build_x86_64 1"
+
+Suppress build of bgnome-console:
+--define "nobuild_gconsole 1"
+
+Build the WXWindows console:
+requires wxGTK >= 2.6
+--define "build_wxconsole 1"
+
+Build the Bacula Administration Tool:
+requires QT >= 4.2
+--define "build_bat 1"
+
+Build python scripting support:
+--define "build_python 1"
+
+Modify the Packager tag for third party packages:
+--define
"contrib_packager Your Name " + +\end{verbatim} +\normalsize + +\section{RPM Install Problems} +\index[general]{RPM Install Problems} +In general the RPMs, once properly built should install correctly. +However, when attempting to run the daemons, a number of problems +can occur: +\begin{itemize} +\item [Wrong /var/bacula Permissions] + By default, the Director and Storage daemon do not run with + root permission. If the /var/bacula is owned by root, then it + is possible that the Director and the Storage daemon will not + be able to access this directory, which is used as the Working + Directory. To fix this, the easiest thing to do is: +\begin{verbatim} + chown bacula:bacula /var/bacula +\end{verbatim} + Note: as of 1.38.8 /var/bacula is installed root:bacula with + permissions 770. +\item [The Storage daemon cannot Access the Tape drive] + This can happen in some older RPM releases where the Storage + daemon ran under userid bacula, group bacula. There are two + ways of fixing this: the best is to modify the /etc/init.d/bacula-sd + file so that it starts the Storage daemon with group "disk". + The second way to fix the problem is to change the permissions + of your tape drive (usually /dev/nst0) so that Bacula can access it. + You will probably need to change the permissions of the SCSI control + device as well, which is usually /dev/sg0. The exact names depend + on your configuration, please see the Tape Testing chapter for + more information on devices. +\end{itemize} + diff --git a/docs/manuals/en/utility/setup.sm b/docs/manuals/en/utility/setup.sm new file mode 100644 index 00000000..7c88dc61 --- /dev/null +++ b/docs/manuals/en/utility/setup.sm @@ -0,0 +1,23 @@ +/* + * html2latex + */ + +available { + sun4_sunos.4 + sun4_solaris.2 + rs_aix.3 + rs_aix.4 + sgi_irix +} + +description { + From Jeffrey Schaefer, Geometry Center. 
Translates HTML document to LaTeX
+}
+
+install {
+ bin/html2latex /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex
+ bin/html2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.tag
+ bin/html2latex-local.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex-local.tag
+ bin/webtex2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/webtex2latex.tag
+ man/man1/html2latex.1 /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.1
+}
diff --git a/docs/manuals/en/utility/translate_images.pl b/docs/manuals/en/utility/translate_images.pl
new file mode 100755
index 00000000..c7225118
--- /dev/null
+++ b/docs/manuals/en/utility/translate_images.pl
@@ -0,0 +1,185 @@
+#!/usr/bin/perl -w
+#
+use strict;
+
+# Used to change the names of the image files generated by latex2html from imgxx.png
+# to meaningful names. Provision is made to go either from or to the meaningful names.
+# The meaningful names are obtained from a file called imagename_translations, which
+# is generated by extensions to latex2html in the make_image_file subroutine in
+# bacula.perl.
+
+# Opens the file imagename_translations and reads the contents into a hash.
+# The hash is created with the imgxx.png files as the key if processing TO
+# meaningful filenames, and with the meaningful filenames as the key if
+# processing FROM meaningful filenames.
+# Then opens the html file(s) indicated in the command-line arguments and
+# changes all image references according to the translations described in the
+# above file. Finally, it renames the image files.
+#
+# Original creation: 3-27-05 by Karl Cunningham.
+# Modified 5-21-05 to go FROM and TO meaningful filenames.
+#
+my $TRANSFILE = "imagename_translations";
+my $path;
+
+# Loads the contents of $TRANSFILE file into the hash referenced in the first
+# argument. The hash is loaded to translate old to new if $direction is 0,
+# otherwise it is loaded to translate new to old.
In this context, the
+# 'old' filename is the meaningful name, and the 'new' filename is the
+# imgxx.png filename. It is assumed that the old image is the one that
+# latex2html has used as the source to create the imgxx.png filename.
+# The filename extension is taken from the file
+sub read_transfile {
+ my ($trans,$direction) = @_;
+
+ if (!open IN,"<$path$TRANSFILE") {
+ print "WARNING: Cannot open image translation file $path$TRANSFILE for reading\n";
+ print " Image filename translation aborted\n\n";
+ exit 0;
+ }
+
+ while (<IN>) {
+ chomp;
+ my ($new,$old) = split(/\001/);
+
+ # Old filenames will usually have a leading ./ which we don't need.
+ $old =~ s/^\.\///;
+
+ # The filename extension of the old filename must be made to match
+ # the new filename because it indicates the encoding format of the image.
+ my ($ext) = $new =~ /(\.[^\.]*)$/;
+ $old =~ s/\.[^\.]*$/$ext/;
+ if ($direction == 0) {
+ $trans->{$new} = $old;
+ } else {
+ $trans->{$old} = $new;
+ }
+ }
+ close IN;
+}
+
+# Translates the image names in the file given as the first argument, according to
+# the translations in the hash that is given as the second argument.
+# The file contents are read in entirely into a string, the string is processed, and
+# the file contents are then written. No particular care is taken to ensure that the
+# file is not lost if a system failure occurs at an inopportune time. It is assumed
+# that the html files being processed here can be recreated on demand.
+#
+# Links to other files are added to the %filelist for processing. That way,
+# all linked files will be processed (assuming they are local).
+sub translate_html {
+ my ($filename,$trans,$filelist) = @_;
+ my ($contents,$out,$this,$img,$dest);
+ my $cnt = 0;
+
+ # If the filename is an external link ignore it. And drop any file:// from
+ # the filename.
+ $filename =~ /^(http|ftp|mailto)\:/ and return 0;
+ $filename =~ s/^file\:\/\///;
+ # Load the contents of the html file.
+ if (!open IF,"<$path$filename") { + print "WARNING: Cannot open $path$filename for reading\n"; + print " Image Filename Translation aborted\n\n"; + exit 0; + } + + while () { + $contents .= $_; + } + close IF; + + # Now do the translation... + # First, search for an image filename. + while ($contents =~ /\<\s*IMG[^\>]*SRC=\"/si) { + $contents = $'; + $out .= $` . $&; + + # The next thing is an image name. Get it and translate it. + $contents =~ /^(.*?)\"/s; + $contents = $'; + $this = $&; + $img = $1; + # If the image is in our list of ones to be translated, do it + # and feed the result to the output. + $cnt += $this =~ s/$img/$trans->{$img}/ if (defined($trans->{$img})); + $out .= $this; + } + $out .= $contents; + + # Now send the translated text to the html file, overwriting what's there. + open OF,">$path$filename" or die "Cannot open $path$filename for writing\n"; + print OF $out; + close OF; + + # Now look for any links to other files and add them to the list of files to do. + while ($out =~ /\<\s*A[^\>]*HREF=\"(.*?)\"/si) { + $out = $'; + $dest = $1; + # Drop an # and anything after it. + $dest =~ s/\#.*//; + $filelist->{$dest} = '' if $dest; + } + return $cnt; +} + +# REnames the image files spefified in the %translate hash. +sub rename_images { + my $translate = shift; + my ($response); + + foreach (keys(%$translate)) { + if (! $translate->{$_}) { + print " WARNING: No destination Filename for $_\n"; + } else { + $response = `mv -f $path$_ $path$translate->{$_} 2>&1`; + $response and print "ERROR from system $response\n"; + } + } +} + +################################################# +############# MAIN ############################# +################################################ + +# %filelist starts out with keys from the @ARGV list. As files are processed, +# any links to other files are added to the %filelist. A hash of processed +# files is kept so we don't do any twice. 
+ +# The first argument must be either --to_meaningful_names or --from_meaningful_names + +my (%translate,$search_regex,%filelist,%completed,$thisfile); +my ($cnt,$direction); + +my $arg0 = shift(@ARGV); +$arg0 =~ /^(--to_meaningful_names|--from_meaningful_names)$/ or + die "ERROR: First argument must be either \'--to_meaningful_names\' or \'--from_meaningful_names\'\n"; + +$direction = ($arg0 eq '--to_meaningful_names') ? 0 : 1; + +(@ARGV) or die "ERROR: Filename(s) to process must be given as arguments\n"; + +# Use the first argument to get the path to the file of translations. +my $tmp = $ARGV[0]; +($path) = $tmp =~ /(.*\/)/; +$path = '' unless $path; + +read_transfile(\%translate,$direction); + +foreach (@ARGV) { + # Strip the path from the filename, and use it later on. + if (s/(.*\/)//) { + $path = $1; + } else { + $path = ''; + } + $filelist{$_} = ''; + + while ($thisfile = (keys(%filelist))[0]) { + $cnt += translate_html($thisfile,\%translate,\%filelist) if (!exists($completed{$thisfile})); + delete($filelist{$thisfile}); + $completed{$thisfile} = ''; + } + print "translate_images.pl: $cnt image filenames translated ",($direction)?"from":"to"," meaningful names\n"; +} + +rename_images(\%translate); diff --git a/docs/manuals/en/utility/update_version b/docs/manuals/en/utility/update_version new file mode 100755 index 00000000..5c2e0092 --- /dev/null +++ b/docs/manuals/en/utility/update_version @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h` +. 
./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/en/utility/update_version.in b/docs/manuals/en/utility/update_version.in new file mode 100644 index 00000000..2766245f --- /dev/null +++ b/docs/manuals/en/utility/update_version.in @@ -0,0 +1,10 @@ +#!/bin/sh +# +# Script file to update the Bacula version +# +out=/tmp/$$ +VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' @bacula@/src/version.h` +. ./do_echo +sed -f ${out} version.tex.in >version.tex +rm -f ${out} diff --git a/docs/manuals/en/utility/utility.tex b/docs/manuals/en/utility/utility.tex new file mode 100644 index 00000000..2efa5cde --- /dev/null +++ b/docs/manuals/en/utility/utility.tex @@ -0,0 +1,79 @@ +%% +%% +%% The following characters must be preceded by a backslash +%% to be entered as printable characters: +%% +%% # $ % & ~ _ ^ \ { } +%% + +\documentclass[11pt,a4paper]{book} +\usepackage{html} +\usepackage{float} +\usepackage{graphicx} +\usepackage{bacula} +\usepackage{longtable} +\usepackage{makeidx} +\usepackage{index} +\usepackage{setspace} +\usepackage{hyperref} +\usepackage{url} + + +\makeindex +\newindex{general}{idx}{ind}{General Index} + +\sloppy + +\begin{document} +\sloppy + +\newfont{\bighead}{cmr17 at 36pt} +\parskip 10pt +\parindent 0pt + +\title{\includegraphics{./bacula-logo.eps} \\ \bigskip + \Huge{Bacula Utility Programs} + \begin{center} + \large{It comes in the night and sucks + the essence from your computers. } + \end{center} +} + + +\author{Kern Sibbald} +\date{\vspace{1.0in}\today \\ + This manual documents Bacula version \input{version} \\ + \vspace{0.2in} + Copyright \copyright 1999-2007, Free Software Foundation Europe + e.V. 
\\ + \vspace{0.2in} + Permission is granted to copy, distribute and/or modify this document under the terms of the + GNU Free Documentation License, Version 1.2 published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. + A copy of the license is included in the section entitled "GNU Free Documentation License". +} + +\maketitle + +\clearpage +\tableofcontents +\clearpage +\listoffigures +\clearpage +\listoftables +\clearpage + +\include{progs} +\include{bimagemgr-chapter} +\include{rpm-faq} +\include{fdl} + + +% The following line tells link_resolver.pl to not include these files: +% nolinks developersi baculai-dir baculai-fd baculai-sd baculai-console baculai-main + +% pull in the index +\clearpage +\printindex[general] + +\end{document} diff --git a/docs/manuals/en/utility/version.tex.in b/docs/manuals/en/utility/version.tex.in new file mode 100644 index 00000000..ff66dfc6 --- /dev/null +++ b/docs/manuals/en/utility/version.tex.in @@ -0,0 +1 @@ +@VERSION@ (@DATE@) diff --git a/docs/manuals/notes b/docs/manuals/notes new file mode 100644 index 00000000..fe2895c9 --- /dev/null +++ b/docs/manuals/notes @@ -0,0 +1,6 @@ +- Had to change [dir] to [general] in catmaintenance.tex +- Had to rename faq.tex to genfax.tex +- Had to rename install.tex to installation.tex +- had to rename console.tex bconsole.tex +- had to change [console] to [general] in bconsole.tex +- had to change [dir] to [general] in bconsole.tex