clean:
$(RMF) *~ 1 2 3 bacula-doc*.tar.gz
- (cd manual; make clean)
(cd manual-de; make clean)
(cd manual-fr; make clean)
(cd bacula-web; make clean)
$(RMF) -r CVS html-manual/CVS home-page/CVS techlogs/CVS
$(RMF) -rf autom4te.cache bacula-doc-* config.log config.out
$(RMF) -f config.status kernsconfig
- (cd manual; make distclean)
(cd manual-de; make distclean)
(cd manual-fr; make distclean)
(cd bacula-web; make distclean)
manuals/en/utility/Makefile \
manuals/en/utility/update_version \
manuals/en/utility/version.tex \
- manual/Makefile \
- manual/update_version \
- manual/version.tex \
manual-de/Makefile \
manual-de/version.tex \
manual-de/update_version \
manual-fr/Makefile \
manual-fr/version.tex \
manual-fr/update_version \
- developers/Makefile \
- developers/version.tex \
bacula-web/Makefile \
bacula-web/version.tex \
$PFILES ],
chmod 766 manuals/en/install/update_version
chmod 766 manuals/en/problems/update_version
chmod 766 manuals/en/utility/update_version
-chmod 766 manuals//update_version
chmod 766 manual-fr/update_version
chmod 766 manual-de/update_version
- ac_config_files="$ac_config_files autoconf/Make.common Makefile manuals/en/catalog/Makefile manuals/en/catalog/update_version manuals/en/catalog/version.tex manuals/en/concepts/Makefile manuals/en/concepts/update_version manuals/en/concepts/version.tex manuals/en/console/Makefile manuals/en/console/update_version manuals/en/console/version.tex manuals/en/developers/Makefile manuals/en/developers/update_version manuals/en/developers/version.tex manuals/en/install/Makefile manuals/en/install/update_version manuals/en/install/version.tex manuals/en/problems/Makefile manuals/en/problems/update_version manuals/en/problems/version.tex manuals/en/utility/Makefile manuals/en/utility/update_version manuals/en/utility/version.tex manual/Makefile manual/update_version manual/version.tex manual-de/Makefile manual-de/version.tex manual-de/update_version manual-fr/Makefile manual-fr/version.tex manual-fr/update_version developers/Makefile developers/version.tex bacula-web/Makefile bacula-web/version.tex $PFILES"
+ ac_config_files="$ac_config_files autoconf/Make.common Makefile manuals/en/catalog/Makefile manuals/en/catalog/update_version manuals/en/catalog/version.tex manuals/en/concepts/Makefile manuals/en/concepts/update_version manuals/en/concepts/version.tex manuals/en/console/Makefile manuals/en/console/update_version manuals/en/console/version.tex manuals/en/developers/Makefile manuals/en/developers/update_version manuals/en/developers/version.tex manuals/en/install/Makefile manuals/en/install/update_version manuals/en/install/version.tex manuals/en/problems/Makefile manuals/en/problems/update_version manuals/en/problems/version.tex manuals/en/utility/Makefile manuals/en/utility/update_version manuals/en/utility/version.tex manual-de/Makefile manual-de/version.tex manual-de/update_version manual-fr/Makefile manual-fr/version.tex manual-fr/update_version bacula-web/Makefile bacula-web/version.tex $PFILES"
ac_config_commands="$ac_config_commands default"
cat >confcache <<\_ACEOF
# This file is a shell script that caches the results of configure
"manuals/en/utility/Makefile" ) CONFIG_FILES="$CONFIG_FILES manuals/en/utility/Makefile" ;;
"manuals/en/utility/update_version" ) CONFIG_FILES="$CONFIG_FILES manuals/en/utility/update_version" ;;
"manuals/en/utility/version.tex" ) CONFIG_FILES="$CONFIG_FILES manuals/en/utility/version.tex" ;;
- "manual/Makefile" ) CONFIG_FILES="$CONFIG_FILES manual/Makefile" ;;
- "manual/update_version" ) CONFIG_FILES="$CONFIG_FILES manual/update_version" ;;
- "manual/version.tex" ) CONFIG_FILES="$CONFIG_FILES manual/version.tex" ;;
"manual-de/Makefile" ) CONFIG_FILES="$CONFIG_FILES manual-de/Makefile" ;;
"manual-de/version.tex" ) CONFIG_FILES="$CONFIG_FILES manual-de/version.tex" ;;
"manual-de/update_version" ) CONFIG_FILES="$CONFIG_FILES manual-de/update_version" ;;
"manual-fr/Makefile" ) CONFIG_FILES="$CONFIG_FILES manual-fr/Makefile" ;;
"manual-fr/version.tex" ) CONFIG_FILES="$CONFIG_FILES manual-fr/version.tex" ;;
"manual-fr/update_version" ) CONFIG_FILES="$CONFIG_FILES manual-fr/update_version" ;;
- "developers/Makefile" ) CONFIG_FILES="$CONFIG_FILES developers/Makefile" ;;
- "developers/version.tex" ) CONFIG_FILES="$CONFIG_FILES developers/version.tex" ;;
"bacula-web/Makefile" ) CONFIG_FILES="$CONFIG_FILES bacula-web/Makefile" ;;
"bacula-web/version.tex" ) CONFIG_FILES="$CONFIG_FILES bacula-web/version.tex" ;;
"$PFILES" ) CONFIG_FILES="$CONFIG_FILES $PFILES" ;;
chmod 766 manuals/en/install/update_version
chmod 766 manuals/en/problems/update_version
chmod 766 manuals/en/utility/update_version
-chmod 766 manuals//update_version
chmod 766 manual-fr/update_version
chmod 766 manual-de/update_version
+++ /dev/null
-#
-#
-# Makefile for LaTeX
-#
-# To build everything do
-# make tex
-# make web
-# make html
-# make dvipdf
-#
-# or simply
-#
-# make
-#
-
-IMAGES=../images
-
-first_rule: bacula
-
-bacula: tex web html dvipdf
-
-.SUFFIXES: .tex .html
-.PHONY:
-.DONTCARE:
-
-
-tex:
- @cp -fp ${IMAGES}/hires/*.eps .
- touch developers.idx developersi-general.tex
- -latex -interaction=batchmode developers.tex
- makeindex developers.idx >/dev/null 2>/dev/null
- -latex -interaction=batchmode developers.tex
- @rm -f *.eps *.old
-
-pdf:
- @echo "Making developers pdf"
- @cp -fp ${IMAGES}/hires/*.eps .
- dvipdf developers.dvi developers.pdf
- @rm -f *.eps *.old
-
-dvipdf:
- @echo "Making developers pdfm"
- @cp -fp ${IMAGES}/hires/*.eps .
- dvipdfm -p a4 developers.dvi
- @rm -f *.eps *.old
-
-html:
- @echo "Making developers html"
- @cp -fp ${IMAGES}/*.eps .
- @rm -f next.eps next.png prev.eps prev.png up.eps up.png
- @(if [ -f imagename_translations ] ; then \
- ./translate_images.pl --from_meaningful_names developers.html; \
- fi)
- latex2html -white -no_subdir -split 0 -toc_stars -white -notransparent \
- developers >/dev/null
- ./translate_images.pl --to_meaningful_names developers.html
- @rm -f *.eps *.gif *.jpg *.old
-
-web:
- @echo "Making developers web"
- @mkdir -p developers
- @rm -f developers/*
- @cp -fp ${IMAGES}/*.eps .
- @rm -f next.eps next.png prev.eps prev.png up.eps up.png
- @cp -fp ${IMAGES}/*.eps ${IMAGES}/*.png developers/
- @rm -f developers/next.eps developers/next.png developers/prev.eps developers/prev.png developers/up.eps developers/up.png
- @(if [ -f developers/imagename_translations ] ; then \
- ./translate_images.pl --to_meaningful_names developers/Bacula_Users_Guide.html; \
- fi)
- @rm -rf developers/*.html
- latex2html -split 4 -local_icons -t "Developer's Guide" -long_titles 4 \
- -contents_in_nav -toc_stars -white -notransparent developers >/dev/null
- ./translate_images.pl --to_meaningful_names developers/Developers_Guide.html
- @cp -f developers/Developers_Guide.html developers/index.html
- @rm -f *.eps *.gif *.jpg developers/*.eps *.old
- @rm -f developers/idle.png
- @rm -f developers/win32-*.png developers/wx-console*.png developers/xp-*.png
- @rm -f developers/*.pl developers/*.log developers/*.aux developers/*.idx
- @rm -f developers/*.out WARNINGS
-
-texcheck:
- ./check_tex.pl developers.tex
-
-main_configs:
- pic2graph -density 100 <main_configs.pic >main_configs.png
-
-clean:
- @rm -f 1 2 3
- @rm -f *.png *.gif *.jpg *.eps
- @rm -f *.pdf *.aux *.cp *.fn *.ky *.log *.pg
- @rm -f *.html *.backup *.pdf *.ps *.dvi *.ilg *.lof *.lot
- @rm -f *.cdx *.cnd *.ddx *.ddn *.fdx *.fnd *.ind *.sdx *.snd
- @rm -f *.dnd imagename_translations
- @rm -f *.old WARNINGS *.out *.toc *.idx
- @rm -f images.pl labels.pl internals.pl
- @rm -rf developers
- @rm -f images.tex developersi.tex
-
-
-distclean: clean
- @rm -f developers.html developers.pdf
+++ /dev/null
-%%
-%%
-
-\chapter{Catalog Services}
-\label{_ChapterStart30}
-\index[general]{Services!Catalog }
-\index[general]{Catalog Services }
-
-\section{General}
-\index[general]{General }
-\addcontentsline{toc}{subsection}{General}
-
-This chapter is intended to be a technical discussion of the Catalog services
-and as such is not targeted at end users but rather at developers and system
-administrators that want or need to know more of the working details of {\bf
-Bacula}.
-
-The {\bf Bacula Catalog} services consist of the programs that provide the SQL
-database engine for storage and retrieval of all information concerning files
-that were backed up and their locations on the storage media.
-
-We have investigated the possibility of using the following SQL engines for
-Bacula: Beagle, mSQL, GNU SQL, PostgreSQL, SQLite, Oracle, and MySQL. Each
-presents certain problems with either licensing or maturity. At present, we
-have chosen for development purposes to use MySQL, PostgreSQL and SQLite.
-MySQL was chosen because it is fast, proven to be reliable, widely used, and
-actively being developed. MySQL is released under the GNU GPL license.
-PostgreSQL was chosen because it is a full-featured, very mature database, and
-because Dan Langille did the Bacula driver for it. PostgreSQL is distributed
-under the BSD license. SQLite was chosen because it is small, efficient, and
-can be directly embedded in {\bf Bacula} thus requiring much less effort from
-the system administrator or person building {\bf Bacula}. In our testing
-SQLite has performed very well, and for the functions that we use, it has
-never encountered any errors except that it does not appear to handle
-databases larger than 2GBytes. That said, we would not recommend it for
-serious production use.
-
-The Bacula SQL code has been written in a manner that will allow it to be
-easily modified to support any of the current SQL database systems on the
-market (for example: mSQL, iODBC, unixODBC, Solid, OpenLink ODBC, EasySoft
-ODBC, InterBase, Oracle8, Oracle7, and DB2).
-
-If you do not specify either {\bf \verb{--{with-mysql} or {\bf \verb{--{with-postgresql} or
-{\bf \verb{--{with-sqlite} on the ./configure line, Bacula will use its minimalist
-internal database. This database is kept for build reasons but is no longer
-supported. Bacula {\bf requires} one of the three databases (MySQL,
-PostgreSQL, or SQLite) to run.
-
-\subsection{Filenames and Maximum Filename Length}
-\index[general]{Filenames and Maximum Filename Length }
-\index[general]{Length!Filenames and Maximum Filename }
-\addcontentsline{toc}{subsubsection}{Filenames and Maximum Filename Length}
-
-In general, either MySQL, PostgreSQL or SQLite permit storing arbitrary long
-path names and file names in the catalog database. In practice, there still
-may be one or two places in the Catalog interface code that restrict the
-maximum path length to 512 characters and the maximum file name length to 512
-characters. These restrictions are believed to have been removed. Please note,
-these restrictions apply only to the Catalog database and thus to your ability
-to list online the files saved during any job. All information received and
-stored by the Storage daemon (normally on tape) allows and handles arbitrarily
-long path and filenames.
-
-\subsection{Installing and Configuring MySQL}
-\index[general]{MySQL!Installing and Configuring }
-\index[general]{Installing and Configuring MySQL }
-\addcontentsline{toc}{subsubsection}{Installing and Configuring MySQL}
-
-For the details of installing and configuring MySQL, please see the
-\ilink{Installing and Configuring MySQL}{_ChapterStart} chapter of
-this manual.
-
-\subsection{Installing and Configuring PostgreSQL}
-\index[general]{PostgreSQL!Installing and Configuring }
-\index[general]{Installing and Configuring PostgreSQL }
-\addcontentsline{toc}{subsubsection}{Installing and Configuring PostgreSQL}
-
-For the details of installing and configuring PostgreSQL, please see the
-\ilink{Installing and Configuring PostgreSQL}{_ChapterStart10}
-chapter of this manual.
-
-\subsection{Installing and Configuring SQLite}
-\index[general]{Installing and Configuring SQLite }
-\index[general]{SQLite!Installing and Configuring }
-\addcontentsline{toc}{subsubsection}{Installing and Configuring SQLite}
-
-For the details of installing and configuring SQLite, please see the
-\ilink{Installing and Configuring SQLite}{_ChapterStart33} chapter of
-this manual.
-
-\subsection{Internal Bacula Catalog}
-\index[general]{Catalog!Internal Bacula }
-\index[general]{Internal Bacula Catalog }
-\addcontentsline{toc}{subsubsection}{Internal Bacula Catalog}
-
-Please see the
-\ilink{Internal Bacula Database}{_ChapterStart42} chapter of this
-manual for more details.
-
-\subsection{Database Table Design}
-\index[general]{Design!Database Table }
-\index[general]{Database Table Design }
-\addcontentsline{toc}{subsubsection}{Database Table Design}
-
-All discussions that follow pertain to the MySQL database. The details for the
-PostgreSQL and SQLite databases are essentially identical except for that all
-fields in the SQLite database are stored as ASCII text and some of the
-database creation statements are a bit different. The details of the internal
-Bacula catalog are not discussed here.
-
-Because the Catalog database may contain very large amounts of data for large
-sites, we have made a modest attempt to normalize the data tables to reduce
-redundant information. While reducing the size of the database significantly,
-it does, unfortunately, add some complications to the structures.
-
-In simple terms, the Catalog database must contain a record of all Jobs run by
-Bacula, and for each Job, it must maintain a list of all files saved, with
-their File Attributes (permissions, create date, ...), and the location and
-Media on which the file is stored. This is seemingly a simple task, but it
-represents a huge amount of interlinked data. Note: the list of files and their
-attributes is not maintained when using the internal Bacula database. The data
-stored in the File records, which allows the user or administrator to obtain a
-list of all files backed up during a job, is by far the largest volume of
-information put into the Catalog database.
-
-Although the Catalog database has been designed to handle backup data for
-multiple clients, some users may want to maintain multiple databases, one for
-each machine to be backed up. This reduces the risk of confusion of accidentally
-restoring a file to the wrong machine as well as reducing the amount of data
-in a single database, thus increasing efficiency and reducing the impact of a
-lost or damaged database.
-
-\section{Sequence of Creation of Records for a Save Job}
-\index[general]{Sequence of Creation of Records for a Save Job }
-\index[general]{Job!Sequence of Creation of Records for a Save }
-\addcontentsline{toc}{subsection}{Sequence of Creation of Records for a Save
-Job}
-
-Start with StartDate, ClientName, Filename, Path, Attributes, MediaName,
-MediaCoordinates. (PartNumber, NumParts). In the steps below, ``Create new''
-means to create a new record whether or not it is unique. ``Create unique''
-means each record in the database should be unique. Thus, one must first
-search to see if the record exists, and only if not should a new one be
-created, otherwise the existing RecordId should be used.
-
-\begin{enumerate}
-\item Create new Job record with StartDate; save JobId
-\item Create unique Media record; save MediaId
-\item Create unique Client record; save ClientId
-\item Create unique Filename record; save FilenameId
-\item Create unique Path record; save PathId
-\item Create unique Attribute record; save AttributeId
- store ClientId, FilenameId, PathId, and Attributes
-\item Create new File record
- store JobId, AttributeId, MediaCoordinates, etc
-\item Repeat steps 4 through 8 for each file
-\item Create a JobMedia record; save MediaId
-\item Update Job record filling in EndDate and other Job statistics
- \end{enumerate}
-
-\section{Database Tables}
-\index[general]{Database Tables }
-\index[general]{Tables!Database }
-\addcontentsline{toc}{subsection}{Database Tables}
-
-\addcontentsline{lot}{table}{Filename Table Layout}
-\begin{longtable}{|l|l|l|}
- \hline
-\multicolumn{3}{|l| }{\bf Filename } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{l| }{\bf Data Type }
-& \multicolumn{1}{l| }{\bf Remark } \\
- \hline
-{FilenameId } & {integer } & {Primary Key } \\
- \hline
-{Name } & {Blob } & {Filename }
-\\ \hline
-
-\end{longtable}
-
-The {\bf Filename} table shown above contains the name of each file backed up
-with the path removed. If different directories or machines contain the same
-filename, only one copy will be saved in this table.
-
-\
-
-\addcontentsline{lot}{table}{Path Table Layout}
-\begin{longtable}{|l|l|l|}
- \hline
-\multicolumn{3}{|l| }{\bf Path } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type
-} & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{PathId } & {integer } & {Primary Key } \\
- \hline
-{Path } & {Blob } & {Full Path }
-\\ \hline
-
-\end{longtable}
-
-The {\bf Path} table shown above contains the path or directory names of all
-directories on the system or systems. The filename and any MSDOS disk name are
-stripped off. As with the filename, only one copy of each directory name is
-kept regardless of how many machines or drives have the same directory. These
-path names should be stored in Unix path name format.
-
-Some simple testing on a Linux file system indicates that separating the
-filename and the path may be more complicated than is warranted by the space
-savings. For example, this system has a total of 89,097 files, 60,467 of which
-have unique filenames, and there are 4,374 unique paths.
-
-Finding all those files and doing two stats() per file takes an average wall
-clock time of 1 min 35 seconds on a 400MHz machine running RedHat 6.1 Linux.
-
-Finding all those files and putting them directly into a MySQL database with
-the path and filename defined as TEXT, which is variable length up to 65,535
-characters takes 19 mins 31 seconds and creates a 27.6 MByte database.
-
-Doing the same thing, but inserting them into Blob fields with the filename
-indexed on the first 30 characters and the path name indexed on the 255 (max)
-characters takes 5 mins 18 seconds and creates a 5.24 MB database. Rerunning
-the job (with the database already created) takes about 2 mins 50 seconds.
-
-Running the same as the last one (Path and Filename Blob), but Filename
-indexed on the first 30 characters and the Path on the first 50 characters
-(linear search done there after) takes 5 mins on the average and creates a 3.4
-MB database. Rerunning with the data already in the DB takes 3 mins 35
-seconds.
-
-Finally, saving only the full path name rather than splitting the path and the
-file, and indexing it on the first 50 characters takes 6 mins 43 seconds and
-creates a 7.35 MB database.
-
-\
-
-\addcontentsline{lot}{table}{File Table Layout}
-\begin{longtable}{|l|l|l|}
- \hline
-\multicolumn{3}{|l| }{\bf File } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type
-} & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{FileId } & {integer } & {Primary Key } \\
- \hline
-{FileIndex } & {integer } & {The sequential file number in the Job } \\
- \hline
-{JobId } & {integer } & {Link to Job Record } \\
- \hline
-{PathId } & {integer } & {Link to Path Record } \\
- \hline
-{FilenameId } & {integer } & {Link to Filename Record } \\
- \hline
-{MarkId } & {integer } & {Used to mark files during Verify Jobs } \\
- \hline
-{LStat } & {tinyblob } & {File attributes in base64 encoding } \\
- \hline
-{MD5 } & {tinyblob } & {MD5 signature in base64 encoding }
-\\ \hline
-
-\end{longtable}
-
-The {\bf File} table shown above contains one entry for each file backed up by
-Bacula. Thus a file that is backed up multiple times (as is normal) will have
-multiple entries in the File table. This will probably be the table with the
-most number of records. Consequently, it is essential to keep the size of this
-record to an absolute minimum. At the same time, this table must contain all
-the information (or pointers to the information) about the file and where it
-is backed up. Since a file may be backed up many times without having changed,
-the path and filename are stored in separate tables.
-
-This table contains by far the largest amount of information in the Catalog
-database, both from the stand point of number of records, and the stand point
-of total database size. As a consequence, the user must take care to
-periodically reduce the number of File records using the {\bf retention}
-command in the Console program.
-
-\
-
-\addcontentsline{lot}{table}{Job Table Layout}
-\begin{longtable}{|l|l|p{2.5in}|}
- \hline
-\multicolumn{3}{|l| }{\bf Job } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type
-} & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{JobId } & {integer } & {Primary Key } \\
- \hline
-{Job } & {tinyblob } & {Unique Job Name } \\
- \hline
-{Name } & {tinyblob } & {Job Name } \\
- \hline
-{PurgedFiles } & {tinyint } & {Used by Bacula for purging/retention periods
-} \\
- \hline
-{Type } & {binary(1) } & {Job Type: Backup, Copy, Clone, Archive, Migration
-} \\
- \hline
-{Level } & {binary(1) } & {Job Level } \\
- \hline
-{ClientId } & {integer } & {Client index } \\
- \hline
-{JobStatus } & {binary(1) } & {Job Termination Status } \\
- \hline
-{SchedTime } & {datetime } & {Time/date when Job scheduled } \\
- \hline
-{StartTime } & {datetime } & {Time/date when Job started } \\
- \hline
-{EndTime } & {datetime } & {Time/date when Job ended } \\
- \hline
-{JobTDate } & {bigint } & {Start day in Unix format but 64 bits; used for
-Retention period. } \\
- \hline
-{VolSessionId } & {integer } & {Unique Volume Session ID } \\
- \hline
-{VolSessionTime } & {integer } & {Unique Volume Session Time } \\
- \hline
-{JobFiles } & {integer } & {Number of files saved in Job } \\
- \hline
-{JobBytes } & {bigint } & {Number of bytes saved in Job } \\
- \hline
-{JobErrors } & {integer } & {Number of errors during Job } \\
- \hline
-{JobMissingFiles } & {integer } & {Number of files not saved (not yet used) }
-\\
- \hline
-{PoolId } & {integer } & {Link to Pool Record } \\
- \hline
-{FileSetId } & {integer } & {Link to FileSet Record } \\
- \hline
-{PurgedFiles } & {tiny integer } & {Set when all File records purged } \\
- \hline
-{HasBase } & {tiny integer } & {Set when Base Job run }
-\\ \hline
-
-\end{longtable}
-
-The {\bf Job} table contains one record for each Job run by Bacula. Thus
-normally, there will be one per day per machine added to the database. Note,
-the JobId is used to index Job records in the database, and it often is shown
-to the user in the Console program. However, care must be taken with its use
-as it is not unique from database to database. For example, the user may have
-a database for Client data saved on machine Rufus and another database for
-Client data saved on machine Roxie. In this case, the two database will each
-have JobIds that match those in another database. For a unique reference to a
-Job, see Job below.
-
-The Name field of the Job record corresponds to the Name resource record given
-in the Director's configuration file. Thus it is a generic name, and it will
-be normal to find many Jobs (or even all Jobs) with the same Name.
-
-The Job field contains a combination of the Name and the schedule time of the
-Job by the Director. Thus for a given Director, even with multiple Catalog
-databases, the Job will contain a unique name that represents the Job.
-
-For a given Storage daemon, the VolSessionId and VolSessionTime form a unique
-identification of the Job. This will be the case even if multiple Directors
-are using the same Storage daemon.
-
-The Job Type (or simply Type) can have one of the following values:
-
-\addcontentsline{lot}{table}{Job Types}
-\begin{longtable}{|l|l|}
- \hline
-\multicolumn{1}{|c| }{\bf Value } & \multicolumn{1}{c| }{\bf Meaning } \\
- \hline
-{B } & {Backup Job } \\
- \hline
-{V } & {Verify Job } \\
- \hline
-{R } & {Restore Job } \\
- \hline
-{C } & {Console program (not in database) } \\
- \hline
-{D } & {Admin Job } \\
- \hline
-{A } & {Archive Job (not implemented) }
-\\ \hline
-
-\end{longtable}
-
-The JobStatus field specifies how the job terminated, and can be one of the
-following:
-
-\addcontentsline{lot}{table}{Job Statuses}
-\begin{longtable}{|l|l|}
- \hline
-\multicolumn{1}{|c| }{\bf Value } & \multicolumn{1}{c| }{\bf Meaning } \\
- \hline
-{C } & {Created but not yet running } \\
- \hline
-{R } & {Running } \\
- \hline
-{B } & {Blocked } \\
- \hline
-{T } & {Terminated normally } \\
- \hline
-{E } & {Terminated in Error } \\
- \hline
-{e } & {Non-fatal error } \\
- \hline
-{f } & {Fatal error } \\
- \hline
-{D } & {Verify Differences } \\
- \hline
-{A } & {Canceled by the user } \\
- \hline
-{F } & {Waiting on the File daemon } \\
- \hline
-{S } & {Waiting on the Storage daemon } \\
- \hline
-{m } & {Waiting for a new Volume to be mounted } \\
- \hline
-{M } & {Waiting for a Mount } \\
- \hline
-{s } & {Waiting for Storage resource } \\
- \hline
-{j } & {Waiting for Job resource } \\
- \hline
-{c } & {Waiting for Client resource } \\
- \hline
-{d } & {Waiting for Maximum jobs } \\
- \hline
-{t } & {Waiting for Start Time } \\
- \hline
-{p } & {Waiting for higher priority job to finish }
-\\ \hline
-
-\end{longtable}
-
-\
-
-\addcontentsline{lot}{table}{File Sets Table Layout}
-\begin{longtable}{|l|l|l|}
- \hline
-\multicolumn{3}{|l| }{\bf FileSet } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type\
-\ \ } & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{FileSetId } & {integer } & {Primary Key } \\
- \hline
-{FileSet } & {tinyblob } & {FileSet name } \\
- \hline
-{MD5 } & {tinyblob } & {MD5 checksum of FileSet } \\
- \hline
-{CreateTime } & {datetime } & {Time and date Fileset created }
-\\ \hline
-
-\end{longtable}
-
-The {\bf FileSet} table contains one entry for each FileSet that is used. The
-MD5 signature is kept to ensure that if the user changes anything inside the
-FileSet, it will be detected and the new FileSet will be used. This is
-particularly important when doing an incremental update. If the user deletes a
-file or adds a file, we need to ensure that a Full backup is done prior to the
-next incremental.
-
-\
-
-\addcontentsline{lot}{table}{JobMedia Table Layout}
-\begin{longtable}{|l|l|p{2.5in}|}
- \hline
-\multicolumn{3}{|l| }{\bf JobMedia } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type\
-\ \ } & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{JobMediaId } & {integer } & {Primary Key } \\
- \hline
-{JobId } & {integer } & {Link to Job Record } \\
- \hline
-{MediaId } & {integer } & {Link to Media Record } \\
- \hline
-{FirstIndex } & {integer } & {The index (sequence number) of the first file
-written for this Job to the Media } \\
- \hline
-{LastIndex } & {integer } & {The index of the last file written for this
-Job to the Media } \\
- \hline
-{StartFile } & {integer } & {The physical media (tape) file number of the
-first block written for this Job } \\
- \hline
-{EndFile } & {integer } & {The physical media (tape) file number of the
-last block written for this Job } \\
- \hline
-{StartBlock } & {integer } & {The number of the first block written for
-this Job } \\
- \hline
-{EndBlock } & {integer } & {The number of the last block written for this
-Job } \\
- \hline
-{VolIndex } & {integer } & {The Volume use sequence number within the Job }
-\\ \hline
-
-\end{longtable}
-
-The {\bf JobMedia} table contains one entry at the following: start of
-the job, start of each new tape file, start of each new tape, end of the
-job. Since by default, a new tape file is written every 2GB, in general,
-you will have more than 2 JobMedia records per Job. The number can be
-varied by changing the "Maximum File Size" specified in the Device
-resource. This record allows Bacula to efficiently position close to
-(within 2GB) any given file in a backup. For restoring a full Job,
-these records are not very important, but if you want to retrieve
-a single file that was written near the end of a 100GB backup, the
-JobMedia records can speed it up by orders of magnitude by permitting
-forward spacing files and blocks rather than reading the whole 100GB
-backup.
-
-
-
-\
-
-\addcontentsline{lot}{table}{Media Table Layout}
-\begin{longtable}{|l|l|p{2.4in}|}
- \hline
-\multicolumn{3}{|l| }{\bf Media } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type\
-\ \ } & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{MediaId } & {integer } & {Primary Key } \\
- \hline
-{VolumeName } & {tinyblob } & {Volume name } \\
- \hline
-{Slot } & {integer } & {Autochanger Slot number or zero } \\
- \hline
-{PoolId } & {integer } & {Link to Pool Record } \\
- \hline
-{MediaType } & {tinyblob } & {The MediaType supplied by the user } \\
- \hline
-{FirstWritten } & {datetime } & {Time/date when first written } \\
- \hline
-{LastWritten } & {datetime } & {Time/date when last written } \\
- \hline
-{LabelDate } & {datetime } & {Time/date when tape labeled } \\
- \hline
-{VolJobs } & {integer } & {Number of jobs written to this media } \\
- \hline
-{VolFiles } & {integer } & {Number of files written to this media } \\
- \hline
-{VolBlocks } & {integer } & {Number of blocks written to this media } \\
- \hline
-{VolMounts } & {integer } & {Number of times media mounted } \\
- \hline
-{VolBytes } & {bigint } & {Number of bytes saved in Job } \\
- \hline
-{VolErrors } & {integer } & {Number of errors during Job } \\
- \hline
-{VolWrites } & {integer } & {Number of writes to media } \\
- \hline
-{MaxVolBytes } & {bigint } & {Maximum bytes to put on this media } \\
- \hline
-{VolCapacityBytes } & {bigint } & {Capacity estimate for this volume } \\
- \hline
-{VolStatus } & {enum } & {Status of media: Full, Archive, Append, Recycle,
-Read-Only, Disabled, Error, Busy } \\
- \hline
-{Recycle } & {tinyint } & {Whether or not Bacula can recycle the Volumes:
-Yes, No } \\
- \hline
-{VolRetention } & {bigint } & {64 bit seconds until expiration } \\
- \hline
-{VolUseDuration } & {bigint } & {64 bit seconds volume can be used } \\
- \hline
-{MaxVolJobs } & {integer } & {maximum jobs to put on Volume } \\
- \hline
-{MaxVolFiles } & {integer } & {maximum EOF marks to put on Volume }
-\\ \hline
-
-\end{longtable}
-
-The {\bf Volume} table (internally referred to as the Media table) contains
-one entry for each volume, that is each tape, cassette (8mm, DLT, DAT, ...),
-or file on which information is or was backed up. There is one Volume record
-created for each of the NumVols specified in the Pool resource record.
-
-\
-
-\addcontentsline{lot}{table}{Pool Table Layout}
-\begin{longtable}{|l|l|p{2.4in}|}
- \hline
-\multicolumn{3}{|l| }{\bf Pool } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type
-} & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{PoolId } & {integer } & {Primary Key } \\
- \hline
-{Name } & {Tinyblob } & {Pool Name } \\
- \hline
-{NumVols } & {Integer } & {Number of Volumes in the Pool } \\
- \hline
-{MaxVols } & {Integer } & {Maximum Volumes in the Pool } \\
- \hline
-{UseOnce } & {tinyint } & {Use volume once } \\
- \hline
-{UseCatalog } & {tinyint } & {Set to use catalog } \\
- \hline
-{AcceptAnyVolume } & {tinyint } & {Accept any volume from Pool } \\
- \hline
-{VolRetention } & {bigint } & {64 bit seconds to retain volume } \\
- \hline
-{VolUseDuration } & {bigint } & {64 bit seconds volume can be used } \\
- \hline
-{MaxVolJobs } & {integer } & {max jobs on volume } \\
- \hline
-{MaxVolFiles } & {integer } & {max EOF marks to put on Volume } \\
- \hline
-{MaxVolBytes } & {bigint } & {max bytes to write on Volume } \\
- \hline
-{AutoPrune } & {tinyint } & {yes|no for autopruning } \\
- \hline
-{Recycle } & {tinyint } & {yes|no for allowing auto recycling of Volume }
-\\
- \hline
-{PoolType } & {enum } & {Backup, Copy, Cloned, Archive, Migration } \\
- \hline
-{LabelFormat } & {Tinyblob } & {Label format }
-\\ \hline
-
-\end{longtable}
-
-The {\bf Pool} table contains one entry for each media pool controlled by
-Bacula in this database. One media record exists for each of the NumVols
-contained in the Pool. The PoolType is a Bacula defined keyword. The MediaType
-is defined by the administrator, and corresponds to the MediaType specified in
-the Director's Storage definition record. The CurrentVol is the sequence
-number of the Media record for the current volume.
-
-\
-
-\addcontentsline{lot}{table}{Client Table Layout}
-\begin{longtable}{|l|l|l|}
- \hline
-\multicolumn{3}{|l| }{\bf Client } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type
-} & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{ClientId } & {integer } & {Primary Key } \\
- \hline
-{Name } & {TinyBlob } & {File Services Name } \\
- \hline
-{UName } & {TinyBlob } & {uname -a from Client (not yet used) } \\
- \hline
-{AutoPrune } & {tinyint } & {yes|no for autopruning } \\
- \hline
-{FileRetention } & {bigint } & {64 bit seconds to retain Files } \\
- \hline
-{JobRetention } & {bigint } & {64 bit seconds to retain Job }
-\\ \hline
-
-\end{longtable}
-
-The {\bf Client} table contains one entry for each machine backed up by Bacula
-in this database. Normally the Name is a fully qualified domain name.
-
-\
-
-\addcontentsline{lot}{table}{Unsaved Files Table Layout}
-\begin{longtable}{|l|l|l|}
- \hline
-\multicolumn{3}{|l| }{\bf UnsavedFiles } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type
-} & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{UnsavedId } & {integer } & {Primary Key } \\
- \hline
-{JobId } & {integer } & {JobId corresponding to this record } \\
- \hline
-{PathId } & {integer } & {Id of path } \\
- \hline
-{FilenameId } & {integer } & {Id of filename }
-\\ \hline
-
-\end{longtable}
-
-The {\bf UnsavedFiles} table contains one entry for each file that was not
-saved. Note! This record is not yet implemented.
-
-\
-
-\addcontentsline{lot}{table}{Counter Table Layout}
-\begin{longtable}{|l|l|l|}
- \hline
-\multicolumn{3}{|l| }{\bf Counter } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type
-} & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{Counter } & {tinyblob } & {Counter name } \\
- \hline
-{MinValue } & {integer } & {Start/Min value for counter } \\
- \hline
-{MaxValue } & {integer } & {Max value for counter } \\
- \hline
-{CurrentValue } & {integer } & {Current counter value } \\
- \hline
-{WrapCounter } & {tinyblob } & {Name of another counter }
-\\ \hline
-
-\end{longtable}
-
-The {\bf Counter} table contains one entry for each permanent counter defined
-by the user.
-
-\
-
-\addcontentsline{lot}{table}{Version Table Layout}
-\begin{longtable}{|l|l|l|}
- \hline
-\multicolumn{3}{|l| }{\bf Version } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type
-} & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{VersionId } & {integer } & {Primary Key }
-\\ \hline
-
-\end{longtable}
-
-The {\bf Version} table defines the Bacula database version number. Bacula
-checks this number before reading the database to ensure that it is compatible
-with the Bacula binary file.
-
-\
-
-\addcontentsline{lot}{table}{Base Files Table Layout}
-\begin{longtable}{|l|l|l|}
- \hline
-\multicolumn{3}{|l| }{\bf BaseFiles } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type
-} & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{BaseId } & {integer } & {Primary Key } \\
- \hline
-{BaseJobId } & {integer } & {JobId of Base Job } \\
- \hline
-{JobId } & {integer } & {Reference to Job } \\
- \hline
-{FileId } & {integer } & {Reference to File } \\
- \hline
-{FileIndex } & {integer } & {File Index number }
-\\ \hline
-
-\end{longtable}
-
-The {\bf BaseFiles} table contains all the File references for a particular
-JobId that point to a Base file -- i.e. they were previously saved and hence
-were not saved in the current JobId but in BaseJobId under FileId. FileIndex
-is the index of the file, and is used for optimization of Restore jobs to
-prevent the need to read the FileId record when creating the in memory tree.
-This record is not yet implemented.
-
-\
-
-\subsection{MySQL Table Definition}
-\index[general]{MySQL Table Definition }
-\index[general]{Definition!MySQL Table }
-\addcontentsline{toc}{subsubsection}{MySQL Table Definition}
-
-The commands used to create the MySQL tables are as follows:
-
-\footnotesize
-\begin{verbatim}
-USE bacula;
-CREATE TABLE Filename (
- FilenameId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- Name BLOB NOT NULL,
- PRIMARY KEY(FilenameId),
- INDEX (Name(30))
- );
-CREATE TABLE Path (
- PathId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- Path BLOB NOT NULL,
- PRIMARY KEY(PathId),
- INDEX (Path(50))
- );
-CREATE TABLE File (
- FileId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- FileIndex INTEGER UNSIGNED NOT NULL DEFAULT 0,
- JobId INTEGER UNSIGNED NOT NULL REFERENCES Job,
- PathId INTEGER UNSIGNED NOT NULL REFERENCES Path,
- FilenameId INTEGER UNSIGNED NOT NULL REFERENCES Filename,
- MarkId INTEGER UNSIGNED NOT NULL DEFAULT 0,
- LStat TINYBLOB NOT NULL,
- MD5 TINYBLOB NOT NULL,
- PRIMARY KEY(FileId),
- INDEX (JobId),
- INDEX (PathId),
- INDEX (FilenameId)
- );
-CREATE TABLE Job (
- JobId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- Job TINYBLOB NOT NULL,
- Name TINYBLOB NOT NULL,
- Type BINARY(1) NOT NULL,
- Level BINARY(1) NOT NULL,
- ClientId INTEGER NOT NULL REFERENCES Client,
- JobStatus BINARY(1) NOT NULL,
- SchedTime DATETIME NOT NULL,
- StartTime DATETIME NOT NULL,
- EndTime DATETIME NOT NULL,
- JobTDate BIGINT UNSIGNED NOT NULL,
- VolSessionId INTEGER UNSIGNED NOT NULL DEFAULT 0,
- VolSessionTime INTEGER UNSIGNED NOT NULL DEFAULT 0,
- JobFiles INTEGER UNSIGNED NOT NULL DEFAULT 0,
- JobBytes BIGINT UNSIGNED NOT NULL,
- JobErrors INTEGER UNSIGNED NOT NULL DEFAULT 0,
- JobMissingFiles INTEGER UNSIGNED NOT NULL DEFAULT 0,
- PoolId INTEGER UNSIGNED NOT NULL REFERENCES Pool,
- FileSetId INTEGER UNSIGNED NOT NULL REFERENCES FileSet,
- PurgedFiles TINYINT NOT NULL DEFAULT 0,
- HasBase TINYINT NOT NULL DEFAULT 0,
- PRIMARY KEY(JobId),
- INDEX (Name(128))
- );
-CREATE TABLE FileSet (
- FileSetId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- FileSet TINYBLOB NOT NULL,
- MD5 TINYBLOB NOT NULL,
- CreateTime DATETIME NOT NULL,
- PRIMARY KEY(FileSetId)
- );
-CREATE TABLE JobMedia (
- JobMediaId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- JobId INTEGER UNSIGNED NOT NULL REFERENCES Job,
- MediaId INTEGER UNSIGNED NOT NULL REFERENCES Media,
- FirstIndex INTEGER UNSIGNED NOT NULL DEFAULT 0,
- LastIndex INTEGER UNSIGNED NOT NULL DEFAULT 0,
- StartFile INTEGER UNSIGNED NOT NULL DEFAULT 0,
- EndFile INTEGER UNSIGNED NOT NULL DEFAULT 0,
- StartBlock INTEGER UNSIGNED NOT NULL DEFAULT 0,
- EndBlock INTEGER UNSIGNED NOT NULL DEFAULT 0,
- VolIndex INTEGER UNSIGNED NOT NULL DEFAULT 0,
- PRIMARY KEY(JobMediaId),
- INDEX (JobId, MediaId)
- );
-CREATE TABLE Media (
- MediaId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- VolumeName TINYBLOB NOT NULL,
- Slot INTEGER NOT NULL DEFAULT 0,
- PoolId INTEGER UNSIGNED NOT NULL REFERENCES Pool,
- MediaType TINYBLOB NOT NULL,
- FirstWritten DATETIME NOT NULL,
- LastWritten DATETIME NOT NULL,
- LabelDate DATETIME NOT NULL,
- VolJobs INTEGER UNSIGNED NOT NULL DEFAULT 0,
- VolFiles INTEGER UNSIGNED NOT NULL DEFAULT 0,
- VolBlocks INTEGER UNSIGNED NOT NULL DEFAULT 0,
- VolMounts INTEGER UNSIGNED NOT NULL DEFAULT 0,
- VolBytes BIGINT UNSIGNED NOT NULL DEFAULT 0,
- VolErrors INTEGER UNSIGNED NOT NULL DEFAULT 0,
- VolWrites INTEGER UNSIGNED NOT NULL DEFAULT 0,
- VolCapacityBytes BIGINT UNSIGNED NOT NULL,
- VolStatus ENUM('Full', 'Archive', 'Append', 'Recycle', 'Purged',
- 'Read-Only', 'Disabled', 'Error', 'Busy', 'Used', 'Cleaning') NOT NULL,
- Recycle TINYINT NOT NULL DEFAULT 0,
- VolRetention BIGINT UNSIGNED NOT NULL DEFAULT 0,
- VolUseDuration BIGINT UNSIGNED NOT NULL DEFAULT 0,
- MaxVolJobs INTEGER UNSIGNED NOT NULL DEFAULT 0,
- MaxVolFiles INTEGER UNSIGNED NOT NULL DEFAULT 0,
- MaxVolBytes BIGINT UNSIGNED NOT NULL DEFAULT 0,
- InChanger TINYINT NOT NULL DEFAULT 0,
- MediaAddressing TINYINT NOT NULL DEFAULT 0,
- VolReadTime BIGINT UNSIGNED NOT NULL DEFAULT 0,
- VolWriteTime BIGINT UNSIGNED NOT NULL DEFAULT 0,
- PRIMARY KEY(MediaId),
- INDEX (PoolId)
- );
-CREATE TABLE Pool (
- PoolId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- Name TINYBLOB NOT NULL,
- NumVols INTEGER UNSIGNED NOT NULL DEFAULT 0,
- MaxVols INTEGER UNSIGNED NOT NULL DEFAULT 0,
- UseOnce TINYINT NOT NULL,
- UseCatalog TINYINT NOT NULL,
- AcceptAnyVolume TINYINT DEFAULT 0,
- VolRetention BIGINT UNSIGNED NOT NULL,
- VolUseDuration BIGINT UNSIGNED NOT NULL,
- MaxVolJobs INTEGER UNSIGNED NOT NULL DEFAULT 0,
- MaxVolFiles INTEGER UNSIGNED NOT NULL DEFAULT 0,
- MaxVolBytes BIGINT UNSIGNED NOT NULL,
- AutoPrune TINYINT DEFAULT 0,
- Recycle TINYINT DEFAULT 0,
- PoolType ENUM('Backup', 'Copy', 'Cloned', 'Archive', 'Migration', 'Scratch') NOT NULL,
- LabelFormat TINYBLOB,
- Enabled TINYINT DEFAULT 1,
- ScratchPoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool,
- RecyclePoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool,
- UNIQUE (Name(128)),
- PRIMARY KEY (PoolId)
- );
-CREATE TABLE Client (
- ClientId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- Name TINYBLOB NOT NULL,
- Uname TINYBLOB NOT NULL, /* full uname -a of client */
- AutoPrune TINYINT DEFAULT 0,
- FileRetention BIGINT UNSIGNED NOT NULL,
- JobRetention BIGINT UNSIGNED NOT NULL,
- UNIQUE (Name(128)),
- PRIMARY KEY(ClientId)
- );
-CREATE TABLE BaseFiles (
- BaseId INTEGER UNSIGNED AUTO_INCREMENT,
- BaseJobId INTEGER UNSIGNED NOT NULL REFERENCES Job,
- JobId INTEGER UNSIGNED NOT NULL REFERENCES Job,
- FileId INTEGER UNSIGNED NOT NULL REFERENCES File,
- FileIndex INTEGER UNSIGNED,
- PRIMARY KEY(BaseId)
- );
-CREATE TABLE UnsavedFiles (
- UnsavedId INTEGER UNSIGNED AUTO_INCREMENT,
- JobId INTEGER UNSIGNED NOT NULL REFERENCES Job,
- PathId INTEGER UNSIGNED NOT NULL REFERENCES Path,
- FilenameId INTEGER UNSIGNED NOT NULL REFERENCES Filename,
- PRIMARY KEY (UnsavedId)
- );
-CREATE TABLE Version (
- VersionId INTEGER UNSIGNED NOT NULL
- );
--- Initialize Version
-INSERT INTO Version (VersionId) VALUES (7);
-CREATE TABLE Counters (
- Counter TINYBLOB NOT NULL,
- MinValue INTEGER,
- MaxValue INTEGER,
- CurrentValue INTEGER,
- WrapCounter TINYBLOB NOT NULL,
- PRIMARY KEY (Counter(128))
- );
-\end{verbatim}
-\normalsize
+++ /dev/null
-#!/usr/bin/perl -w
-# Finds potential problems in tex files, and issues warnings to the console
-# about what it finds. Takes a list of files as its only arguments,
-# and does checks on all the files listed. The assumption is that these are
-# valid (or close to valid) LaTeX files. It follows \include statements
-# recursively to pick up any included tex files.
-#
-#
-#
-# Currently the following checks are made:
-#
-# -- Multiple hyphens not inside a verbatim environment (or \verb). These
-# should be placed inside a \verb{} construct so they will not be converted
-# to single hyphen by latex and latex2html.
-
-
-# Original creation 3-8-05 by Karl Cunningham karlc -at- keckec -dot- com
-#
-#
-
-use strict;
-
-# The following builds the test string to identify and change multiple
-# hyphens in the tex files. Several constructs are identified but only
-# multiple hyphens are changed; the others are fed to the output
-# unchanged.
-my $b = '\\\\begin\\*?\\s*\\{\\s*'; # \begin{
-my $e = '\\\\end\\*?\\s*\\{\\s*'; # \end{
-my $c = '\\s*\\}'; # closing curly brace
-
-# This captures entire verbatim environments. These are passed to the output
-# file unchanged.
-my $verbatimenv = $b . "verbatim" . $c . ".*?" . $e . "verbatim" . $c;
-
-# This captures \verb{..{ constructs. They are passed to the output unchanged.
-my $verb = '\\\\verb\\*?(.).*?\\1';
-
-# This captures multiple hyphens with a leading and trailing space. These are not changed.
-my $hyphsp = '\\s\\-{2,}\\s';
-
-# This identifies other multiple hyphens.
-my $hyphens = '\\-{2,}';
-
-# This identifies \hyperpage{..} commands, which should be ignored.
-my $hyperpage = '\\\\hyperpage\\*?\\{.*?\\}';
-
-# This builds the actual test string from the above strings.
-#my $teststr = "$verbatimenv|$verb|$tocentry|$hyphens";
-my $teststr = "$verbatimenv|$verb|$hyphsp|$hyperpage|$hyphens";
-
-
-sub get_includes {
- # Get a list of include files from the top-level tex file. The first
- # argument is a pointer to the list of files found. The rest of the
- # arguments is a list of filenames to check for includes.
- my $files = shift;
- my ($fileline,$includefile,$includes);
-
- while (my $filename = shift) {
- # Get a list of all the html files in the directory.
- open my $if,"<$filename" or die "Cannot open input file $filename\n";
- $fileline = 0;
- $includes = 0;
- while (<$if>) {
- chomp;
- $fileline++;
- # If a file is found in an include, process it.
- if (($includefile) = /\\include\s*\{(.*?)\}/) {
- $includes++;
- # Append .tex to the filename
- $includefile .= '.tex';
-
- # If the include file has already been processed, issue a warning
- # and don't do it again.
- my $found = 0;
- foreach (@$files) {
- if ($_ eq $includefile) {
- $found = 1;
- last;
- }
- }
- if ($found) {
- print "$includefile found at line $fileline in $filename was previously included\n";
- } else {
- # The file has not been previously found. Save it and
- # recursively process it.
- push (@$files,$includefile);
- get_includes($files,$includefile);
- }
- }
- }
- close IF;
- }
-}
-
-
-sub check_hyphens {
- my (@files) = @_;
- my ($filedata,$this,$linecnt,$before);
-
- # Build the test string to check for the various environments.
- # We only do the conversion if the multiple hyphens are outside of a
- # verbatim environment (either \begin{verbatim}...\end{verbatim} or
- # \verb{--}). Capture those environments and pass them to the output
- # unchanged.
-
- foreach my $file (@files) {
- # Open the file and load the whole thing into $filedata. A bit wasteful but
- # easier to deal with, and we don't have a problem with speed here.
- $filedata = "";
- open IF,"<$file" or die "Cannot open input file $file";
- while (<IF>) {
- $filedata .= $_;
- }
- close IF;
-
- # Set up to process the file data.
- $linecnt = 1;
-
- # Go through the file data from beginning to end. For each match, save what
- # came before it and what matched. $filedata now becomes only what came
- # after the match.
-# Check the match to see if it starts with a multiple-hyphen. If so
- # warn the user. Keep track of line numbers so they can be output
- # with the warning message.
- while ($filedata =~ /$teststr/os) {
- $this = $&;
- $before = $`;
- $filedata = $';
- $linecnt += $before =~ tr/\n/\n/;
-
- # Check if the multiple hyphen is present outside of one of the
- # acceptable constructs.
- if ($this =~ /^\-+/) {
- print "Possible unwanted multiple hyphen found in line ",
- "$linecnt of file $file\n";
- }
- $linecnt += $this =~ tr/\n/\n/;
- }
- }
-}
-##################################################################
-# MAIN ####
-##################################################################
-
-my (@includes,$cnt);
-
-# Examine the file pointed to by the first argument to get a list of
-# includes to test.
-get_includes(\@includes,@ARGV);
-
-check_hyphens(@includes);
+++ /dev/null
-%%
-%%
-
-\chapter{Daemon Protocol}
-\label{_ChapterStart2}
-\index{Protocol!Daemon }
-\index{Daemon Protocol }
-
-\section{General}
-\index{General }
-\addcontentsline{toc}{subsection}{General}
-
-This document describes the protocols used between the various daemons. As
-Bacula has developed, it has become quite out of date. The general idea still
-holds true, but the details of the fields for each command, and indeed the
-commands themselves have changed considerably.
-
-It is intended to be a technical discussion of the general daemon protocols
-and as such is not targeted at end users but rather at developers and system
-administrators that want or need to know more of the working details of {\bf
-Bacula}.
-
-\section{Low Level Network Protocol}
-\index{Protocol!Low Level Network }
-\index{Low Level Network Protocol }
-\addcontentsline{toc}{subsection}{Low Level Network Protocol}
-
-At the lowest level, the network protocol is handled by {\bf BSOCK} packets
-which contain a lot of information about the status of the network connection:
-who is at the other end, etc. Each basic {\bf Bacula} network read or write
-actually consists of two low level network read/writes. The first write always
-sends four bytes of data in machine independent byte order. If data is to
-follow, the first four bytes are a positive non-zero integer indicating the
-length of the data that follow in the subsequent write. If the four byte
-integer is zero or negative, it indicates a special request, a sort of network
-signaling capability. In this case, no data packet will follow. The low level
-BSOCK routines expect that only a single thread is accessing the socket at a
-time. It is advised that multiple threads do not read/write the same socket.
-If you must do this, you must provide some sort of locking mechanism. It would
-not be appropriate for efficiency reasons to make every call to the BSOCK
-routines lock and unlock the packet.
-
-\section{General Daemon Protocol}
-\index{General Daemon Protocol }
-\index{Protocol!General Daemon }
-\addcontentsline{toc}{subsection}{General Daemon Protocol}
-
-In general, all the daemons follow the following global rules. There may be
-exceptions depending on the specific case. Normally, one daemon will be
-sending commands to another daemon (specifically, the Director to the Storage
-daemon and the Director to the File daemon).
-
-\begin{itemize}
-\item Commands are always ASCII commands that are upper/lower case dependent
- as well as space sensitive.
-\item All binary data is converted into ASCII (either with printf statements
- or using base64 encoding).
-\item All responses to commands sent are always prefixed with a return
- numeric code where codes in the 1000's are reserved for the Director, the
- 2000's are reserved for the File daemon, and the 3000's are reserved for the
-Storage daemon.
-\item Any response that is not prefixed with a numeric code is a command (or
- subcommand if you like) coming from the other end. For example, while the
- Director is corresponding with the Storage daemon, the Storage daemon can
-request Catalog services from the Director. This convention permits each side
-to send commands to the other daemon while simultaneously responding to
-commands.
-\item Any response that is of zero length, depending on the context, either
- terminates the data stream being sent or terminates command mode prior to
- closing the connection.
-\item Any response that is of negative length is a special sign that normally
- requires a response. For example, during data transfer from the File daemon
- to the Storage daemon, normally the File daemon sends continuously without
-intervening reads. However, periodically, the File daemon will send a packet
-of length -1 indicating that the current data stream is complete and that the
-Storage daemon should respond to the packet with an OK, ABORT JOB, PAUSE,
-etc. This permits the File daemon to efficiently send data while at the same
-time occasionally ``polling'' the Storage daemon for his status or any
-special requests.
-
-Currently, these negative lengths are specific to the daemon, but shortly,
-the range 0 to -999 will be standard daemon wide signals, while -1000 to
--1999 will be for Director user, -2000 to -2999 for the File daemon, and
--3000 to -3999 for the Storage daemon.
-\end{itemize}
-
-\section{The Protocol Used Between the Director and the Storage Daemon}
-\index{Daemon!Protocol Used Between the Director and the Storage }
-\index{Protocol Used Between the Director and the Storage Daemon }
-\addcontentsline{toc}{subsection}{Protocol Used Between the Director and the
-Storage Daemon}
-
-Before sending commands to the File daemon, the Director opens a Message
-channel with the Storage daemon, identifies itself and presents its password.
-If the password check is OK, the Storage daemon accepts the Director. The
-Director then passes the Storage daemon, the JobId to be run as well as the
-File daemon authorization (append, read all, or read for a specific session).
-The Storage daemon will then pass back to the Director an enabling key for this
-JobId that must be presented by the File daemon when opening the job. Until
-this process is complete, the Storage daemon is not available for use by File
-daemons.
-
-\footnotesize
-\begin{verbatim}
-SD: listens
-DR: makes connection
-DR: Hello <Director-name> calling <password>
-SD: 3000 OK Hello
-DR: JobId=nnn Allow=(append, read) Session=(*, SessionId)
- (Session not implemented yet)
-SD: 3000 OK Job Authorization=<password>
-DR: use device=<device-name> media_type=<media-type>
- pool_name=<pool-name> pool_type=<pool_type>
-SD: 3000 OK use device
-\end{verbatim}
-\normalsize
-
-For the Director to be authorized, the \lt{}Director-name\gt{} and the
-\lt{}password\gt{} must match the values in one of the Storage daemon's
-Director resources (there may be several Directors that can access a single
-Storage daemon).
-
-\section{The Protocol Used Between the Director and the File Daemon}
-\index{Daemon!Protocol Used Between the Director and the File }
-\index{Protocol Used Between the Director and the File Daemon }
-\addcontentsline{toc}{subsection}{Protocol Used Between the Director and the
-File Daemon}
-
-A typical conversation might look like the following:
-
-\footnotesize
-\begin{verbatim}
-FD: listens
-DR: makes connection
-DR: Hello <Director-name> calling <password>
-FD: 2000 OK Hello
-DR: JobId=nnn Authorization=<password>
-FD: 2000 OK Job
-DR: storage address = <Storage daemon address> port = <port-number>
- name = <DeviceName> mediatype = <MediaType>
-FD: 2000 OK storage
-DR: include
-DR: <directory1>
-DR: <directory2>
- ...
-DR: Null packet
-FD: 2000 OK include
-DR: exclude
-DR: <directory1>
-DR: <directory2>
- ...
-DR: Null packet
-FD: 2000 OK exclude
-DR: full
-FD: 2000 OK full
-DR: save
-FD: 2000 OK save
-FD: Attribute record for each file as sent to the
- Storage daemon (described above).
-FD: Null packet
-FD: <append close responses from Storage daemon>
- e.g.
- 3000 OK Volumes = <number of volumes>
- 3001 Volume = <volume-id> <start file> <start block>
- <end file> <end block> <volume session-id>
- 3002 Volume data = <date/time of last write> <Number bytes written>
- <number errors>
- ... additional Volume / Volume data pairs for volumes 2 .. n
-FD: Null packet
-FD: close socket
-\end{verbatim}
-\normalsize
-
-\section{The Save Protocol Between the File Daemon and the Storage Daemon}
-\index{Save Protocol Between the File Daemon and the Storage Daemon }
-\index{Daemon!Save Protocol Between the File Daemon and the Storage }
-\addcontentsline{toc}{subsection}{Save Protocol Between the File Daemon and
-the Storage Daemon}
-
-Once the Director has sent a {\bf save} command to the File daemon, the File
-daemon will contact the Storage daemon to begin the save.
-
-In what follows: FD: refers to information set via the network from the File
-daemon to the Storage daemon, and SD: refers to information set from the
-Storage daemon to the File daemon.
-
-\subsection{Command and Control Information}
-\index{Information!Command and Control }
-\index{Command and Control Information }
-\addcontentsline{toc}{subsubsection}{Command and Control Information}
-
-Command and control information is exchanged in human readable ASCII commands.
-
-
-\footnotesize
-\begin{verbatim}
-FD: listens
-SD: makes connection
-FD: append open session = <JobId> [<password>]
-SD: 3000 OK ticket = <number>
-FD: append data <ticket-number>
-SD: 3000 OK data address = <IPaddress> port = <port>
-\end{verbatim}
-\normalsize
-
-\subsection{Data Information}
-\index{Information!Data }
-\index{Data Information }
-\addcontentsline{toc}{subsubsection}{Data Information}
-
-The Data information consists of the file attributes and data to the Storage
-daemon. For the most part, the data information is sent one way: from the File
-daemon to the Storage daemon. This allows the File daemon to transfer
-information as fast as possible without a lot of handshaking and network
-overhead.
-
-However, from time to time, the File daemon needs to do a sort of checkpoint
-of the situation to ensure that everything is going well with the Storage
-daemon. To do so, the File daemon sends a packet with a negative length
-indicating that he wishes the Storage daemon to respond by sending a packet of
-information to the File daemon. The File daemon then waits to receive a packet
-from the Storage daemon before continuing.
-
-All data sent are in binary format except for the header packet, which is in
-ASCII. There are two packet types used in data transfer mode: a header packet,
-the contents of which are known to the Storage daemon, and a data packet, the
-contents of which are never examined by the Storage daemon.
-
-The first data packet to the Storage daemon will be an ASCII header packet
-consisting of the following data.
-
-\lt{}File-Index\gt{} \lt{}Stream-Id\gt{} \lt{}Info\gt{} where {\bf
-\lt{}File-Index\gt{}} is a sequential number beginning from one that
-increments with each file (or directory) sent.
-
-where {\bf \lt{}Stream-Id\gt{}} will be 1 for the Attributes record and 2 for
-uncompressed File data. 3 is reserved for the MD5 signature for the file.
-
-where {\bf \lt{}Info\gt{}} transmits information about the Stream to the
-Storage Daemon. It is a character string field where each character has a
-meaning. The only character currently defined is 0 (zero), which is simply a
-place holder (a no op). In the future, there may be codes indicating
-compressed data, encrypted data, etc.
-
-Immediately following the header packet, the Storage daemon will expect any
-number of data packets. The series of data packets is terminated by a zero
-length packet, which indicates to the Storage daemon that the next packet will
-be another header packet. As previously mentioned, a negative length packet is
-a request for the Storage daemon to temporarily enter command mode and send a
-reply to the File daemon. Thus an actual conversation might contain the
-following exchanges:
-
-\footnotesize
-\begin{verbatim}
-FD: <1 1 0> (header packet)
-FD: <data packet containing file-attributes>
-FD: Null packet
-FD: <1 2 0>
-FD: <multiple data packets containing the file data>
-FD: Packet length = -1
-SD: 3000 OK
-FD: <2 1 0>
-FD: <data packet containing file-attributes>
-FD: Null packet
-FD: <2 2 0>
-FD: <multiple data packets containing the file data>
-FD: Null packet
-FD: Null packet
-FD: append end session <ticket-number>
-SD: 3000 OK end
-FD: append close session <ticket-number>
-SD: 3000 OK Volumes = <number of volumes>
-SD: 3001 Volume = <volumeid> <start file> <start block>
- <end file> <end block> <volume session-id>
-SD: 3002 Volume data = <date/time of last write> <Number bytes written>
- <number errors>
-SD: ... additional Volume / Volume data pairs for
- volumes 2 .. n
-FD: close socket
-\end{verbatim}
-\normalsize
-
-The information returned to the File daemon by the Storage daemon in response
-to the {\bf append close session} is transmitted in turn to the Director.
+++ /dev/null
-/* Century Schoolbook font is very similar to Computer Modern Math: cmmi */
-.MATH { font-family: "Century Schoolbook", serif; }
-.MATH I { font-family: "Century Schoolbook", serif; font-style: italic }
-.BOLDMATH { font-family: "Century Schoolbook", serif; font-weight: bold }
-
-/* implement both fixed-size and relative sizes */
-SMALL.XTINY { font-size : xx-small }
-SMALL.TINY { font-size : x-small }
-SMALL.SCRIPTSIZE { font-size : smaller }
-SMALL.FOOTNOTESIZE { font-size : small }
-SMALL.SMALL { }
-BIG.LARGE { }
-BIG.XLARGE { font-size : large }
-BIG.XXLARGE { font-size : x-large }
-BIG.HUGE { font-size : larger }
-BIG.XHUGE { font-size : xx-large }
-
-/* heading styles */
-H1 { }
-H2 { }
-H3 { }
-H4 { }
-H5 { }
-
-/* mathematics styles */
-DIV.displaymath { } /* math displays */
-TD.eqno { } /* equation-number cells */
-
-
-/* document-specific styles come next */
+++ /dev/null
-%%
-%%
-
-\documentclass[11pt,a4paper]{report}
-\usepackage{html}
-\usepackage{float}
-\usepackage{graphicx}
-\usepackage{bacula}
-\usepackage{longtable}
-\usepackage{makeidx}
-\usepackage{index}
-\usepackage{setspace}
-\usepackage{hyperref}
-\usepackage{url}
-
-
-\makeindex
-\newindex{general}{idx}{ind}{General Index}
-
-\sloppy
-
-\begin{document}
-\sloppy
-
-\newfont{\bighead}{cmr17 at 36pt}
-\parskip 10pt
-\parindent 0pt
-
-\title{\includegraphics{./bacula-logo.eps} \\ \bigskip
- \Huge{Developers' Guide}
- \begin{center}
- \large{It comes in the night and sucks
- the essence from your computers. }
- \end{center}
-}
-
-
-\author{Kern Sibbald}
-\date{\vspace{1.0in}\today \\
- This manual documents Bacula version \input{version} \\
- \vspace{0.2in}
- Copyright \copyright 1999-2007, Free Software Foundation Europe
- e.V. \\
- \vspace{0.2in}
- Permission is granted to copy, distribute and/or modify this document under the terms of the
- GNU Free Documentation License, Version 1.2 published by the Free Software Foundation;
- with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts.
- A copy of the license is included in the section entitled "GNU Free Documentation License".
-}
-
-
-\maketitle
-
-\clearpage
-\tableofcontents
-\clearpage
-\listoffigures
-\clearpage
-\listoftables
-\clearpage
-
-\include{generaldevel}
-\include{platformsupport}
-\include{daemonprotocol}
-\include{director}
-\include{file}
-\include{storage}
-\include{catalog}
-\include{mediaformat}
-\include{porting}
-\include{gui-interface}
-\include{tls-techdoc}
-\include{regression}
-\include{md5}
-\include{mempool}
-\include{netprotocol}
-\include{smartall}
-\include{fdl}
-
-
-% The following line tells link_resolver.pl to not include these files:
-% nolinks developersi baculai-dir baculai-fd baculai-sd baculai-console baculai-main
-
-% pull in the index
-\clearpage
-\printindex
-
-\end{document}
+++ /dev/null
-%%
-%%
-
-\chapter{Director Services Daemon}
-\label{_ChapterStart6}
-\index{Daemon!Director Services }
-\index{Director Services Daemon }
-\addcontentsline{toc}{section}{Director Services Daemon}
-
-This chapter is intended to be a technical discussion of the Director services
-and as such is not targeted at end users but rather at developers and system
-administrators that want or need to know more of the working details of {\bf
-Bacula}.
-
-The {\bf Bacula Director} services consist of the program that supervises all
-the backup and restore operations.
-
-To be written ...
+++ /dev/null
-%---------The file header---------------------------------------------
-
-%% \usepackage[english]{babel} %language selection
-%% \usepackage[T1]{fontenc}
-
-%%\pagenumbering{arabic}
-
-%% \usepackage{hyperref}
-%% \hypersetup{colorlinks,
-%% citecolor=black,
-%% filecolor=black,
-%% linkcolor=black,
-%% urlcolor=black,
-%% pdftex}
-
-
-%---------------------------------------------------------------------
-\chapter{GNU Free Documentation License}
-\index[general]{GNU Free Documentation License}
-\index[general]{License!GNU Free Documentation}
-\addcontentsline{toc}{section}{GNU Free Documentation License}
-
-%\label{label_fdl}
-
- \begin{center}
-
- Version 1.2, November 2002
-
-
- Copyright \copyright 2000,2001,2002 Free Software Foundation, Inc.
-
- \bigskip
-
- 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
- \bigskip
-
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-\end{center}
-
-
-\begin{center}
-{\bf\large Preamble}
-\end{center}
-
-The purpose of this License is to make a manual, textbook, or other
-functional and useful document "free" in the sense of freedom: to
-assure everyone the effective freedom to copy and redistribute it,
-with or without modifying it, either commercially or noncommercially.
-Secondarily, this License preserves for the author and publisher a way
-to get credit for their work, while not being considered responsible
-for modifications made by others.
-
-This License is a kind of "copyleft", which means that derivative
-works of the document must themselves be free in the same sense. It
-complements the GNU General Public License, which is a copyleft
-license designed for free software.
-
-We have designed this License in order to use it for manuals for free
-software, because free software needs free documentation: a free
-program should come with manuals providing the same freedoms that the
-software does. But this License is not limited to software manuals;
-it can be used for any textual work, regardless of subject matter or
-whether it is published as a printed book. We recommend this License
-principally for works whose purpose is instruction or reference.
-
-
-\begin{center}
-{\Large\bf 1. APPLICABILITY AND DEFINITIONS}
-\addcontentsline{toc}{section}{1. APPLICABILITY AND DEFINITIONS}
-\end{center}
-
-This License applies to any manual or other work, in any medium, that
-contains a notice placed by the copyright holder saying it can be
-distributed under the terms of this License. Such a notice grants a
-world-wide, royalty-free license, unlimited in duration, to use that
-work under the conditions stated herein. The \textbf{"Document"}, below,
-refers to any such manual or work. Any member of the public is a
-licensee, and is addressed as \textbf{"you"}. You accept the license if you
-copy, modify or distribute the work in a way requiring permission
-under copyright law.
-
-A \textbf{"Modified Version"} of the Document means any work containing the
-Document or a portion of it, either copied verbatim, or with
-modifications and/or translated into another language.
-
-A \textbf{"Secondary Section"} is a named appendix or a front-matter section of
-the Document that deals exclusively with the relationship of the
-publishers or authors of the Document to the Document's overall subject
-(or to related matters) and contains nothing that could fall directly
-within that overall subject. (Thus, if the Document is in part a
-textbook of mathematics, a Secondary Section may not explain any
-mathematics.) The relationship could be a matter of historical
-connection with the subject or with related matters, or of legal,
-commercial, philosophical, ethical or political position regarding
-them.
-
-The \textbf{"Invariant Sections"} are certain Secondary Sections whose titles
-are designated, as being those of Invariant Sections, in the notice
-that says that the Document is released under this License. If a
-section does not fit the above definition of Secondary then it is not
-allowed to be designated as Invariant. The Document may contain zero
-Invariant Sections. If the Document does not identify any Invariant
-Sections then there are none.
-
-The \textbf{"Cover Texts"} are certain short passages of text that are listed,
-as Front-Cover Texts or Back-Cover Texts, in the notice that says that
-the Document is released under this License. A Front-Cover Text may
-be at most 5 words, and a Back-Cover Text may be at most 25 words.
-
-A \textbf{"Transparent"} copy of the Document means a machine-readable copy,
-represented in a format whose specification is available to the
-general public, that is suitable for revising the document
-straightforwardly with generic text editors or (for images composed of
-pixels) generic paint programs or (for drawings) some widely available
-drawing editor, and that is suitable for input to text formatters or
-for automatic translation to a variety of formats suitable for input
-to text formatters. A copy made in an otherwise Transparent file
-format whose markup, or absence of markup, has been arranged to thwart
-or discourage subsequent modification by readers is not Transparent.
-An image format is not Transparent if used for any substantial amount
-of text. A copy that is not "Transparent" is called \textbf{"Opaque"}.
-
-Examples of suitable formats for Transparent copies include plain
-ASCII without markup, Texinfo input format, LaTeX input format, SGML
-or XML using a publicly available DTD, and standard-conforming simple
-HTML, PostScript or PDF designed for human modification. Examples of
-transparent image formats include PNG, XCF and JPG. Opaque formats
-include proprietary formats that can be read and edited only by
-proprietary word processors, SGML or XML for which the DTD and/or
-processing tools are not generally available, and the
-machine-generated HTML, PostScript or PDF produced by some word
-processors for output purposes only.
-
-The \textbf{"Title Page"} means, for a printed book, the title page itself,
-plus such following pages as are needed to hold, legibly, the material
-this License requires to appear in the title page. For works in
-formats which do not have any title page as such, "Title Page" means
-the text near the most prominent appearance of the work's title,
-preceding the beginning of the body of the text.
-
-A section \textbf{"Entitled XYZ"} means a named subunit of the Document whose
-title either is precisely XYZ or contains XYZ in parentheses following
-text that translates XYZ in another language. (Here XYZ stands for a
-specific section name mentioned below, such as \textbf{"Acknowledgements"},
-\textbf{"Dedications"}, \textbf{"Endorsements"}, or \textbf{"History"}.)
-To \textbf{"Preserve the Title"}
-of such a section when you modify the Document means that it remains a
-section "Entitled XYZ" according to this definition.
-
-The Document may include Warranty Disclaimers next to the notice which
-states that this License applies to the Document. These Warranty
-Disclaimers are considered to be included by reference in this
-License, but only as regards disclaiming warranties: any other
-implication that these Warranty Disclaimers may have is void and has
-no effect on the meaning of this License.
-
-
-\begin{center}
-{\Large\bf 2. VERBATIM COPYING}
-\addcontentsline{toc}{section}{2. VERBATIM COPYING}
-\end{center}
-
-You may copy and distribute the Document in any medium, either
-commercially or noncommercially, provided that this License, the
-copyright notices, and the license notice saying this License applies
-to the Document are reproduced in all copies, and that you add no other
-conditions whatsoever to those of this License. You may not use
-technical measures to obstruct or control the reading or further
-copying of the copies you make or distribute. However, you may accept
-compensation in exchange for copies. If you distribute a large enough
-number of copies you must also follow the conditions in section 3.
-
-You may also lend copies, under the same conditions stated above, and
-you may publicly display copies.
-
-
-\begin{center}
-{\Large\bf 3. COPYING IN QUANTITY}
-\addcontentsline{toc}{section}{3. COPYING IN QUANTITY}
-\end{center}
-
-
-If you publish printed copies (or copies in media that commonly have
-printed covers) of the Document, numbering more than 100, and the
-Document's license notice requires Cover Texts, you must enclose the
-copies in covers that carry, clearly and legibly, all these Cover
-Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on
-the back cover. Both covers must also clearly and legibly identify
-you as the publisher of these copies. The front cover must present
-the full title with all words of the title equally prominent and
-visible. You may add other material on the covers in addition.
-Copying with changes limited to the covers, as long as they preserve
-the title of the Document and satisfy these conditions, can be treated
-as verbatim copying in other respects.
-
-If the required texts for either cover are too voluminous to fit
-legibly, you should put the first ones listed (as many as fit
-reasonably) on the actual cover, and continue the rest onto adjacent
-pages.
-
-If you publish or distribute Opaque copies of the Document numbering
-more than 100, you must either include a machine-readable Transparent
-copy along with each Opaque copy, or state in or with each Opaque copy
-a computer-network location from which the general network-using
-public has access to download using public-standard network protocols
-a complete Transparent copy of the Document, free of added material.
-If you use the latter option, you must take reasonably prudent steps,
-when you begin distribution of Opaque copies in quantity, to ensure
-that this Transparent copy will remain thus accessible at the stated
-location until at least one year after the last time you distribute an
-Opaque copy (directly or through your agents or retailers) of that
-edition to the public.
-
-It is requested, but not required, that you contact the authors of the
-Document well before redistributing any large number of copies, to give
-them a chance to provide you with an updated version of the Document.
-
-
-\begin{center}
-{\Large\bf 4. MODIFICATIONS}
-\addcontentsline{toc}{section}{4. MODIFICATIONS}
-\end{center}
-
-You may copy and distribute a Modified Version of the Document under
-the conditions of sections 2 and 3 above, provided that you release
-the Modified Version under precisely this License, with the Modified
-Version filling the role of the Document, thus licensing distribution
-and modification of the Modified Version to whoever possesses a copy
-of it. In addition, you must do these things in the Modified Version:
-
-\begin{itemize}
-\item[A.]
- Use in the Title Page (and on the covers, if any) a title distinct
- from that of the Document, and from those of previous versions
- (which should, if there were any, be listed in the History section
- of the Document). You may use the same title as a previous version
- if the original publisher of that version gives permission.
-
-\item[B.]
- List on the Title Page, as authors, one or more persons or entities
- responsible for authorship of the modifications in the Modified
- Version, together with at least five of the principal authors of the
- Document (all of its principal authors, if it has fewer than five),
- unless they release you from this requirement.
-
-\item[C.]
- State on the Title page the name of the publisher of the
- Modified Version, as the publisher.
-
-\item[D.]
- Preserve all the copyright notices of the Document.
-
-\item[E.]
- Add an appropriate copyright notice for your modifications
- adjacent to the other copyright notices.
-
-\item[F.]
- Include, immediately after the copyright notices, a license notice
- giving the public permission to use the Modified Version under the
- terms of this License, in the form shown in the Addendum below.
-
-\item[G.]
- Preserve in that license notice the full lists of Invariant Sections
- and required Cover Texts given in the Document's license notice.
-
-\item[H.]
- Include an unaltered copy of this License.
-
-\item[I.]
- Preserve the section Entitled "History", Preserve its Title, and add
- to it an item stating at least the title, year, new authors, and
- publisher of the Modified Version as given on the Title Page. If
- there is no section Entitled "History" in the Document, create one
- stating the title, year, authors, and publisher of the Document as
- given on its Title Page, then add an item describing the Modified
- Version as stated in the previous sentence.
-
-\item[J.]
- Preserve the network location, if any, given in the Document for
- public access to a Transparent copy of the Document, and likewise
- the network locations given in the Document for previous versions
- it was based on. These may be placed in the "History" section.
- You may omit a network location for a work that was published at
- least four years before the Document itself, or if the original
- publisher of the version it refers to gives permission.
-
-\item[K.]
- For any section Entitled "Acknowledgements" or "Dedications",
- Preserve the Title of the section, and preserve in the section all
- the substance and tone of each of the contributor acknowledgements
- and/or dedications given therein.
-
-\item[L.]
- Preserve all the Invariant Sections of the Document,
- unaltered in their text and in their titles. Section numbers
- or the equivalent are not considered part of the section titles.
-
-\item[M.]
- Delete any section Entitled "Endorsements". Such a section
- may not be included in the Modified Version.
-
-\item[N.]
- Do not retitle any existing section to be Entitled "Endorsements"
- or to conflict in title with any Invariant Section.
-
-\item[O.]
- Preserve any Warranty Disclaimers.
-\end{itemize}
-
-If the Modified Version includes new front-matter sections or
-appendices that qualify as Secondary Sections and contain no material
-copied from the Document, you may at your option designate some or all
-of these sections as invariant. To do this, add their titles to the
-list of Invariant Sections in the Modified Version's license notice.
-These titles must be distinct from any other section titles.
-
-You may add a section Entitled "Endorsements", provided it contains
-nothing but endorsements of your Modified Version by various
-parties--for example, statements of peer review or that the text has
-been approved by an organization as the authoritative definition of a
-standard.
-
-You may add a passage of up to five words as a Front-Cover Text, and a
-passage of up to 25 words as a Back-Cover Text, to the end of the list
-of Cover Texts in the Modified Version. Only one passage of
-Front-Cover Text and one of Back-Cover Text may be added by (or
-through arrangements made by) any one entity. If the Document already
-includes a cover text for the same cover, previously added by you or
-by arrangement made by the same entity you are acting on behalf of,
-you may not add another; but you may replace the old one, on explicit
-permission from the previous publisher that added the old one.
-
-The author(s) and publisher(s) of the Document do not by this License
-give permission to use their names for publicity for or to assert or
-imply endorsement of any Modified Version.
-
-
-\begin{center}
-{\Large\bf 5. COMBINING DOCUMENTS}
-\addcontentsline{toc}{section}{5. COMBINING DOCUMENTS}
-\end{center}
-
-
-You may combine the Document with other documents released under this
-License, under the terms defined in section 4 above for modified
-versions, provided that you include in the combination all of the
-Invariant Sections of all of the original documents, unmodified, and
-list them all as Invariant Sections of your combined work in its
-license notice, and that you preserve all their Warranty Disclaimers.
-
-The combined work need only contain one copy of this License, and
-multiple identical Invariant Sections may be replaced with a single
-copy. If there are multiple Invariant Sections with the same name but
-different contents, make the title of each such section unique by
-adding at the end of it, in parentheses, the name of the original
-author or publisher of that section if known, or else a unique number.
-Make the same adjustment to the section titles in the list of
-Invariant Sections in the license notice of the combined work.
-
-In the combination, you must combine any sections Entitled "History"
-in the various original documents, forming one section Entitled
-"History"; likewise combine any sections Entitled "Acknowledgements",
-and any sections Entitled "Dedications". You must delete all sections
-Entitled "Endorsements".
-
-\begin{center}
-{\Large\bf 6. COLLECTIONS OF DOCUMENTS}
-\addcontentsline{toc}{section}{6. COLLECTIONS OF DOCUMENTS}
-\end{center}
-
-You may make a collection consisting of the Document and other documents
-released under this License, and replace the individual copies of this
-License in the various documents with a single copy that is included in
-the collection, provided that you follow the rules of this License for
-verbatim copying of each of the documents in all other respects.
-
-You may extract a single document from such a collection, and distribute
-it individually under this License, provided you insert a copy of this
-License into the extracted document, and follow this License in all
-other respects regarding verbatim copying of that document.
-
-
-\begin{center}
-{\Large\bf 7. AGGREGATION WITH INDEPENDENT WORKS}
-\addcontentsline{toc}{section}{7. AGGREGATION WITH INDEPENDENT WORKS}
-\end{center}
-
-
-A compilation of the Document or its derivatives with other separate
-and independent documents or works, in or on a volume of a storage or
-distribution medium, is called an "aggregate" if the copyright
-resulting from the compilation is not used to limit the legal rights
-of the compilation's users beyond what the individual works permit.
-When the Document is included in an aggregate, this License does not
-apply to the other works in the aggregate which are not themselves
-derivative works of the Document.
-
-If the Cover Text requirement of section 3 is applicable to these
-copies of the Document, then if the Document is less than one half of
-the entire aggregate, the Document's Cover Texts may be placed on
-covers that bracket the Document within the aggregate, or the
-electronic equivalent of covers if the Document is in electronic form.
-Otherwise they must appear on printed covers that bracket the whole
-aggregate.
-
-
-\begin{center}
-{\Large\bf 8. TRANSLATION}
-\addcontentsline{toc}{section}{8. TRANSLATION}
-\end{center}
-
-
-Translation is considered a kind of modification, so you may
-distribute translations of the Document under the terms of section 4.
-Replacing Invariant Sections with translations requires special
-permission from their copyright holders, but you may include
-translations of some or all Invariant Sections in addition to the
-original versions of these Invariant Sections. You may include a
-translation of this License, and all the license notices in the
-Document, and any Warranty Disclaimers, provided that you also include
-the original English version of this License and the original versions
-of those notices and disclaimers. In case of a disagreement between
-the translation and the original version of this License or a notice
-or disclaimer, the original version will prevail.
-
-If a section in the Document is Entitled "Acknowledgements",
-"Dedications", or "History", the requirement (section 4) to Preserve
-its Title (section 1) will typically require changing the actual
-title.
-
-
-\begin{center}
-{\Large\bf 9. TERMINATION}
-\addcontentsline{toc}{section}{9. TERMINATION}
-\end{center}
-
-
-You may not copy, modify, sublicense, or distribute the Document except
-as expressly provided for under this License. Any other attempt to
-copy, modify, sublicense or distribute the Document is void, and will
-automatically terminate your rights under this License. However,
-parties who have received copies, or rights, from you under this
-License will not have their licenses terminated so long as such
-parties remain in full compliance.
-
-
-\begin{center}
-{\Large\bf 10. FUTURE REVISIONS OF THIS LICENSE}
-\addcontentsline{toc}{section}{10. FUTURE REVISIONS OF THIS LICENSE}
-\end{center}
-
-
-The Free Software Foundation may publish new, revised versions
-of the GNU Free Documentation License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns. See
-http://www.gnu.org/copyleft/.
-
-Each version of the License is given a distinguishing version number.
-If the Document specifies that a particular numbered version of this
-License "or any later version" applies to it, you have the option of
-following the terms and conditions either of that specified version or
-of any later version that has been published (not as a draft) by the
-Free Software Foundation. If the Document does not specify a version
-number of this License, you may choose any version ever published (not
-as a draft) by the Free Software Foundation.
-
-
-\begin{center}
-{\Large\bf ADDENDUM: How to use this License for your documents}
-\addcontentsline{toc}{section}{ADDENDUM: How to use this License for your documents}
-\end{center}
-
-To use this License in a document you have written, include a copy of
-the License in the document and put the following copyright and
-license notices just after the title page:
-
-\bigskip
-\begin{quote}
- Copyright \copyright YEAR YOUR NAME.
- Permission is granted to copy, distribute and/or modify this document
- under the terms of the GNU Free Documentation License, Version 1.2
- or any later version published by the Free Software Foundation;
- with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts.
- A copy of the license is included in the section entitled "GNU
- Free Documentation License".
-\end{quote}
-\bigskip
-
-If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts,
-replace the "with...Texts." line with this:
-
-\bigskip
-\begin{quote}
- with the Invariant Sections being LIST THEIR TITLES, with the
- Front-Cover Texts being LIST, and with the Back-Cover Texts being LIST.
-\end{quote}
-\bigskip
-
-If you have Invariant Sections without Cover Texts, or some other
-combination of the three, merge those two alternatives to suit the
-situation.
-
-If your document contains nontrivial examples of program code, we
-recommend releasing these examples in parallel under your choice of
-free software license, such as the GNU General Public License,
-to permit their use in free software.
-
-%---------------------------------------------------------------------
+++ /dev/null
-%%
-%%
-
-\chapter{File Services Daemon}
-\label{_ChapterStart11}
-\index{File Services Daemon }
-\index{Daemon!File Services }
-\addcontentsline{toc}{section}{File Services Daemon}
-
-Please note, this section is somewhat out of date as the code has evolved
-significantly. The basic idea has not changed though.
-
-This chapter is intended to be a technical discussion of the File daemon
-services and as such is not targeted at end users but rather at developers and
-system administrators that want or need to know more of the working details of
-{\bf Bacula}.
-
-The {\bf Bacula File Services} consist of the programs that run on the system
-to be backed up and provide the interface between the Host File system and
-Bacula -- in particular, the Director and the Storage services.
-
-When time comes for a backup, the Director gets in touch with the File daemon
-on the client machine and hands it a set of ``marching orders'' which, if
-written in English, might be something like the following:
-
-OK, {\bf File daemon}, it's time for your daily incremental backup. I want you
-to get in touch with the Storage daemon on host archive.mysite.com and perform
-the following save operations with the designated options. You'll note that
-I've attached include and exclude lists and patterns you should apply when
-backing up the file system. As this is an incremental backup, you should save
-only files modified since the time you started your last backup which, as you
-may recall, was 2000-11-19-06:43:38. Please let me know when you're done and
-how it went. Thank you.
-
-So, having been handed everything it needs to decide what to dump and where to
-store it, the File daemon doesn't need to have any further contact with the
-Director until the backup is complete providing there are no errors. If there
-are errors, the error messages will be delivered immediately to the Director.
-While the backup is proceeding, the File daemon will send the file coordinates
-and data for each file being backed up to the Storage daemon, which will in
-turn pass the file coordinates to the Director to put in the catalog.
-
-During a {\bf Verify} of the catalog, the situation is different, since the
-File daemon will have an exchange with the Director for each file, and will
-not contact the Storage daemon.
-
-A {\bf Restore} operation will be very similar to the {\bf Backup} except that
-during the {\bf Restore} the Storage daemon will not send storage coordinates
-to the Director since the Director presumably already has them. On the other
-hand, any error messages from either the Storage daemon or File daemon will
-normally be sent directly to the Directory (this, of course, depends on how
-the Message resource is defined).
-
-\section{Commands Received from the Director for a Backup}
-\index{Backup!Commands Received from the Director for a }
-\index{Commands Received from the Director for a Backup }
-\addcontentsline{toc}{subsection}{Commands Received from the Director for a
-Backup}
-
-To be written ...
-
-\section{Commands Received from the Director for a Restore}
-\index{Commands Received from the Director for a Restore }
-\index{Restore!Commands Received from the Director for a }
-\addcontentsline{toc}{subsection}{Commands Received from the Director for a
-Restore}
-
-To be written ...
+++ /dev/null
-#!/usr/bin/perl -w
-# Fixes various things within tex files.
-
-use strict;
-
-my %args;
-
-
-sub get_includes {
- # Get a list of include files from the top-level tex file.
- my (@list,$file);
-
- foreach my $filename (@_) {
- $filename or next;
- # Start with the top-level latex file so it gets checked too.
- push (@list,$filename);
-
- # Get a list of all the html files in the directory.
- open IF,"<$filename" or die "Cannot open input file $filename";
- while (<IF>) {
- chomp;
- push @list,"$1.tex" if (/\\include\{(.*?)\}/);
- }
-
- close IF;
- }
- return @list;
-}
-
-sub convert_files {
- my (@files) = @_;
- my ($linecnt,$filedata,$output,$itemcnt,$indentcnt,$cnt);
-
- $cnt = 0;
- foreach my $file (@files) {
- # Open the file and load the whole thing into $filedata. A bit wasteful but
- # easier to deal with, and we don't have a problem with speed here.
- $filedata = "";
- open IF,"<$file" or die "Cannot open input file $file";
- while (<IF>) {
- $filedata .= $_;
- }
- close IF;
-
- # We look for a line that starts with \item, and indent the two next lines (if not blank)
- # by three spaces.
- my $linecnt = 3;
- $indentcnt = 0;
- $output = "";
- # Process a line at a time.
- foreach (split(/\n/,$filedata)) {
- $_ .= "\n"; # Put back the return.
- # If this line is less than the third line past the \item command,
- # and the line isn't blank and doesn't start with whitespace
- # add three spaces to the start of the line. Keep track of the number
- # of lines changed.
- if ($linecnt < 3 and !/^\\item/) {
- if (/^[^\n\s]/) {
- $output .= " " . $_;
- $indentcnt++;
- } else {
- $output .= $_;
- }
- $linecnt++;
- } else {
- $linecnt = 3;
- $output .= $_;
- }
- /^\\item / and $linecnt = 1;
- }
-
-
- # This is an item line. We need to process it too. If inside a \begin{description} environment, convert
- # \item {\bf xxx} to \item [xxx] or \item [{xxx}] (if xxx contains '[' or ']'.
- $itemcnt = 0;
- $filedata = $output;
- $output = "";
- my ($before,$descrip,$this,$between);
-
- # Find any \begin{description} environment
- while ($filedata =~ /(\\begin[\s\n]*\{[\s\n]*description[\s\n]*\})(.*?)(\\end[\s\n]*\{[\s\n]*description[\s\n]*\})/s) {
- $output .= $` . $1;
- $filedata = $3 . $';
- $descrip = $2;
-
- # Search for \item {\bf xxx}
- while ($descrip =~ /\\item[\s\n]*\{[\s\n]*\\bf[\s\n]*/s) {
- $descrip = $';
- $output .= $`;
- ($between,$descrip) = find_matching_brace($descrip);
- if (!$descrip) {
- $linecnt = $output =~ tr/\n/\n/;
- print STDERR "Missing matching curly brace at line $linecnt in $file\n" if (!$descrip);
- }
-
- # Now do the replacement.
- $between = '{' . $between . '}' if ($between =~ /\[|\]/);
- $output .= "\\item \[$between\]";
- $itemcnt++;
- }
- $output .= $descrip;
- }
- $output .= $filedata;
-
- # If any hyphens or \item commnads were converted, save the file.
- if ($indentcnt or $itemcnt) {
- open OF,">$file" or die "Cannot open output file $file";
- print OF $output;
- close OF;
- print "$indentcnt indent", ($indentcnt == 1) ? "" : "s"," added in $file\n";
- print "$itemcnt item", ($itemcnt == 1) ? "" : "s"," Changed in $file\n";
- }
-
- $cnt += $indentcnt + $itemcnt;
- }
- return $cnt;
-}
-
-sub find_matching_brace {
- # Finds text up to the next matching brace. Assumes that the input text doesn't contain
- # the opening brace, but we want to find text up to a matching closing one.
- # Returns the text between the matching braces, followed by the rest of the text following
- # (which does not include the matching brace).
- #
- my $str = shift;
- my ($this,$temp);
- my $cnt = 1;
-
- while ($cnt) {
- # Ignore verbatim constructs involving curly braces, or if the character preceding
- # the curly brace is a backslash.
- if ($str =~ /\\verb\*?\{.*?\{|\\verb\*?\}.*?\}|\{|\}/s) {
- $this .= $`;
- $str = $';
- $temp = $&;
-
- if ((substr($this,-1,1) eq '\\') or
- $temp =~ /^\\verb/) {
- $this .= $temp;
- next;
- }
-
- $cnt += ($temp eq '{') ? 1 : -1;
- # If this isn't the matching curly brace ($cnt > 0), include the brace.
- $this .= $temp if ($cnt);
- } else {
- # No matching curly brace found.
- return ($this . $str,'');
- }
- }
- return ($this,$str);
-}
-
-sub check_arguments {
- # Checks command-line arguments for ones starting with -- puts them into
- # a hash called %args and removes them from @ARGV.
- my $args = shift;
- my $i;
-
- for ($i = 0; $i < $#ARGV; $i++) {
- $ARGV[$i] =~ /^\-+/ or next;
- $ARGV[$i] =~ s/^\-+//;
- $args{$ARGV[$i]} = "";
- delete ($ARGV[$i]);
-
- }
-}
-
-##################################################################
-# MAIN ####
-##################################################################
-
-my @includes;
-my $cnt;
-
-check_arguments(\%args);
-die "No Files given to Check\n" if ($#ARGV < 0);
-
-# Examine the file pointed to by the first argument to get a list of
-# includes to test.
-@includes = get_includes(@ARGV);
-
-$cnt = convert_files(@includes);
-print "No lines changed\n" unless $cnt;
+++ /dev/null
-%%
-%%
-
-\chapter{Bacula Developer Notes}
-\label{_ChapterStart10}
-\index{Bacula Developer Notes}
-\index{Notes!Bacula Developer}
-\addcontentsline{toc}{section}{Bacula Developer Notes}
-
-\section{General}
-\index{General}
-\addcontentsline{toc}{subsection}{General}
-
-This document is intended mostly for developers and describes the the general
-framework of making Bacula source changes.
-
-\subsection{Contributions}
-\index{Contributions}
-\addcontentsline{toc}{subsubsection}{Contributions}
-
-Contributions from programmers are broken into two groups. The first are
-contributions that are aids and not essential to Bacula. In general, these
-will be scripts or will go into and examples or contributions directory.
-For these kinds of non-essential contributions there is no obligation to do
-a copyright assignment as described below. However, a copyright assignment
-would still be appreciated.
-
-The second class of contributions are those which will be integrated with
-Bacula and become an essential part. Within this class of contributions, there
-are two hurdles to surmount. One is getting your patch accepted, and two is
-dealing with copyright issues. The following text describes some of the
-requirements for such code.
-
-\subsection{Patches}
-\index{Patches}
-\addcontentsline{toc}{subsubsection}{Patches}
-
-Subject to the copyright assignment described below, your patches should be
-sent in {\bf diff -u} format relative to the current contents of the Source
-Forge SVN, which is the easiest to understand and integrate.
-Please be sure to use the Bacula indenting standard (see below).
-If you have checked out the source with SVN, you can get a diff using:
-
-\begin{verbatim}
-svn update
-svn diff > change.patch
-\end{verbatim}
-
-If you plan on doing significant development work over a period of time,
-after having your first patch reviewed and approved, you will be eligible
-for having developer SVN access so that you can commit your changes
-directly to the SVN repository. To do so, you will need a userid on Source
-Forge.
-
-\subsection{Copyrights}
-\index{Copyrights}
-\addcontentsline{toc}{subsubsection}{Copyrights}
-
-To avoid future problems concerning changing licensing or
-copyrights, all code contributions more than a handful of lines
-must be in the Public Domain or have the copyright transferred to
-the Free Software Foundation Europe e.V. with a Fiduciary License
-Agreement (FLA) as in the current code. Note, prior to
-November 2004, the code was copyrighted by Kern Sibbald and John
-Walker. After November 2004, the code was copyrighted by Kern
-Sibbald, then on the 15th of November 2006, the copyright was
-transferred to the Free Software Foundation Europe e.V.
-
-Your name should be clearly indicated as the author of the code, and you
-must be extremely careful not to violate any copyrights or use other
-people's code without acknowledging it. The purpose of this requirement is
-to avoid future copyright, patent, or intellectual property problems.
-Please read the LICENSE agreement in the main source code
-directory. When you sign the Fiduciary License Agreement (FLA)
-and send it in, you are agreeing to the terms of that LICENSE
-file.
-
-To understand the possible source of future problems, please
-examine the difficulties Mozilla is (was?) having finding
-previous contributors at \elink{
-http://www.mozilla.org/MPL/missing.html}
-{http://www.mozilla.org/MPL/missing.html}. The other important issue is to
-avoid copyright, patent, or intellectual property violations as are currently
-(May 2003) being claimed by SCO against IBM.
-
-Although the copyright will be held by the Free Software
-Foundation Europe e.V., each developer is expected to indicate
-that he wrote and/or modified a particular module (or file) and
-any other sources. The copyright assignment may seem a bit
-unusual, but in reality, it is not. Most large projects require
-this.
-
-If you have any doubts about this, please don't hesitate to ask. The
-objective is to assure the long term survival of the Bacula project.
-
-Items not needing a copyright assignment are: most small changes,
-enhancements, or bug fixes of 5-10 lines of code, which amount to
-less than 20\% of any particular file.
-
-\subsection{Copyright Assignment -- Fiduciary License Agreement}
-\index{Copyright Assignment}
-\index{Assignment!Copyright}
-\addcontentsline{toc}{subsubsection}{Copyright Assignment -- Fiduciary License Agreement}
-
-Since this is not a commercial enterprise, and we prefer to believe in
-everyone's good faith, previously developers could assign the copyright by
-explicitly acknowledging that they do so in their first submission. This
-was sufficient if the developer is independent, or an employee of a
-not-for-profit organization or a university. However, in an effort to
-ensure that the Bacula code is really clean, beginning in August 2006, all
-previous and future developers with SVN access will be asked to submit a
-copyright assignment (or Fiduciary License Agreement -- FLA),
-which means you agree to the LICENSE in the main source
-directory. It also means that you receive back the right to use
-the code that you have submitted.
-
-Any developer who wants to contribute and is employed by a company should
-either list the employer as the owner of the code, or get
-explicit permission from him to sign the copyright assignment.
-This is because in many
-countries, all work that an employee does whether on company time or in the
-employee's free time is considered to be Intellectual Property of the
-company. Obtaining official approval or an FLA from the company will avoid
-misunderstandings between the employee, the company, and the Bacula
-project. A good number of companies have already followed this procedure.
-
-The Fiduciary License Agreement is posted on the Bacula web site at:
-\elink{http://www.bacula.org/FLA-bacula.en.pdf}{http://www.bacula.org/FLA-bacula.en.pdf}
-
-The instructions for filling out this agreement are also at:
-\elink{http://www.bacula.org/?page=fsfe}{http://www.bacula.org/?page=fsfe}
-
-It should be filled out, then sent to:
-
-\begin{verbatim}
- Free Software Foundation Europe
- Freedom Task Force
- Sumatrastrasse 25
- 8006 Zürich
- Switzerland
-\end{verbatim}
-
-Please note that the above address is different from the officially
-registered office mentioned in the document. When you send in such a
-complete document, please notify me: kern at sibbald dot com.
-
-
-
-\section{The Development Cycle}
-\index{Development Cycle}
-\index{Cycle!Development}
-\addcontentsline{toc}{subsubsection}{Development Cycle}
-
-As I noted in the 1.38 ReleaseNotes, version 1.38 was different from prior
-versions because it had a lot more contributions. I expect that this trend
-will continue. As a consequence, I am going to modify how I normally do
-development, and instead of making a list of all the features that I will
-implement in the next version, I will personally sign up for one (maybe
-two) projects at a time, and when they are complete, I will release a new
-version.
-
-The difference is that I will have more time to review the new code that is
-being contributed, and will be able to devote more time to a smaller number
-of projects (1.38 had too many new features for me to handle correctly).
-
-I expect that future release schedules will be much the same, and the
-number of new features will also be much the same providing that the
-contributions continue to come -- and they show no signs of let up :-)
-
-\index{Feature Requests}
-{\bf Feature Requests:} \\
-In addition, I would like to "formalize" the feature requests a bit.
-
-Instead of me maintaining an informal list of everything I run into
-(kernstodo), I would like to maintain a "formal" list of projects. This
-means that all new feature requests, including those recently discussed on
-the email lists, must be formally submitted and approved.
-
-Formal submission of feature requests will take two forms: \\
-1. non-mandatory, but highly recommended is to discuss proposed new features
-on the mailing list.\\
-2. Formal submission of a Feature Request in a special format.
-I'll give an example of this below, but you can also find it on the web
-site under "Support -\gt{} Feature Requests". Since it takes a bit of time to
-properly fill out a Feature Request form, you probably should check on the email list
-first.
-
-Once the Feature Request is received by the keeper of the projects list, it
-will be sent to me, and I will either accept it, send it back
-asking for clarification, send it to the email list asking for opinions, or
-reject it.
-
-If it is accepted, it will go in the "projects" file (a simple ASCII file)
-maintained in the main Bacula source directory.
-
-{\bf Implementation of Feature Requests:}\\
-Any qualified developer can sign up for a project. The project must have
-an entry in the projects file, and the developer's name will appear in the
-Status field.
-
-{\bf How Feature Requests are accepted:}\\
-Acceptance of Feature Requests depends on several things: \\
-1. feedback from users. If it is negative, the Feature Request will probably not be
-accepted. \\
-2. the difficulty of the project. A project that is so
-difficult that I cannot imagine finding someone to implement probably won't
-be accepted. \\
- 3. whether or not the Feature Request fits within the
-current strategy of Bacula (for example a Feature Request that requests changing the
-tape to tar format would not be accepted, ...)
-
-{\bf How Feature Requests are prioritized:}\\
-Once a Feature Request is accepted, it needs to be implemented. If you
-can find a developer for it, or one signs up for implementing it, then the
-Feature Request becomes top priority (at least for that developer).
-
-Between releases of Bacula, we will generally solicit Feature Request input
-for the next version, and by way of this email, we suggest that you send
-discuss and send in your Feature Requests for the next release. Please
-verify that the Feature Request is not in the current list (attached to this email).
-
-Once users have had several weeks to submit Feature Requests, the keeper of the
-projects list will
-organize them, and request users to vote on them. This will allow fixing
-prioritizing the Feature Requests. Having a priority is one thing, but
-getting it implemented is another thing -- we are hoping that the Bacula
-community will take more responsibility for assuring the implementation of
-accepted Feature Requests.
-
-Feature Request format:
-\begin{verbatim}
-============= Empty Feature Request form ===========
-Item n: One line summary ...
- Date: Date submitted
- Origin: Name and email of originator.
- Status:
-
- What: More detailed explanation ...
-
- Why: Why it is important ...
-
- Notes: Additional notes or features (omit if not used)
-============== End Feature Request form ==============
-\end{verbatim}
-
-\begin{verbatim}
-============= Example Completed Feature Request form ===========
-Item 1: Implement a Migration job type that will move the job
- data from one device to another.
-  Origin: Sponsored by Riege Software International GmbH. Contact:
- Daniel Holtkamp <holtkamp at riege dot com>
- Date: 28 October 2005
- Status: Partially coded in 1.37 -- much more to do. Assigned to
- Kern.
-
- What: The ability to copy, move, or archive data that is on a
- device to another device is very important.
-
- Why: An ISP might want to backup to disk, but after 30 days
- migrate the data to tape backup and delete it from
- disk. Bacula should be able to handle this
- automatically. It needs to know what was put where,
- and when, and what to migrate -- it is a bit like
- retention periods. Doing so would allow space to be
- freed up for current backups while maintaining older
- data on tape drives.
-
- Notes: Migration could be triggered by:
- Number of Jobs
- Number of Volumes
- Age of Jobs
- Highwater size (keep total size)
- Lowwater mark
-=================================================
-\end{verbatim}
-
-
-\section{Bacula Code Submissions and Projects}
-\index{Submissions and Projects}
-\addcontentsline{toc}{subsection}{Code Submissions and Projects}
-
-Getting code implemented in Bacula works roughly as follows:
-
-\begin{itemize}
-
-\item Kern is the project manager, but prefers not to be a "gate keeper".
- This means that the developers are expected to be self-motivated,
- and once they have experience submit directly to the SVN. However,
- it is a good idea to have your patches reviewed prior to submitting,
- and it is a bad idea to submit monster patches because no one will
- be able to properly review them. See below for more details on this.
-
-\item There are growing numbers of contributions (very good).
-
-\item Some contributions come in the form of relatively small patches,
- which Kern reviews, integrates, documents, tests, and maintains.
-
-\item All Bacula developers take full
- responsibility for writing the code, posting as patches so that I can
- review it as time permits, integrating it at an appropriate time,
- responding to my requests for tweaking it (name changes, ...),
- document it in the code, document it in the manual (even though
- their mother tongue is not English), test it, develop and commit
- regression scripts, and answer in a timely fashion all bug reports --
-   even occasionally accepting additional bugs :-)
-
- This is a sustainable way of going forward with Bacula, and the
- direction that the project will be taking more and more. For
- example, in the past, we have had some very dedicated programmers
- who did major projects. However, these
- programmers due to outside obligations (job responsibilities change of
- job, school duties, ...) could not continue to maintain the code. In
- those cases, the code suffers from lack of maintenance, sometimes I
- patch it, sometimes not. In the end, the code gets dropped from the
- project (there are two such contributions that are heading in that
- direction). When ever possible, we would like to avoid this, and
- ensure a continuation of the code and a sharing of the development,
- debugging, documentation, and maintenance responsibilities.
-\end{itemize}
-
-\section{Patches for Released Versions}
-\index{Patches for Released Versions}
-\addcontentsline{toc}{subsection}{Patches for Released Versions}
-If you fix a bug in a released version, you should, unless it is
-an absolutely trivial bug, create and release a patch file for the
-bug. The procedure is as follows:
-
-Fix the bug in the branch and in the trunk.
-
-Make a patch file for the branch and add the branch patch to
-the patches directory in both the branch and the trunk.
-The name should be 2.2.4-xxx.patch where xxx is unique, in this case it can
-be "restore", e.g. 2.2.4-restore.patch. Add to the top of the
-file a brief description and instructions for applying it -- see for example
-2.2.4-poll-mount.patch. The best way to create the patch file is as
-follows:
-
-\begin{verbatim}
- (edit) 2.2.4-restore.patch
- (input description)
- (end edit)
-
- svn diff >>2.2.4-restore.patch
-\end{verbatim}
-
-check to make sure no extra junk got put into the patch file (i.e.
-it should have the patch for that bug only).
-
-If there is not a bug report on the problem, create one, then add the
-patch to the bug report.
-
-Then upload it to the 2.2.x release of bacula-patches.
-
-So, in the end, the patch file is:
-\begin{itemize}
-\item Attached to the bug report
-
-\item In Branch-2.2/bacula/patches/...
-
-\item In the trunk
-
-\item Loaded on Source Forge bacula-patches 2.2.x release. When
- you add it, click on the check box to send an Email so that all the
- users that are monitoring SF patches get notified.
-\end{itemize}
-
-
-
-\section{SVN Usage}
-\index{SVN Usage}
-\addcontentsline{toc}{subsection}{SVN Usage}
-
-Please note that if you are familiar with CVS, SVN is very
-similar (and better), but there can be a few surprising
-differences.
-
-The *entire* Bacula SourceForge.net Subversion repository can be
-checked out through SVN with the following command:
-
-\begin{verbatim}
-svn checkout https://bacula.svn.sourceforge.net/svnroot/bacula bacula
-\end{verbatim}
-
-With the above command, you will get everything, which is a very large
-amount of data:
-
-\begin{verbatim}
-branches/
- Branch-1.32a/
- ...
- Branch-2.0/
- import/
- vendor/
-tags/
- Release-1.1/
- ...
- Release-2.0.2/
-trunk/
- bacula/
- docs/
- gui/
- regress/
- rescue/
-\end{verbatim}
-
-Note, you should NEVER commit code to any checkout that you have
-done of a tag. All tags (e.g. Release-1.1, ... Release-2.0.2)
-should be considered read-only.
-
-You may commit code to the most recent item in
-branches (in the above the most recent one is Branch-2.0). If
-you want to commit code to an older branch, then please contact
-Kern first.
-
-You may create your own tags and/or branches, but they should
-have a name clearly distinctive from Branch-, Release-, or Beta-,
-which are official names used by the project. If you create a
-tag, then you should NEVER commit code to it, for the same
-reason noted above -- it should serve as a marker for something
-you released. If you create a branch, then you are free to
-commit to it as you wish.
-
-You may, of course, commit to the trunk.
-
-In summary:
-
-\begin{verbatim}
-branches
- Branch-nnn
-tags
- Release-nnn
- Beta-nnn
-\end{verbatim}
-
-are reserved names to be created only by the project manager (or
-with his OK), where the nnn is any sequence of numbers and
-periods (e.g. 2.0, 2.0.1, ...).
-
-In addition all tags even those that you create are read-only
-forever. Typically tags represent release points either in the
-trunk or in a branch.
-
-
-Coming back to getting source code.
-If you only want the current Bacula source code, you could use:
-
-\begin{verbatim}
-svn checkout https://bacula.svn.sourceforge.net/svnroot/bacula/trunk/bacula bacula
-\end{verbatim}
-
-To view what is in the SVN, point your browser at the following URL:
-http://bacula.svn.sourceforge.net/viewvc/bacula/
-
-Many of the Subversion (svn) commands are almost identical to those that
-you have used for cvs, but some (such as a checkout) can have surprising
-results, so you should take a careful look at the documentation.
-
-Robert has kindly provided the following documentation on the new
-svn repository and how to use it:
-
-Here is the list of branches:
-\begin{verbatim}
- Branch-1.32a
- Branch-1.32e
- Branch-1.34.2
- Branch-1.34.5
- Branch-1.36
- Branch-1.36.1
- Branch-1.36.2
- Branch-1.38
- Branch-2.0
- import
- vendor
-\end{verbatim}
-
-The list of tags is:
-\begin{verbatim}
- Release-1.1 Release-1.19 Release-1.19a Release-1.19b
- Release-1.20 Release-1.21 Release-1.22 Release-1.23
- Release-1.23a Release-1.24 Release-1.25 Release-1.25a
- Release-1.26 Release-1.27 Release-1.27a Release-1.27b
- Release-1.27c Release-1.28 Release-1.29 Release-1.30
- Release-1.31 Release-1.31a Release-1.32 Release-1.32a
- Release-1.32b Release-1.32c Release-1.32d Release-1.32e
- Release-1.32f Release-1.32f-2 Release-1.32f-3 Release-1.32f-4
- Release-1.32f-5 Release-1.34.0 Release-1.34.1 Release-1.34.3
- Release-1.34.4 Release-1.34.5 Release-1.34.6 Release-1.35.1
- Release-1.35.2 Release-1.35.3 Release-1.35.6 Release-1.35.7
- Release-1.35.8 Release-1.36.0 Release-1.36.1 Release-1.36.2
- Release-1.36.3 Release-1.38.0 Release-1.38.1 Release-1.38.10
- Release-1.38.11 Release-1.38.2 Release-1.38.3 Release-1.38.4
- Release-1.38.5 Release-1.38.6 Release-1.38.7 Release-1.38.8
- Release-1.38.9 Release-1.8.1 Release-1.8.2 Release-1.8.3
- Release-1.8.4 Release-1.8.5 Release-1.8.6 Release-2.0.0
- Release-2.0.1 Release-2.0.2
-\end{verbatim}
-
-Here is a list of commands to get you started. The recommended book is
-"Version Control with Subversion", by Ben Collins-Sussmann,
-Brian W. Fitzpatrick, and Michael Pilato, O'Reilly. The book is
-Open Source, so it is also available on line at:
-
-\begin{verbatim}
- http://svnbook.red-bean.com
-\end{verbatim}
-
-Get a list of commands
-
-\begin{verbatim}
- svn help
-\end{verbatim}
-
-Get a help with a command
-
-\begin{verbatim}
- svn help command
-\end{verbatim}
-
-Checkout the HEAD revision of all modules from the project into the
-directory bacula-new
-
-\begin{verbatim}
- svn co https://bacula.svn.sourceforge.net/svnroot/bacula/trunk bacula.new
-\end{verbatim}
-
-Checkout the HEAD revision of the bacula module into the bacula subdirectory
-
-\begin{verbatim}
- svn checkout https://bacula.svn.sourceforge.net/svnroot/bacula/trunk/bacula
-\end{verbatim}
-
-See which files have changed in the working copy
-
-\begin{verbatim}
- svn status
-\end{verbatim}
-
-See which files are out of date
-
-\begin{verbatim}
- svn status -u
-\end{verbatim}
-
-Add a new file file.c
-
-\begin{verbatim}
- svn add file.c
-\end{verbatim}
-
-Create a new directory
-
-\begin{verbatim}
- svn mkdir newdir
-\end{verbatim}
-
-Delete an obsolete file
-
-\begin{verbatim}
- svn delete file.c
-\end{verbatim}
-
-Rename a file
-
-\begin{verbatim}
- svn move file.c newfile.c
-\end{verbatim}
-
-Move a file to a new location
-
-\begin{verbatim}
- svn move file.c ../newdir/file.c
-\end{verbatim}
-
-Copy a file retaining the original history in the new file
-
-\begin{verbatim}
- svn copy file.c newfile.c
-\end{verbatim}
-
-Update the working copy with the outstanding changes
-
-\begin{verbatim}
- svn update
-\end{verbatim}
-
-Compare working copy with the repository
-
-\begin{verbatim}
- svn diff file.c
-\end{verbatim}
-
-Commit the changes in the local working copy
-
-\begin{verbatim}
- svn commit
-\end{verbatim}
-
-Specify which files are ignored in the current directory
-
-\begin{verbatim}
- svn propedit svn:ignore .
-\end{verbatim}
-
-Mark a file to be executable
-
-\begin{verbatim}
- svn propset svn:executable '*' prog.sh
-\end{verbatim}
-
-Unmark a file as executable
-
-\begin{verbatim}
- svn propdel svn:executable prog.sh
-\end{verbatim}
-
-List a file's properties
-
-\begin{verbatim}
- svn proplist file.c
-\end{verbatim}
-
-Create a branch for a new version
-
-\begin{verbatim}
- svn copy https://bacula.svn.sourceforge.net/svnroot/bacula/trunk \
- https://bacula.svn.sourceforge.net/svnroot/bacula/branches/Branch-2.1
-\end{verbatim}
-
-Tag a version for a new release
-
-\begin{verbatim}
- svn copy https://bacula.svn.sourceforge.net/svnroot/bacula/branches/Branch-2.1 \
-       https://bacula.svn.sourceforge.net/svnroot/bacula/tags/Release-2.1
-\end{verbatim}
-
-
-Let's say you are working in the directory scripts. You would then do:
-
-\begin{verbatim}
-cd scripts
-(edit some files)
-\end{verbatim}
-
-when you are happy with your changes, you can do the following:
-
-\begin{verbatim}
-cd bacula (to your top level directory)
-svn diff > my-changes.patch
-\end{verbatim}
-
-When the command is done, you can look in the file my-changes.patch
-and you will see all the changes you have made to your copy of the
-repository. Make sure that you understand all the changes that
-it reports before proceeding. If you modified files that you
-do not want to commit to the main repository, you can simply delete
-them from your local directory, and they will be restored from the
-repository with the "svn update" that is shown below. Normally, you
-should not find changes to files that you do not want to commit, and
-if you find yourself in that position a lot, you are probably doing
-something wrong.
-
-Let's assume that now you want to commit your changes to the main
-SVN repository.
-
-First do:
-
-\begin{verbatim}
-cd bacula
-svn update
-\end{verbatim}
-
-When you do this, it will pull any changes made by other developers into
-your local copy of the repository, and it will check for conflicts. If there
-are any, it will tell you, and you will need to resolve them. The problems
-of resolving conflicts are a bit more than this document can cover, but
-you can examine the files it claims have conflicts and look for \lt{}\lt{}\lt{}\lt{}
-or look in the .rej files that it creates. If you have problems, just ask
-on the developer's list.
-
-Note, doing the above "svn update" is not absolutely necessary. There are
-times when you may be working on code and you want to commit it, but you
-explicitly do not want to move up to the latest version of the code in
-the SVN. If that is the case, you can simply skip the "svn update" and
-do the commit shown below. If the commit fails because of a conflict, it
-will tell you, and you must resolve the conflict before it will permit
-you to do the commit.
-
-Once your local copy of the repository has been updated, you can now
-commit your changes:
-
-\begin{verbatim}
-svn commit -m "Some comment about what you changed"
-\end{verbatim}
-
-or if you really only want to commit a single file, you can
-do:
-
-\begin{verbatim}
-svn commit -m "comment" scripts/file-I-edited
-\end{verbatim}
-
-Note, if you have done a build in your directory, or you have added
-other new files, the commit will update only the files that are
-actually in the repository. For example, none of the object files
-are stored in the repository, so when you do a commit, those object
-files will simply be ignored.
-
-If you want to add new files or remove files from the main SVN
-repository, and you are not experienced with SVN, please ask Kern
-to do it. If you follow the simple steps above, it is unlikely that
-you will do any damage to the repository, and if you do, it is always
-possible for us to recover, but it can be painful.
-
-If you are only working in one subdirectory of say the bacula project,
-for example, the scripts directory, you can do your commit from
-that subdirectory, and only the changes in that directory and all its
-subdirectories will be committed. This can be helpful for translators.
-If you are doing a French translation, you will be working in
-docs/manual-fr, and if you are always cd'ed into that directory when
-doing your commits, your commit will effect only that directory. As
-long as you are careful only to change files that you want changed,
-you have little to worry about.
-
-\section{Subversion Resources}
-\index{Subversion (svn) Resources}
-\addcontentsline{toc}{subsection}{Subversion Resources}
-
-\begin{verbatim}
-cvs2svn Statistics:
-------------------
-Total CVS Files: 3286
-Total CVS Revisions: 28924
-Total Unique Tags: 63
-Total Unique Branches: 11
-CVS Repos Size in KB: 232421
-Total SVN Commits: 4116
-First Revision Date: Tue Apr 23 12:42:57 2002
-Last Revision Date: Tue Feb 6 06:37:57 2007
-\end{verbatim}
-
-The new Subversion repository size on Robert's machine:
-
-\begin{verbatim}
-4.0K bacula-tst/dav
-12K bacula-tst/locks
-40K bacula-tst/hooks
-16K bacula-tst/conf
-190M bacula-tst/db/revs
-17M bacula-tst/db/revprops
-4.0K bacula-tst/db/transactions
-206M bacula-tst/db
-206M bacula-tst
-\end{verbatim}
-
-
-Main Subversion Web Page
-\elink{http://subversion.tigris.org}{http://subversion.tigris.org}
-
-Subversion Book
-\elink{http://svnbook.red-bean.com}{http://svnbook.red-bean.com}
-
-Subversion Clients
-\elink{http://subversion.tigris.org/project\_packages.html}{http://subversion.tigris.org/project\_packages.html}
-
- (For Windows users the TortoiseSVN package is awesome)
-
-GUI UNIX client link
-\elink{http://rapidsvn.tigris.org/}{http://rapidsvn.tigris.org/}
-
-A nice KDE GUI client:
-kdesvn
-
-
-
-\section{Developing Bacula}
-\index{Developing Bacula}
-\index{Bacula!Developing}
-\addcontentsline{toc}{subsubsection}{Developing Bacula}
-
-Typically the simplest way to develop Bacula is to open one xterm window
-pointing to the source directory you wish to update; a second xterm window at
-the top source directory level, and a third xterm window at the bacula
-directory \lt{}top\gt{}/src/bacula. After making source changes in one of the
-directories, in the top source directory xterm, build the source, and start
-the daemons by entering:
-
-make and
-
-./startit then in the enter:
-
-./console or
-
-./gnome-console to start the Console program. Enter any commands for testing.
-For example: run kernsverify full.
-
-Note, the instructions here to use {\bf ./startit} are different from using a
-production system where the administrator starts Bacula by entering {\bf
-./bacula start}. This difference allows a development version of {\bf Bacula}
-to be run on a computer at the same time that a production system is running.
-The {\bf ./startit} strip starts {\bf Bacula} using a different set of
-configuration files, and thus permits avoiding conflicts with any production
-system.
-
-To make additional source changes, exit from the Console program, and in the
-top source directory, stop the daemons by entering:
-
-./stopit then repeat the process.
-
-\subsection{Debugging}
-\index{Debugging}
-\addcontentsline{toc}{subsubsection}{Debugging}
-
-Probably the first thing to do is to turn on debug output.
-
-A good place to start is with a debug level of 20 as in {\bf ./startit -d20}.
-The startit command starts all the daemons with the same debug level.
-Alternatively, you can start the appropriate daemon with the debug level you
-want. If you really need more info, a debug level of 60 is not bad, and for
-just about everything a level of 200.
-
-\subsection{Using a Debugger}
-\index{Using a Debugger}
-\index{Debugger!Using a}
-\addcontentsline{toc}{subsubsection}{Using a Debugger}
-
-If you have a serious problem such as a segmentation fault, it can usually be
-found quickly using a good multiple thread debugger such as {\bf gdb}. For
-example, suppose you get a segmentation violation in {\bf bacula-dir}. You
-might use the following to find the problem:
-
-\lt{}start the Storage and File daemons\gt{}
-cd dird
-gdb ./bacula-dir
-run -f -s -c ./dird.conf
-\lt{}it dies with a segmentation fault\gt{}
-where
-The {\bf -f} option is specified on the {\bf run} command to inhibit {\bf
-dird} from going into the background. You may also want to add the {\bf -s}
-option to the run command to disable signals which can potentially interfere
-with the debugging.
-
-As an alternative to using the debugger, each {\bf Bacula} daemon has a built
-in back trace feature when a serious error is encountered. It calls the
-debugger on itself, produces a back trace, and emails the report to the
-developer. For more details on this, please see the chapter in the main Bacula
-manual entitled ``What To Do When Bacula Crashes (Kaboom)''.
-
-\subsection{Memory Leaks}
-\index{Leaks!Memory}
-\index{Memory Leaks}
-\addcontentsline{toc}{subsubsection}{Memory Leaks}
-
-Because Bacula runs routinely and unattended on client and server machines, it
-may run for a long time. As a consequence, from the very beginning, Bacula
-uses SmartAlloc to ensure that there are no memory leaks. To make detection of
-memory leaks effective, all Bacula code that dynamically allocates memory MUST
-have a way to release it. In general when the memory is no longer needed, it
-should be immediately released, but in some cases, the memory will be held
-during the entire time that Bacula is executing. In that case, there MUST be a
-routine that can be called at termination time that releases the memory. In
-this way, we will be able to detect memory leaks. Be sure to immediately
-correct any and all memory leaks that are printed at the termination of the
-daemons.
-
-\subsection{Special Files}
-\index{Files!Special}
-\index{Special Files}
-\addcontentsline{toc}{subsubsection}{Special Files}
-
-Kern uses files named 1, 2, ... 9 with any extension as scratch files. Thus
-any files with these names are subject to being rudely deleted at any time.
-
-\subsection{When Implementing Incomplete Code}
-\index{Code!When Implementing Incomplete}
-\index{When Implementing Incomplete Code}
-\addcontentsline{toc}{subsubsection}{When Implementing Incomplete Code}
-
-Please identify all incomplete code with a comment that contains
-
-\begin{verbatim}
-***FIXME***
-\end{verbatim}
-
-where there are three asterisks (*) before and after the word
-FIXME (in capitals) and no intervening spaces. This is important as it allows
-new programmers to easily recognize where things are partially implemented.
-
-\subsection{Bacula Source File Structure}
-\index{Structure!Bacula Source File}
-\index{Bacula Source File Structure}
-\addcontentsline{toc}{subsubsection}{Bacula Source File Structure}
-
-The distribution generally comes as a tar file of the form {\bf
-bacula.x.y.z.tar.gz} where x, y, and z are the version, release, and update
-numbers respectively.
-
-Once you detar this file, you will have a directory structure as follows:
-
-\footnotesize
-\begin{verbatim}
-|
-Tar file:
-|- depkgs
- |- mtx (autochanger control program + tape drive info)
- |- sqlite (SQLite database program)
-
-Tar file:
-|- depkgs-win32
- |- pthreads (Native win32 pthreads library -- dll)
- |- zlib (Native win32 zlib library)
- |- wx (wxWidgets source code)
-
-Project bacula:
-|- bacula (main source directory containing configuration
- | and installation files)
- |- autoconf (automatic configuration files, not normally used
- | by users)
- |- intl (programs used to translate)
- |- platforms (OS specific installation files)
- |- redhat (Red Hat installation)
- |- solaris (Sun installation)
- |- freebsd (FreeBSD installation)
- |- irix (Irix installation -- not tested)
- |- unknown (Default if system not identified)
- |- po (translations of source strings)
- |- src (source directory; contains global header files)
- |- cats (SQL catalog database interface directory)
- |- console (bacula user agent directory)
- |- dird (Director daemon)
- |- filed (Unix File daemon)
- |- win32 (Win32 files to make bacula-fd be a service)
- |- findlib (Unix file find library for File daemon)
- |- gnome-console (GNOME version of console program)
- |- lib (General Bacula library)
- |- stored (Storage daemon)
- |- tconsole (Tcl/tk console program -- not yet working)
- |- testprogs (test programs -- normally only in Kern's tree)
- |- tools (Various tool programs)
- |- win32 (Native Win32 File daemon)
- |- baculafd (Visual Studio project file)
- |- compat (compatibility interface library)
- |- filed (links to src/filed)
- |- findlib (links to src/findlib)
- |- lib (links to src/lib)
- |- console (beginning of native console program)
- |- wx-console (wxWidget console Win32 specific parts)
- |- wx-console (wxWidgets console main source program)
-
-Project regress:
-|- regress (Regression scripts)
- |- bin (temporary directory to hold Bacula installed binaries)
- |- build (temporary directory to hold Bacula source)
- |- scripts (scripts and .conf files)
- |- tests (test scripts)
- |- tmp (temporary directory for temp files)
- |- working (temporary working directory for Bacula daemons)
-
-Project docs:
-|- docs (documentation directory)
- |- developers (Developer's guide)
- |- home-page (Bacula's home page source)
- |- manual (html document directory)
- |- manual-fr (French translation)
- |- manual-de (German translation)
- |- techlogs (Technical development notes);
-
-Project rescue:
-|- rescue (Bacula rescue CDROM)
- |- linux (Linux rescue CDROM)
- |- cdrom (Linux rescue CDROM code)
- ...
- |- solaris (Solaris rescue -- incomplete)
- |- freebsd (FreeBSD rescue -- incomplete)
-
-Project gui:
-|- gui (Bacula GUI projects)
- |- bacula-web (Bacula web php management code)
- |- bimagemgr (Web application for burning CDROMs)
-
-
-\end{verbatim}
-\normalsize
-
-\subsection{Header Files}
-\index{Header Files}
-\index{Files!Header}
-\addcontentsline{toc}{subsubsection}{Header Files}
-
-Please carefully follow the scheme defined below as it permits in general only
-two header file includes per C file, and thus vastly simplifies programming.
-With a large complex project like Bacula, it isn't always easy to ensure that
-the right headers are invoked in the right order (there are a few kludges to
-make this happen -- i.e. in a few include files because of the chicken and egg
-problem, certain references to typedefs had to be replaced with {\bf void} ).
-
-Every file should include {\bf bacula.h}. It pulls in just about everything,
-with very few exceptions. If you have system dependent ifdefing, please do it
-in {\bf baconfig.h}. The version number and date are kept in {\bf version.h}.
-
-Each of the subdirectories (console, cats, dird, filed, findlib, lib, stored,
-...) contains a single directory dependent include file generally the name of
-the directory, which should be included just after the include of {\bf
-bacula.h}. This file (for example, for the dird directory, it is {\bf dird.h})
-contains either definitions of things generally needed in this directory, or
-it includes the appropriate header files. It always includes {\bf protos.h}.
-See below.
-
-Each subdirectory contains a header file named {\bf protos.h}, which contains
-the prototypes for subroutines exported by files in that directory. {\bf
-protos.h} is always included by the main directory dependent include file.
-
-\subsection{Programming Standards}
-\index{Standards!Programming}
-\index{Programming Standards}
-\addcontentsline{toc}{subsubsection}{Programming Standards}
-
-For the most part, all code should be written in C unless there is a burning
-reason to use C++, and then only the simplest C++ constructs will be used.
-Note, Bacula is slowly evolving to use more and more C++.
-
-Code should have some documentation -- not a lot, but enough so that I can
-understand it. Look at the current code, and you will see that I document more
-than most, but am definitely not a fanatic.
-
-I prefer simple linear code where possible. Gotos are strongly discouraged
-except for handling an error to either bail out or to retry some code, and
-such use of gotos can vastly simplify the program.
-
-Remember this is a C program that is migrating to a {\bf tiny} subset of C++,
-so be conservative in your use of C++ features.
-
-\subsection{Do Not Use}
-\index{Use!Do Not}
-\index{Do Not Use}
-\addcontentsline{toc}{subsubsection}{Do Not Use}
-
-\begin{itemize}
- \item STL -- it is totally incomprehensible.
-\end{itemize}
-
-\subsection{Avoid if Possible}
-\index{Possible!Avoid if}
-\index{Avoid if Possible}
-\addcontentsline{toc}{subsubsection}{Avoid if Possible}
-
-\begin{itemize}
-\item Using {\bf void *} because this generally means that one must
- using casting, and in C++ casting is rather ugly. It is OK to use
- void * to pass structure address where the structure is not known
- to the routines accepting the packet (typically callback routines).
- However, declaring "void *buf" is a bad idea. Please use the
- correct types whenever possible.
-
-\item Using undefined storage specifications such as (short, int, long,
- long long, size\_t ...). The problem with all these is that the number of bytes
- they allocate depends on the compiler and the system. Instead use
- Bacula's types (int8\_t, uint8\_t, int32\_t, uint32\_t, int64\_t, and
- uint64\_t). This guarantees that the variables are given exactly the
- size you want. Please try at all possible to avoid using size\_t ssize\_t
- and the such. They are very system dependent. However, some system
- routines may need them, so their use is often unavoidable.
-
-\item Returning a malloc'ed buffer from a subroutine -- someone will forget
- to release it.
-
-\item Heap allocation (malloc) unless needed -- it is expensive. Use
- POOL\_MEM instead.
-
-\item Templates -- they can create portability problems.
-
-\item Fancy or tricky C or C++ code, unless you give a good explanation of
- why you used it.
-
-\item Too much inheritance -- it can complicate the code, and make reading it
- difficult (unless you are in love with colons)
-
-\end{itemize}
-
-\subsection{Do Use Whenever Possible}
-\index{Possible!Do Use Whenever}
-\index{Do Use Whenever Possible}
-\addcontentsline{toc}{subsubsection}{Do Use Whenever Possible}
-
-\begin{itemize}
-\item Locking and unlocking within a single subroutine.
-
-\item A single point of exit from all subroutines. A goto is
- perfectly OK to use to get out early, but only to a label
- named bail\_out, and possibly an ok\_out. See current code
- examples.
-
-\item Malloc and free within a single subroutine.
-
-\item Comments and global explanations on what your code or algorithm does.
-
-\end{itemize}
-
-\subsection{Indenting Standards}
-\index{Standards!Indenting}
-\index{Indenting Standards}
-\addcontentsline{toc}{subsubsection}{Indenting Standards}
-
-I cannot stand code indented 8 columns at a time. This makes the code
-unreadable. Even 4 at a time uses a lot of space, so I have adopted indenting
-3 spaces at every level. Note, indention is the visual appearance of the
-source on the page, while tabbing is replacing a series of up to 8 spaces from
-a tab character.
-
-The closest set of parameters for the Linux {\bf indent} program that will
-produce reasonably indented code are:
-
-\footnotesize
-\begin{verbatim}
--nbad -bap -bbo -nbc -br -brs -c36 -cd36 -ncdb -ce -ci3 -cli0
--cp36 -d0 -di1 -ndj -nfc1 -nfca -hnl -i3 -ip0 -l85 -lp -npcs
--nprs -npsl -saf -sai -saw -nsob -nss -nbc -ncs -nbfda
-\end{verbatim}
-\normalsize
-
-You can put the above in your .indent.pro file, and then just invoke indent on
-your file. However, be warned. This does not produce perfect indenting, and it
-will mess up C++ class statements pretty badly.
-
-Braces are required in all if statements (missing in some very old code). To
-avoid generating too many lines, the first brace appears on the first line
-(e.g. of an if), and the closing brace is on a line by itself. E.g.
-
-\footnotesize
-\begin{verbatim}
- if (abc) {
- some_code;
- }
-\end{verbatim}
-\normalsize
-
-Just follow the convention in the code. Originally I indented case clauses
-under a switch(), but now I prefer non-indented cases.
-
-\footnotesize
-\begin{verbatim}
- switch (code) {
- case 'A':
- do something
- break;
- case 'B':
- again();
- break;
- default:
- break;
- }
-\end{verbatim}
-\normalsize
-
-Avoid using // style comments except for temporary code or turning off debug
-code. Standard C comments are preferred (this also keeps the code closer to
-C).
-
-Attempt to keep all lines less than 85 characters long so that the whole line
-of code is readable at one time. This is not a rigid requirement.
-
-Always put a brief description at the top of any new file created describing
-what it does and including your name and the date it was first written. Please
-don't forget any Copyrights and acknowledgments if it isn't 100\% your code.
-Also, include the Bacula copyright notice that is in {\bf src/c}.
-
-In general you should have two includes at the top of the an include for the
-particular directory the code is in, for includes are needed, but this should
-be rare.
-
-In general (except for self-contained packages), prototypes should all be put
-in {\bf protos.h} in each directory.
-
-Always put space around assignment and comparison operators.
-
-\footnotesize
-\begin{verbatim}
- a = 1;
- if (b >= 2) {
- cleanup();
- }
-\end{verbatim}
-\normalsize
-
-but your can compress things in a {\bf for} statement:
-
-\footnotesize
-\begin{verbatim}
- for (i=0; i < del.num_ids; i++) {
- ...
-\end{verbatim}
-\normalsize
-
-Don't overuse the inline if (?:). A full {\bf if} is preferred, except in a
-print statement, e.g.:
-
-\footnotesize
-\begin{verbatim}
- if (ua->verbose \&& del.num_del != 0) {
- bsendmsg(ua, _("Pruned %d %s on Volume %s from catalog.\n"), del.num_del,
- del.num_del == 1 ? "Job" : "Jobs", mr->VolumeName);
- }
-\end{verbatim}
-\normalsize
-
-Leave a certain amount of debug code (Dmsg) in code you submit, so that future
-problems can be identified. This is particularly true for complicated code
-likely to break. However, try to keep the debug code to a minimum to avoid
-bloating the program and above all to keep the code readable.
-
-Please keep the same style in all new code you develop. If you include code
-previously written, you have the option of leaving it with the old indenting
-or re-indenting it. If the old code is indented with 8 spaces, then please
-re-indent it to Bacula standards.
-
-If you are using {\bf vim}, simply set your tabstop to 8 and your shiftwidth
-to 3.
-
-\subsection{Tabbing}
-\index{Tabbing}
-\addcontentsline{toc}{subsubsection}{Tabbing}
-
-Tabbing (inserting the tab character in place of spaces) is as normal on all
-Unix systems -- a tab is converted space up to the next column multiple of 8.
-My editor converts strings of spaces to tabs automatically -- this results in
-significant compression of the files. Thus, you can remove tabs by replacing
-them with spaces if you wish. Please don't confuse tabbing (use of tab
-characters) with indenting (visual alignment of the code).
-
-\subsection{Don'ts}
-\index{Don'ts}
-\addcontentsline{toc}{subsubsection}{Don'ts}
-
-Please don't use:
-
-\footnotesize
-\begin{verbatim}
-strcpy()
-strcat()
-strncpy()
-strncat();
-sprintf()
-snprintf()
-\end{verbatim}
-\normalsize
-
-They are system dependent and un-safe. These should be replaced by the Bacula
-safe equivalents:
-
-\footnotesize
-\begin{verbatim}
-char *bstrncpy(char *dest, char *source, int dest_size);
-char *bstrncat(char *dest, char *source, int dest_size);
-int bsnprintf(char *buf, int32_t buf_len, const char *fmt, ...);
-int bvsnprintf(char *str, int32_t size, const char *format, va_list ap);
-\end{verbatim}
-\normalsize
-
-See src/lib/bsys.c for more details on these routines.
-
-Don't use the {\bf \%lld} or the {\bf \%q} printf format editing types to edit
-64 bit integers -- they are not portable. Instead, use {\bf \%s} with {\bf
-edit\_uint64()}. For example:
-
-\footnotesize
-\begin{verbatim}
- char buf[100];
- uint64_t num = something;
- char ed1[50];
- bsnprintf(buf, sizeof(buf), "Num=%s\n", edit_uint64(num, ed1));
-\end{verbatim}
-\normalsize
-
-The edit buffer {\bf ed1} must be at least 27 bytes long to avoid overflow.
-See src/lib/edit.c for more details. If you look at the code, don't start
-screaming that I use {\bf lld}. I actually use subtle trick taught to me by
-John Walker. The {\bf lld} that appears in the editing routine is actually
-{\bf \#define} to a what is needed on your OS (usually ``lld'' or ``q'') and
-is defined in autoconf/configure.in for each OS. C string concatenation causes
-the appropriate string to be concatenated to the ``\%''.
-
-Also please don't use the STL or Templates or any complicated C++ code.
-
-\subsection{Message Classes}
-\index{Classes!Message}
-\index{Message Classes}
-\addcontentsline{toc}{subsubsection}{Message Classes}
-
-Currently, there are five classes of messages: Debug, Error, Job, Memory,
-and Queued.
-
-\subsection{Debug Messages}
-\index{Messages!Debug}
-\index{Debug Messages}
-\addcontentsline{toc}{subsubsection}{Debug Messages}
-
-Debug messages are designed to be turned on at a specified debug level and are
-always sent to STDOUT. There are designed to only be used in the development
-debug process. They are coded as:
-
-DmsgN(level, message, arg1, ...) where the N is a number indicating how many
-arguments are to be substituted into the message (i.e. it is a count of the
-number arguments you have in your message -- generally the number of percent
-signs (\%)). {\bf level} is the debug level at which you wish the message to
-be printed. message is the debug message to be printed, and arg1, ... are the
-arguments to be substituted. Since not all compilers support \#defines with
-varargs, you must explicitly specify how many arguments you have.
-
-When the debug message is printed, it will automatically be prefixed by the
-name of the daemon which is running, the filename where the Dmsg is, and the
-line number within the file.
-
-Some actual examples are:
-
-Dmsg2(20, ``MD5len=\%d MD5=\%s\textbackslash{}n'', strlen(buf), buf);
-
-Dmsg1(9, ``Created client \%s record\textbackslash{}n'', client->hdr.name);
-
-\subsection{Error Messages}
-\index{Messages!Error}
-\index{Error Messages}
-\addcontentsline{toc}{subsubsection}{Error Messages}
-
-Error messages are messages that are related to the daemon as a whole rather
-than a particular job. For example, an out of memory condition my generate an
-error message. They should be very rarely needed. In general, you should be
-using Job and Job Queued messages (Jmsg and Qmsg). They are coded as:
-
-EmsgN(error-code, level, message, arg1, ...) As with debug messages, you must
-explicitly code the of arguments to be substituted in the message. error-code
-indicates the severity or class of error, and it may be one of the following:
-
-\addcontentsline{lot}{table}{Message Error Code Classes}
-\begin{longtable}{lp{3in}}
-{{\bf M\_ABORT} } & {Causes the daemon to immediately abort. This should be
-used only in extreme cases. It attempts to produce a traceback. } \\
-{{\bf M\_ERROR\_TERM} } & {Causes the daemon to immediately terminate. This
-should be used only in extreme cases. It does not produce a traceback. } \\
-{{\bf M\_FATAL} } & {Causes the daemon to terminate the current job, but the
-daemon keeps running } \\
-{{\bf M\_ERROR} } & {Reports the error. The daemon and the job continue
-running } \\
-{{\bf M\_WARNING} } & {Reports an warning message. The daemon and the job
-continue running } \\
-{{\bf M\_INFO} } & {Reports an informational message.}
-
-\end{longtable}
-
-There are other error message classes, but they are in a state of being
-redesigned or deprecated, so please do not use them. Some actual examples are:
-
-
-Emsg1(M\_ABORT, 0, ``Cannot create message thread: \%s\textbackslash{}n'',
-strerror(status));
-
-Emsg3(M\_WARNING, 0, ``Connect to File daemon \%s at \%s:\%d failed. Retrying
-...\textbackslash{}n'', client-\gt{}hdr.name, client-\gt{}address,
-client-\gt{}port);
-
-Emsg3(M\_FATAL, 0, ``bdird\lt{}filed: bad response from Filed to \%s command:
-\%d \%s\textbackslash{}n'', cmd, n, strerror(errno));
-
-\subsection{Job Messages}
-\index{Job Messages}
-\index{Messages!Job}
-\addcontentsline{toc}{subsubsection}{Job Messages}
-
-Job messages are messages that pertain to a particular job such as a file that
-could not be saved, or the number of files and bytes that were saved. They
-Are coded as:
-\begin{verbatim}
-Jmsg(jcr, M\_FATAL, 0, "Text of message");
-\end{verbatim}
-A Jmsg with M\_FATAL will fail the job. The Jmsg() takes varargs so can
-have any number of arguments for substituted in a printf like format.
-Output from the Jmsg() will go to the Job report.
-<br>
-If the Jmsg is followed with a number such as Jmsg1(...), the number
-indicates the number of arguments to be substituted (varargs is not
-standard for \#defines), and what is more important is that the file and
-line number will be prefixed to the message. This permits a sort of debug
-from user's output.
-
-\subsection{Queued Job Messages}
-\index{Queued Job Messages}
-\index{Messages!Job}
-\addcontentsline{toc}{subsubsection}{Queued Job Messages}
-Queued Job messages are similar to Jmsg()s except that the message is
-Queued rather than immediately dispatched. This is necessary within the
-network subroutines and in the message editing routines. This is to prevent
-recursive loops, and to ensure that messages can be delivered even in the
-event of a network error.
-
-
-\subsection{Memory Messages}
-\index{Messages!Memory}
-\index{Memory Messages}
-\addcontentsline{toc}{subsubsection}{Memory Messages}
-
-Memory messages are messages that are edited into a memory buffer. Generally
-they are used in low level routines such as the low level device file dev.c in
-the Storage daemon or in the low level Catalog routines. These routines do not
-generally have access to the Job Control Record and so they return error
-essages reformatted in a memory buffer. Mmsg() is the way to do this.
+++ /dev/null
-# This module does multiple indices, supporting the style of the LaTex 'index'
-# package.
-
-# Version Information:
-# 16-Feb-2005 -- Original Creation. Karl E. Cunningham
-# 14-Mar-2005 -- Clarified and Consolodated some of the code.
-# Changed to smoothly handle single and multiple indices.
-
-# Two LaTeX index formats are supported...
-# --- SINGLE INDEX ---
-# \usepackage{makeidx}
-# \makeindex
-# \index{entry1}
-# \index{entry2}
-# \index{entry3}
-# ...
-# \printindex
-#
-# --- MULTIPLE INDICES ---
-#
-# \usepackage{makeidx}
-# \usepackage{index}
-# \makeindex -- latex2html doesn't care but LaTeX does.
-# \newindex{ref1}{ext1}{ext2}{title1}
-# \newindex{ref2}{ext1}{ext2}{title2}
-# \newindex{ref3}{ext1}{ext2}{title3}
-# \index[ref1]{entry1}
-# \index[ref1]{entry2}
-# \index[ref3]{entry3}
-# \index[ref2]{entry4}
-# \index{entry5}
-# \index[ref3]{entry6}
-# ...
-# \printindex[ref1]
-# \printindex[ref2]
-# \printindex[ref3]
-# \printindex
-# ___________________
-#
-# For the multiple-index style, each index is identified by the ref argument to \newindex, \index,
-# and \printindex. A default index is allowed, which is indicated by omitting the optional
-# argument. The default index does not require a \newindex command. As \index commands
-# are encountered, their entries are stored according
-# to the ref argument. When the \printindex command is encountered, the stored index
-# entries for that argument are retrieved and printed. The title for each index is taken
-# from the last argument in the \newindex command.
-# While processing \index and \printindex commands, if no argument is given the index entries
-# are built into a default index. The title of the default index is simply "Index".
-# This makes the difference between single- and multiple-index processing trivial.
-#
-# Another method can be used by omitting the \printindex command and just using \include to
-# pull in index files created by the makeindex program. These files will start with
-# \begin{theindex}. This command is used to determine where to print the index. Using this
-# approach, the indices will be output in the same order as the newindex commands were
-# originally found (see below). Using a combination of \printindex and \include{indexfile} has not
-# been tested and may produce undesireable results.
-#
-# The index data are stored in a hash for later sorting and output. As \printindex
-# commands are handled, the order in which they were found in the tex filea is saved,
-# associated with the ref argument to \printindex.
-#
-# We use the original %index hash to store the index data into. We append a \002 followed by the
-# name of the index to isolate the entries in different indices from each other. This is necessary
-# so that different indices can have entries with the same name. For the default index, the \002 is
-# appended without the name.
-#
-# Since the index order in the output cannot be determined if the \include{indexfile}
-# command is used, the order will be assumed from the order in which the \newindex
-# commands were originally seen in the TeX files. This order is saved as well as the
-# order determined from any printindex{ref} commands. If \printindex commnads are used
-# to specify the index output, that order will be used. If the \include{idxfile} command
-# is used, the order of the original newindex commands will be used. In this case the
-# default index will be printed last since it doesn't have a corresponding \newindex
-# command and its order cannot be determined. Mixing \printindex and \include{idxfile}
-# commands in the same file is likely to produce less than satisfactory results.
-#
-#
-# The hash containing index data is named %indices. It contains the following data:
-#{
-# 'title' => {
-# $ref1 => $indextitle ,
-# $ref2 => $indextitle ,
-# ...
-# },
-# 'newcmdorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index.
-# 'printindorder' => [ ref1, ref2, ..., * ], # asterisk indicates the position of the default index.
-#}
-
-
-# Globals to handle multiple indices.
-my %indices;
-
-# This tells the system to use up to 7 words in index entries.
-$WORDS_IN_INDEX = 10;
-
-# KEC 2-18-05
-# Handles the \newindex command. This is called if the \newindex command is
-# encountered in the LaTex source. Gets the index ref and title from the arguments.
-# Saves the index ref and title.
-# Note that we are called once to handle multiple \newindex commands that are
-# newline-separated.
-sub do_cmd_newindex {
- my $data = shift;
- # The data is sent to us as fields delimited by their ID #'s. We extract the
- # fields.
- foreach my $line (split("\n",$data)) {
- my @fields = split (/(?:\<\#\d+?\#\>)+/,$line);
-
- # The index name and title are the second and fourth fields in the data.
- if ($line =~ /^</ or $line =~ /^\\newindex/) {
- my ($indexref,$indextitle) = ($fields[1],$fields[4]);
- $indices{'title'}{$indexref} = $indextitle;
- push (@{$indices{'newcmdorder'}},$indexref);
- }
- }
-}
-
-
-# KEC -- Copied from makeidx.perl and modified to do multiple indices.
-# Processes an \index entry from the LaTex file.
-# Gets the optional argument from the index command, which is the name of the index
-# into which to place the entry.
-# Drops the brackets from the index_name
-# Puts the index entry into the html stream
-# Creates the tokenized index entry (which also saves the index entry info
-sub do_real_index {
- local($_) = @_;
- local($pat,$idx_entry,$index_name);
- # catches opt-arg from \index commands for index.sty
- $index_name = &get_next_optional_argument;
- $index_name = "" unless defined $index_name;
- # Drop leading and trailing brackets from the index name.
- $index_name =~ s/^\[|\]$//g;
-
- $idx_entry = &missing_braces unless (
- (s/$next_pair_pr_rx/$pat=$1;$idx_entry=$2;''/e)
- ||(s/$next_pair_rx/$pat=$1;$idx_entry=$2;''/e));
-
- if ($index_name and defined $idx_entry and
- !defined $indices{'title'}{$index_name}) {
- print STDERR "\nInvalid Index Name: \\index \[$index_name\]\{$idx_entry\}\n";
- }
-
- $idx_entry = &named_index_entry($pat, $idx_entry,$index_name);
- $idx_entry.$_;
-}
-
-# Creates and saves an index entry in the index hashes.
-# Modified to do multiple indices.
-# Creates an index_key that allows index entries to have the same characteristics but be in
-# different indices. This index_key is the regular key with the index name appended.
-# Save the index order for the entry in the %index_order hash.
-sub named_index_entry {
- local($br_id, $str, $index_name) = @_;
- my ($index_key);
- # escape the quoting etc characters
- # ! -> \001
- # @ -> \002
- # | -> \003
- $* = 1; $str =~ s/\n\s*/ /g; $* = 0; # remove any newlines
- # protect \001 occurring with images
- $str =~ s/\001/\016/g; # 0x1 to 0xF
- $str =~ s/\\\\/\011/g; # Double backslash -> 0xB
- $str =~ s/\\;SPMquot;/\012/g; # \;SPMquot; -> 0xC
- $str =~ s/;SPMquot;!/\013/g; # ;SPMquot; -> 0xD
- $str =~ s/!/\001/g; # Exclamation point -> 0x1
- $str =~ s/\013/!/g; # 0xD -> Exclaimation point
- $str =~ s/;SPMquot;@/\015/g; # ;SPMquot;@ to 0xF
- $str =~ s/@/\002/g; # At sign -> 0x2
- $str =~ s/\015/@/g; # 0xF to At sign
- $str =~ s/;SPMquot;\|/\017/g; # ;SMPquot;| to 0x11
- $str =~ s/\|/\003/g; # Vertical line to 0x3
- $str =~ s/\017/|/g; # 0x11 to vertical line
- $str =~ s/;SPMquot;(.)/\1/g; # ;SPMquot; -> whatever the next character is
- $str =~ s/\012/;SPMquot;/g; # 0x12 to ;SPMquot;
- $str =~ s/\011/\\\\/g; # 0x11 to double backslash
- local($key_part, $pageref) = split("\003", $str, 2);
-
- # For any keys of the form: blablabla!blablabla, which want to be split at the
- # exclamation point, replace the ! with a comma and a space. We don't do it
- # that way for this index.
- $key_part =~ s/\001/, /g;
- local(@keys) = split("\001", $key_part);
- # If TITLE is not yet available use $before.
- $TITLE = $saved_title if (($saved_title)&&(!($TITLE)||($TITLE eq $default_title)));
- $TITLE = $before unless $TITLE;
- # Save the reference
- local($words) = '';
- if ($SHOW_SECTION_NUMBERS) { $words = &make_idxnum; }
- elsif ($SHORT_INDEX) { $words = &make_shortidxname; }
- else { $words = &make_idxname; }
- local($super_key) = '';
- local($sort_key, $printable_key, $cur_key);
- foreach $key (@keys) {
- $key =~ s/\016/\001/g; # revert protected \001s
- ($sort_key, $printable_key) = split("\002", $key);
- #
- # RRM: 16 May 1996
- # any \label in the printable-key will have already
- # created a label where the \index occurred.
- # This has to be removed, so that the desired label
- # will be found on the Index page instead.
- #
- if ($printable_key =~ /tex2html_anchor_mark/ ) {
- $printable_key =~ s/><tex2html_anchor_mark><\/A><A//g;
- local($tmpA,$tmpB) = split("NAME=\"", $printable_key);
- ($tmpA,$tmpB) = split("\"", $tmpB);
- $ref_files{$tmpA}='';
- $index_labels{$tmpA} = 1;
- }
- #
- # resolve and clean-up the hyperlink index-entries
- # so they can be saved in an index.pl file
- #
- if ($printable_key =~ /$cross_ref_mark/ ) {
- local($label,$id,$ref_label);
- # $printable_key =~ s/$cross_ref_mark#(\w+)#(\w+)>$cross_ref_mark/
- $printable_key =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/
- do { ($label,$id) = ($1,$2);
- $ref_label = $external_labels{$label} unless
- ($ref_label = $ref_files{$label});
- '"' . "$ref_label#$label" . '">' .
- &get_ref_mark($label,$id)}
- /geo;
- }
- $printable_key =~ s/<\#[^\#>]*\#>//go;
- #RRM
- # recognise \char combinations, for a \backslash
- #
- $printable_key =~ s/\&\#;\'134/\\/g; # restore \\s
- $printable_key =~ s/\&\#;\`<BR> /\\/g; # ditto
- $printable_key =~ s/\&\#;*SPMquot;92/\\/g; # ditto
- #
- # $sort_key .= "@$printable_key" if !($printable_key); # RRM
- $sort_key .= "@$printable_key" if !($sort_key); # RRM
- $sort_key =~ tr/A-Z/a-z/;
- if ($super_key) {
- $cur_key = $super_key . "\001" . $sort_key;
- $sub_index{$super_key} .= $cur_key . "\004";
- } else {
- $cur_key = $sort_key;
- }
-
- # Append the $index_name to the current key with a \002 delimiter. This will
- # allow the same index entry to appear in more than one index.
- $index_key = $cur_key . "\002$index_name";
-
- $index{$index_key} .= "";
-
- #
- # RRM, 15 June 1996
- # if there is no printable key, but one is known from
- # a previous index-entry, then use it.
- #
- if (!($printable_key) && ($printable_key{$index_key}))
- { $printable_key = $printable_key{$index_key}; }
-# if (!($printable_key) && ($printable_key{$cur_key}))
-# { $printable_key = $printable_key{$cur_key}; }
- #
- # do not overwrite the printable_key if it contains an anchor
- #
- if (!($printable_key{$index_key} =~ /tex2html_anchor_mark/ ))
- { $printable_key{$index_key} = $printable_key || $key; }
-# if (!($printable_key{$cur_key} =~ /tex2html_anchor_mark/ ))
-# { $printable_key{$cur_key} = $printable_key || $key; }
-
- $super_key = $cur_key;
- }
- #
- # RRM
- # page-ranges, from |( and |) and |see
- #
- if ($pageref) {
- if ($pageref eq "\(" ) {
- $pageref = '';
- $next .= " from ";
- } elsif ($pageref eq "\)" ) {
- $pageref = '';
- local($next) = $index{$index_key};
-# local($next) = $index{$cur_key};
- # $next =~ s/[\|] *$//;
- $next =~ s/(\n )?\| $//;
- $index{$index_key} = "$next to ";
-# $index{$cur_key} = "$next to ";
- }
- }
-
- if ($pageref) {
- $pageref =~ s/\s*$//g; # remove trailing spaces
- if (!$pageref) { $pageref = ' ' }
- $pageref =~ s/see/<i>see <\/i> /g;
- #
- # RRM: 27 Dec 1996
- # check if $pageref corresponds to a style command.
- # If so, apply it to the $words.
- #
- local($tmp) = "do_cmd_$pageref";
- if (defined &$tmp) {
- $words = &$tmp("<#0#>$words<#0#>");
- $words =~ s/<\#[^\#]*\#>//go;
- $pageref = '';
- }
- }
- #
- # RRM: 25 May 1996
- # any \label in the pageref section will have already
- # created a label where the \index occurred.
- # This has to be removed, so that the desired label
- # will be found on the Index page instead.
- #
- if ($pageref) {
- if ($pageref =~ /tex2html_anchor_mark/ ) {
- $pageref =~ s/><tex2html_anchor_mark><\/A><A//g;
- local($tmpA,$tmpB) = split("NAME=\"", $pageref);
- ($tmpA,$tmpB) = split("\"", $tmpB);
- $ref_files{$tmpA}='';
- $index_labels{$tmpA} = 1;
- }
- #
- # resolve and clean-up any hyperlinks in the page-ref,
- # so they can be saved in an index.pl file
- #
- if ($pageref =~ /$cross_ref_mark/ ) {
- local($label,$id,$ref_label);
- # $pageref =~ s/$cross_ref_mark#(\w+)#(\w+)>$cross_ref_mark/
- $pageref =~ s/$cross_ref_mark#([^#]+)#([^>]+)>$cross_ref_mark/
- do { ($label,$id) = ($1,$2);
- $ref_files{$label} = ''; # ???? RRM
- if ($index_labels{$label}) { $ref_label = ''; }
- else { $ref_label = $external_labels{$label}
- unless ($ref_label = $ref_files{$label});
- }
- '"' . "$ref_label#$label" . '">' . &get_ref_mark($label,$id)}/geo;
- }
- $pageref =~ s/<\#[^\#>]*\#>//go;
-
- if ($pageref eq ' ') { $index{$index_key}='@'; }
- else { $index{$index_key} .= $pageref . "\n | "; }
- } else {
- local($thisref) = &make_named_href('',"$CURRENT_FILE#$br_id",$words);
- $thisref =~ s/\n//g;
- $index{$index_key} .= $thisref."\n | ";
- }
- #print "\nREF: $sort_key : $index_key :$index{$index_key}";
-
- #join('',"<A NAME=$br_id>$anchor_invisible_mark<\/A>",$_);
-
- "<A NAME=\"$br_id\">$anchor_invisible_mark<\/A>";
-}
-
-
-# KEC. -- Copied from makeidx.perl, then modified to do multiple indices.
-# Feeds the index entries to the output. This is called for each index to be built.
-#
-# Generates a list of lookup keys for index entries, from both %printable_keys
-# and %index keys.
-# Sorts the keys according to index-sorting rules.
-# Removes keys with a 0x01 token. (duplicates?)
-# Builds a string to go to the index file.
-# Adds the index entries to the string if they belong in this index.
-# Keeps track of which index is being worked on, so only the proper entries
-# are included.
-# Places the index just built in to the output at the proper place.
-{ my $index_number = 0;
-sub add_real_idx {
- print "\nDoing the index ... Index Number $index_number\n";
- local($key, @keys, $next, $index, $old_key, $old_html);
- my ($idx_ref,$keyref);
- # RRM, 15.6.96: index constructed from %printable_key, not %index
- @keys = keys %printable_key;
-
- while (/$idx_mark/) {
- # Get the index reference from what follows the $idx_mark and
- # remove it from the string.
- s/$idxmark\002(.*?)\002/$idxmark/;
- $idx_ref = $1;
- $index = '';
- # include non- makeidx index-entries
- foreach $key (keys %index) {
- next if $printable_key{$key};
- $old_key = $key;
- if ($key =~ s/###(.*)$//) {
- next if $printable_key{$key};
- push (@keys, $key);
- $printable_key{$key} = $key;
- if ($index{$old_key} =~ /HREF="([^"]*)"/i) {
- $old_html = $1;
- $old_html =~ /$dd?([^#\Q$dd\E]*)#/;
- $old_html = $1;
- } else { $old_html = '' }
- $index{$key} = $index{$old_key} . $old_html."</A>\n | ";
- };
- }
- @keys = sort makeidx_keysort @keys;
- @keys = grep(!/\001/, @keys);
- my $cnt = 0;
- foreach $key (@keys) {
- my ($keyref) = $key =~ /.*\002(.*)/;
- next unless ($idx_ref eq $keyref); # KEC.
- $index .= &add_idx_key($key);
- $cnt++;
- }
- print "$cnt Index Entries Added\n";
- $index = '<DD>'.$index unless ($index =~ /^\s*<D(D|T)>/);
- $index_number++; # KEC.
- if ($SHORT_INDEX) {
- print "(compact version with Legend)";
- local($num) = ( $index =~ s/\<D/<D/g );
- if ($num > 50 ) {
- s/$idx_mark/$preindex<HR><DL>\n$index\n<\/DL>$preindex/o;
- } else {
- s/$idx_mark/$preindex<HR><DL>\n$index\n<\/DL>/o;
- }
- } else {
- s/$idx_mark/<DL COMPACT>\n$index\n<\/DL>/o; }
- }
-}
-}
-
-# KEC. Copied from latex2html.pl and modified to support multiple indices.
-# The bibliography and the index should be treated as separate sections
-# in their own HTML files. The \bibliography{} command acts as a sectioning command
-# that has the desired effect. But when the bibliography is constructed
-# manually using the thebibliography environment, or when using the
-# theindex environment it is not possible to use the normal sectioning
-# mechanism. This subroutine inserts a \bibliography{} or a dummy
-# \textohtmlindex command just before the appropriate environments
-# to force sectioning.
-sub add_bbl_and_idx_dummy_commands {
- local($id) = $global{'max_id'};
-
- s/([\\]begin\s*$O\d+$C\s*thebibliography)/$bbl_cnt++; $1/eg;
- ## if ($bbl_cnt == 1) {
- s/([\\]begin\s*$O\d+$C\s*thebibliography)/$id++; "\\bibliography$O$id$C$O$id$C $1"/geo;
- #}
- $global{'max_id'} = $id;
- # KEC. Modified to global substitution to place multiple index tokens.
- s/[\\]begin\s*($O\d+$C)\s*theindex/\\textohtmlindex$1/go;
- # KEC. Modified to pick up the optional argument to \printindex
- s/[\\]printindex\s*(\[.*?\])?/
- do { (defined $1) ? "\\textohtmlindex $1" : "\\textohtmlindex []"; } /ego;
- &lib_add_bbl_and_idx_dummy_commands() if defined(&lib_add_bbl_and_idx_dummy_commands);
-}
-
-# KEC. Copied from latex2html.pl and modified to support multiple indices.
-# For each textohtmlindex mark found, determine the index titles and headers.
-# We place the index ref in the header so the proper index can be generated later.
-# For the default index, the index ref is blank.
-#
-# One problem is that this routine is called twice.. Once for processing the
-# command as originally seen, and once for processing the command when
-# doing the name for the index file. We can detect that by looking at the
-# id numbers (or ref) surrounding the \theindex command, and not incrementing
-# index_number unless a new id (or ref) is seen. This has the side effect of
-# having to unconventionally start the index_number at -1. But it works.
-#
-# Gets the title from the list of indices.
-# If this is the first index, save the title in $first_idx_file. This is what's referenced
-# in the navigation buttons.
-# Increment the index_number for next time.
-# If the indexname command is defined or a newcommand defined for indexname, do it.
-# Save the index TITLE in the toc
-# Save the first_idx_file into the idxfile. This goes into the nav buttons.
-# Build index_labels if needed.
-# Create the index headings and put them in the output stream.
-
-{ my $index_number = 0; # Will be incremented before use.
- my $first_idx_file; # Static
- my $no_increment = 0;
-
-sub do_cmd_textohtmlindex {
- local($_) = @_;
- my ($idxref,$idxnum,$index_name);
-
- # We get called from make_name with the first argument = "\001noincrement". This is a sign
- # to not increment $index_number the next time we are called. We get called twice, once
- # my make_name and once by process_command. Unfortunately, make_name calls us just to set the name
- # but doesn't use the result so we get called a second time by process_command. This works fine
- # except for cases where there are multiple indices except if they aren't named, which is the case
- # when the index is inserted by an include command in latex. In these cases we are only able to use
- # the index number to decide which index to draw from, and we don't know how to increment that index
- # number if we get called a variable number of times for the same index, as is the case between
- # making html (one output file) and web (multiple output files) output formats.
- if (/\001noincrement/) {
- $no_increment = 1;
- return;
- }
-
- # Remove (but save) the index reference
- s/^\s*\[(.*?)\]/{$idxref = $1; "";}/e;
-
- # If we have an $idxref, the index name was specified. In this case, we have all the
- # information we need to carry on. Otherwise, we need to get the idxref
- # from the $index_number and set the name to "Index".
- if ($idxref) {
- $index_name = $indices{'title'}{$idxref};
- } else {
- if (defined ($idxref = $indices{'newcmdorder'}->[$index_number])) {
- $index_name = $indices{'title'}{$idxref};
- } else {
- $idxref = '';
- $index_name = "Index";
- }
- }
-
- $idx_title = "Index"; # The name displayed in the nav bar text.
-
- # Only set $idxfile if we are at the first index. This will point the
- # navigation panel to the first index file rather than the last.
- $first_idx_file = $CURRENT_FILE if ($index_number == 0);
- $idxfile = $first_idx_file; # Pointer for the Index button in the nav bar.
- $toc_sec_title = $index_name; # Index link text in the toc.
- $TITLE = $toc_sec_title; # Title for this index, from which its filename is built.
- if (%index_labels) { &make_index_labels(); }
- if (($SHORT_INDEX) && (%index_segment)) { &make_preindex(); }
- else { $preindex = ''; }
- local $idx_head = $section_headings{'textohtmlindex'};
- local($heading) = join(''
- , &make_section_heading($TITLE, $idx_head)
- , $idx_mark, "\002", $idxref, "\002" );
- local($pre,$post) = &minimize_open_tags($heading);
- $index_number++ unless ($no_increment);
- $no_increment = 0;
- join('',"<BR>\n" , $pre, $_);
-}
-}
-
-# Returns an index key, given the key passed as the first argument.
-# Not modified for multiple indices.
-sub add_idx_key {
- local($key) = @_;
- local($index, $next);
- if (($index{$key} eq '@' )&&(!($index_printed{$key}))) {
- if ($SHORT_INDEX) { $index .= "<DD><BR>\n<DT>".&print_key."\n<DD>"; }
- else { $index .= "<DT><DD><BR>\n<DT>".&print_key."\n<DD>"; }
- } elsif (($index{$key})&&(!($index_printed{$key}))) {
- if ($SHORT_INDEX) {
- $next = "<DD>".&print_key."\n : ". &print_idx_links;
- } else {
- $next = "<DT>".&print_key."\n<DD>". &print_idx_links;
- }
- $index .= $next."\n";
- $index_printed{$key} = 1;
- }
-
- if ($sub_index{$key}) {
- local($subkey, @subkeys, $subnext, $subindex);
- @subkeys = sort(split("\004", $sub_index{$key}));
- if ($SHORT_INDEX) {
- $index .= "<DD>".&print_key unless $index_printed{$key};
- $index .= "<DL>\n";
- } else {
- $index .= "<DT>".&print_key."\n<DD>" unless $index_printed{$key};
- $index .= "<DL COMPACT>\n";
- }
- foreach $subkey (@subkeys) {
- $index .= &add_sub_idx_key($subkey) unless ($index_printed{$subkey});
- }
- $index .= "</DL>\n";
- }
- return $index;
-}
-
-1; # Must be present as the last line.
+++ /dev/null
-# This file serves as a place to put initialization code and constants to
-# affect the behavior of latex2html for generating the bacula manuals.
-
-# $LINKPOINT specifies what filename to use to link to when creating
-# index.html. Not that this is a hard link.
-$LINKPOINT='"$OVERALL_TITLE"';
-
-
-# The following must be the last line of this file.
-1;
+++ /dev/null
-%%
-%%
-
-\chapter{Bacula MD5 Algorithm}
-\label{MD5Chapter}
-\addcontentsline{toc}{section}{}
-
-\section{Command Line Message Digest Utility }
-\index{Utility!Command Line Message Digest }
-\index{Command Line Message Digest Utility }
-\addcontentsline{toc}{subsection}{Command Line Message Digest Utility}
-
-
-This page describes {\bf md5}, a command line utility usable on either Unix or
-MS-DOS/Windows, which generates and verifies message digests (digital
-signatures) using the MD5 algorithm. This program can be useful when
-developing shell scripts or Perl programs for software installation, file
-comparison, and detection of file corruption and tampering.
-
-\subsection{Name}
-\index{Name}
-\addcontentsline{toc}{subsubsection}{Name}
-
-{\bf md5} - generate / check MD5 message digest
-
-\subsection{Synopsis}
-\index{Synopsis }
-\addcontentsline{toc}{subsubsection}{Synopsis}
-
-{\bf md5} [ {\bf -c}{\it signature} ] [ {\bf -u} ] [ {\bf -d}{\it input\_text}
-| {\it infile} ] [ {\it outfile} ]
-
-\subsection{Description}
-\index{Description }
-\addcontentsline{toc}{subsubsection}{Description}
-
-A {\it message digest} is a compact digital signature for an arbitrarily long
-stream of binary data. An ideal message digest algorithm would never generate
-the same signature for two different sets of input, but achieving such
-theoretical perfection would require a message digest as long as the input
-file. Practical message digest algorithms compromise in favour of a digital
-signature of modest size created with an algorithm designed to make
-preparation of input text with a given signature computationally infeasible.
-Message digest algorithms have much in common with techniques used in
-encryption, but to a different end; verification that data have not been
-altered since the signature was published.
-
-Many older programs requiring digital signatures employed 16 or 32 bit {\it
-cyclical redundancy codes} (CRC) originally developed to verify correct
-transmission in data communication protocols, but these short codes, while
-adequate to detect the kind of transmission errors for which they were
-intended, are insufficiently secure for applications such as electronic
-commerce and verification of security related software distributions.
-
-The most commonly used present-day message digest algorithm is the 128 bit MD5
-algorithm, developed by Ron Rivest of the
-\elink{MIT}{http://web.mit.edu/}
-\elink{Laboratory for Computer Science}{http://www.lcs.mit.edu/} and
-\elink{RSA Data Security, Inc.}{http://www.rsa.com/} The algorithm, with a
-reference implementation, was published as Internet
-\elink{RFC 1321}{http://www.fourmilab.ch/md5/rfc1321.html} in April 1992, and
-was placed into the public domain at that time. Message digest algorithms such
-as MD5 are not deemed ``encryption technology'' and are not subject to the
-export controls some governments impose on other data security products.
-(Obviously, the responsibility for obeying the laws in the jurisdiction in
-which you reside is entirely your own, but many common Web and Mail utilities
-use MD5, and I am unaware of any restrictions on their distribution and use.)
-
-The MD5 algorithm has been implemented in numerous computer languages
-including C,
-\elink{Perl}{http://www.perl.org/}, and
-\elink{Java}{http://www.javasoft.com/}; if you're writing a program in such a
-language, track down a suitable subroutine and incorporate it into your
-program. The program described on this page is a {\it command line}
-implementation of MD5, intended for use in shell scripts and Perl programs (it
-is much faster than computing an MD5 signature directly in Perl). This {\bf
-md5} program was originally developed as part of a suite of tools intended to
-monitor large collections of files (for example, the contents of a Web site)
-to detect corruption of files and inadvertent (or perhaps malicious) changes.
-That task is now best accomplished with more comprehensive packages such as
-\elink{Tripwire}{ftp://coast.cs.purdue.edu/pub/COAST/Tripwire/}, but the
-command line {\bf md5} component continues to prove useful for verifying
-correct delivery and installation of software packages, comparing the contents
-of two different systems, and checking for changes in specific files.
-
-\subsection{Options}
-\index{Options }
-\addcontentsline{toc}{subsubsection}{Options}
-
-\begin{description}
-
-\item [{\bf -c}{\it signature} ]
- \index{-csignature }
- Computes the signature of the specified {\it infile} or the string supplied
-by the {\bf -d} option and compares it against the specified {\it signature}.
-If the two signatures match, the exit status will be zero, otherwise the exit
-status will be 1. No signature is written to {\it outfile} or standard
-output; only the exit status is set. The signature to be checked must be
-specified as 32 hexadecimal digits.
-
-\item [{\bf -d}{\it input\_text} ]
- \index{-dinput\_text }
- A signature is computed for the given {\it input\_text} (which must be quoted
-if it contains white space characters) instead of input from {\it infile} or
-standard input. If input is specified with the {\bf -d} option, no {\it
-infile} should be specified.
-
-\item [{\bf -u} ]
- Print how-to-call information.
- \end{description}
-
-\subsection{Files}
-\index{Files }
-\addcontentsline{toc}{subsubsection}{Files}
-
-If no {\it infile} or {\bf -d} option is specified or {\it infile} is a single
-``-'', {\bf md5} reads from standard input; if no {\it outfile} is given, or
-{\it outfile} is a single ``-'', output is sent to standard output. Input and
-output are processed strictly serially; consequently {\bf md5} may be used in
-pipelines.
-
-\subsection{Bugs}
-\index{Bugs }
-\addcontentsline{toc}{subsubsection}{Bugs}
-
-The mechanism used to set standard input to binary mode may be specific to
-Microsoft C; if you rebuild the DOS/Windows version of the program from source
-using another compiler, be sure to verify binary files work properly when read
-via redirection or a pipe.
-
-This program has not been tested on a machine on which {\tt int} and/or {\tt
-long} are longer than 32 bits.
-
-\section{
-\elink{Download md5.zip}{http://www.fourmilab.ch/md5/md5.zip} (Zipped
-archive)}
-\index{Archive!Download md5.zip Zipped }
-\index{Download md5.zip (Zipped archive) }
-\addcontentsline{toc}{subsection}{Download md5.zip (Zipped archive)}
-
-The program is provided as
-\elink{md5.zip}{http://www.fourmilab.ch/md5/md5.zip}, a
-\elink{Zipped}{http://www.pkware.com/} archive containing an ready-to-run
-Win32 command-line executable program, {\tt md5.exe} (compiled using Microsoft
-Visual C++ 5.0), and in source code form along with a {\tt Makefile} to build
-the program under Unix.
-
-\subsection{See Also}
-\index{ALSO!SEE }
-\index{See Also }
-\addcontentsline{toc}{subsubsection}{SEE ALSO}
-
-{\bf sum}(1)
-
-\subsection{Exit Status}
-\index{Status!Exit }
-\index{Exit Status }
-\addcontentsline{toc}{subsubsection}{Exit Status}
-
-{\bf md5} returns status 0 if processing was completed without errors, 1 if
-the {\bf -c} option was specified and the given signature does not match that
-of the input, and 2 if processing could not be performed at all due, for
-example, to a nonexistent input file.
-
-\subsection{Copying}
-\index{Copying }
-\addcontentsline{toc}{subsubsection}{Copying}
-
-\begin{quote}
-This software is in the public domain. Permission to use, copy, modify, and
-distribute this software and its documentation for any purpose and without
-fee is hereby granted, without any conditions or restrictions. This software
-is provided ``as is'' without express or implied warranty.
-\end{quote}
-
-\subsection{Acknowledgements}
-\index{Acknowledgements }
-\addcontentsline{toc}{subsubsection}{Acknowledgements}
-
-The MD5 algorithm was developed by Ron Rivest. The public domain C language
-implementation used in this program was written by Colin Plumb in 1993.
-{\it
-\elink{by John Walker}{http://www.fourmilab.ch/}
-January 6th, MIM }
+++ /dev/null
-%%
-%%
-
-\chapter{Storage Media Output Format}
-\label{_ChapterStart9}
-\index{Format!Storage Media Output}
-\index{Storage Media Output Format}
-\addcontentsline{toc}{section}{Storage Media Output Format}
-
-\section{General}
-\index{General}
-\addcontentsline{toc}{subsection}{General}
-
-This document describes the media format written by the Storage daemon. The
-Storage daemon reads and writes in units of blocks. Blocks contain records.
-Each block has a block header followed by records, and each record has a
-record header followed by record data.
-
-This chapter is intended to be a technical discussion of the Media Format and
-as such is not targeted at end users but rather at developers and system
-administrators that want or need to know more of the working details of {\bf
-Bacula}.
-
-\section{Definitions}
-\index{Definitions}
-\addcontentsline{toc}{subsection}{Definitions}
-
-\begin{description}
-
-\item [Block]
- \index{Block}
- A block represents the primitive unit of information that the Storage daemon
-reads and writes to a physical device. Normally, for a tape device, it will
-be the same as a tape block. The Storage daemon always reads and writes
-blocks. A block consists of block header information followed by records.
-Clients of the Storage daemon (the File daemon) normally never see blocks.
-However, some of the Storage tools (bls, bscan, bextract, ...) may be use
-block header information. In older Bacula tape versions, a block could
-contain records (see record definition below) from multiple jobs. However,
-all blocks currently written by Bacula are block level BB02, and a given
-block contains records for only a single job. Different jobs simply have
-their own private blocks that are intermingled with the other blocks from
-other jobs on the Volume (previously the records were intermingled within
-the blocks). Having only records from a single job in any give block
-permitted moving the VolumeSessionId and VolumeSessionTime (see below) from
-each record heading to the Block header. This has two advantages: 1. a block
-can be quickly rejected based on the contents of the header without reading
-all the records. 2. because there is on the average more than one record per
-block, less data is written to the Volume for each job.
-
-\item [Record]
- \index{Record}
- A record consists of a Record Header, which is managed by the Storage daemon
-and Record Data, which is the data received from the Client. A record is the
-primitive unit of information sent to and from the Storage daemon by the
-Client (File daemon) programs. The details are described below.
-
-\item [JobId]
- \index{JobId}
- A number assigned by the Director daemon for a particular job. This number
-will be unique for that particular Director (Catalog). The daemons use this
-number to keep track of individual jobs. Within the Storage daemon, the JobId
-may not be unique if several Directors are accessing the Storage daemon
-simultaneously.
-
-\item [Session]
- \index{Session}
- A Session is a concept used in the Storage daemon corresponds one to one to a
-Job with the exception that each session is uniquely identified within the
-Storage daemon by a unique SessionId/SessionTime pair (see below).
-
-\item [VolSessionId]
- \index{VolSessionId}
- A unique number assigned by the Storage daemon to a particular session (Job)
-it is having with a File daemon. This number by itself is not unique to the
-given Volume, but with the VolSessionTime, it is unique.
-
-\item [VolSessionTime]
- \index{VolSessionTime}
- A unique number assigned by the Storage daemon to a particular Storage daemon
-execution. It is actually the Unix time\_t value of when the Storage daemon
-began execution cast to a 32 bit unsigned integer. The combination of the
-{\bf VolSessionId} and the {\bf VolSessionTime} for a given Storage daemon is
-guaranteed to be unique for each Job (or session).
-
-\item [FileIndex]
- \index{FileIndex}
- A sequential number beginning at one assigned by the File daemon to the files
-within a job that are sent to the Storage daemon for backup. The Storage
-daemon ensures that this number is greater than zero and sequential. Note,
-the Storage daemon uses negative FileIndexes to flag Session Start and End
-Labels as well as End of Volume Labels. Thus, the combination of
-VolSessionId, VolSessionTime, and FileIndex uniquely identifies the records
-for a single file written to a Volume.
-
-\item [Stream]
- \index{Stream}
- While writing the information for any particular file to the Volume, there
-can be any number of distinct pieces of information about that file, e.g. the
-attributes, the file data, ... The Stream indicates what piece of data it
-is, and it is an arbitrary number assigned by the File daemon to the parts
-(Unix attributes, Win32 attributes, data, compressed data,\ ...) of a file
-that are sent to the Storage daemon. The Storage daemon has no knowledge of
-the details of a Stream; it simply represents a numbered stream of bytes. The
-data for a given stream may be passed to the Storage daemon in single record,
-or in multiple records.
-
-\item [Block Header]
- \index{Block Header}
- A block header consists of a block identification (``BB02''), a block length
-in bytes (typically 64,512) a checksum, and sequential block number. Each
-block starts with a Block Header and is followed by Records. Current block
-headers also contain the VolSessionId and VolSessionTime for the records
-written to that block.
-
-\item [Record Header]
- \index{Record Header}
- A record header contains the Volume Session Id, the Volume Session Time, the
-FileIndex, the Stream, and the size of the data record which follows. The
-Record Header is always immediately followed by a Data Record if the size
-given in the Header is greater than zero. Note, for Block headers of level
-BB02 (version 1.27 and later), the Record header as written to tape does not
-contain the Volume Session Id and the Volume Session Time as these two
-fields are stored in the BB02 Block header. The in-memory record header does
-have those fields for convenience.
-
-\item [Data Record]
- \index{Data Record}
- A data record consists of a binary stream of bytes and is always preceded by
-a Record Header. The details of the meaning of the binary stream of bytes are
-unknown to the Storage daemon, but the Client programs (File daemon) defines
-and thus knows the details of each record type.
-
-\item [Volume Label]
- \index{Volume Label}
- A label placed by the Storage daemon at the beginning of each storage volume.
-It contains general information about the volume. It is written in Record
-format. The Storage daemon manages Volume Labels, and if the client wants, he
-may also read them.
-
-\item [Begin Session Label]
- \index{Begin Session Label}
- The Begin Session Label is a special record placed by the Storage daemon on
-the storage medium as the first record of an append session job with a File
-daemon. This record is useful for finding the beginning of a particular
-session (Job), since no records with the same VolSessionId and VolSessionTime
-will precede this record. This record is not normally visible outside of the
-Storage daemon. The Begin Session Label is similar to the Volume Label except
-that it contains additional information pertaining to the Session.
-
-\item [End Session Label]
- \index{End Session Label}
- The End Session Label is a special record placed by the Storage daemon on the
-storage medium as the last record of an append session job with a File
-daemon. The End Session Record is distinguished by a FileIndex with a value
-of minus two (-2). This record is useful for detecting the end of a
-particular session since no records with the same VolSessionId and
-VolSessionTime will follow this record. This record is not normally visible
-outside of the Storage daemon. The End Session Label is similar to the Volume
-Label except that it contains additional information pertaining to the
-Session.
-\end{description}
-
-\section{Storage Daemon File Output Format}
-\index{Format!Storage Daemon File Output}
-\index{Storage Daemon File Output Format}
-\addcontentsline{toc}{subsection}{Storage Daemon File Output Format}
-
-The file storage and tape storage formats are identical except that tape
-records are by default blocked into blocks of 64,512 bytes, except for the
-last block, which is the actual number of bytes written rounded up to a
-multiple of 1024 whereas the last record of file storage is not rounded up.
-The default block size of 64,512 bytes may be overridden by the user (some
-older tape drives only support block sizes of 32K). Each Session written to
-tape is terminated with an End of File mark (this will be removed later).
-Sessions written to file are simply appended to the end of the file.
-
-\section{Overall Format}
-\index{Format!Overall}
-\index{Overall Format}
-\addcontentsline{toc}{subsection}{Overall Format}
-
-A Bacula output file consists of Blocks of data. Each block contains a block
-header followed by records. Each record consists of a record header followed
-by the record data. The first record on a tape will always be the Volume Label
-Record.
-
-No Record Header will be split across Bacula blocks. However, Record Data may
-be split across any number of Bacula blocks. Obviously this will not be the
-case for the Volume Label which will always be smaller than the Bacula Block
-size.
-
-To simplify reading tapes, the Start of Session (SOS) and End of Session (EOS)
-records are never split across blocks. If this is about to happen, Bacula will
-write a short block before writing the session record (actually, the SOS
-record should always be the first record in a block, excepting perhaps the
-Volume label).
-
-Due to hardware limitations, the last block written to the tape may not be
-fully written. If your drive permits backspace record, Bacula will backup over
-the last record written on the tape, re-read it and verify that it was
-correctly written.
-
-When a new tape is mounted Bacula will write the full contents of the
-partially written block to the new tape ensuring that there is no loss of
-data. When reading a tape, Bacula will discard any block that is not totally
-written, thus ensuring that there is no duplication of data. In addition,
-since Bacula blocks are sequentially numbered within a Job, it is easy to
-ensure that no block is missing or duplicated.
-
-\section{Serialization}
-\index{Serialization}
-\addcontentsline{toc}{subsection}{Serialization}
-
-All Block Headers, Record Headers, and Label Records are written using
-Bacula's serialization routines. These routines guarantee that the data is
-written to the output volume in a machine independent format.
-
-\section{Block Header}
-\index{Header!Block}
-\index{Block Header}
-\addcontentsline{toc}{subsection}{Block Header}
-
-The format of the Block Header (version 1.27 and later) is:
-
-\footnotesize
-\begin{verbatim}
- uint32_t CheckSum; /* Block check sum */
- uint32_t BlockSize; /* Block byte size including the header */
- uint32_t BlockNumber; /* Block number */
- char ID[4] = "BB02"; /* Identification and block level */
- uint32_t VolSessionId; /* Session Id for Job */
- uint32_t VolSessionTime; /* Session Time for Job */
-\end{verbatim}
-\normalsize
-
-The Block header is a fixed length and fixed format and is followed by Record
-Headers and Record Data. The CheckSum field is a 32 bit checksum of the block
-data and the block header but not including the CheckSum field. The Block
-Header is always immediately followed by a Record Header. If the tape is
-damaged, a Bacula utility will be able to recover as much information as
-possible from the tape by recovering blocks which are valid. The Block header
-is written using the Bacula serialization routines and thus is guaranteed to
-be in machine independent format. See below for version 2 of the block header.
-
-
-\section{Record Header}
-\index{Header!Record}
-\index{Record Header}
-\addcontentsline{toc}{subsection}{Record Header}
-
-Each binary data record is preceded by a Record Header. The Record Header is
-fixed length and fixed format, whereas the binary data record is of variable
-length. The Record Header is written using the Bacula serialization routines
-and thus is guaranteed to be in machine independent format.
-
-The format of the Record Header (version 1.27 or later) is:
-
-\footnotesize
-\begin{verbatim}
- int32_t FileIndex; /* File index supplied by File daemon */
- int32_t Stream; /* Stream number supplied by File daemon */
- uint32_t DataSize; /* size of following data record in bytes */
-\end{verbatim}
-\normalsize
-
-This record is followed by the binary Stream data of DataSize bytes, followed
-by another Record Header record and the binary stream data. For the definitive
-definition of this record, see record.h in the src/stored directory.
-
-Additional notes on the above:
-
-\begin{description}
-
-\item [The {\bf VolSessionId} ]
- \index{VolSessionId}
- is a unique sequential number that is assigned by the Storage Daemon to a
-particular Job. This number is sequential since the start of execution of the
-daemon.
-
-\item [The {\bf VolSessionTime} ]
- \index{VolSessionTime}
- is the time/date that the current execution of the Storage Daemon started. It
-assures that the combination of VolSessionId and VolSessionTime is unique for
-every jobs written to the tape, even if there was a machine crash between two
-writes.
-
-\item [The {\bf FileIndex} ]
- \index{FileIndex}
- is a sequential file number within a job. The Storage daemon requires this
-index to be greater than zero and sequential. Note, however, that the File
-daemon may send multiple Streams for the same FileIndex. In addition, the
-Storage daemon uses negative FileIndices to hold the Begin Session Label, the
-End Session Label, and the End of Volume Label.
-
-\item [The {\bf Stream} ]
- \index{Stream}
- is defined by the File daemon and is used to identify separate parts of the
-data saved for each file (Unix attributes, Win32 attributes, file data,
-compressed file data, sparse file data, ...). The Storage Daemon has no idea
-of what a Stream is or what it contains except that the Stream is required to
-be a positive integer. Negative Stream numbers are used internally by the
-Storage daemon to indicate that the record is a continuation of the previous
-record (the previous record would not entirely fit in the block).
-
-For Start Session and End Session Labels (where the FileIndex is negative),
-the Storage daemon uses the Stream field to contain the JobId. The current
-stream definitions are:
-
-\footnotesize
-\begin{verbatim}
-#define STREAM_UNIX_ATTRIBUTES 1 /* Generic Unix attributes */
-#define STREAM_FILE_DATA 2 /* Standard uncompressed data */
-#define STREAM_MD5_SIGNATURE 3 /* MD5 signature for the file */
-#define STREAM_GZIP_DATA 4 /* GZip compressed file data */
-/* Extended Unix attributes with Win32 Extended data. Deprecated. */
-#define STREAM_UNIX_ATTRIBUTES_EX 5 /* Extended Unix attr for Win32 EX */
-#define STREAM_SPARSE_DATA 6 /* Sparse data stream */
-#define STREAM_SPARSE_GZIP_DATA 7
-#define STREAM_PROGRAM_NAMES 8 /* program names for program data */
-#define STREAM_PROGRAM_DATA 9 /* Data needing program */
-#define STREAM_SHA1_SIGNATURE 10 /* SHA1 signature for the file */
-#define STREAM_WIN32_DATA 11 /* Win32 BackupRead data */
-#define STREAM_WIN32_GZIP_DATA 12 /* Gzipped Win32 BackupRead data */
-#define STREAM_MACOS_FORK_DATA 13 /* Mac resource fork */
-#define STREAM_HFSPLUS_ATTRIBUTES 14 /* Mac OS extra attributes */
-#define STREAM_UNIX_ATTRIBUTES_ACCESS_ACL 15 /* Standard ACL attributes on UNIX */
-#define STREAM_UNIX_ATTRIBUTES_DEFAULT_ACL 16 /* Default ACL attributes on UNIX */
-\end{verbatim}
-\normalsize
-
-\item [The {\bf DataSize} ]
- \index{DataSize}
- is the size in bytes of the binary data record that follows the Session
-Record header. The Storage Daemon has no idea of the actual contents of the
-binary data record. For standard Unix files, the data record typically
-contains the file attributes or the file data. For a sparse file the first
-64 bits of the file data contains the storage address for the data block.
-\end{description}
-
-The Record Header is never split across two blocks. If there is not enough
-room in a block for the full Record Header, the block is padded to the end
-with zeros and the Record Header begins in the next block. The data record, on
-the other hand, may be split across multiple blocks and even multiple physical
-volumes. When a data record is split, the second (and possibly subsequent)
-piece of the data is preceded by a new Record Header. Thus each piece of data
-is always immediately preceded by a Record Header. When reading a record, if
-Bacula finds only part of the data in the first record, it will automatically
-read the next record and concatenate the data record to form a full data
-record.
-
-\section{Version BB02 Block Header}
-\index{Version BB02 Block Header}
-\index{Header!Version BB02 Block}
-\addcontentsline{toc}{subsection}{Version BB02 Block Header}
-
-Each session or Job has its own private block. As a consequence, the SessionId
-and SessionTime are written once in each Block Header and not in the Record
-Header. So, the second and current version of the Block Header BB02 is:
-
-\footnotesize
-\begin{verbatim}
- uint32_t CheckSum; /* Block check sum */
- uint32_t BlockSize; /* Block byte size including the header */
- uint32_t BlockNumber; /* Block number */
- char ID[4] = "BB02"; /* Identification and block level */
- uint32_t VolSessionId; /* Applies to all records */
- uint32_t VolSessionTime; /* contained in this block */
-\end{verbatim}
-\normalsize
-
-As with the previous version, the BB02 Block header is a fixed length and
-fixed format and is followed by Record Headers and Record Data. The CheckSum
-field is a 32 bit CRC checksum of the block data and the block header but not
-including the CheckSum field. The Block Header is always immediately followed
-by a Record Header. If the tape is damaged, a Bacula utility will be able to
-recover as much information as possible from the tape by recovering blocks
-which are valid. The Block header is written using the Bacula serialization
-routines and thus is guaranteed to be in machine independent format.
-
-\section{Version 2 Record Header}
-\index{Version 2 Record Header}
-\index{Header!Version 2 Record}
-\addcontentsline{toc}{subsection}{Version 2 Record Header}
-
-Version 2 Record Header is written to the medium when using Version BB02 Block
-Headers. The memory representation of the record is identical to the old BB01
-Record Header, but on the storage medium, the first two fields, namely
-VolSessionId and VolSessionTime are not written. The Block Header is filled
-with these values when the First user record is written (i.e. non label
-record) so that when the block is written, it will have the current and unique
-VolSessionId and VolSessionTime. On reading each record from the Block, the
-VolSessionId and VolSessionTime is filled in the Record Header from the Block
-Header.
-
-\section{Volume Label Format}
-\index{Volume Label Format}
-\index{Format!Volume Label}
-\addcontentsline{toc}{subsection}{Volume Label Format}
-
-Tape volume labels are created by the Storage daemon in response to a {\bf
-label} command given to the Console program, or alternatively by the {\bf
-btape} program. Each volume is labeled with the following information
-using the Bacula serialization routines, which guarantee machine byte order
-independence.
-
-For Bacula versions 1.27 and later, the Volume Label Format is:
-
-\footnotesize
-\begin{verbatim}
- char Id[32]; /* Bacula 1.0 Immortal\n */
- uint32_t VerNum; /* Label version number */
- /* VerNum 11 and greater Bacula 1.27 and later */
- btime_t label_btime; /* Time/date tape labeled */
- btime_t write_btime; /* Time/date tape first written */
- /* The following are 0 in VerNum 11 and greater */
- float64_t write_date; /* Date this label written */
- float64_t write_time; /* Time this label written */
- char VolName[128]; /* Volume name */
- char PrevVolName[128]; /* Previous Volume Name */
- char PoolName[128]; /* Pool name */
- char PoolType[128]; /* Pool type */
- char MediaType[128]; /* Type of this media */
- char HostName[128]; /* Host name of writing computer */
- char LabelProg[32]; /* Label program name */
- char ProgVersion[32]; /* Program version */
- char ProgDate[32]; /* Program build date/time */
-\end{verbatim}
-\normalsize
-
-Note, the LabelType (Volume Label, Volume PreLabel, Session Start Label, ...)
-is stored in the record FileIndex field of the Record Header and does not
-appear in the data part of the record.
-
-\section{Session Label}
-\index{Label!Session}
-\index{Session Label}
-\addcontentsline{toc}{subsection}{Session Label}
-
-The Session Label is written at the beginning and end of each session as well
-as the last record on the physical medium. It has the following binary format:
-
-
-\footnotesize
-\begin{verbatim}
- char Id[32]; /* Bacula Immortal ... */
- uint32_t VerNum; /* Label version number */
- uint32_t JobId; /* Job id */
- uint32_t VolumeIndex; /* sequence no of vol */
- /* Prior to VerNum 11 */
- float64_t write_date; /* Date this label written */
- /* VerNum 11 and greater */
- btime_t write_btime; /* time/date record written */
- /* The following is zero VerNum 11 and greater */
- float64_t write_time; /* Time this label written */
- char PoolName[128]; /* Pool name */
- char PoolType[128]; /* Pool type */
- char JobName[128]; /* base Job name */
- char ClientName[128];
- /* Added in VerNum 10 */
- char Job[128]; /* Unique Job name */
- char FileSetName[128]; /* FileSet name */
- uint32_t JobType;
- uint32_t JobLevel;
-\end{verbatim}
-\normalsize
-
-In addition, the EOS label contains:
-
-\footnotesize
-\begin{verbatim}
- /* The remainder are part of EOS label only */
- uint32_t JobFiles;
- uint64_t JobBytes;
- uint32_t start_block;
- uint32_t end_block;
- uint32_t start_file;
- uint32_t end_file;
- uint32_t JobErrors;
-\end{verbatim}
-\normalsize
-
-In addition, for VerNum greater than 10, the EOS label contains (in addition
-to the above):
-
-\footnotesize
-\begin{verbatim}
- uint32_t JobStatus /* Job termination code */
-\end{verbatim}
-\normalsize
-
-Note, the LabelType (Volume Label, Volume PreLabel, Session Start Label,
-...) is stored in the record FileIndex field and does not appear in the data
-part of the record. Also, the Stream field of the Record Header contains the
-JobId. This permits quick filtering without actually reading all the session
-data in many cases.
-
-\section{Overall Storage Format}
-\index{Format!Overall Storage}
-\index{Overall Storage Format}
-\addcontentsline{toc}{subsection}{Overall Storage Format}
-
-\footnotesize
-\begin{verbatim}
- Current Bacula Tape Format
- 6 June 2001
- Version BB02 added 28 September 2002
- Version BB01 is the old deprecated format.
- A Bacula tape is composed of tape Blocks. Each block
- has a Block header followed by the block data. Block
- Data consists of Records. Records consist of Record
- Headers followed by Record Data.
- :=======================================================:
- | |
- | Block Header (24 bytes) |
- | |
- |-------------------------------------------------------|
- | |
- | Record Header (12 bytes) |
- | |
- |-------------------------------------------------------|
- | |
- | Record Data |
- | |
- |-------------------------------------------------------|
- | |
- | Record Header (12 bytes) |
- | |
- |-------------------------------------------------------|
- | |
- | ... |
- Block Header: the first item in each block. The format is
- shown below.
- Partial Data block: occurs if the data from a previous
- block spills over to this block (the normal case except
- for the first block on a tape). However, this partial
- data block is always preceded by a record header.
- Record Header: identifies the Volume Session, the Stream
- and the following Record Data size. See below for format.
- Record data: arbitrary binary data.
- Block Header Format BB02
- :=======================================================:
- | CheckSum (uint32_t) |
- |-------------------------------------------------------|
- | BlockSize (uint32_t) |
- |-------------------------------------------------------|
- | BlockNumber (uint32_t) |
- |-------------------------------------------------------|
- | "BB02" (char [4]) |
- |-------------------------------------------------------|
- | VolSessionId (uint32_t) |
- |-------------------------------------------------------|
- | VolSessionTime (uint32_t) |
- :=======================================================:
- BB02: Serves to identify the block as a
- Bacula block and also serves as a block format identifier
- should we ever need to change the format.
- BlockSize: is the size in bytes of the block. When reading
- back a block, if the BlockSize does not agree with the
- actual size read, Bacula discards the block.
- CheckSum: a checksum for the Block.
- BlockNumber: is the sequential block number on the tape.
- VolSessionId: a unique sequential number that is assigned
- by the Storage Daemon to a particular Job.
- This number is sequential since the start
- of execution of the daemon.
- VolSessionTime: the time/date that the current execution
- of the Storage Daemon started. It assures
- that the combination of VolSessionId and
- VolSessionTime is unique for all jobs
- written to the tape, even if there was a
- machine crash between two writes.
- Record Header Format BB02
- :=======================================================:
- | FileIndex (int32_t) |
- |-------------------------------------------------------|
- | Stream (int32_t) |
- |-------------------------------------------------------|
- | DataSize (uint32_t) |
- :=======================================================:
- FileIndex: a sequential file number within a job. The
- Storage daemon enforces this index to be
- greater than zero and sequential. Note,
- however, that the File daemon may send
- multiple Streams for the same FileIndex.
- The Storage Daemon uses negative FileIndices
- to identify Session Start and End labels
- as well as the End of Volume labels.
- Stream: defined by the File daemon and is intended to be
- used to identify separate parts of the data
- saved for each file (attributes, file data,
- ...). The Storage Daemon has no idea of
- what a Stream is or what it contains.
- DataSize: the size in bytes of the binary data record
- that follows the Session Record header.
- The Storage Daemon has no idea of the
- actual contents of the binary data record.
- For standard Unix files, the data record
- typically contains the file attributes or
- the file data. For a sparse file
- the first 64 bits of the data contains
- the storage address for the data block.
- Volume Label
- :=======================================================:
- | Id (32 bytes) |
- |-------------------------------------------------------|
- | VerNum (uint32_t) |
- |-------------------------------------------------------|
- | label_date (float64_t) |
- | label_btime (btime_t VerNum 11 |
- |-------------------------------------------------------|
- | label_time (float64_t) |
- | write_btime (btime_t VerNum 11 |
- |-------------------------------------------------------|
- | write_date (float64_t) |
- | 0 (float64_t) VerNum 11 |
- |-------------------------------------------------------|
- | write_time (float64_t) |
- | 0 (float64_t) VerNum 11 |
- |-------------------------------------------------------|
- | VolName (128 bytes) |
- |-------------------------------------------------------|
- | PrevVolName (128 bytes) |
- |-------------------------------------------------------|
- | PoolName (128 bytes) |
- |-------------------------------------------------------|
- | PoolType (128 bytes) |
- |-------------------------------------------------------|
- | MediaType (128 bytes) |
- |-------------------------------------------------------|
- | HostName (128 bytes) |
- |-------------------------------------------------------|
- | LabelProg (32 bytes) |
- |-------------------------------------------------------|
- | ProgVersion (32 bytes) |
- |-------------------------------------------------------|
- | ProgDate (32 bytes) |
- |-------------------------------------------------------|
- :=======================================================:
-
- Id: 32 byte Bacula identifier "Bacula 1.0 immortal\n"
- (old version also recognized:)
- Id: 32 byte Bacula identifier "Bacula 0.9 mortal\n"
- LabelType (Saved in the FileIndex of the Header record).
- PRE_LABEL -1 Volume label on unwritten tape
- VOL_LABEL -2 Volume label after tape written
- EOM_LABEL -3 Label at EOM (not currently implemented)
- SOS_LABEL -4 Start of Session label (format given below)
- EOS_LABEL -5 End of Session label (format given below)
- VerNum: 11
- label_date: Julian day tape labeled
- label_time: Julian time tape labeled
- write_date: Julian date tape first used (data written)
- write_time: Julian time tape first used (data written)
- VolName: "Physical" Volume name
- PrevVolName: The VolName of the previous tape (if this tape is
- a continuation of the previous one).
- PoolName: Pool Name
- PoolType: Pool Type
- MediaType: Media Type
- HostName: Name of host that is first writing the tape
- LabelProg: Name of the program that labeled the tape
- ProgVersion: Version of the label program
- ProgDate: Date Label program built
- Session Label
- :=======================================================:
- | Id (32 bytes) |
- |-------------------------------------------------------|
- | VerNum (uint32_t) |
- |-------------------------------------------------------|
- | JobId (uint32_t) |
- |-------------------------------------------------------|
- | write_btime (btime_t) VerNum 11 |
- |-------------------------------------------------------|
- | 0 (float64_t) VerNum 11 |
- |-------------------------------------------------------|
- | PoolName (128 bytes) |
- |-------------------------------------------------------|
- | PoolType (128 bytes) |
- |-------------------------------------------------------|
- | JobName (128 bytes) |
- |-------------------------------------------------------|
- | ClientName (128 bytes) |
- |-------------------------------------------------------|
- | Job (128 bytes) |
- |-------------------------------------------------------|
- | FileSetName (128 bytes) |
- |-------------------------------------------------------|
- | JobType (uint32_t) |
- |-------------------------------------------------------|
- | JobLevel (uint32_t) |
- |-------------------------------------------------------|
- | FileSetMD5 (50 bytes) VerNum 11 |
- |-------------------------------------------------------|
- Additional fields in End Of Session Label
- |-------------------------------------------------------|
- | JobFiles (uint32_t) |
- |-------------------------------------------------------|
- | JobBytes (uint32_t) |
- |-------------------------------------------------------|
- | start_block (uint32_t) |
- |-------------------------------------------------------|
- | end_block (uint32_t) |
- |-------------------------------------------------------|
- | start_file (uint32_t) |
- |-------------------------------------------------------|
- | end_file (uint32_t) |
- |-------------------------------------------------------|
- | JobErrors (uint32_t) |
- |-------------------------------------------------------|
- | JobStatus (uint32_t) VerNum 11 |
- :=======================================================:
- * => fields deprecated
- Id: 32 byte Bacula Identifier "Bacula 1.0 immortal\n"
- LabelType (in FileIndex field of Header):
- EOM_LABEL -3 Label at EOM
- SOS_LABEL -4 Start of Session label
- EOS_LABEL -5 End of Session label
- VerNum: 11
- JobId: JobId
- write_btime: Bacula time/date this tape record written
- write_date: Julian date tape this record written - deprecated
- write_time: Julian time tape this record written - deprecated.
- PoolName: Pool Name
- PoolType: Pool Type
- MediaType: Media Type
- ClientName: Name of File daemon or Client writing this session
- Not used for EOM_LABEL.
-\end{verbatim}
-\normalsize
-
-\section{Unix File Attributes}
-\index{Unix File Attributes}
-\index{Attributes!Unix File}
-\addcontentsline{toc}{subsection}{Unix File Attributes}
-
-The Unix File Attributes packet consists of the following:
-
-\lt{}File-Index\gt{} \lt{}Type\gt{}
-\lt{}Filename\gt{}@\lt{}File-Attributes\gt{}@\lt{}Link\gt{}
-@\lt{}Extended-Attributes@\gt{} where
-
-\begin{description}
-
-\item [@]
- represents a byte containing a binary zero.
-
-\item [FileIndex]
- \index{FileIndex}
- is the sequential file index starting from one assigned by the File daemon.
-
-\item [Type]
- \index{Type}
- is one of the following:
-
-\footnotesize
-\begin{verbatim}
-#define FT_LNKSAVED 1 /* hard link to file already saved */
-#define FT_REGE 2 /* Regular file but empty */
-#define FT_REG 3 /* Regular file */
-#define FT_LNK 4 /* Soft Link */
-#define FT_DIR 5 /* Directory */
-#define FT_SPEC 6 /* Special file -- chr, blk, fifo, sock */
-#define FT_NOACCESS 7 /* Not able to access */
-#define FT_NOFOLLOW 8 /* Could not follow link */
-#define FT_NOSTAT 9 /* Could not stat file */
-#define FT_NOCHG 10 /* Incremental option, file not changed */
-#define FT_DIRNOCHG 11 /* Incremental option, directory not changed */
-#define FT_ISARCH 12 /* Trying to save archive file */
-#define FT_NORECURSE 13 /* No recursion into directory */
-#define FT_NOFSCHG 14 /* Different file system, prohibited */
-#define FT_NOOPEN 15 /* Could not open directory */
-#define FT_RAW 16 /* Raw block device */
-#define FT_FIFO 17 /* Raw fifo device */
-\end{verbatim}
-\normalsize
-
-\item [Filename]
- \index{Filename}
- is the fully qualified filename.
-
-\item [File-Attributes]
- \index{File-Attributes}
- consists of the 13 fields of the stat() buffer in ASCII base64 format
-separated by spaces. These fields and their meanings are shown below. This
-stat() packet is in Unix format, and MUST be provided (constructed) for ALL
-systems.
-
-\item [Link]
- \index{Link}
- when the FT code is FT\_LNK or FT\_LNKSAVED, the item in question is a Unix
-link, and this field contains the fully qualified link name. When the FT code
-is not FT\_LNK or FT\_LNKSAVED, this field is null.
-
-\item [Extended-Attributes]
- \index{Extended-Attributes}
- The exact format of this field is operating system dependent. It contains
-additional or extended attributes of a system dependent nature. Currently,
-this field is used only on WIN32 systems where it contains a ASCII base64
-representation of the WIN32\_FILE\_ATTRIBUTE\_DATA structure as defined by
-Windows. The fields in the base64 representation of this structure are like
-the File-Attributes separated by spaces.
-\end{description}
-
-The File-attributes consist of the following:
-
-\addcontentsline{lot}{table}{File Attributes}
-\begin{longtable}{|p{0.6in}|p{0.7in}|p{1in}|p{1in}|p{1.4in}|}
- \hline
-\multicolumn{1}{|c|}{\bf Field No. } & \multicolumn{1}{c|}{\bf Stat Name }
-& \multicolumn{1}{c|}{\bf Unix } & \multicolumn{1}{c|}{\bf Win98/NT } &
-\multicolumn{1}{c|}{\bf MacOS } \\
- \hline
-\multicolumn{1}{|c|}{1 } & {st\_dev } & {Device number of filesystem } &
-{Drive number } & {vRefNum } \\
- \hline
-\multicolumn{1}{|c|}{2 } & {st\_ino } & {Inode number } & {Always 0 } &
-{fileID/dirID } \\
- \hline
-\multicolumn{1}{|c|}{3 } & {st\_mode } & {File mode } & {File mode } &
-{777 dirs/apps; 666 docs; 444 locked docs } \\
- \hline
-\multicolumn{1}{|c|}{4 } & {st\_nlink } & {Number of links to the file } &
-{Number of link (only on NTFS) } & {Always 1 } \\
- \hline
-\multicolumn{1}{|c|}{5 } & {st\_uid } & {Owner ID } & {Always 0 } &
-{Always 0 } \\
- \hline
-\multicolumn{1}{|c|}{6 } & {st\_gid } & {Group ID } & {Always 0 } &
-{Always 0 } \\
- \hline
-\multicolumn{1}{|c|}{7 } & {st\_rdev } & {Device ID for special files } &
-{Drive No. } & {Always 0 } \\
- \hline
-\multicolumn{1}{|c|}{8 } & {st\_size } & {File size in bytes } & {File
-size in bytes } & {Data fork file size in bytes } \\
- \hline
-\multicolumn{1}{|c|}{9 } & {st\_blksize } & {Preferred block size } &
-{Always 0 } & {Preferred block size } \\
- \hline
-\multicolumn{1}{|c|}{10 } & {st\_blocks } & {Number of blocks allocated }
-& {Always 0 } & {Number of blocks allocated } \\
- \hline
-\multicolumn{1}{|c|}{11 } & {st\_atime } & {Last access time since epoch }
-& {Last access time since epoch } & {Last access time -66 years } \\
- \hline
-\multicolumn{1}{|c|}{12 } & {st\_mtime } & {Last modify time since epoch }
-& {Last modify time since epoch } & {Last access time -66 years } \\
- \hline
-\multicolumn{1}{|c|}{13 } & {st\_ctime } & {Inode change time since epoch
-} & {File create time since epoch } & {File create time -66 years}
-\\ \hline
-
-\end{longtable}
-
-\section{Old Deprecated Tape Format}
-\index{Old Deprecated Tape Format}
-\index{Format!Old Deprecated Tape}
-\addcontentsline{toc}{subsection}{Old Deprecated Tape Format}
-
-The format of the Block Header (version 1.26 and earlier) is:
-
-\footnotesize
-\begin{verbatim}
- uint32_t CheckSum; /* Block check sum */
- uint32_t BlockSize; /* Block byte size including the header */
- uint32_t BlockNumber; /* Block number */
- char ID[4] = "BB01"; /* Identification and block level */
-\end{verbatim}
-\normalsize
-
-The format of the Record Header (version 1.26 or earlier) is:
-
-\footnotesize
-\begin{verbatim}
- uint32_t VolSessionId; /* Unique ID for this session */
- uint32_t VolSessionTime; /* Start time/date of session */
- int32_t FileIndex; /* File index supplied by File daemon */
- int32_t Stream; /* Stream number supplied by File daemon */
- uint32_t DataSize; /* size of following data record in bytes */
-\end{verbatim}
-\normalsize
-
-\footnotesize
-\begin{verbatim}
- Current Bacula Tape Format
- 6 June 2001
- Version BB01 is the old deprecated format.
- A Bacula tape is composed of tape Blocks. Each block
- has a Block header followed by the block data. Block
- Data consists of Records. Records consist of Record
- Headers followed by Record Data.
- :=======================================================:
- | |
- | Block Header |
- | (16 bytes version BB01) |
- |-------------------------------------------------------|
- | |
- | Record Header |
- | (20 bytes version BB01) |
- |-------------------------------------------------------|
- | |
- | Record Data |
- | |
- |-------------------------------------------------------|
- | |
- | Record Header |
- | (20 bytes version BB01) |
- |-------------------------------------------------------|
- | |
- | ... |
- Block Header: the first item in each block. The format is
- shown below.
- Partial Data block: occurs if the data from a previous
- block spills over to this block (the normal case except
- for the first block on a tape). However, this partial
- data block is always preceded by a record header.
- Record Header: identifies the Volume Session, the Stream
- and the following Record Data size. See below for format.
- Record data: arbitrary binary data.
- Block Header Format BB01 (deprecated)
- :=======================================================:
- | CheckSum (uint32_t) |
- |-------------------------------------------------------|
- | BlockSize (uint32_t) |
- |-------------------------------------------------------|
- | BlockNumber (uint32_t) |
- |-------------------------------------------------------|
- | "BB01" (char [4]) |
- :=======================================================:
- BB01: Serves to identify the block as a
- Bacula block and also serves as a block format identifier
- should we ever need to change the format.
- BlockSize: is the size in bytes of the block. When reading
- back a block, if the BlockSize does not agree with the
- actual size read, Bacula discards the block.
- CheckSum: a checksum for the Block.
- BlockNumber: is the sequential block number on the tape.
- VolSessionId: a unique sequential number that is assigned
- by the Storage Daemon to a particular Job.
- This number is sequential since the start
- of execution of the daemon.
- VolSessionTime: the time/date that the current execution
- of the Storage Daemon started. It assures
- that the combination of VolSessionId and
- VolSessionTime is unique for all jobs
- written to the tape, even if there was a
- machine crash between two writes.
- Record Header Format BB01 (deprecated)
- :=======================================================:
- | VolSessionId (uint32_t) |
- |-------------------------------------------------------|
- | VolSessionTime (uint32_t) |
- |-------------------------------------------------------|
- | FileIndex (int32_t) |
- |-------------------------------------------------------|
- | Stream (int32_t) |
- |-------------------------------------------------------|
- | DataSize (uint32_t) |
- :=======================================================:
- VolSessionId: a unique sequential number that is assigned
- by the Storage Daemon to a particular Job.
- This number is sequential since the start
- of execution of the daemon.
- VolSessionTime: the time/date that the current execution
- of the Storage Daemon started. It assures
- that the combination of VolSessionId and
- VolSessionTime is unique for all jobs
- written to the tape, even if there was a
- machine crash between two writes.
- FileIndex: a sequential file number within a job. The
- Storage daemon enforces this index to be
- greater than zero and sequential. Note,
- however, that the File daemon may send
- multiple Streams for the same FileIndex.
- The Storage Daemon uses negative FileIndices
- to identify Session Start and End labels
- as well as the End of Volume labels.
- Stream: defined by the File daemon and is intended to be
- used to identify separate parts of the data
- saved for each file (attributes, file data,
- ...). The Storage Daemon has no idea of
- what a Stream is or what it contains.
- DataSize: the size in bytes of the binary data record
- that follows the Session Record header.
- The Storage Daemon has no idea of the
- actual contents of the binary data record.
- For standard Unix files, the data record
- typically contains the file attributes or
- the file data. For a sparse file
- the first 64 bits of the data contains
- the storage address for the data block.
- Volume Label
- :=======================================================:
- | Id (32 bytes) |
- |-------------------------------------------------------|
- | VerNum (uint32_t) |
- |-------------------------------------------------------|
- | label_date (float64_t) |
- |-------------------------------------------------------|
- | label_time (float64_t) |
- |-------------------------------------------------------|
- | write_date (float64_t) |
- |-------------------------------------------------------|
- | write_time (float64_t) |
- |-------------------------------------------------------|
- | VolName (128 bytes) |
- |-------------------------------------------------------|
- | PrevVolName (128 bytes) |
- |-------------------------------------------------------|
- | PoolName (128 bytes) |
- |-------------------------------------------------------|
- | PoolType (128 bytes) |
- |-------------------------------------------------------|
- | MediaType (128 bytes) |
- |-------------------------------------------------------|
- | HostName (128 bytes) |
- |-------------------------------------------------------|
- | LabelProg (32 bytes) |
- |-------------------------------------------------------|
- | ProgVersion (32 bytes) |
- |-------------------------------------------------------|
- | ProgDate (32 bytes) |
- |-------------------------------------------------------|
- :=======================================================:
-
- Id: 32 byte Bacula identifier "Bacula 1.0 immortal\n"
- (old version also recognized:)
- Id: 32 byte Bacula identifier "Bacula 0.9 mortal\n"
- LabelType (Saved in the FileIndex of the Header record).
- PRE_LABEL -1 Volume label on unwritten tape
- VOL_LABEL -2 Volume label after tape written
- EOM_LABEL -3 Label at EOM (not currently implemented)
- SOS_LABEL -4 Start of Session label (format given below)
- EOS_LABEL -5 End of Session label (format given below)
- label_date: Julian day tape labeled
- label_time: Julian time tape labeled
- write_date: Julian date tape first used (data written)
- write_time: Julian time tape first used (data written)
- VolName: "Physical" Volume name
- PrevVolName: The VolName of the previous tape (if this tape is
- a continuation of the previous one).
- PoolName: Pool Name
- PoolType: Pool Type
- MediaType: Media Type
- HostName: Name of host that is first writing the tape
- LabelProg: Name of the program that labeled the tape
- ProgVersion: Version of the label program
- ProgDate: Date Label program built
- Session Label
- :=======================================================:
- | Id (32 bytes) |
- |-------------------------------------------------------|
- | VerNum (uint32_t) |
- |-------------------------------------------------------|
- | JobId (uint32_t) |
- |-------------------------------------------------------|
- | *write_date (float64_t) VerNum 10 |
- |-------------------------------------------------------|
- | *write_time (float64_t) VerNum 10 |
- |-------------------------------------------------------|
- | PoolName (128 bytes) |
- |-------------------------------------------------------|
- | PoolType (128 bytes) |
- |-------------------------------------------------------|
- | JobName (128 bytes) |
- |-------------------------------------------------------|
- | ClientName (128 bytes) |
- |-------------------------------------------------------|
- | Job (128 bytes) |
- |-------------------------------------------------------|
- | FileSetName (128 bytes) |
- |-------------------------------------------------------|
- | JobType (uint32_t) |
- |-------------------------------------------------------|
- | JobLevel (uint32_t) |
- |-------------------------------------------------------|
- | FileSetMD5 (50 bytes) VerNum 11 |
- |-------------------------------------------------------|
- Additional fields in End Of Session Label
- |-------------------------------------------------------|
- | JobFiles (uint32_t) |
- |-------------------------------------------------------|
- | JobBytes (uint32_t) |
- |-------------------------------------------------------|
- | start_block (uint32_t) |
- |-------------------------------------------------------|
- | end_block (uint32_t) |
- |-------------------------------------------------------|
- | start_file (uint32_t) |
- |-------------------------------------------------------|
- | end_file (uint32_t) |
- |-------------------------------------------------------|
- | JobErrors (uint32_t) |
- |-------------------------------------------------------|
- | JobStatus (uint32_t) VerNum 11 |
- :=======================================================:
- * => fields deprecated
- Id: 32 byte Bacula Identifier "Bacula 1.0 immortal\n"
- LabelType (in FileIndex field of Header):
- EOM_LABEL -3 Label at EOM
- SOS_LABEL -4 Start of Session label
- EOS_LABEL -5 End of Session label
- VerNum: 11
- JobId: JobId
- write_btime: Bacula time/date this tape record written
- write_date: Julian date tape this record written - deprecated
- write_time: Julian time tape this record written - deprecated.
- PoolName: Pool Name
- PoolType: Pool Type
- MediaType: Media Type
- ClientName: Name of File daemon or Client writing this session
- Not used for EOM_LABEL.
-\end{verbatim}
-\normalsize
+++ /dev/null
-%%
-%%
-
-\chapter{Bacula Memory Management}
-\label{_ChapterStart7}
-\index{Management!Bacula Memory}
-\index{Bacula Memory Management}
-\addcontentsline{toc}{section}{Bacula Memory Management}
-
-\section{General}
-\index{General}
-\addcontentsline{toc}{subsection}{General}
-
-This document describes the memory management routines that are used in Bacula
-and is meant to be a technical discussion for developers rather than part of
-the user manual.
-
-Since Bacula may be called upon to handle filenames of varying and more or
-less arbitrary length, special attention needs to be used in the code to
-ensure that memory buffers are sufficiently large. There are four
-possibilities for memory usage within {\bf Bacula}. Each will be described in
-turn. They are:
-
-\begin{itemize}
-\item Statically allocated memory.
-\item Dynamically allocated memory using malloc() and free().
-\item Non-pooled memory.
-\item Pooled memory.
- \end{itemize}
-
-\subsection{Statically Allocated Memory}
-\index{Statically Allocated Memory}
-\index{Memory!Statically Allocated}
-\addcontentsline{toc}{subsubsection}{Statically Allocated Memory}
-
-Statically allocated memory is of the form:
-
-\footnotesize
-\begin{verbatim}
-char buffer[MAXSTRING];
-\end{verbatim}
-\normalsize
-
-The use of this kind of memory is discouraged except when you are 100\% sure
-that the strings to be used will be of a fixed length. One example of where
-this is appropriate is for {\bf Bacula} resource names, which are currently
-limited to 127 characters (MAX\_NAME\_LENGTH). Although this maximum size may
-change, particularly to accommodate Unicode, it will remain a relatively small
-value.
-
-\subsection{Dynamically Allocated Memory}
-\index{Dynamically Allocated Memory}
-\index{Memory!Dynamically Allocated}
-\addcontentsline{toc}{subsubsection}{Dynamically Allocated Memory}
-
-Dynamically allocated memory is obtained using the standard malloc() routines.
-As in:
-
-\footnotesize
-\begin{verbatim}
-char *buf;
-buf = malloc(256);
-\end{verbatim}
-\normalsize
-
-This kind of memory can be released with:
-
-\footnotesize
-\begin{verbatim}
-free(buf);
-\end{verbatim}
-\normalsize
-
-It is recommended to use this kind of memory only when you are sure that you
-know the memory size needed and the memory will be used for short periods of
-time -- that is it would not be appropriate to use statically allocated
-memory. An example might be to obtain a large memory buffer for reading and
-writing files. When {\bf SmartAlloc} is enabled, the memory obtained by
-malloc() will automatically be checked for buffer overwrite (overflow) during
-the free() call, and all malloc'ed memory that is not released prior to
-termination of the program will be reported as Orphaned memory.
-
-\subsection{Pooled and Non-pooled Memory}
-\index{Memory!Pooled and Non-pooled}
-\index{Pooled and Non-pooled Memory}
-\addcontentsline{toc}{subsubsection}{Pooled and Non-pooled Memory}
-
-In order to facilitate the handling of arbitrary length filenames and to
-efficiently handle a high volume of dynamic memory usage, we have implemented
-routines between the C code and the malloc routines. The first is called
-``Pooled'' memory, and is memory, which once allocated and then released, is
-not returned to the system memory pool, but rather retained in a Bacula memory
-pool. The next request to acquire pooled memory will return any free memory
-block. In addition, each memory block has its current size associated with the
-block allowing for easy checking if the buffer is of sufficient size. This
-kind of memory would normally be used in high volume situations (lots of
-malloc()s and free()s) where the buffer length may have to frequently change
-to adapt to varying filename lengths.
-
-The non-pooled memory is handled by routines similar to those used for pooled
-memory, allowing for easy size checking. However, non-pooled memory is
-returned to the system rather than being saved in the Bacula pool. This kind
-of memory would normally be used in low volume situations (few malloc()s and
-free()s), but where the size of the buffer might have to be adjusted
-frequently.
-
-\paragraph*{Types of Memory Pool:}
-
-Currently there are three memory pool types:
-
-\begin{itemize}
-\item PM\_NOPOOL -- non-pooled memory.
-\item PM\_FNAME -- a filename pool.
-\item PM\_MESSAGE -- a message buffer pool.
-\item PM\_EMSG -- error message buffer pool.
- \end{itemize}
-
-\paragraph*{Getting Memory:}
-
-To get memory, one uses:
-
-\footnotesize
-\begin{verbatim}
-void *get_pool_memory(pool);
-\end{verbatim}
-\normalsize
-
-where {\bf pool} is one of the above mentioned pool names. The size of the
-memory returned will be determined by the system to be most appropriate for
-the application.
-
-If you wish non-pooled memory, you may alternatively call:
-
-\footnotesize
-\begin{verbatim}
-void *get_memory(size_t size);
-\end{verbatim}
-\normalsize
-
-The buffer length will be set to the size specified, and it will be assigned
-to the PM\_NOPOOL pool (no pooling).
-
-\paragraph*{Releasing Memory:}
-
-To free memory acquired by either of the above two calls, use:
-
-\footnotesize
-\begin{verbatim}
-void free_pool_memory(void *buffer);
-\end{verbatim}
-\normalsize
-
-where buffer is the memory buffer returned when the memory was acquired. If
-the memory was originally allocated as type PM\_NOPOOL, it will be released to
-the system, otherwise, it will be placed on the appropriate Bacula memory pool
-free chain to be used in a subsequent call for memory from that pool.
-
-\paragraph*{Determining the Memory Size:}
-
-To determine the memory buffer size, use:
-
-\footnotesize
-\begin{verbatim}
-size_t sizeof_pool_memory(void *buffer);
-\end{verbatim}
-\normalsize
-
-\paragraph*{Resizing Pool Memory:}
-
-To resize pool memory, use:
-
-\footnotesize
-\begin{verbatim}
-void *realloc_pool_memory(void *buffer);
-\end{verbatim}
-\normalsize
-
-The buffer will be reallocated, and the contents of the original buffer will
-be preserved, but the address of the buffer may change.
-
-\paragraph*{Automatic Size Adjustment:}
-
-To have the system check and if necessary adjust the size of your pooled
-memory buffer, use:
-
-\footnotesize
-\begin{verbatim}
-void *check_pool_memory_size(void *buffer, size_t new-size);
-\end{verbatim}
-\normalsize
-
-where {\bf new-size} is the buffer length needed. Note, if the buffer is
-already equal to or larger than {\bf new-size} no buffer size change will
-occur. However, if a buffer size change is needed, the original contents of
-the buffer will be preserved, but the buffer address may change. Many of the
-low level Bacula subroutines expect to be passed a pool memory buffer and use
-this call to ensure the buffer they use is sufficiently large.
-
-\paragraph*{Releasing All Pooled Memory:}
-
-In order to avoid orphaned buffer error messages when terminating the program,
-use:
-
-\footnotesize
-\begin{verbatim}
-void close_memory_pool();
-\end{verbatim}
-\normalsize
-
-to free all unused memory retained in the Bacula memory pool. Note, any memory
-not returned to the pool via free\_pool\_memory() will not be released by this
-call.
-
-\paragraph*{Pooled Memory Statistics:}
-
-For debugging purposes and performance tuning, the following call will print
-the current memory pool statistics:
-
-\footnotesize
-\begin{verbatim}
-void print_memory_pool_stats();
-\end{verbatim}
-\normalsize
-
-an example output is:
-
-\footnotesize
-\begin{verbatim}
-Pool Maxsize Maxused Inuse
- 0 256 0 0
- 1 256 1 0
- 2 256 1 0
-\end{verbatim}
-\normalsize
+++ /dev/null
-%%
-%%
-
-\chapter{TCP/IP Network Protocol}
-\label{_ChapterStart5}
-\index{TCP/IP Network Protocol}
-\index{Protocol!TCP/IP Network}
-\addcontentsline{toc}{section}{TCP/IP Network Protocol}
-
-\section{General}
-\index{General}
-\addcontentsline{toc}{subsection}{General}
-
-This document describes the TCP/IP protocol used by Bacula to communicate
-between the various daemons and services. The definitive definition of the
-protocol can be found in src/lib/bsock.h, src/lib/bnet.c and
-src/lib/bnet\_server.c.
-
-Bacula's network protocol is basically a ``packet oriented'' protocol built on
-a standard TCP/IP streams. At the lowest level all packet transfers are done
-with read() and write() requests on system sockets. Pipes are not used as they
-are considered unreliable for large serial data transfers between various
-hosts.
-
-Using the routines described below (bnet\_open, bnet\_write, bnet\_recv, and
-bnet\_close) guarantees that the number of bytes you write into the socket
-will be received as a single record on the other end regardless of how many
-low level write() and read() calls are needed. All data transferred are
-considered to be binary data.
-
-\section{bnet and Threads}
-\index{Threads!bnet and}
-\index{Bnet and Threads}
-\addcontentsline{toc}{subsection}{bnet and Threads}
-
-These bnet routines work fine in a threaded environment. However, they assume
-that there is only one reader or writer on the socket at any time. It is
-highly recommended that only a single thread access any BSOCK packet. The
-exception to this rule is when the socket is first opened and it is waiting
-for a job to start. The wait in the Storage daemon is done in one thread and
-then passed to another thread for subsequent handling.
-
-If you envision having two threads using the same BSOCK, think twice, then you
-must implement some locking mechanism. However, it probably would not be
-appropriate to put locks inside the bnet subroutines for efficiency reasons.
-
-\section{bnet\_open}
-\index{Bnet\_open}
-\addcontentsline{toc}{subsection}{bnet\_open}
-
-To establish a connection to a server, use the subroutine:
-
-BSOCK *bnet\_open(void *jcr, char *host, char *service, int port, int *fatal)
-bnet\_open(), if successful, returns the Bacula sock descriptor pointer to be
-used in subsequent bnet\_send() and bnet\_read() requests. If not successful,
-bnet\_open() returns a NULL. If fatal is set on return, it means that a fatal
-error occurred and that you should not repeatedly call bnet\_open(). Any error
-message will generally be sent to the JCR.
-
-\section{bnet\_send}
-\index{Bnet\_send}
-\addcontentsline{toc}{subsection}{bnet\_send}
-
-To send a packet, one uses the subroutine:
-
-int bnet\_send(BSOCK *sock) This routine is equivalent to a write() except
-that it handles the low level details. The data to be sent is expected to be
-in sock-\gt{}msg and be sock-\gt{}msglen bytes. To send a packet, bnet\_send()
-first writes four bytes in network byte order that indicate the size of the
-following data packet. It returns:
-
-\footnotesize
-\begin{verbatim}
- Returns 0 on failure
- Returns 1 on success
-\end{verbatim}
-\normalsize
-
-In the case of a failure, an error message will be sent to the JCR contained
-within the bsock packet.
-
-\section{bnet\_fsend}
-\index{Bnet\_fsend}
-\addcontentsline{toc}{subsection}{bnet\_fsend}
-
-This form uses:
-
-int bnet\_fsend(BSOCK *sock, char *format, ...) and it allows you to send a
-formatted messages somewhat like fprintf(). The return status is the same as
-bnet\_send.
-
-\section{Additional Error information}
-\index{Information!Additional Error}
-\index{Additional Error information}
-\addcontentsline{toc}{subsection}{Additional Error information}
-
-For additional error information, you can call {\bf is\_bnet\_error(BSOCK
-*bsock)} which will return 0 if there is no error or non-zero if there is an
-error on the last transmission. The {\bf is\_bnet\_stop(BSOCK *bsock)}
-function will return 0 if there no errors and you can continue sending. It
-will return non-zero if there are errors or the line is closed (no more
-transmissions should be sent).
-
-\section{bnet\_recv}
-\index{Bnet\_recv}
-\addcontentsline{toc}{subsection}{bnet\_recv}
-
-To read a packet, one uses the subroutine:
-
-int bnet\_recv(BSOCK *sock) This routine is similar to a read() except that it
-handles the low level details. bnet\_read() first reads packet length that
-follows as four bytes in network byte order. The data is read into
-sock-\gt{}msg and is sock-\gt{}msglen bytes. If the sock-\gt{}msg is not large
-enough, bnet\_recv() realloc() the buffer. It will return an error (-2) if
-maxbytes is less than the record size sent. It returns:
-
-\footnotesize
-\begin{verbatim}
- * Returns number of bytes read
- * Returns 0 on end of file
- * Returns -1 on hard end of file (i.e. network connection close)
- * Returns -2 on error
-\end{verbatim}
-\normalsize
-
-It should be noted that bnet\_recv() is a blocking read.
-
-\section{bnet\_sig}
-\index{Bnet\_sig}
-\addcontentsline{toc}{subsection}{bnet\_sig}
-
-To send a ``signal'' from one daemon to another, one uses the subroutine:
-
-int bnet\_sig(BSOCK *sock, SIGNAL) where SIGNAL is one of the following:
-
-\begin{enumerate}
-\item BNET\_EOF - deprecated use BNET\_EOD
-\item BNET\_EOD - End of data stream, new data may follow
-\item BNET\_EOD\_POLL - End of data and poll all in one
-\item BNET\_STATUS - Request full status
-\item BNET\_TERMINATE - Conversation terminated, doing close()
-\item BNET\_POLL - Poll request, I'm hanging on a read
-\item BNET\_HEARTBEAT - Heartbeat Response requested
-\item BNET\_HB\_RESPONSE - Only response permitted to HB
-\item BNET\_PROMPT - Prompt for UA
- \end{enumerate}
-
-\section{bnet\_strerror}
-\index{Bnet\_strerror}
-\addcontentsline{toc}{subsection}{bnet\_strerror}
-
-Returns a formatted string corresponding to the last error that occurred.
-
-\section{bnet\_close}
-\index{Bnet\_close}
-\addcontentsline{toc}{subsection}{bnet\_close}
-
-The connection with the server remains open until closed by the subroutine:
-
-void bnet\_close(BSOCK *sock)
-
-\section{Becoming a Server}
-\index{Server!Becoming a}
-\index{Becoming a Server}
-\addcontentsline{toc}{subsection}{Becoming a Server}
-
-The bnet\_open() and bnet\_close() routines described above are used on the
-client side to establish a connection and terminate a connection with the
-server. To become a server (i.e. wait for a connection from a client), use the
-routine {\bf bnet\_thread\_server}. The calling sequence is a bit complicated,
-please refer to the code in bnet\_server.c and the code at the beginning of
-each daemon as examples of how to call it.
-
-\section{Higher Level Conventions}
-\index{Conventions!Higher Level}
-\index{Higher Level Conventions}
-\addcontentsline{toc}{subsection}{Higher Level Conventions}
-
-Within Bacula, we have established the convention that any time a single
-record is passed, it is sent with bnet\_send() and read with bnet\_recv().
-Thus the normal exchange between the server (S) and the client (C) are:
-
-\footnotesize
-\begin{verbatim}
-S: wait for connection C: attempt connection
-S: accept connection C: bnet_send() send request
-S: bnet_recv() wait for request
-S: act on request
-S: bnet_send() send ack C: bnet_recv() wait for ack
-\end{verbatim}
-\normalsize
-
-Thus a single command is sent, acted upon by the server, and then
-acknowledged.
-
-In certain cases, such as the transfer of the data for a file, all the
-information or data cannot be sent in a single packet. In this case, the
-convention is that the client will send a command to the server, who knows
-that more than one packet will be returned. In this case, the server will
-enter a loop:
-
-\footnotesize
-\begin{verbatim}
-while ((n=bnet_recv(bsock)) > 0) {
- act on request
-}
-if (n < 0)
- error
-\end{verbatim}
-\normalsize
-
-The client will perform the following:
-
-\footnotesize
-\begin{verbatim}
-bnet_send(bsock);
-bnet_send(bsock);
-...
-bnet_sig(bsock, BNET_EOD);
-\end{verbatim}
-\normalsize
-
-Thus the client will send multiple packets and signal to the server when all
-the packets have been sent by sending a zero length record.
+++ /dev/null
-%%
-%%
-
-\chapter{Platform Support}
-\label{_PlatformChapter}
-\index{Support!Platform}
-\index{Platform Support}
-\addcontentsline{toc}{section}{Platform Support}
-
-\section{General}
-\index{General }
-\addcontentsline{toc}{subsection}{General}
-
-This chapter describes the requirements for having a
-supported platform (Operating System). In general, Bacula is
-quite portable. It supports 32 and 64 bit architectures as well
-as bigendian and littleendian machines. For full
-support, the platform (Operating System) must implement POSIX Unix
-system calls. However, for File daemon support only, a small
-compatibility library can be written to support almost any
-architecture.
-
-Currently Linux, FreeBSD, and Solaris are fully supported
-platforms, which means that the code has been tested on those
-machines and passes a full set of regression tests.
-
-In addition, the Windows File daemon is supported on most versions
-of Windows, and finally, there are a number of other platforms
-where the File daemon (client) is known to run: NetBSD, OpenBSD,
-Mac OSX, SGI, ...
-
-\section{Requirements to become a Supported Platform}
-\index{Requirements!Platform}
-\index{Platform Requirements}
-\addcontentsline{toc}{subsection}{Platform Requirements}
-
-As mentioned above, in order to become a fully supported platform, it
-must support POSIX Unix system calls. In addition, the following
-requirements must be met:
-
-\begin{itemize}
-\item The principal developer (currently Kern) must have
- non-root ssh access to a test machine running the platform.
-\item The ideal requirements and minimum requirements
- for this machine are given below.
-\item There must be a defined platform champion who is normally
- a system administrator for the machine that is available. This
- person need not be a developer/programmer but must be familiar
- with system administration of the platform.
-\item There must be at least one person designated who will
- run regression tests prior to each release. Releases occur
- approximately once every 6 months, but can be more frequent.
- It takes at most a day's effort to setup the regression scripts
- in the beginning, and after that, they can either be run daily
- or on demand before a release. Running the regression scripts
- involves only one or two command line commands and is fully
- automated.
-\item Ideally there are one or more persons who will package
- each Bacula release.
-\item Ideally there are one or more developers who can respond to
- and fix platform specific bugs.
-\end{itemize}
-
-Ideal requirements for a test machine:
-\begin{itemize}
-\item The principal developer will have non-root ssh access to
- the test machine at all times.
-\item The principal developer will have a root password.
-\item The test machine will provide approximately 200 MB of
- disk space for continual use.
-\item The test machine will have approximately 500 MB of free
- disk space for temporary use.
-\item The test machine will run the most common version of the OS.
-\item The test machine will have an autochanger of DDS-4 technology
- or later having two or more tapes.
-\item The test machine will have MySQL and/or PostgreSQL database
- access for account "bacula" available.
-\item The test machine will have sftp access.
-\item The test machine will provide an smtp server.
-\end{itemize}
-
-Minimum requirements for a test machine:
-\begin{itemize}
-\item The principal developer will have non-root ssh access to
- the test machine when requested approximately once a month.
-\item The principal developer will not have root access.
-\item The test machine will provide approximately 80 MB of
- disk space for continual use.
-\item The test machine will have approximately 300 MB of free
- disk space for temporary use.
-\item The test machine will run the OS.
-\item The test machine will have a tape drive of DDS-4 technology
- or later that can be scheduled for access.
-\item The test machine will not have MySQL and/or PostgreSQL database
- access.
-\item The test machine will have no sftp access.
-\item The test machine will provide no email access.
-\end{itemize}
-
-Bare bones test machine requirements:
-\begin{itemize}
-\item The test machine is available only to a designated
- test person (your own machine).
-\item The designated test person runs the regression
-  tests on demand.
-\item The test machine has a tape drive available.
-\end{itemize}
+++ /dev/null
-%%
-%%
-
-\chapter{Bacula Porting Notes}
-\label{_ChapterStart1}
-\index{Notes!Bacula Porting}
-\index{Bacula Porting Notes}
-\addcontentsline{toc}{section}{Bacula Porting Notes}
-
-This document is intended mostly for developers who wish to port Bacula to a
-system that is not {\bf officially} supported.
-
-It is hoped that Bacula clients will eventually run on every imaginable system
-that needs backing up (perhaps even a Palm). It is also hoped that the Bacula
-Directory and Storage daemons will run on every system capable of supporting
-them.
-
-\section{Porting Requirements}
-\index{Requirements!Porting}
-\index{Porting Requirements}
-\addcontentsline{toc}{section}{Porting Requirements}
-
-In General, the following holds true:
-
-\begin{itemize}
-\item {\bf Bacula} has been compiled and run on Linux RedHat, FreeBSD, and
- Solaris systems.
-\item In addition, clients exist on Win32, and Irix
-\item It requires GNU C++ to compile. You can try with other compilers, but
-  you are on your own. The Irix client is built with the Irix compiler, but, in
-  general, you will need GNU.
-\item Your compiler must provide support for 64 bit signed and unsigned
- integers.
-\item You will need a recent copy of the {\bf autoconf} tools loaded on your
- system (version 2.13 or later). The {\bf autoconf} tools are used to build
- the configuration program, but are not part of the Bacula source
-distribution.
-\item There are certain third party packages that Bacula needs. Except for
- MySQL, they can all be found in the {\bf depkgs} and {\bf depkgs1} releases.
-\item To build the Win32 binaries, we use Microsoft VC++ standard
- 2003. Please see the instructions in
- bacula-source/src/win32/README.win32 for more details. If you
- want to use VC++ Express, please see README.vc8. Our build is
- done under the most recent version of Cygwin, but Cygwin is
- not used in the Bacula binaries that are produced.
- Unfortunately, we do not have the resources to help you build
- your own version of the Win32 FD, so you are pretty much on
- your own. You can ask the bacula-devel list for help, but
- please don't expect much.
-\item {\bf Bacula} requires a good implementation of pthreads to work.
-\item The source code has been written with portability in mind and is mostly
- POSIX compatible. Thus porting to any POSIX compatible operating system
- should be relatively easy.
-\end{itemize}
-
-\section{Steps to Take for Porting}
-\index{Porting!Steps to Take for}
-\index{Steps to Take for Porting}
-\addcontentsline{toc}{section}{Steps to Take for Porting}
-
-\begin{itemize}
-\item The first step is to ensure that you have version 2.13 or later of the
- {\bf autoconf} tools loaded. You can skip this step, but making changes to
- the configuration program will be difficult or impossible.
-\item Then run a {\bf ./configure} command in the main source directory and
-  examine the output. It should look something like the following:
-
-\footnotesize
-\begin{verbatim}
-Configuration on Mon Oct 28 11:42:27 CET 2002:
- Host: i686-pc-linux-gnu -- redhat 7.3
- Bacula version: 1.27 (26 October 2002)
- Source code location: .
- Install binaries: /sbin
- Install config files: /etc/bacula
- C Compiler: gcc
- C++ Compiler: c++
- Compiler flags: -g -O2
- Linker flags:
- Libraries: -lpthread
- Statically Linked Tools: no
- Database found: no
- Database type: Internal
- Database lib:
- Job Output Email: root@localhost
- Traceback Email: root@localhost
- SMTP Host Address: localhost
- Director Port 9101
- File daemon Port 9102
- Storage daemon Port 9103
- Working directory /etc/bacula/working
- SQL binaries Directory
- Large file support: yes
- readline support: yes
- cweb support: yes /home/kern/bacula/depkgs/cweb
- TCP Wrappers support: no
- ZLIB support: yes
- enable-smartalloc: yes
- enable-gnome: no
- gmp support: yes
-\end{verbatim}
-\normalsize
-
-The details depend on your system. The first thing to check is that it
-properly identified your host on the {\bf Host:} line. The first part (added
-in version 1.27) is the GNU four part identification of your system. The part
-after the -- is your system and the system version. Generally, if your system
-is not yet supported, you must correct these.
-\item If the {\bf ./configure} does not function properly, you must determine
- the cause and fix it. Generally, it will be because some required system
- routine is not available on your machine.
-\item To correct problems with detection of your system type or with routines
- and libraries, you must edit the file {\bf
- \lt{}bacula-src\gt{}/autoconf/configure.in}. This is the ``source'' from
-which {\bf configure} is built. In general, most of the changes for your
-system will be made in {\bf autoconf/aclocal.m4} in the routine {\bf
-BA\_CHECK\_OPSYS} or in the routine {\bf BA\_CHECK\_OPSYS\_DISTNAME}. I have
-already added the necessary code for most systems, but if yours shows up as
-{\bf unknown} you will need to make changes. Then as mentioned above, you
-will need to set a number of system dependent items in {\bf configure.in} in
-the {\bf case} statement at approximately line 1050 (depending on the Bacula
-release).
-\item The items to set in the case statement that corresponds to your system are
-  the following:
-
-\begin{itemize}
-\item DISTVER -- set to the version of your operating system. Typically some
- form of {\bf uname} obtains it.
-\item TAPEDRIVE -- the default tape drive. Not too important as the user can
- set it as an option.
-\item PSCMD -- set to the {\bf ps} command that will provide the PID in the
- first field and the program name in the second field. If this is not set
- properly, the {\bf bacula stop} script will most likely not be able to stop
-Bacula in all cases.
-\item hostname -- command to return the base host name (non-qualified) of
- your system. This is generally the machine name. Not too important as the
- user can correct this in his configuration file.
-\item CFLAGS -- set any special compiler flags needed. Many systems need a
- special flag to make pthreads work. See cygwin for an example.
-\item LDFLAGS -- set any special loader flags. See cygwin for an example.
-\item PTHREAD\_LIB -- set for any special pthreads flags needed during
- linking. See freebsd as an example.
-\item lld -- set so that a ``long long int'' will be properly edited in a
- printf() call.
-\item llu -- set so that a ``long long unsigned'' will be properly edited in
- a printf() call.
-\item PFILES -- set to add any files that you may define in your platform
-  subdirectory. These files are used for installation of automatic system
-  startup of Bacula daemons.
-\end{itemize}
-
-\item To rebuild a new version of {\bf configure} from a changed {\bf
- autoconf/configure.in} you enter {\bf make configure} in the top level Bacula
- source directory. You must have done a ./configure prior to trying to rebuild
- the configure script or it will get into an infinite loop.
-\item If the {\bf make configure} gets into an infinite loop, ctl-c it, then
- do {\bf ./configure} (no options are necessary) and retry the {\bf make
- configure}, which should now work.
-\item To rebuild {\bf configure} you will need to have {\bf autoconf} version
- 2.57-3 or higher loaded. Older versions of autoconf will complain about
- unknown or bad options, and won't work.
-\item After you have a working {\bf configure} script, you may need to make a
- few system dependent changes to the way Bacula works. Generally, these are
- done in {\bf src/baconfig.h}. You can find a few examples of system dependent
-changes toward the end of this file. For example, on Irix systems, there is
-no definition for {\bf socklen\_t}, so it is made in this file. If your
-system has structure alignment requirements, check the definition of BALIGN
-in this file. Currently, all Bacula allocated memory is aligned on a {\bf
-double} boundary.
-\item If you are having problems with Bacula's type definitions, you might
- look at {\bf src/bc\_types.h} where all the types such as {\bf uint32\_t},
- {\bf uint64\_t}, etc. that Bacula uses are defined.
-\end{itemize}
+++ /dev/null
-/*
- * html2latex
- */
-
-available {
- sun4_sunos.4
- sun4_solaris.2
- rs_aix.3
- rs_aix.4
- sgi_irix
-}
-
-description {
- From Jeffrey Schaefer, Geometry Center. Translates HTML document to LaTeX
-}
-
-install {
- bin/html2latex /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex
- bin/html2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.tag
- bin/html2latex-local.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex-local.tag
- bin/webtex2latex.tag /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/webtex2latex.tag
- man/man1/html2latex.1 /afs/rpi.edu/dept/acs/rpinfo/filters/GChtml2latex/html2latex.1
-}
+++ /dev/null
-%%
-%%
-
-\addcontentsline{lof}{figure}{Smart Memory Allocation with Orphaned Buffer
-Detection}
-\includegraphics{./smartall.eps}
-
-\chapter{Smart Memory Allocation}
-\label{_ChapterStart4}
-\index{Detection!Smart Memory Allocation With Orphaned Buffer }
-\index{Smart Memory Allocation With Orphaned Buffer Detection }
-\addcontentsline{toc}{section}{Smart Memory Allocation With Orphaned Buffer
-Detection}
-
-Few things are as embarrassing as a program that leaks, yet few errors are so
-easy to commit or as difficult to track down in a large, complicated program
-as failure to release allocated memory. SMARTALLOC replaces the standard C
-library memory allocation functions with versions which keep track of buffer
-allocations and releases and report all orphaned buffers at the end of program
-execution. By including this package in your program during development and
-testing, you can identify code that loses buffers right when it's added and
-most easily fixed, rather than as part of a crisis debugging push when the
-problem is identified much later in the testing cycle (or even worse, when the
-code is in the hands of a customer). When program testing is complete, simply
-recompiling with different flags removes SMARTALLOC from your program,
-permitting it to run without speed or storage penalties.
-
-In addition to detecting orphaned buffers, SMARTALLOC also helps to find other
-common problems in management of dynamic storage including storing before the
-start or beyond the end of an allocated buffer, referencing data through a
-pointer to a previously released buffer, attempting to release a buffer twice
-or releasing storage not obtained from the allocator, and assuming the initial
-contents of storage allocated by functions that do not guarantee a known
-value. SMARTALLOC's checking does not usually add a large amount of overhead
-to a program (except for programs which use {\tt realloc()} extensively; see
-below). SMARTALLOC focuses on proper storage management rather than internal
-consistency of the heap as checked by the malloc\_debug facility available on
-some systems. SMARTALLOC does not conflict with malloc\_debug and both may be
-used together, if you wish. SMARTALLOC makes no assumptions regarding the
-internal structure of the heap and thus should be compatible with any C
-language implementation of the standard memory allocation functions.
-
-\subsection{ Installing SMARTALLOC}
-\index{SMARTALLOC!Installing }
-\index{Installing SMARTALLOC }
-\addcontentsline{toc}{subsection}{Installing SMARTALLOC}
-
-SMARTALLOC is provided as a Zipped archive,
-\elink{smartall.zip}{http://www.fourmilab.ch/smartall/smartall.zip}; see the
-download instructions below.
-
-To install SMARTALLOC in your program, simply add the statement:
-
-to every C program file which calls any of the memory allocation functions
-({\tt malloc}, {\tt calloc}, {\tt free}, etc.). SMARTALLOC must be used for
-all memory allocation within a program, so add it to the global include file
-for your entire program, if you have such a thing. Next, define the symbol SMARTALLOC in the
-compilation before the inclusion of smartall.h. I usually do this by having my
-Makefile add the ``{\tt -DSMARTALLOC}'' option to the C compiler for
-non-production builds. You can define the symbol manually, if you prefer, by
-adding the statement:
-
-{\tt \#define SMARTALLOC}
-
-At the point where your program is all done and ready to relinquish control to
-the operating system, add the call:
-
-{\tt \ \ \ \ \ \ \ \ sm\_dump(}{\it datadump}{\tt );}
-
-where {\it datadump} specifies whether the contents of orphaned buffers are to
-be dumped in addition to printing their size and place of allocation. The data
-are dumped only if {\it datadump} is nonzero, so most programs will normally
-use ``{\tt sm\_dump(0);}''. If a mysterious orphaned buffer appears that can't
-be identified from the information this prints about it, replace the statement
-with ``{\tt sm\_dump(1);}''. Usually the dump of the buffer's data will
-furnish the additional clues you need to excavate and extirpate the elusive
-error that left the buffer allocated.
-
-Finally, add the files ``smartall.h'' and ``smartall.c'' from this release to
-your source directory, make dependencies, and linker input. You needn't make
-inclusion of smartall.c in your link optional; if compiled with SMARTALLOC not
-defined it generates no code, so you may always include it knowing it will
-waste no storage in production builds. Now when you run your program, if it
-leaves any buffers around when it's done, each will be reported by {\tt
-sm\_dump()} on stderr as follows:
-
-\footnotesize
-\begin{verbatim}
-Orphaned buffer: 120 bytes allocated at line 50 of gutshot.c
-\end{verbatim}
-\normalsize
-
-\subsection{ Squelching a SMARTALLOC}
-\index{SMARTALLOC!Squelching a }
-\index{Squelching a SMARTALLOC }
-\addcontentsline{toc}{subsection}{Squelching a SMARTALLOC}
-
-Usually, when you first install SMARTALLOC in an existing program you'll find
-it nattering about lots of orphaned buffers. Some of these turn out to be
-legitimate errors, but some are storage allocated during program
-initialisation that, while dynamically allocated, is logically static storage
-not intended to be released. Of course, you can get rid of the complaints
-about these buffers by adding code to release them, but by doing so you're
-adding unnecessary complexity and code size to your program just to silence
-the nattering of a SMARTALLOC, so an escape hatch is provided to eliminate the
-need to release these buffers.
-
-Normally all storage allocated with the functions {\tt malloc()}, {\tt
-calloc()}, and {\tt realloc()} is monitored by SMARTALLOC. If you make the
-function call:
-
-\footnotesize
-\begin{verbatim}
- sm_static(1);
-\end{verbatim}
-\normalsize
-
-you declare that subsequent storage allocated by {\tt malloc()}, {\tt
-calloc()}, and {\tt realloc()} should not be considered orphaned if found to
-be allocated when {\tt sm\_dump()} is called. I use a call on ``{\tt
-sm\_static(1);}'' before I allocate things like program configuration tables
-so I don't have to add code to release them at end of program time. After
-allocating unmonitored data this way, be sure to add a call to:
-
-\footnotesize
-\begin{verbatim}
- sm_static(0);
-\end{verbatim}
-\normalsize
-
-to resume normal monitoring of buffer allocations. Buffers allocated while
-{\tt sm\_static(1)} is in effect are not checked for having been orphaned but
-all the other safeguards provided by SMARTALLOC remain in effect. You may
-release such buffers, if you like; but you don't have to.
-
-\subsection{ Living with Libraries}
-\index{Libraries!Living with }
-\index{Living with Libraries }
-\addcontentsline{toc}{subsection}{Living with Libraries}
-
-Some library functions for which source code is unavailable may gratuitously
-allocate and return buffers that contain their results, or require you to pass
-them buffers which they subsequently release. If you have source code for the
-library, by far the best approach is to simply install SMARTALLOC in it,
-particularly since this kind of ill-structured dynamic storage management is
-the source of so many storage leaks. Without source code, however, there's no
-option but to provide a way to bypass SMARTALLOC for the buffers the library
-allocates and/or releases with the standard system functions.
-
-For each function {\it xxx} redefined by SMARTALLOC, a corresponding routine
-named ``{\tt actually}{\it xxx}'' is furnished which provides direct access to
-the underlying system function, as follows:
-
-\begin{quote}
-
-\begin{longtable}{ll}
-\multicolumn{1}{l }{\bf Standard function } & \multicolumn{1}{l }{\bf Direct
-access function } \\
-{{\tt malloc(}{\it size}{\tt )} } & {{\tt actuallymalloc(}{\it size}{\tt )}
-} \\
-{{\tt calloc(}{\it nelem}{\tt ,} {\it elsize}{\tt )} } & {{\tt
-actuallycalloc(}{\it nelem}, {\it elsize}{\tt )} } \\
-{{\tt realloc(}{\it ptr}{\tt ,} {\it size}{\tt )} } & {{\tt
-actuallyrealloc(}{\it ptr}, {\it size}{\tt )} } \\
-{{\tt free(}{\it ptr}{\tt )} } & {{\tt actuallyfree(}{\it ptr}{\tt )} }
-
-\end{longtable}
-
-\end{quote}
-
-For example, suppose there exists a system library function named ``{\tt
-getimage()}'' which reads a raster image file and returns the address of a
-buffer containing it. Since the library routine allocates the image directly
-with {\tt malloc()}, you can't use SMARTALLOC's {\tt free()}, as that call
-expects information placed in the buffer by SMARTALLOC's special version of
-{\tt malloc()}, and hence would report an error. To release the buffer you
-should call {\tt actuallyfree()}, as in this code fragment:
-
-\footnotesize
-\begin{verbatim}
- struct image *ibuf = getimage("ratpack.img");
- display_on_screen(ibuf);
- actuallyfree(ibuf);
-\end{verbatim}
-\normalsize
-
-Conversely, suppose we are to call a library function, ``{\tt putimage()}'',
-which writes an image buffer into a file and then releases the buffer with
-{\tt free()}. Since the system {\tt free()} is being called, we can't pass a
-buffer allocated by SMARTALLOC's allocation routines, as it contains special
-information that the system {\tt free()} doesn't expect to be there. The
-following code uses {\tt actuallymalloc()} to obtain the buffer passed to such
-a routine.
-
-\footnotesize
-\begin{verbatim}
- struct image *obuf =
- (struct image *) actuallymalloc(sizeof(struct image));
- dump_screen_to_image(obuf);
- putimage("scrdump.img", obuf); /* putimage() releases obuf */
-\end{verbatim}
-\normalsize
-
-It's unlikely you'll need any of the ``actually'' calls except under very odd
-circumstances (in four products and three years, I've only needed them once),
-but they're there for the rare occasions that demand them. Don't use them to
-subvert the error checking of SMARTALLOC; if you want to disable orphaned
-buffer detection, use the {\tt sm\_static(1)} mechanism described above. That
-way you don't forfeit all the other advantages of SMARTALLOC as you do when
-using {\tt actuallymalloc()} and {\tt actuallyfree()}.
-
-\subsection{ SMARTALLOC Details}
-\index{SMARTALLOC Details }
-\index{Details!SMARTALLOC }
-\addcontentsline{toc}{subsection}{SMARTALLOC Details}
-
-When you include ``smartall.h'' and define SMARTALLOC, the following standard
-system library functions are redefined with the \#define mechanism to call
-corresponding functions within smartall.c instead. (For details of the
-redefinitions, please refer to smartall.h.)
-
-\footnotesize
-\begin{verbatim}
- void *malloc(size_t size)
- void *calloc(size_t nelem, size_t elsize)
- void *realloc(void *ptr, size_t size)
- void free(void *ptr)
- void cfree(void *ptr)
-\end{verbatim}
-\normalsize
-
-{\tt cfree()} is a historical artifact identical to {\tt free()}.
-
-In addition to allocating storage in the same way as the standard library
-functions, the SMARTALLOC versions expand the buffers they allocate to include
-information that identifies where each buffer was allocated and to chain all
-allocated buffers together. When a buffer is released, it is removed from the
-allocated buffer chain. A call on {\tt sm\_dump()} is able, by scanning the
-chain of allocated buffers, to find all orphaned buffers. Buffers allocated
-while {\tt sm\_static(1)} is in effect are specially flagged so that, despite
-appearing on the allocated buffer chain, {\tt sm\_dump()} will not deem them
-orphans.
-
-When a buffer is allocated by {\tt malloc()} or expanded with {\tt realloc()},
-all bytes of newly allocated storage are set to the hexadecimal value 0x55
-(alternating one and zero bits). Note that for {\tt realloc()} this applies
-only to the bytes added at the end of buffer; the original contents of the
-buffer are not modified. Initializing allocated storage to a distinctive
-nonzero pattern is intended to catch code that erroneously assumes newly
-allocated buffers are cleared to zero; in fact their contents are random. The
-{\tt calloc()} function, defined as returning a buffer cleared to zero,
-continues to zero its buffers under SMARTALLOC.
-
-Buffers obtained with the SMARTALLOC functions contain a special sentinel byte
-at the end of the user data area. This byte is set to a special key value
-based upon the buffer's memory address. When the buffer is released, the key
-is tested and if it has been overwritten an assertion in the {\tt free}
-function will fail. This catches incorrect program code that stores beyond the
-storage allocated for the buffer. At {\tt free()} time the queue links are
-also validated and an assertion failure will occur if the program has
-destroyed them by storing before the start of the allocated storage.
-
-In addition, when a buffer is released with {\tt free()}, its contents are
-immediately destroyed by overwriting them with the hexadecimal pattern 0xAA
-(alternating bits, the one's complement of the initial value pattern). This
-will usually trip up code that keeps a pointer to a buffer that's been freed
-and later attempts to reference data within the released buffer. Incredibly,
-this is {\it legal} in the standard Unix memory allocation package, which
-permits programs to free() buffers, then raise them from the grave with {\tt
-realloc()}. Such program ``logic'' should be fixed, not accommodated, and
-SMARTALLOC brooks no such ``Lazarus buffer'' nonsense.
-
-Some C libraries allow a zero size argument in calls to {\tt malloc()}. Since
-this is far more likely to indicate a program error than a defensible
-programming stratagem, SMARTALLOC disallows it with an assertion.
-
-When the standard library {\tt realloc()} function is called to expand a
-buffer, it attempts to expand the buffer in place if possible, moving it only
-if necessary. Because SMARTALLOC must place its own private storage in the
-buffer and also to aid in error detection, its version of {\tt realloc()}
-always moves and copies the buffer except in the trivial case where the size
-of the buffer is not being changed. By forcing the buffer to move on every
-call and destroying the contents of the old buffer when it is released,
-SMARTALLOC traps programs which keep pointers into a buffer across a call on
-{\tt realloc()} which may move it. This strategy may prove very costly to
-programs which make extensive use of {\tt realloc()}. If this proves to be a
-problem, such programs may wish to use {\tt actuallymalloc()}, {\tt
-actuallyrealloc()}, and {\tt actuallyfree()} for such frequently-adjusted
-buffers, trading error detection for performance. Although not specified in
-the System V Interface Definition, many C library implementations of {\tt
-realloc()} permit an old buffer argument of NULL, causing {\tt realloc()} to
-allocate a new buffer. The SMARTALLOC version permits this.
-
-\subsection{ When SMARTALLOC is Disabled}
-\index{When SMARTALLOC is Disabled }
-\index{Disabled!When SMARTALLOC is }
-\addcontentsline{toc}{subsection}{When SMARTALLOC is Disabled}
-
-When SMARTALLOC is disabled by compiling a program with the symbol SMARTALLOC
-not defined, calls on the functions otherwise redefined by SMARTALLOC go
-directly to the system functions. In addition, compile-time definitions
-translate calls on the ``{\tt actually}...{\tt ()}'' functions into the
-corresponding library calls; ``{\tt actuallymalloc(100)}'', for example,
-compiles into ``{\tt malloc(100)}''. The two special SMARTALLOC functions,
-{\tt sm\_dump()} and {\tt sm\_static()}, are defined to generate no code
-(hence the null statement). Finally, if SMARTALLOC is not defined, compilation
-of the file smartall.c generates no code or data at all, effectively removing
-it from the program even if named in the link instructions.
-
-Thus, except for unusual circumstances, a program that works with SMARTALLOC
-defined for testing should require no changes when built without it for
-production release.
-
-\subsection{ The {\tt alloc()} Function}
-\index{Function!alloc }
-\index{Alloc() Function }
-\addcontentsline{toc}{subsection}{alloc() Function}
-
-Many programs I've worked on use very few direct calls to {\tt malloc()},
-using the identically declared {\tt alloc()} function instead. Alloc detects
-out-of-memory conditions and aborts, removing the need for error checking on
-every call of {\tt malloc()} (and the temptation to skip checking for
-out-of-memory).
-
-As a convenience, SMARTALLOC supplies a compatible version of {\tt alloc()} in
-the file alloc.c, with its definition in the file alloc.h. This version of
-{\tt alloc()} is sensitive to the definition of SMARTALLOC and cooperates with
-SMARTALLOC's orphaned buffer detection. In addition, when SMARTALLOC is
-defined and {\tt alloc()} detects an out of memory condition, it takes
-advantage of the SMARTALLOC diagnostic information to identify the file and
-line number of the call on {\tt alloc()} that failed.
-
-\subsection{ Overlays and Underhandedness}
-\index{Underhandedness!Overlays and }
-\index{Overlays and Underhandedness }
-\addcontentsline{toc}{subsection}{Overlays and Underhandedness}
-
-String constants in the C language are considered to be static arrays of
-characters accessed through a pointer constant. The arrays are potentially
-writable even though their pointer is a constant. SMARTALLOC uses the
-compile-time definition {\tt \_\_FILE\_\_} to obtain the name of the file in
-which a call on buffer allocation was performed. Rather than reserve space in
-a buffer to save this information, SMARTALLOC simply stores the pointer to the
-compiled-in text of the file name. This works fine as long as the program does
-not overlay its data among modules. If data are overlayed, the area of memory
-which contained the file name at the time it was saved in the buffer may
-contain something else entirely when {\tt sm\_dump()} gets around to using the
-pointer to edit the file name which allocated the buffer.
-
-If you want to use SMARTALLOC in a program with overlayed data, you'll have to
-modify smartall.c to either copy the file name to a fixed-length field added
-to the {\tt abufhead} structure, or else allocate storage with {\tt malloc()},
-copy the file name there, and set the {\tt abfname} pointer to that buffer,
-then remember to release the buffer in {\tt sm\_free}. Either of these
-approaches are wasteful of storage and time, and should be considered only if
-there is no alternative. Since most initial debugging is done in non-overlayed
-environments, the restrictions on SMARTALLOC with data overlaying may never
-prove a problem. Note that conventional overlaying of code, by far the most
-common form of overlaying, poses no problems for SMARTALLOC; you need only be
-concerned if you're using exotic tools for data overlaying on MS-DOS or other
-address-space-challenged systems.
-
-Since a C language ``constant'' string can actually be written into, most C
-compilers generate a unique copy of each string used in a module, even if the
-same constant string appears many times. In modules that contain many calls on
-allocation functions, this results in substantial wasted storage for the
-strings that identify the file name. If your compiler permits optimization of
-multiple occurrences of constant strings, enabling this mode will eliminate
-the overhead for these strings. Of course, it's up to you to make sure
-choosing this compiler mode won't wreak havoc on some other part of your
-program.
-
-\subsection{ Test and Demonstration Program}
-\index{Test and Demonstration Program }
-\index{Program!Test and Demonstration }
-\addcontentsline{toc}{subsection}{Test and Demonstration Program}
-
-A test and demonstration program, smtest.c, is supplied with SMARTALLOC. You
-can build this program with the Makefile included. Please refer to the
-comments in smtest.c and the Makefile for information on this program. If
-you're attempting to use SMARTALLOC on a new machine or with a new compiler or
-operating system, it's a wise first step to check it out with smtest first.
-
-\subsection{ Invitation to the Hack}
-\index{Hack!Invitation to the }
-\index{Invitation to the Hack }
-\addcontentsline{toc}{subsection}{Invitation to the Hack}
-
-SMARTALLOC is not intended to be a panacea for storage management problems,
-nor is it universally applicable or effective; it's another weapon in the
-arsenal of the defensive professional programmer attempting to create reliable
-products. It represents the current state of evolution of expedient debug code
-which has been used in several commercial software products which have,
-collectively, sold more than a third of a million copies in the retail market,
-and can be expected to continue to develop through time as it is applied to
-ever more demanding projects.
-
-The version of SMARTALLOC here has been tested on a Sun SPARCStation, Silicon
-Graphics Indigo2, and on MS-DOS using both Borland and Microsoft C. Moving
-from compiler to compiler requires the usual small changes to resolve disputes
-about prototyping of functions, whether the type returned by buffer allocation
-is {\tt char\ *} or {\tt void\ *}, and so forth, but following those changes
-it works in a variety of environments. I hope you'll find SMARTALLOC as useful
-for your projects as I've found it in mine.
-
-\section{
-\elink{Download smartall.zip}{http://www.fourmilab.ch/smartall/smartall.zip}
-(Zipped archive)}
-\index{Archive! Download smartall.zip Zipped }
-\index{ Download smartall.zip (Zipped archive) }
-\addcontentsline{toc}{section}{ Download smartall.zip (Zipped archive)}
-
-SMARTALLOC is provided as
-\elink{smartall.zip}{http://www.fourmilab.ch/smartall/smartall.zip}, a
-\elink{Zipped}{http://www.pkware.com/} archive containing source code,
-documentation, and a {\tt Makefile} to build the software under Unix.
-
-\subsection{ Copying}
-\index{Copying }
-\addcontentsline{toc}{subsection}{Copying}
-
-\begin{quote}
-SMARTALLOC is in the public domain. Permission to use, copy, modify, and
-distribute this software and its documentation for any purpose and without fee
-is hereby granted, without any conditions or restrictions. This software is
-provided ``as is'' without express or implied warranty.
-\end{quote}
-
-{\it
-\elink{by John Walker}{http://www.fourmilab.ch}
-October 30th, 1998 }
+++ /dev/null
-%%
-%%
-
-\chapter{Storage Daemon Design}
-\label{_ChapterStart3}
-\index{Storage Daemon Design }
-\index{Design!Storage Daemon }
-\addcontentsline{toc}{section}{Storage Daemon Design}
-
-This chapter is intended to be a technical discussion of the Storage daemon
-services and as such is not targeted at end users but rather at developers and
-system administrators that want or need to know more of the working details of
-{\bf Bacula}.
-
-This document is somewhat out of date.
-
-\section{SD Design Introduction}
-\index{Introduction!SD Design }
-\index{SD Design Introduction }
-\addcontentsline{toc}{section}{SD Design Introduction}
-
-The Bacula Storage daemon provides storage resources to a Bacula installation.
-An individual Storage daemon is associated with a physical permanent storage
-device (for example, a tape drive, CD writer, tape changer or jukebox, etc.),
-and may employ auxiliary storage resources (such as space on a hard disk file
-system) to increase performance and/or optimize use of the permanent storage
-medium.
-
-Any number of storage daemons may be run on a given machine; each associated
-with an individual storage device connected to it, and Bacula operations may
-employ storage daemons on any number of hosts connected by a network, local or
-remote. The ability to employ remote storage daemons (with appropriate
-security measures) permits automatic off-site backup, possibly to publicly
-available backup repositories.
-
-\section{SD Development Outline}
-\index{Outline!SD Development }
-\index{SD Development Outline }
-\addcontentsline{toc}{section}{SD Development Outline}
-
-In order to provide a high performance backup and restore solution that scales
-to very large capacity devices and networks, the storage daemon must be able
-to extract as much performance from the storage device and network with which
-it interacts. In order to accomplish this, storage daemons will eventually
-have to sacrifice simplicity and painless portability in favor of techniques
-which improve performance. My goal in designing the storage daemon protocol
-and developing the initial prototype storage daemon is to provide for these
-additions in the future, while implementing an initial storage daemon which is
-very simple and portable to almost any POSIX-like environment. This original
-storage daemon (and its evolved descendants) can serve as a portable solution
-for non-demanding backup requirements (such as single servers of modest size,
-individual machines, or small local networks), while serving as the starting
-point for development of higher performance configurable derivatives which use
-techniques such as POSIX threads, shared memory, asynchronous I/O, buffering
-to high-speed intermediate media, and support for tape changers and jukeboxes.
-
-
-\section{SD Connections and Sessions}
-\index{Sessions!SD Connections and }
-\index{SD Connections and Sessions }
-\addcontentsline{toc}{section}{SD Connections and Sessions}
-
-A client connects to a storage server by initiating a conventional TCP
-connection. The storage server accepts the connection unless its maximum
-number of connections has been reached or the specified host is not granted
-access to the storage server. Once a connection has been opened, the client
-may make any number of Query requests, and/or initiate (if permitted), one or
-more Append sessions (which transmit data to be stored by the storage daemon)
-and/or Read sessions (which retrieve data from the storage daemon).
-
-Most requests and replies sent across the connection are simple ASCII strings,
-with status replies prefixed by a four digit status code for easier parsing.
-Binary data appear in blocks stored and retrieved from the storage. Any
-request may result in a single-line status reply of ``{\tt 3201\ Notification\
-pending}'', which indicates the client must send a ``Query notification''
-request to retrieve one or more notifications posted to it. Once the
-notifications have been returned, the client may then resubmit the request
-which resulted in the 3201 status.
-
-The following descriptions omit common error codes, yet to be defined, which
-can occur from most or many requests due to events like media errors,
-restarting of the storage daemon, etc. These details will be filled in, along
-with a comprehensive list of status codes and the requests that can
-produce them, in an update to this document.
-
-\subsection{SD Append Requests}
-\index{Requests!SD Append }
-\index{SD Append Requests }
-\addcontentsline{toc}{subsection}{SD Append Requests}
-
-\begin{description}
-
-\item [{append open session = \lt{}JobId\gt{} [ \lt{}Password\gt{} ] }]
- \index{SPAN class }
- A data append session is opened with the Job ID given by {\it JobId} with
-client password (if required) given by {\it Password}. If the session is
-successfully opened, a status of {\tt 3000\ OK} is returned with a ``{\tt
-ticket\ =\ }{\it number}'' reply used to identify subsequent messages in the
-session. If too many sessions are open, or a conflicting session (for
-example, a read in progress when simultaneous read and append sessions are
-not permitted), a status of ``{\tt 3502\ Volume\ busy}'' is returned. If no
-volume is mounted, or the volume mounted cannot be appended to, a status of
-``{\tt 3503\ Volume\ not\ mounted}'' is returned.
-
-\item [append data = \lt{}ticket-number\gt{} ]
- \index{SPAN class }
- If the append data is accepted, a status of {\tt 3000\ OK data address =
-\lt{}IPaddress\gt{} port = \lt{}port\gt{}} is returned, where the {\tt
-IPaddress} and {\tt port} specify the IP address and port number of the data
-channel. Error status codes are {\tt 3504\ Invalid\ ticket\ number} and {\tt
-3505\ Session\ aborted}, the latter of which indicates the entire append
-session has failed due to a daemon or media error.
-
-Once the File daemon has established the connection to the data channel
-opened by the Storage daemon, it will transfer a header packet followed by
-any number of data packets. The header packet is of the form:
-
-{\tt \lt{}file-index\gt{} \lt{}stream-id\gt{} \lt{}info\gt{}}
-
-The details are specified in the
-\ilink{Daemon Protocol}{_ChapterStart2} section of this
-document.
-
-\item [*append abort session = \lt{}ticket-number\gt{} ]
- \index{SPAN class }
- The open append session with ticket {\it ticket-number} is aborted; any blocks
-not yet written to permanent media are discarded. Subsequent attempts to
-append data to the session will receive an error status of {\tt 3505\
-Session\ aborted}.
-
-\item [append end session = \lt{}ticket-number\gt{} ]
- \index{SPAN class }
- The open append session with ticket {\it ticket-number} is marked complete; no
-further blocks may be appended. The storage daemon will give priority to
-saving any buffered blocks from this session to permanent media as soon as
-possible.
-
-\item [append close session = \lt{}ticket-number\gt{} ]
- \index{SPAN class }
- The append session with ticket {\it ticket} is closed. This message does not
-receive an {\tt 3000\ OK} reply until all of the content of the session are
-stored on permanent media, at which time said reply is given, followed by a
-list of volumes, from first to last, which contain blocks from the session,
-along with the first and last file and block on each containing session data
-and the volume session key identifying data from that session in lines with
-the following format:
-
-{\tt {\tt Volume = }\lt{}Volume-id\gt{} \lt{}start-file\gt{}
-\lt{}start-block\gt{} \lt{}end-file\gt{} \lt{}end-block\gt{}
-\lt{}volume-session-id\gt{}} where {\it Volume-id} is the volume label, {\it
-start-file} and {\it start-block} are the file and block containing the first
-data from that session on the volume, {\it end-file} and {\it end-block} are
-the file and block with the last data from the session on the volume and {\it
-volume-session-id} is the volume session ID for blocks from the session
-stored on that volume.
-\end{description}
-
-\subsection{SD Read Requests}
-\index{SD Read Requests }
-\index{Requests!SD Read }
-\addcontentsline{toc}{subsection}{SD Read Requests}
-
-\begin{description}
-
-\item [Read open session = \lt{}JobId\gt{} \lt{}Volume-id\gt{}
- \lt{}start-file\gt{} \lt{}start-block\gt{} \lt{}end-file\gt{}
- \lt{}end-block\gt{} \lt{}volume-session-id\gt{} \lt{}password\gt{} ]
-\index{SPAN class }
-where {\it Volume-id} is the volume label, {\it start-file} and {\it
-start-block} are the file and block containing the first data from that
-session on the volume, {\it end-file} and {\it end-block} are the file and
-block with the last data from the session on the volume and {\it
-volume-session-id} is the volume session ID for blocks from the session
-stored on that volume.
-
-If the session is successfully opened, a status of
-
-{\tt 3100\ OK\ Ticket\ =\ }{\it number}
-
-is returned with a reply used to identify subsequent messages in the session.
-If too many sessions are open, or a conflicting session (for example, an
-append in progress when simultaneous read and append sessions are not
-permitted), a status of ``{\tt 3502\ Volume\ busy}'' is returned. If no
-volume is mounted, or the volume mounted cannot be appended to, a status of
-``{\tt 3503\ Volume\ not\ mounted}'' is returned. If no block with the given
-volume session ID and the correct client ID number appears in the given first
-file and block for the volume, a status of ``{\tt 3505\ Session\ not\
-found}'' is returned.
-
-\item [Read data = \lt{}Ticket\gt{} \lt{}Block\gt{} ]
- \index{SPAN class }
- The specified Block of data from open read session with the specified Ticket
-number is returned, with a status of {\tt 3000\ OK} followed by a ``{\tt
-Length\ =\ }{\it size}'' line giving the length in bytes of the block data
-which immediately follows. Blocks must be retrieved in ascending order, but
-blocks may be skipped. If a block number greater than the largest stored on
-the volume is requested, a status of ``{\tt 3201\ End\ of\ volume}'' is
-returned. If a block number greater than the largest in the file is
-requested, a status of ``{\tt 3401\ End\ of\ file}'' is returned.
-
-\item [Read close session = \lt{}Ticket\gt{} ]
- \index{SPAN class }
- The read session with Ticket number is closed. A read session may be closed
-at any time; you needn't read all its blocks before closing it.
-\end{description}
-
-{\it by
-\elink{John Walker}{http://www.fourmilab.ch/}
-January 30th, MM }
-
-\section{SD Data Structures}
-\index{SD Data Structures}
-\addcontentsline{toc}{section}{SD Data Structures}
-
-In the Storage daemon, there is a Device resource (i.e. from conf file)
-that describes each physical device. When the physical device is used it
-is controlled by the DEVICE structure (defined in dev.h), and typically
-referred to as dev in the C++ code. Anyone writing or reading a physical
-device must ultimately get a lock on the DEVICE structure -- this controls
-the device. However, multiple Jobs (defined by a JCR structure src/jcr.h)
-can be writing a physical DEVICE at the same time (of course they are
-sequenced by locking the DEVICE structure). There are a lot of job
-dependent "device" variables that may be different for each Job such as
-spooling (one job may spool and another may not, and when a job is
-spooling, it must have an i/o packet open, each job has its own record and
-block structures, ...), so there is a device control record or DCR that is
-the primary way of interfacing to the physical device. The DCR contains
-all the job specific data as well as a pointer to the Device resource
-(DEVRES structure) and the physical DEVICE structure.
-
-Now if a job is writing to two devices (it could be writing two separate
-streams to the same device), it must have two DCRs. Today, the code only
-permits one. This won't be hard to change, but it is new code.
-
-Today three jobs (threads), two physical devices each job
- writes to only one device:
-
-\begin{verbatim}
- Job1 -> DCR1 -> DEVICE1
- Job2 -> DCR2 -> DEVICE1
- Job3 -> DCR3 -> DEVICE2
-\end{verbatim}
-
-To be implemented three jobs, three physical devices, but
- job1 is writing simultaneously to three devices:
-
-\begin{verbatim}
- Job1 -> DCR1 -> DEVICE1
- -> DCR4 -> DEVICE2
- -> DCR5 -> DEVICE3
- Job2 -> DCR2 -> DEVICE1
- Job3 -> DCR3 -> DEVICE2
-
- Job = job control record
-  DCR = Job control data for a specific device
- DEVICE = Device only control data
-\end{verbatim}
-
+++ /dev/null
-%%
-%%
-
-%\author{Landon Fuller}
-%\title{Bacula TLS Additions}
-
-\chapter{TLS}
-\label{_Chapter_TLS}
-\index{TLS}
-
-Written by Landon Fuller
-
-\section{Introduction to TLS}
-\index{TLS Introduction}
-\index{Introduction!TLS}
-\addcontentsline{toc}{section}{TLS Introduction}
-
-This patch includes all the back-end code necessary to add complete TLS
-data encryption support to Bacula. In addition, support for TLS in
-Console/Director communications has been added as a proof of concept.
-Adding support for the remaining daemons will be straight-forward.
-Supported features of this patchset include:
-
-\begin{itemize}
-\item Client/Server TLS Requirement Negotiation
-\item TLSv1 Connections with Server and Client Certificate
-Validation
-\item Forward Secrecy Support via Diffie-Hellman Ephemeral Keying
-\end{itemize}
-
-This document will refer to both ``server'' and ``client'' contexts. These
-terms refer to the accepting and initiating peer, respectively.
-
-Diffie-Hellman anonymous ciphers are not supported by this patchset. The
-use of DH anonymous ciphers increases the code complexity and places
-explicit trust upon the two-way Cram-MD5 implementation. Cram-MD5 is
-subject to known plaintext attacks, and is should be considered
-considerably less secure than PKI certificate-based authentication.
-
-Appropriate autoconf macros have been added to detect and use OpenSSL. Two
-additional preprocessor defines have been added: \emph{HAVE\_TLS} and
-\emph{HAVE\_OPENSSL}. All changes not specific to OpenSSL rely on
-\emph{HAVE\_TLS}. OpenSSL-specific code is constrained to
-\emph{src/lib/tls.c} to facilitate the support of alternative TLS
-implementations.
-
-\section{New Configuration Directives}
-\index{TLS Configuration Directives}
-\index{Directives!TLS Configuration}
-\addcontentsline{toc}{section}{New Configuration Directives}
-
-Additional configuration directives have been added to both the Console and
-Director resources. These new directives are defined as follows:
-
-\begin{itemize}
-\item \underline{TLS Enable} \emph{(yes/no)}
-Enable TLS support.
-
-\item \underline{TLS Require} \emph{(yes/no)}
-Require TLS connections.
-
-\item \underline{TLS Certificate} \emph{(path)}
-Path to PEM encoded TLS certificate. Used as either a client or server
-certificate.
-
-\item \underline{TLS Key} \emph{(path)}
-Path to PEM encoded TLS private key. Must correspond with the TLS
-certificate.
-
-\item \underline{TLS Verify Peer} \emph{(yes/no)}
-Verify peer certificate. Instructs server to request and verify the
-client's x509 certificate. Any client certificate signed by a known-CA
-will be accepted unless the TLS Allowed CN configuration directive is used.
-Not valid in a client context.
-
-\item \underline{TLS Allowed CN} \emph{(string list)}
-Common name attribute of allowed peer certificates. If directive is
-specified, all client certificates will be verified against this list.
-This directive may be specified more than once. Not valid in a client
-context.
-
-\item \underline{TLS CA Certificate File} \emph{(path)}
-Path to PEM encoded TLS CA certificate(s). Multiple certificates are
-permitted in the file. One of \emph{TLS CA Certificate File} or \emph{TLS
-CA Certificate Dir} are required in a server context if \underline{TLS
-Verify Peer} is also specified, and are always required in a client
-context.
-
-\item \underline{TLS CA Certificate Dir} \emph{(path)}
-Path to TLS CA certificate directory. In the current implementation,
-certificates must be stored PEM encoded with OpenSSL-compatible hashes.
-One of \emph{TLS CA Certificate File} or \emph{TLS CA Certificate Dir} are
-required in a server context if \emph{TLS Verify Peer} is also specified,
-and are always required in a client context.
-
-\item \underline{TLS DH File} \emph{(path)}
-Path to PEM encoded Diffie-Hellman parameter file. If this directive is
-specified, DH ephemeral keying will be enabled, allowing for forward
-secrecy of communications. This directive is only valid within a server
-context. To generate the parameter file, you may use openssl:
-\footnotesize
-\begin{verbatim}
-openssl dhparam -out dh1024.pem -5 1024
-\end{verbatim}
-\normalsize
-\end{itemize}
-
-\section{TLS API Implementation}
-\index{TLS API Implimentation}
-\index{API Implimentation!TLS}
-\addcontentsline{toc}{section}{TLS API Implementation}
-
-To facilitate the use of additional TLS libraries, all OpenSSL-specific
-code has been implemented within \emph{src/lib/tls.c}. In turn, a generic
-TLS API is exported.
-
-\subsection{Library Initialization and Cleanup}
-\index{Library Initialization and Cleanup}
-\index{Initialization and Cleanup!Library}
-\addcontentsline{toc}{subsection}{Library Initialization and Cleanup}
-
-\footnotesize
-\begin{verbatim}
-int init_tls (void);
-\end{verbatim}
-\normalsize
-
-Performs TLS library initialization, including seeding of the PRNG. PRNG
-seeding has not yet been implemented for win32.
-
-\footnotesize
-\begin{verbatim}
-int cleanup_tls (void);
-\end{verbatim}
-\normalsize
-
-Performs TLS library cleanup.
-
-\subsection{Manipulating TLS Contexts}
-\index{TLS Context Manipulation}
-\index{Contexts!Manipulating TLS}
-\addcontentsline{toc}{subsection}{Manipulating TLS Contexts}
-
-\footnotesize
-\begin{verbatim}
-TLS_CONTEXT *new_tls_context (const char *ca_certfile,
- const char *ca_certdir, const char *certfile,
- const char *keyfile, const char *dhfile, bool verify_peer);
-\end{verbatim}
-\normalsize
-
-Allocates and initalizes a new opaque \emph{TLS\_CONTEXT} structure. The
-\emph{TLS\_CONTEXT} structure maintains default TLS settings from which
-\emph{TLS\_CONNECTION} structures are instantiated. In the future the
-\emph{TLS\_CONTEXT} structure may be used to maintain the TLS session
-cache. \emph{ca\_certfile} and \emph{ca\_certdir} arguments are used to
-initialize the CA verification stores. The \emph{certfile} and
-\emph{keyfile} arguments are used to initialize the local certificate and
-private key. If \emph{dhfile} is non-NULL, it is used to initialize
-Diffie-Hellman ephemeral keying. If \emph{verify\_peer} is \emph{true} ,
-client certificate validation is enabled.
-
-\footnotesize
-\begin{verbatim}
-void free_tls_context (TLS_CONTEXT *ctx);
-\end{verbatim}
-\normalsize
-
-Deallocated a previously allocated \emph{TLS\_CONTEXT} structure.
-
-\subsection{Performing Post-Connection Verification}
-\index{TLS Post-Connection Verification}
-\index{Verification!TLS Post-Connection}
-\addcontentsline{toc}{subsection}{Performing Post-Connection Verification}
-
-\footnotesize
-\begin{verbatim}
-bool tls_postconnect_verify_host (TLS_CONNECTION *tls, const char *host);
-\end{verbatim}
-\normalsize
-
-Performs post-connection verification of the peer-supplied x509
-certificate. Checks whether the \emph{subjectAltName} and
-\emph{commonName} attributes match the supplied \emph{host} string.
-Returns \emph{true} if there is a match, \emph{false} otherwise.
-
-\footnotesize
-\begin{verbatim}
-bool tls_postconnect_verify_cn (TLS_CONNECTION *tls, alist *verify_list);
-\end{verbatim}
-\normalsize
-
-Performs post-connection verification of the peer-supplied x509
-certificate. Checks whether the \emph{commonName} attribute matches any
-strings supplied via the \emph{verify\_list} parameter. Returns
-\emph{true} if there is a match, \emph{false} otherwise.
-
-\subsection{Manipulating TLS Connections}
-\index{TLS Connection Manipulation}
-\index{Connections!Manipulating TLS}
-\addcontentsline{toc}{subsection}{Manipulating TLS Connections}
-
-\footnotesize
-\begin{verbatim}
-TLS_CONNECTION *new_tls_connection (TLS_CONTEXT *ctx, int fd);
-\end{verbatim}
-\normalsize
-
-Allocates and initializes a new \emph{TLS\_CONNECTION} structure with
-context \emph{ctx} and file descriptor \emph{fd}.
-
-\footnotesize
-\begin{verbatim}
-void free_tls_connection (TLS_CONNECTION *tls);
-\end{verbatim}
-\normalsize
-
-Deallocates memory associated with the \emph{tls} structure.
-
-\footnotesize
-\begin{verbatim}
-bool tls_bsock_connect (BSOCK *bsock);
-\end{verbatim}
-\normalsize
-
-Negotiates a a TLS client connection via \emph{bsock}. Returns \emph{true}
-if successful, \emph{false} otherwise. Will fail if there is a TLS
-protocol error or an invalid certificate is presented
-
-\footnotesize
-\begin{verbatim}
-bool tls_bsock_accept (BSOCK *bsock);
-\end{verbatim}
-\normalsize
-
-Accepts a TLS client connection via \emph{bsock}. Returns \emph{true} if
-successful, \emph{false} otherwise. Will fail if there is a TLS protocol
-error or an invalid certificate is presented.
-
-\footnotesize
-\begin{verbatim}
-bool tls_bsock_shutdown (BSOCK *bsock);
-\end{verbatim}
-\normalsize
-
-Issues a blocking TLS shutdown request to the peer via \emph{bsock}. This function may not wait for the peer's reply.
-
-\footnotesize
-\begin{verbatim}
-int tls_bsock_writen (BSOCK *bsock, char *ptr, int32_t nbytes);
-\end{verbatim}
-\normalsize
-
-Writes \emph{nbytes} from \emph{ptr} via the \emph{TLS\_CONNECTION}
-associated with \emph{bsock}. Due to OpenSSL's handling of \emph{EINTR},
-\emph{bsock} is set non-blocking at the start of the function, and restored
-to its original blocking state before the function returns. Less than
-\emph{nbytes} may be written if an error occurs. The actual number of
-bytes written will be returned.
-
-\footnotesize
-\begin{verbatim}
-int tls_bsock_readn (BSOCK *bsock, char *ptr, int32_t nbytes);
-\end{verbatim}
-\normalsize
-
-Reads \emph{nbytes} from the \emph{TLS\_CONNECTION} associated with
-\emph{bsock} and stores the result in \emph{ptr}. Due to OpenSSL's
-handling of \emph{EINTR}, \emph{bsock} is set non-blocking at the start of
-the function, and restored to its original blocking state before the
-function returns. Less than \emph{nbytes} may be read if an error occurs.
-The actual number of bytes read will be returned.
-
-\section{Bnet API Changes}
-\index{Bnet API Changes}
-\index{API Changes!Bnet}
-\addcontentsline{toc}{section}{Bnet API Changes}
-
-A minimal number of changes were required in the Bnet socket API. The BSOCK
-structure was expanded to include an associated TLS\_CONNECTION structure,
-as well as a flag to designate the current blocking state of the socket.
-The blocking state flag is required for win32, where it does not appear
-possible to discern the current blocking state of a socket.
-
-\subsection{Negotiating a TLS Connection}
-\index{Negotiating a TLS Connection}
-\index{TLS Connection!Negotiating}
-\addcontentsline{toc}{subsection}{Negotiating a TLS Connection}
-
-\emph{bnet\_tls\_server()} and \emph{bnet\_tls\_client()} were both
-implemented using the new TLS API as follows:
-
-\footnotesize
-\begin{verbatim}
-int bnet_tls_client(TLS_CONTEXT *ctx, BSOCK * bsock);
-\end{verbatim}
-\normalsize
-
-Negotiates a TLS session via \emph{bsock} using the settings from
-\emph{ctx}. Returns 1 if successful, 0 otherwise.
-
-\footnotesize
-\begin{verbatim}
-int bnet_tls_server(TLS_CONTEXT *ctx, BSOCK * bsock, alist *verify_list);
-\end{verbatim}
-\normalsize
-
-Accepts a TLS client session via \emph{bsock} using the settings from
-\emph{ctx}. If \emph{verify\_list} is non-NULL, it is passed to
-\emph{tls\_postconnect\_verify\_cn()} for client certificate verification.
-
-\subsection{Manipulating Socket Blocking State}
-\index{Manipulating Socket Blocking State}
-\index{Socket Blocking State!Manipulating}
-\index{Blocking State!Socket!Manipulating}
-\addcontentsline{toc}{subsection}{Manipulating Socket Blocking State}
-
-Three functions were added for manipulating the blocking state of a socket
-on both Win32 and Unix-like systems. The Win32 code was written according
-to the MSDN documentation, but has not been tested.
-
-These functions are prototyped as follows:
-
-\footnotesize
-\begin{verbatim}
-int bnet_set_nonblocking (BSOCK *bsock);
-\end{verbatim}
-\normalsize
-
-Enables non-blocking I/O on the socket associated with \emph{bsock}.
-Returns a copy of the socket flags prior to modification.
-
-\footnotesize
-\begin{verbatim}
-int bnet_set_blocking (BSOCK *bsock);
-\end{verbatim}
-\normalsize
-
-Enables blocking I/O on the socket associated with \emph{bsock}. Returns a
-copy of the socket flags prior to modification.
-
-\footnotesize
-\begin{verbatim}
-void bnet_restore_blocking (BSOCK *bsock, int flags);
-\end{verbatim}
-\normalsize
-
-Restores blocking or non-blocking IO setting on the socket associated with
-\emph{bsock}. The \emph{flags} argument must be the return value of either
-\emph{bnet\_set\_blocking()} or \emph{bnet\_restore\_blocking()}.
-
-\pagebreak
-
-\section{Authentication Negotiation}
-\index{Authentication Negotiation}
-\index{Negotiation!TLS Authentication}
-\addcontentsline{toc}{section}{Authentication Negotiation}
-
-Backwards compatibility with the existing SSL negotiation hooks implemented
-in src/lib/cram-md5.c have been maintained. The
-\emph{cram\_md5\_get\_auth()} function has been modified to accept an
-integer pointer argument, tls\_remote\_need. The TLS requirement
-advertised by the remote host is returned via this pointer.
-
-After exchanging cram-md5 authentication and TLS requirements, both the
-client and server independently decide whether to continue:
-
-\footnotesize
-\begin{verbatim}
-if (!cram_md5_get_auth(dir, password, &tls_remote_need) ||
- !cram_md5_auth(dir, password, tls_local_need)) {
-[snip]
-/* Verify that the remote host is willing to meet our TLS requirements */
-if (tls_remote_need < tls_local_need && tls_local_need != BNET_TLS_OK &&
- tls_remote_need != BNET_TLS_OK) {
- sendit(_("Authorization problem:"
- " Remote server did not advertise required TLS support.\n"));
- auth_success = false;
- goto auth_done;
-}
-
-/* Verify that we are willing to meet the remote host's requirements */
-if (tls_remote_need > tls_local_need && tls_local_need != BNET_TLS_OK &&
- tls_remote_need != BNET_TLS_OK) {
- sendit(_("Authorization problem:"
- " Remote server requires TLS.\n"));
- auth_success = false;
- goto auth_done;
-}
-\end{verbatim}
-\normalsize
+++ /dev/null
-#!/usr/bin/perl -w
-#
-use strict;
-
-# Used to change the names of the image files generated by latex2html from imgxx.png
-# to meaningful names. Provision is made to go either from or to the meaningful names.
-# The meaningful names are obtained from a file called imagename_translations, which
-# is generated by extensions to latex2html in the make_image_file subroutine in
-# bacula.perl.
-
-# Opens the file imagename_translations and reads the contents into a hash.
-# The hash is creaed with the imgxx.png files as the key if processing TO
-# meaningful filenames, and with the meaningful filenames as the key if
-# processing FROM meaningful filenames.
-# Then opens the html file(s) indicated in the command-line arguments and
-# changes all image references according to the translations described in the
-# above file. Finally, it renames the image files.
-#
-# Original creation: 3-27-05 by Karl Cunningham.
-# Modified 5-21-05 to go FROM and TO meaningful filenames.
-#
-my $TRANSFILE = "imagename_translations";
-my $path;
-
-# Loads the contents of $TRANSFILE file into the hash referenced in the first
-# argument. The hash is loaded to translate old to new if $direction is 0,
-# otherwise it is loaded to translate new to old. In this context, the
-# 'old' filename is the meaningful name, and the 'new' filename is the
-# imgxx.png filename. It is assumed that the old image is the one that
-# latex2html has used as the source to create the imgxx.png filename.
-# The filename extension is taken from the file
-sub read_transfile {
- my ($trans,$direction) = @_;
-
- if (!open IN,"<$path$TRANSFILE") {
- print "WARNING: Cannot open image translation file $path$TRANSFILE for reading\n";
- print " Image filename translation aborted\n\n";
- exit 0;
- }
-
- while (<IN>) {
- chomp;
- my ($new,$old) = split(/\001/);
-
- # Old filenames will usually have a leading ./ which we don't need.
- $old =~ s/^\.\///;
-
- # The filename extension of the old filename must be made to match
- # the new filename because it indicates the encoding format of the image.
- my ($ext) = $new =~ /(\.[^\.]*)$/;
- $old =~ s/\.[^\.]*$/$ext/;
- if ($direction == 0) {
- $trans->{$new} = $old;
- } else {
- $trans->{$old} = $new;
- }
- }
- close IN;
-}
-
-# Translates the image names in the file given as the first argument, according to
-# the translations in the hash that is given as the second argument.
-# The file contents are read in entirely into a string, the string is processed, and
-# the file contents are then written. No particular care is taken to ensure that the
-# file is not lost if a system failure occurs at an inopportune time. It is assumed
-# that the html files being processed here can be recreated on demand.
-#
-# Links to other files are added to the %filelist for processing. That way,
-# all linked files will be processed (assuming they are local).
-sub translate_html {
- my ($filename,$trans,$filelist) = @_;
- my ($contents,$out,$this,$img,$dest);
- my $cnt = 0;
-
- # If the filename is an external link ignore it. And drop any file:// from
- # the filename.
- $filename =~ /^(http|ftp|mailto)\:/ and return 0;
- $filename =~ s/^file\:\/\///;
- # Load the contents of the html file.
- if (!open IF,"<$path$filename") {
- print "WARNING: Cannot open $path$filename for reading\n";
- print " Image Filename Translation aborted\n\n";
- exit 0;
- }
-
- while (<IF>) {
- $contents .= $_;
- }
- close IF;
-
- # Now do the translation...
- # First, search for an image filename.
- while ($contents =~ /\<\s*IMG[^\>]*SRC=\"/si) {
- $contents = $';
- $out .= $` . $&;
-
- # The next thing is an image name. Get it and translate it.
- $contents =~ /^(.*?)\"/s;
- $contents = $';
- $this = $&;
- $img = $1;
- # If the image is in our list of ones to be translated, do it
- # and feed the result to the output.
- $cnt += $this =~ s/$img/$trans->{$img}/ if (defined($trans->{$img}));
- $out .= $this;
- }
- $out .= $contents;
-
- # Now send the translated text to the html file, overwriting what's there.
- open OF,">$path$filename" or die "Cannot open $path$filename for writing\n";
- print OF $out;
- close OF;
-
- # Now look for any links to other files and add them to the list of files to do.
- while ($out =~ /\<\s*A[^\>]*HREF=\"(.*?)\"/si) {
- $out = $';
- $dest = $1;
- # Drop an # and anything after it.
- $dest =~ s/\#.*//;
- $filelist->{$dest} = '' if $dest;
- }
- return $cnt;
-}
-
-# REnames the image files spefified in the %translate hash.
-sub rename_images {
- my $translate = shift;
- my ($response);
-
- foreach (keys(%$translate)) {
- if (! $translate->{$_}) {
- print " WARNING: No destination Filename for $_\n";
- } else {
- $response = `mv -f $path$_ $path$translate->{$_} 2>&1`;
- $response and print "ERROR from system $response\n";
- }
- }
-}
-
-#################################################
-############# MAIN #############################
-################################################
-
-# %filelist starts out with keys from the @ARGV list. As files are processed,
-# any links to other files are added to the %filelist. A hash of processed
-# files is kept so we don't do any twice.
-
-# The first argument must be either --to_meaningful_names or --from_meaningful_names
-
-my (%translate,$search_regex,%filelist,%completed,$thisfile);
-my ($cnt,$direction);
-
-my $arg0 = shift(@ARGV);
-$arg0 =~ /^(--to_meaningful_names|--from_meaningful_names)$/ or
- die "ERROR: First argument must be either \'--to_meaningful_names\' or \'--from_meaningful_names\'\n";
-
-$direction = ($arg0 eq '--to_meaningful_names') ? 0 : 1;
-
-(@ARGV) or die "ERROR: Filename(s) to process must be given as arguments\n";
-
-# Use the first argument to get the path to the file of translations.
-my $tmp = $ARGV[0];
-($path) = $tmp =~ /(.*\/)/;
-$path = '' unless $path;
-
-read_transfile(\%translate,$direction);
-
-foreach (@ARGV) {
- # Strip the path from the filename, and use it later on.
- if (s/(.*\/)//) {
- $path = $1;
- } else {
- $path = '';
- }
- $filelist{$_} = '';
-
- while ($thisfile = (keys(%filelist))[0]) {
- $cnt += translate_html($thisfile,\%translate,\%filelist) if (!exists($completed{$thisfile}));
- delete($filelist{$thisfile});
- $completed{$thisfile} = '';
- }
- print "translate_images.pl: $cnt image filenames translated ",($direction)?"from":"to"," meaningful names\n";
-}
-
-rename_images(\%translate);
+++ /dev/null
-2.2.6 (10 November 2007)
+++ /dev/null
-@VERSION@ (@DATE@)
# Script file to update the Bacula version
#
out=/tmp/$$
-VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' /home/kern/bacula/Branch-2.2/bacula/src/version.h`
-DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' /home/kern/bacula/Branch-2.2/bacula/src/version.h`
+VERSION=`sed -n -e 's/^.*VERSION.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h`
+DATE=`sed -n -e 's/^.*[ \t]*BDATE.*"\(.*\)"$/\1/p' /home/kern/bacula/k/src/version.h`
. ./do_echo
sed -f ${out} version.tex.in >version.tex
rm -f ${out}
-2.2.6 (10 November 2007)
+2.3.6 (04 November 2007)