#
# make
#
+# for rapid development do:
+# make tex
+# make show
+#
+#
+# If you are having problems getting "make" to work, debugging it is
+# easier if you can see the output from latex, which is normally redirected
+# to /dev/null. To see it, do the following:
+#
+# cd docs/manual
+# make tex
+# latex bacula.tex
+#
+# typically the latex command will stop, indicating the error (e.g. a
+# missing \ in front of a _, or a missing { or } ...).
+#
+# The following characters must be preceded by a backslash
+# to be entered as printable characters:
+#
+# # $ % & ~ _ ^ \ { }
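+#
+# For example, to print "50%" or "job_name" in the manual, write
+# "50\%" and "job\_name" in the .tex source.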
+#
IMAGES=../../../images
+MANUALSDIR=../..
DOC=developers
+BSYSMANUALDIR=../../../bsysmanual
+COVERSDIR=../../../covers
+PDFCOVERSDIR=$(COVERSDIR)/pdf
+SVGCOVERSDIR=$(COVERSDIR)/svg
+EPSCOVERSDIR=$(COVERSDIR)/eps
+LICENSESDIR=$(MANUALSDIR)/licences
+COVERNAME=coverpage-developers
+BSYSMANNAME=bsysmanual-coverpagebackground
+LICENCES=$(wildcard $(LICENSESDIR)/*.tex)
+BSYSCOMPILERFILE=bsys-compiler-mode.tex
+PDFCOMPILERFILE=$(MANUALSDIR)/bsys-pdflatex-mode.tex
+TEXCOMPILERFILE=$(MANUALSDIR)/bsys-latex-mode.tex
+WEBCOMPILERFILE=$(MANUALSDIR)/bsys-web-mode.tex
first_rule: all
.DONTCARE:
-tex:
+pdfcovers:
+ @echo -n "Linking coverpage and background PDF format..."
+ @(cd $(SVGCOVERSDIR) ; make pdf)
+ @ln -sf `pwd`/${PDFCOVERSDIR}/${COVERNAME}.pdf `pwd`/${BSYSMANUALDIR}/${BSYSMANNAME}.pdf
+ @echo "Done."
+
+pdfimages:
+ @echo "Generating PDF images..."
+ @(cd ${IMAGES}/svg ; make pdf)
+ @echo "Done."
+
+pngimages:
+ @echo "Generating PNG images..."
+ @(cd ${IMAGES}/svg ; make png)
+ @echo "Done."
+
+epsimages:
+ @echo "Generating EPS images..."
+ @(cd ${IMAGES}/svg ; make eps)
+ @rm -rf ${IMAGES}/png
+ @rm -rf ${IMAGES}/pdf
+ @echo "Done."
+
+epscovers:
+ @echo -n "Linking coverpage and background EPS format..."
+ @(cd $(SVGCOVERSDIR) ; make eps)
+ @ln -sf `pwd`/${EPSCOVERSDIR}/${COVERNAME}.eps `pwd`/${BSYSMANUALDIR}/${BSYSMANNAME}.eps
+ @rm -f `pwd`/${BSYSMANUALDIR}/${BSYSMANNAME}.pdf
+ @echo "Done."
+
+commonfiles:
+ @echo -n "Linking shared files..."
+ @(for L in $(LICENCES); do ln -sf $$L .; done)
+ @echo "Done"
+
+tex: epscovers epsimages commonfiles
@../../update_version
- @cp -fp ${IMAGES}/hires/*.eps .
+ @ln -sf $(TEXCOMPILERFILE) $(BSYSCOMPILERFILE)
+# @cp -fp ${IMAGES}/hires/*.eps .
touch ${DOC}.idx ${DOC}i-general.tex
-latex -interaction=batchmode ${DOC}.tex
makeindex ${DOC}.idx >/dev/null 2>/dev/null
-latex -interaction=batchmode ${DOC}.tex
-pdf:
- @echo "Making ${DOC} pdf"
- @cp -fp ${IMAGES}/hires/*.eps .
- dvipdf ${DOC}.dvi ${DOC}.pdf
- @rm -f *.eps *.old
-
-dvipdf:
- @echo "Making ${DOC} pdfm"
- @cp -fp ${IMAGES}/hires/*.eps .
- dvipdfm -p a4 ${DOC}.dvi >tex.out 2>&1
+pdflatex: pdfcovers pdfimages commonfiles
+ @ln -sf $(PDFCOMPILERFILE) $(BSYSCOMPILERFILE)
+ pdflatex -interaction=batchmode ${DOC}.tex
+ makeindex ${DOC}.idx -o ${DOC}.ind 2>/dev/null
+ makeindex ${DOC}.ddx -o ${DOC}.dnd >/dev/null 2>/dev/null
+ makeindex ${DOC}.fdx -o ${DOC}.fnd >/dev/null 2>/dev/null
+ makeindex ${DOC}.sdx -o ${DOC}.snd >/dev/null 2>/dev/null
+ makeindex ${DOC}.cdx -o ${DOC}.cnd >/dev/null 2>/dev/null
+ pdflatex -interaction=batchmode ${DOC}.tex
+ pdflatex -interaction=batchmode ${DOC}.tex
html:
@echo "Making ${DOC} html"
- @cp -fp ${IMAGES}/*.eps .
+# @cp -fp ${IMAGES}/*.eps .
@rm -f next.eps next.png prev.eps prev.png up.eps up.png
@touch ${DOC}.html
@(if [ -f imagename_translations ] ; then \
@rm -rf ${DOC}
@mkdir -p ${DOC}
@rm -f ${DOC}/*
- @cp -fp ${IMAGES}/*.eps .
+# @cp -fp ${IMAGES}/*.eps .
@rm -f next.eps next.png prev.eps prev.png up.eps up.png
@(if [ -f ${DOC}/imagename_translations ] ; then \
./translate_images.pl --to_meaningful_names ${DOC}/Developer*Guide.html; \
This chapter is intended to be a technical discussion of the Catalog services
and as such is not targeted at end users but rather at developers and system
administrators that want or need to know more of the working details of {\bf
-Bacula}.
+Bacula}.
The {\bf Bacula Catalog} services consist of the programs that provide the SQL
database engine for storage and retrieval of all information concerning files
-that were backed up and their locations on the storage media.
+that were backed up and their locations on the storage media.
We have investigated the possibility of using the following SQL engines for
Bacula: Beagle, mSQL, GNU SQL, PostgreSQL, SQLite, Oracle, and MySQL. Each
The Bacula SQL code has been written in a manner that will allow it to be
easily modified to support any of the current SQL database systems on the
market (for example: mSQL, iODBC, unixODBC, Solid, OpenLink ODBC, EasySoft
-ODBC, InterBase, Oracle8, Oracle7, and DB2).
+ODBC, InterBase, Oracle8, Oracle7, and DB2).
-If you do not specify either {\bf \verb{--{with-mysql} or {\bf \verb{--{with-postgresql} or
-{\bf \verb{--{with-sqlite} on the ./configure line, Bacula will use its minimalist
+If you do not specify \lstinline+--with-mysql+, \lstinline+--with-postgresql+, or
+\lstinline+--with-sqlite+ on the ./configure line, Bacula will use its minimalist
internal database. This database is kept for build reasons but is no longer
supported. Bacula {\bf requires} one of the three databases (MySQL,
-PostgreSQL, or SQLite) to run.
+PostgreSQL, or SQLite) to run.
\subsection{Filenames and Maximum Filename Length}
\index[general]{Filenames and Maximum Filename Length }
these restrictions apply only to the Catalog database and thus to your ability
to list online the files saved during any job. All information received and
stored by the Storage daemon (normally on tape) allows and handles arbitrarily
-long path and filenames.
+long path and filenames.
\subsection{Installing and Configuring MySQL}
\index[general]{MySQL!Installing and Configuring }
\index[general]{Installing and Configuring MySQL }
-\addcontentsline{toc}{subsubsection}{Installing and Configuring MySQL}
+%\addcontentsline{toc}{subsubsection}{Installing and Configuring MySQL}
-For the details of installing and configuring MySQL, please see the
-\ilink{Installing and Configuring MySQL}{_ChapterStart} chapter of
-this manual.
+For the details of installing and configuring MySQL, please see the
+\bsysxrlink{Installing and Configuring MySQL}{MySqlChapter}{main}{chapter} of
+the \mainman{}.
\subsection{Installing and Configuring PostgreSQL}
\index[general]{PostgreSQL!Installing and Configuring }
\index[general]{Installing and Configuring PostgreSQL }
-\addcontentsline{toc}{subsubsection}{Installing and Configuring PostgreSQL}
+%\addcontentsline{toc}{subsubsection}{Installing and Configuring PostgreSQL}
-For the details of installing and configuring PostgreSQL, please see the
-\ilink{Installing and Configuring PostgreSQL}{_ChapterStart10}
-chapter of this manual.
+For the details of installing and configuring PostgreSQL, please see the
+\bsysxrlink{Installing and Configuring PostgreSQL}{PostgreSqlChapter}{main}{chapter}
+ of the \mainman{}.
\subsection{Installing and Configuring SQLite}
\index[general]{Installing and Configuring SQLite }
\index[general]{SQLite!Installing and Configuring }
-\addcontentsline{toc}{subsubsection}{Installing and Configuring SQLite}
+%\addcontentsline{toc}{subsubsection}{Installing and Configuring SQLite}
-For the details of installing and configuring SQLite, please see the
-\ilink{Installing and Configuring SQLite}{_ChapterStart33} chapter of
-this manual.
+For the details of installing and configuring SQLite, please see the
+\bsysxrlink{Installing and Configuring SQLite}{SqlLiteChapter}{main}{chapter} of
+the \mainman{}.
\subsection{Internal Bacula Catalog}
\index[general]{Catalog!Internal Bacula }
\index[general]{Internal Bacula Catalog }
-\addcontentsline{toc}{subsubsection}{Internal Bacula Catalog}
+%\addcontentsline{toc}{subsubsection}{Internal Bacula Catalog}
-Please see the
-\ilink{Internal Bacula Database}{_ChapterStart42} chapter of this
-manual for more details.
+Please see the \bsysxrlink{Internal Bacula Database}
+{chap:InternalBaculaDatabase}{misc}{chapter} of the \miscman{} for more details.
\subsection{Database Table Design}
\index[general]{Design!Database Table }
\index[general]{Database Table Design }
-\addcontentsline{toc}{subsubsection}{Database Table Design}
+%\addcontentsline{toc}{subsubsection}{Database Table Design}
All discussions that follow pertain to the MySQL database. The details for the
PostgreSQL and SQLite databases are essentially identical, except that all
fields in the SQLite database are stored as ASCII text and some of the
database creation statements are a bit different. The details of the internal
-Bacula catalog are not discussed here.
+Bacula catalog are not discussed here.
Because the Catalog database may contain very large amounts of data for large
sites, we have made a modest attempt to normalize the data tables to reduce
redundant information. While reducing the size of the database significantly,
-it does, unfortunately, add some complications to the structures.
+it does, unfortunately, add some complications to the structures.
In simple terms, the Catalog database must contain a record of all Jobs run by
Bacula, and for each Job, it must maintain a list of all files saved, with
attributes is not maintained when using the internal Bacula database. The data
stored in the File records, which allows the user or administrator to obtain a
list of all files backed up during a job, is by far the largest volume of
-information put into the Catalog database.
+information put into the Catalog database.
Although the Catalog database has been designed to handle backup data for
multiple clients, some users may want to maintain multiple databases, one for
each machine to be backed up. This reduces the risk of confusion of accidental
restoring a file to the wrong machine as well as reducing the amount of data
in a single database, thus increasing efficiency and reducing the impact of a
-lost or damaged database.
+lost or damaged database.
\section{Sequence of Creation of Records for a Save Job}
\index[general]{Sequence of Creation of Records for a Save Job }
means to create a new record whether or not it is unique. ``Create unique''
means each record in the database should be unique. Thus, one must first
search to see if the record exists, and only if not should a new one be
-created, otherwise the existing RecordId should be used.
created; otherwise, the existing RecordId should be used.
\begin{enumerate}
-\item Create new Job record with StartDate; save JobId
-\item Create unique Media record; save MediaId
-\item Create unique Client record; save ClientId
-\item Create unique Filename record; save FilenameId
-\item Create unique Path record; save PathId
-\item Create unique Attribute record; save AttributeId
- store ClientId, FilenameId, PathId, and Attributes
-\item Create new File record
- store JobId, AttributeId, MediaCoordinates, etc
-\item Repeat steps 4 through 8 for each file
-\item Create a JobMedia record; save MediaId
-\item Update Job record filling in EndDate and other Job statistics
+\item Create new Job record with StartDate; save JobId
+\item Create unique Media record; save MediaId
+\item Create unique Client record; save ClientId
+\item Create unique Filename record; save FilenameId
+\item Create unique Path record; save PathId
+\item Create unique Attribute record; save AttributeId
+ store ClientId, FilenameId, PathId, and Attributes
+\item Create new File record
+ store JobId, AttributeId, MediaCoordinates, etc
+\item Repeat steps 4 through 7 for each file
+\item Create a JobMedia record; save MediaId
+\item Update Job record filling in EndDate and other Job statistics
\end{enumerate}
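
As an illustration of the ``create unique'' logic, the lookup in step 4 might
be sketched in SQL as follows (the filename shown is arbitrary):

\footnotesize
\begin{lstlisting}
-- First search for an existing record ...
SELECT FilenameId FROM Filename WHERE Name='passwd';
-- ... and only if no row is returned, create a new one;
-- otherwise reuse the FilenameId from the SELECT.
INSERT INTO Filename (Name) VALUES ('passwd');
\end{lstlisting}
\normalsize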
\section{Database Tables}
\index[general]{Database Tables }
\index[general]{Tables!Database }
-\addcontentsline{toc}{subsection}{Database Tables}
-
-\addcontentsline{lot}{table}{Filename Table Layout}
-\begin{longtable}{|l|l|l|}
- \hline
-\multicolumn{3}{|l| }{\bf Filename } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{l| }{\bf Data Type }
-& \multicolumn{1}{l| }{\bf Remark } \\
- \hline
-{FilenameId } & {integer } & {Primary Key } \\
- \hline
-{Name } & {Blob } & {Filename }
-\\ \hline
-
-\end{longtable}
-
-The {\bf Filename} table shown above contains the name of each file backed up
+%\addcontentsline{toc}{subsection}{Database Tables}
+%\addcontentsline{lot}{table}{Filename Table Layout}
+\LTXtable{\linewidth}{table_dbfilename}
+
+The {\bf Filename} table \bsysref{table:dbfilename} contains the name of each file backed up
with the path removed. If different directories or machines contain the same
-filename, only one copy will be saved in this table.
-
-\
-
-\addcontentsline{lot}{table}{Path Table Layout}
-\begin{longtable}{|l|l|l|}
- \hline
-\multicolumn{3}{|l| }{\bf Path } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type
-} & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{PathId } & {integer } & {Primary Key } \\
- \hline
-{Path } & {Blob } & {Full Path }
-\\ \hline
-
-\end{longtable}
-
-The {\bf Path} table contains shown above the path or directory names of all
+filename, only one copy will be saved in this table.
+
+%\addcontentsline{lot}{table}{Path Table Layout}
+\LTXtable{\linewidth}{table_dbpath}
+
+The {\bf Path} table \bsysref{table:dbpath} contains the path or directory names of all
directories on the system or systems. The filename and any MSDOS disk name are
stripped off. As with the filename, only one copy of each directory name is
kept regardless of how many machines or drives have the same directory. These
-path names should be stored in Unix path name format.
+path names should be stored in Unix path name format.
Some simple testing on a Linux file system indicates that separating the
filename and the path may be more complication than is warranted by the space
savings. For example, this system has a total of 89,097 files, 60,467 of which
-have unique filenames, and there are 4,374 unique paths.
+have unique filenames, and there are 4,374 unique paths.
Finding all those files and doing two stats() per file takes an average wall
-clock time of 1 min 35 seconds on a 400MHz machine running RedHat 6.1 Linux.
+clock time of 1 min 35 seconds on a 400MHz machine running RedHat 6.1 Linux.
Finding all those files and putting them directly into a MySQL database with
the path and filename defined as TEXT, which is variable length up to 65,535
-characters takes 19 mins 31 seconds and creates a 27.6 MByte database.
+characters, takes 19 mins 31 seconds and creates a 27.6 MByte database.
Doing the same thing, but inserting them into Blob fields with the filename
indexed on the first 30 characters and the path name indexed on the 255 (max)
characters takes 5 mins 18 seconds and creates a 5.24 MB database. Rerunning
-the job (with the database already created) takes about 2 mins 50 seconds.
+the job (with the database already created) takes about 2 mins 50 seconds.
Running the same as the last one (Path and Filename Blob), but Filename
indexed on the first 30 characters and the Path on the first 50 characters
(linear search done thereafter) takes 5 mins on average and creates a 3.4
MB database. Rerunning with the data already in the DB takes 3 mins 35
-seconds.
+seconds.
Finally, saving only the full path name rather than splitting the path and the
file, and indexing it on the first 50 characters takes 6 mins 43 seconds and
-creates a 7.35 MB database.
-
-\
-
-\addcontentsline{lot}{table}{File Table Layout}
-\begin{longtable}{|l|l|l|}
- \hline
-\multicolumn{3}{|l| }{\bf File } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type
-} & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{FileId } & {integer } & {Primary Key } \\
- \hline
-{FileIndex } & {integer } & {The sequential file number in the Job } \\
- \hline
-{JobId } & {integer } & {Link to Job Record } \\
- \hline
-{PathId } & {integer } & {Link to Path Record } \\
- \hline
-{FilenameId } & {integer } & {Link to Filename Record } \\
- \hline
-{MarkId } & {integer } & {Used to mark files during Verify Jobs } \\
- \hline
-{LStat } & {tinyblob } & {File attributes in base64 encoding } \\
- \hline
-{MD5 } & {tinyblob } & {MD5/SHA1 signature in base64 encoding }
-\\ \hline
-
-\end{longtable}
-
-The {\bf File} table shown above contains one entry for each file backed up by
+creates a 7.35 MB database.
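+
+For reference, a prefix index of the kind used in these tests can be declared
+in MySQL as in the following sketch (the index name is arbitrary):
+
+\footnotesize
+\begin{lstlisting}
+-- index only the first 30 characters of the (Blob) filename
+CREATE INDEX FilenameIdx ON Filename (Name(30));
+\end{lstlisting}
+\normalsize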
+
+
+%\addcontentsline{lot}{table}
+\LTXtable{\linewidth}{table_dbfile}
+
+The {\bf File} table \bsysref{table:dbfile} contains one entry for each file backed up by
Bacula. Thus a file that is backed up multiple times (as is normal) will have
multiple entries in the File table. This will probably be the table with the
largest number of records. Consequently, it is essential to keep the size of this
record to an absolute minimum. At the same time, this table must contain all
the information (or pointers to the information) about the file and where it
is backed up. Since a file may be backed up many times without having changed,
-the path and filename are stored in separate tables.
+the path and filename are stored in separate tables.
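
To illustrate how the normalized tables fit back together, a query of the
following form (a sketch; the JobId shown is arbitrary) reconstructs the full
path and name of every file saved by a given job:

\footnotesize
\begin{lstlisting}
SELECT Path.Path, Filename.Name
  FROM File, Path, Filename
 WHERE File.JobId=1
   AND Path.PathId=File.PathId
   AND Filename.FilenameId=File.FilenameId;
\end{lstlisting}
\normalsize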
This table contains by far the largest amount of information in the Catalog
database, both from the standpoint of the number of records and the standpoint
of total database size. As a consequence, the user must take care to
periodically reduce the number of File records using the {\bf retention}
-command in the Console program.
-
-\
-
-\addcontentsline{lot}{table}{Job Table Layout}
-\begin{longtable}{|l|l|p{2.5in}|}
- \hline
-\multicolumn{3}{|l| }{\bf Job } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type
-} & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{JobId } & {integer } & {Primary Key } \\
- \hline
-{Job } & {tinyblob } & {Unique Job Name } \\
- \hline
-{Name } & {tinyblob } & {Job Name } \\
- \hline
-{PurgedFiles } & {tinyint } & {Used by Bacula for purging/retention periods
-} \\
- \hline
-{Type } & {binary(1) } & {Job Type: Backup, Copy, Clone, Archive, Migration
-} \\
- \hline
-{Level } & {binary(1) } & {Job Level } \\
- \hline
-{ClientId } & {integer } & {Client index } \\
- \hline
-{JobStatus } & {binary(1) } & {Job Termination Status } \\
- \hline
-{SchedTime } & {datetime } & {Time/date when Job scheduled } \\
- \hline
-{StartTime } & {datetime } & {Time/date when Job started } \\
- \hline
-{EndTime } & {datetime } & {Time/date when Job ended } \\
- \hline
-{RealEndTime } & {datetime } & {Time/date when original Job ended } \\
- \hline
-{JobTDate } & {bigint } & {Start day in Unix format but 64 bits; used for
-Retention period. } \\
- \hline
-{VolSessionId } & {integer } & {Unique Volume Session ID } \\
- \hline
-{VolSessionTime } & {integer } & {Unique Volume Session Time } \\
- \hline
-{JobFiles } & {integer } & {Number of files saved in Job } \\
- \hline
-{JobBytes } & {bigint } & {Number of bytes saved in Job } \\
- \hline
-{JobErrors } & {integer } & {Number of errors during Job } \\
- \hline
-{JobMissingFiles } & {integer } & {Number of files not saved (not yet used) }
-\\
- \hline
-{PoolId } & {integer } & {Link to Pool Record } \\
- \hline
-{FileSetId } & {integer } & {Link to FileSet Record } \\
- \hline
-{PrioJobId } & {integer } & {Link to prior Job Record when migrated } \\
- \hline
-{PurgedFiles } & {tiny integer } & {Set when all File records purged } \\
- \hline
-{HasBase } & {tiny integer } & {Set when Base Job run }
-\\ \hline
-
-\end{longtable}
-
-The {\bf Job} table contains one record for each Job run by Bacula. Thus
+command in the Console program.
+
+%\addcontentsline{lot}{table}{Job Table Layout}
+\LTXtable{\linewidth}{table_dbjob}
+
+The {\bf Job} table \bsysref{table:dbjob} contains one record for each Job run by Bacula. Thus
normally, there will be one per day per machine added to the database. Note,
the JobId is used to index Job records in the database, and it often is shown
to the user in the Console program. However, care must be taken with its use
a database for Client data saved on machine Rufus and another database for
Client data saved on machine Roxie. In this case, the two databases will each
have JobIds that match those in the other database. For a unique reference to a
-Job, see Job below.
+Job, see Job below.
The Name field of the Job record corresponds to the Name resource record given
in the Director's configuration file. Thus it is a generic name, and it will
-be normal to find many Jobs (or even all Jobs) with the same Name.
+be normal to find many Jobs (or even all Jobs) with the same Name.
The Job field contains a combination of the Name and the schedule time of the
Job by the Director. Thus for a given Director, even with multiple Catalog
-databases, the Job will contain a unique name that represents the Job.
+databases, the Job will contain a unique name that represents the Job.
For a given Storage daemon, the VolSessionId and VolSessionTime form a unique
identification of the Job. This will be the case even if multiple Directors
-are using the same Storage daemon.
-
-The Job Type (or simply Type) can have one of the following values:
-
-\addcontentsline{lot}{table}{Job Types}
-\begin{longtable}{|l|l|}
- \hline
-\multicolumn{1}{|c| }{\bf Value } & \multicolumn{1}{c| }{\bf Meaning } \\
- \hline
-{B } & {Backup Job } \\
- \hline
-{M } & {Migrated Job } \\
- \hline
-{V } & {Verify Job } \\
- \hline
-{R } & {Restore Job } \\
- \hline
-{C } & {Console program (not in database) } \\
- \hline
-{I } & {Internal or system Job } \\
- \hline
-{D } & {Admin Job } \\
- \hline
-{A } & {Archive Job (not implemented) }
-\\ \hline
-{C } & {Copy Job } \\
- \hline
-{g } & {Migration Job } \\
- \hline
-
-\end{longtable}
-Note, the Job Type values noted above are not kept in an SQL table.
+are using the same Storage daemon.
+
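+Because the Name is generic while the Job field is unique, a sketch such as
+the following (the Name shown is arbitrary) would typically return one row
+per run of that Job:
+
+\footnotesize
+\begin{lstlisting}
+SELECT JobId, Job, StartTime FROM Job
+ WHERE Name='NightlySave' ORDER BY StartTime;
+\end{lstlisting}
+\normalsize
+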
+The Job Type (or simply Type) can have one of the following values:
+
+%\addcontentsline{lot}{table}{Job Types}
+\LTXtable{\linewidth}{table_dbjobtypes}
+Note, the Job Type values in table \bsysref{table:dbjobtypes} are not kept in an SQL table.
The JobStatus field specifies how the job terminated, and can be one of the
-following:
-
-\addcontentsline{lot}{table}{Job Statuses}
-\begin{longtable}{|l|l|}
- \hline
-\multicolumn{1}{|c| }{\bf Value } & \multicolumn{1}{c| }{\bf Meaning } \\
- \hline
-{C } & {Created but not yet running } \\
- \hline
-{R } & {Running } \\
- \hline
-{B } & {Blocked } \\
- \hline
-{T } & {Terminated normally } \\
- \hline
-{W } & {Terminated normally with warnings }
-\\ \hline
-{E } & {Terminated in Error } \\
- \hline
-{e } & {Non-fatal error } \\
- \hline
-{f } & {Fatal error } \\
- \hline
-{D } & {Verify Differences } \\
- \hline
-{A } & {Canceled by the user } \\
- \hline
-{I } & {Incomplete Job }
-\\ \hline
-{F } & {Waiting on the File daemon } \\
- \hline
-{S } & {Waiting on the Storage daemon } \\
- \hline
-{m } & {Waiting for a new Volume to be mounted } \\
- \hline
-{M } & {Waiting for a Mount } \\
- \hline
-{s } & {Waiting for Storage resource } \\
- \hline
-{j } & {Waiting for Job resource } \\
- \hline
-{c } & {Waiting for Client resource } \\
- \hline
-{d } & {Wating for Maximum jobs } \\
- \hline
-{t } & {Waiting for Start Time } \\
- \hline
-{p } & {Waiting for higher priority job to finish }
-\\ \hline
-{i } & {Doing batch insert file records }
-\\ \hline
-{a } & {SD despooling attributes }
-\\ \hline
-{l } & {Doing data despooling }
-\\ \hline
-{L } & {Committing data (last despool) }
-\\ \hline
-
-
-
-\end{longtable}
-
-\addcontentsline{lot}{table}{File Sets Table Layout}
-\begin{longtable}{|l|l|l|}
- \hline
-\multicolumn{3}{|l| }{\bf FileSet } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type\
-\ \ } & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{FileSetId } & {integer } & {Primary Key } \\
- \hline
-{FileSet } & {tinyblob } & {FileSet name } \\
- \hline
-{MD5 } & {tinyblob } & {MD5 checksum of FileSet } \\
- \hline
-{CreateTime } & {datetime } & {Time and date Fileset created }
-\\ \hline
-
-\end{longtable}
-
-The {\bf FileSet} table contains one entry for each FileSet that is used. The
+following:
+\LTXtable{\linewidth}{table_dbjobstatuses}
+
+
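+As a quick illustration, the distribution of termination statuses over all
+recorded jobs can be obtained with a query such as:
+
+\footnotesize
+\begin{lstlisting}
+SELECT JobStatus, COUNT(*) FROM Job GROUP BY JobStatus;
+\end{lstlisting}
+\normalsize
+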
+%\addcontentsline{lot}{table}{File Sets Table Layout}
+\LTXtable{\linewidth}{table_dbfileset}
+
+The {\bf FileSet} table \bsysref{table:dbfileset} contains one entry for each FileSet that is used. The
MD5 signature is kept to ensure that if the user changes anything inside the
FileSet, it will be detected and the new FileSet will be used. This is
particularly important when doing an incremental update. If the user deletes a
file or adds a file, we need to ensure that a Full backup is done prior to the
-next incremental.
-
-
-\addcontentsline{lot}{table}{JobMedia Table Layout}
-\begin{longtable}{|l|l|p{2.5in}|}
- \hline
-\multicolumn{3}{|l| }{\bf JobMedia } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type\
-\ \ } & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{JobMediaId } & {integer } & {Primary Key } \\
- \hline
-{JobId } & {integer } & {Link to Job Record } \\
- \hline
-{MediaId } & {integer } & {Link to Media Record } \\
- \hline
-{FirstIndex } & {integer } & {The index (sequence number) of the first file
-written for this Job to the Media } \\
- \hline
-{LastIndex } & {integer } & {The index of the last file written for this
-Job to the Media } \\
- \hline
-{StartFile } & {integer } & {The physical media (tape) file number of the
-first block written for this Job } \\
- \hline
-{EndFile } & {integer } & {The physical media (tape) file number of the
-last block written for this Job } \\
- \hline
-{StartBlock } & {integer } & {The number of the first block written for
-this Job } \\
- \hline
-{EndBlock } & {integer } & {The number of the last block written for this
-Job } \\
- \hline
-{VolIndex } & {integer } & {The Volume use sequence number within the Job }
-\\ \hline
-
-\end{longtable}
-
-The {\bf JobMedia} table contains one entry at the following: start of
+next incremental.
+
+
+%\addcontentsline{lot}{table}
+\LTXtable{\linewidth}{table_dbjobmedia}
+
+The {\bf JobMedia} table \bsysref{table:dbjobmedia} contains one entry for each of the following: the start of
the job, the start of each new tape file, the start of each new tape, and the end of the
job. Since by default, a new tape file is written every 2GB, in general,
you will have more than 2 JobMedia records per Job. The number can be
-varied by changing the "Maximum File Size" specified in the Device
+varied by changing the "Maximum File Size" specified in the Device
resource. This record allows Bacula to efficiently position close to
(within 2GB) any given file in a backup. For restoring a full Job,
these records are not very important, but if you want to retrieve
-a single file that was written near the end of a 100GB backup, the
+a single file that was written near the end of a 100GB backup, the
JobMedia records can speed it up by orders of magnitude by permitting
forward spacing files and blocks rather than reading the whole 100GB
backup.
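
For instance, the media coordinates recorded for a job can be inspected with
a sketch like the following (the JobId shown is arbitrary):

\footnotesize
\begin{lstlisting}
SELECT MediaId, FirstIndex, LastIndex, StartFile, EndFile
  FROM JobMedia WHERE JobId=1;
\end{lstlisting}
\normalsize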
-
-
-
-
-\addcontentsline{lot}{table}{Media Table Layout}
-\begin{longtable}{|l|l|p{2.4in}|}
- \hline
-\multicolumn{3}{|l| }{\bf Media } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type\
-\ \ } & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{MediaId } & {integer } & {Primary Key } \\
- \hline
-{VolumeName } & {tinyblob } & {Volume name } \\
- \hline
-{Slot } & {integer } & {Autochanger Slot number or zero } \\
- \hline
-{PoolId } & {integer } & {Link to Pool Record } \\
- \hline
-{MediaType } & {tinyblob } & {The MediaType supplied by the user } \\
- \hline
-{MediaTypeId } & {integer } & {The MediaTypeId } \\
- \hline
-{LabelType } & {tinyint } & {The type of label on the Volume } \\
- \hline
-{FirstWritten } & {datetime } & {Time/date when first written } \\
- \hline
-{LastWritten } & {datetime } & {Time/date when last written } \\
- \hline
-{LabelDate } & {datetime } & {Time/date when tape labeled } \\
- \hline
-{VolJobs } & {integer } & {Number of jobs written to this media } \\
- \hline
-{VolFiles } & {integer } & {Number of files written to this media } \\
- \hline
-{VolBlocks } & {integer } & {Number of blocks written to this media } \\
- \hline
-{VolMounts } & {integer } & {Number of time media mounted } \\
- \hline
-{VolBytes } & {bigint } & {Number of bytes saved in Job } \\
- \hline
-{VolParts } & {integer } & {The number of parts for a Volume (DVD) } \\
- \hline
-{VolErrors } & {integer } & {Number of errors during Job } \\
- \hline
-{VolWrites } & {integer } & {Number of writes to media } \\
- \hline
-{MaxVolBytes } & {bigint } & {Maximum bytes to put on this media } \\
- \hline
-{VolCapacityBytes } & {bigint } & {Capacity estimate for this volume } \\
- \hline
-{VolStatus } & {enum } & {Status of media: Full, Archive, Append, Recycle,
-Read-Only, Disabled, Error, Busy } \\
- \hline
-{Enabled } {tinyint } & {Whether or not Volume can be written } \\
- \hline
-{Recycle } & {tinyint } & {Whether or not Bacula can recycle the Volumes:
-Yes, No } \\
- \hline
-{ActionOnPurge } & {tinyint } & {What happens to a Volume after purging } \\
- \hline
-{VolRetention } & {bigint } & {64 bit seconds until expiration } \\
- \hline
-{VolUseDuration } & {bigint } & {64 bit seconds volume can be used } \\
- \hline
-{MaxVolJobs } & {integer } & {maximum jobs to put on Volume } \\
- \hline
-{MaxVolFiles } & {integer } & {maximume EOF marks to put on Volume }
-\\ \hline
-{InChanger } & {tinyint } & {Whether or not Volume in autochanger } \\
- \hline
-{StorageId } & {integer } & {Storage record ID } \\
- \hline
-{DeviceId } & {integer } & {Device record ID } \\
- \hline
-{MediaAddressing } & {integer } & {Method of addressing media } \\
- \hline
-{VolReadTime } & {bigint } & {Time Reading Volume } \\
- \hline
-{VolWriteTime } & {bigint } & {Time Writing Volume } \\
- \hline
-{EndFile } & {integer } & {End File number of Volume } \\
- \hline
-{EndBlock } & {integer } & {End block number of Volume } \\
- \hline
-{LocationId } & {integer } & {Location record ID } \\
- \hline
-{RecycleCount } & {integer } & {Number of times recycled } \\
- \hline
-{InitialWrite } & {datetime } & {When Volume first written } \\
- \hline
-{ScratchPoolId } & {integer } & {Id of Scratch Pool } \\
- \hline
-{RecyclePoolId } & {integer } & {Pool ID where to recycle Volume } \\
- \hline
-{Comment } & {blob } & {User text field } \\
- \hline
-
-
-\end{longtable}
-
-The {\bf Volume} table (internally referred to as the Media table) contains
+
+
+
+
+%\addcontentsline{lot}{table}{Media Table Layout}
+\LTXtable{\linewidth}{table_dbmedia}
+
+The {\bf Volume} table\footnote{Internally referred to as the Media table.} \bsysref{table:dbmedia} contains
one entry for each volume, that is, each tape, cassette (8mm, DLT, DAT, ...),
or file on which information is or was backed up. There is one Volume record
-created for each of the NumVols specified in the Pool resource record.
-
-\
-
-\addcontentsline{lot}{table}{Pool Table Layout}
-\begin{longtable}{|l|l|p{2.4in}|}
- \hline
-\multicolumn{3}{|l| }{\bf Pool } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type
-} & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{PoolId } & {integer } & {Primary Key } \\
- \hline
-{Name } & {Tinyblob } & {Pool Name } \\
- \hline
-{NumVols } & {Integer } & {Number of Volumes in the Pool } \\
- \hline
-{MaxVols } & {Integer } & {Maximum Volumes in the Pool } \\
- \hline
-{UseOnce } & {tinyint } & {Use volume once } \\
- \hline
-{UseCatalog } & {tinyint } & {Set to use catalog } \\
- \hline
-{AcceptAnyVolume } & {tinyint } & {Accept any volume from Pool } \\
- \hline
-{VolRetention } & {bigint } & {64 bit seconds to retain volume } \\
- \hline
-{VolUseDuration } & {bigint } & {64 bit seconds volume can be used } \\
- \hline
-{MaxVolJobs } & {integer } & {max jobs on volume } \\
- \hline
-{MaxVolFiles } & {integer } & {max EOF marks to put on Volume } \\
- \hline
-{MaxVolBytes } & {bigint } & {max bytes to write on Volume } \\
- \hline
-{AutoPrune } & {tinyint } & {yes|no for autopruning } \\
- \hline
-{Recycle } & {tinyint } & {yes|no for allowing auto recycling of Volume } \\
- \hline
-{ActionOnPurge } & {tinyint } & {Default Volume ActionOnPurge } \\
- \hline
-{PoolType } & {enum } & {Backup, Copy, Cloned, Archive, Migration } \\
- \hline
-{LabelType } & {tinyint } & {Type of label ANSI/Bacula } \\
- \hline
-{LabelFormat } & {Tinyblob } & {Label format }
-\\ \hline
-{Enabled } {tinyint } & {Whether or not Volume can be written } \\
- \hline
-{ScratchPoolId } & {integer } & {Id of Scratch Pool } \\
- \hline
-{RecyclePoolId } & {integer } & {Pool ID where to recycle Volume } \\
- \hline
-{NextPoolId } & {integer } & {Pool ID of next Pool } \\
- \hline
-{MigrationHighBytes } & {bigint } & {High water mark for migration } \\
- \hline
-{MigrationLowBytes } & {bigint } & {Low water mark for migration } \\
- \hline
-{MigrationTime } & {bigint } & {Time before migration } \\
- \hline
-
-
-
-\end{longtable}
-
-The {\bf Pool} table contains one entry for each media pool controlled by
+created for each of the NumVols specified in the Pool resource record.
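+
+For example, the Volumes belonging to a given Pool and their current states
+can be listed with a query of this form (the PoolId shown is arbitrary):
+
+\footnotesize
+\begin{lstlisting}
+SELECT VolumeName, VolStatus, VolBytes
+  FROM Media WHERE PoolId=1;
+\end{lstlisting}
+\normalsize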
+
+%\addcontentsline{lot}{table}{Pool Table Layout}
+\LTXtable{\linewidth}{table_dbpool}
+
+The {\bf Pool} table \bsysref{table:dbpool} contains one entry for each media pool controlled by
Bacula in this database. One media record exists for each of the NumVols
contained in the Pool. The PoolType is a Bacula-defined keyword. The MediaType
is defined by the administrator, and corresponds to the MediaType specified in
the Director's Storage definition record. The CurrentVol is the sequence
-number of the Media record for the current volume.
-
-\
-
-\addcontentsline{lot}{table}{Client Table Layout}
-\begin{longtable}{|l|l|l|}
- \hline
-\multicolumn{3}{|l| }{\bf Client } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type
-} & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{ClientId } & {integer } & {Primary Key } \\
- \hline
-{Name } & {TinyBlob } & {File Services Name } \\
- \hline
-{UName } & {TinyBlob } & {uname -a from Client (not yet used) } \\
- \hline
-{AutoPrune } & {tinyint } & {yes|no for autopruning } \\
- \hline
-{FileRetention } & {bigint } & {64 bit seconds to retain Files } \\
- \hline
-{JobRetention } & {bigint } & {64 bit seconds to retain Job }
-\\ \hline
-
-\end{longtable}
-
-The {\bf Client} table contains one entry for each machine backed up by Bacula
-in this database. Normally the Name is a fully qualified domain name.
-
-
-\addcontentsline{lot}{table}{Storage Table Layout}
-\begin{longtable}{|l|l|l|}
- \hline
-\multicolumn{3}{|l| }{\bf Storage } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type
-} & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{StorageId } & {integer } & {Unique Id } \\
- \hline
-{Name } & {tinyblob } & {Resource name of Storage device } \\
- \hline
-{AutoChanger } & {tinyint } & {Set if it is an autochanger } \\
- \hline
-
-\end{longtable}
-
-The {\bf Storage} table contains one entry for each Storage used.
-
-
-\addcontentsline{lot}{table}{Counter Table Layout}
-\begin{longtable}{|l|l|l|}
- \hline
-\multicolumn{3}{|l| }{\bf Counter } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type
-} & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{Counter } & {tinyblob } & {Counter name } \\
- \hline
-{MinValue } & {integer } & {Start/Min value for counter } \\
- \hline
-{MaxValue } & {integer } & {Max value for counter } \\
- \hline
-{CurrentValue } & {integer } & {Current counter value } \\
- \hline
-{WrapCounter } & {tinyblob } & {Name of another counter }
-\\ \hline
-
-\end{longtable}
-
-The {\bf Counter} table contains one entry for each permanent counter defined
-by the user.
-
-\addcontentsline{lot}{table}{Job History Table Layout}
-\begin{longtable}{|l|l|p{2.5in}|}
- \hline
-\multicolumn{3}{|l| }{\bf JobHisto } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type
-} & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{JobId } & {integer } & {Primary Key } \\
- \hline
-{Job } & {tinyblob } & {Unique Job Name } \\
- \hline
-{Name } & {tinyblob } & {Job Name } \\
- \hline
-{Type } & {binary(1) } & {Job Type: Backup, Copy, Clone, Archive, Migration
-} \\
- \hline
-{Level } & {binary(1) } & {Job Level } \\
- \hline
-{ClientId } & {integer } & {Client index } \\
- \hline
-{JobStatus } & {binary(1) } & {Job Termination Status } \\
- \hline
-{SchedTime } & {datetime } & {Time/date when Job scheduled } \\
- \hline
-{StartTime } & {datetime } & {Time/date when Job started } \\
- \hline
-{EndTime } & {datetime } & {Time/date when Job ended } \\
- \hline
-{RealEndTime } & {datetime } & {Time/date when original Job ended } \\
- \hline
-{JobTDate } & {bigint } & {Start day in Unix format but 64 bits; used for
-Retention period. } \\
- \hline
-{VolSessionId } & {integer } & {Unique Volume Session ID } \\
- \hline
-{VolSessionTime } & {integer } & {Unique Volume Session Time } \\
- \hline
-{JobFiles } & {integer } & {Number of files saved in Job } \\
- \hline
-{JobBytes } & {bigint } & {Number of bytes saved in Job } \\
- \hline
-{JobErrors } & {integer } & {Number of errors during Job } \\
- \hline
-{JobMissingFiles } & {integer } & {Number of files not saved (not yet used) }
-\\
- \hline
-{PoolId } & {integer } & {Link to Pool Record } \\
- \hline
-{FileSetId } & {integer } & {Link to FileSet Record } \\
- \hline
-{PrioJobId } & {integer } & {Link to prior Job Record when migrated } \\
- \hline
-{PurgedFiles } & {tiny integer } & {Set when all File records purged } \\
- \hline
-{HasBase } & {tiny integer } & {Set when Base Job run }
-\\ \hline
-
-\end{longtable}
-
-The {bf JobHisto} table is the same as the Job table, but it keeps
+number of the Media record for the current volume.
+
+
+%\addcontentsline{lot}{table}{Client Table Layout}
+\LTXtable{\linewidth}{table_dbclient}
+
+The {\bf Client} table \bsysref{table:dbclient} contains one entry for each machine backed up by Bacula
+in this database. Normally the Name is a fully qualified domain name.
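+
+For example, the retention periods recorded for each Client can be listed
+with:
+
+\footnotesize
+\begin{lstlisting}
+SELECT ClientId, Name, FileRetention, JobRetention FROM Client;
+\end{lstlisting}
+\normalsize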
+
+
+%\addcontentsline{lot}{table}{Storage Table Layout}
+\LTXtable{\linewidth}{table_dbstorage}
+
+The {\bf Storage} table \bsysref{table:dbstorage} contains one entry for each Storage used.
+
+
+%\addcontentsline{lot}{table}{Counter Table Layout}
+\LTXtable{\linewidth}{table_dbcounter}
+
+The {\bf Counter} table \bsysref{table:dbcounter} contains one entry for each permanent counter defined
+by the user.
+
+%\addcontentsline{lot}{table}{Job History Table Layout}
+\LTXtable{\linewidth}{table_dbjobhistory}
+
+The {\bf JobHisto} table \bsysref{table:dbjobhistory} is the same as the Job table, but it keeps
long-term statistics (i.e. it is not pruned with the Job).
-\addcontentsline{lot}{table}{Log Table Layout}
-\begin{longtable}{|l|l|l|}
- \hline
-\multicolumn{3}{|l| }{\bf Version } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type
-} & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{LogIdId } & {integer } & {Primary Key }
-\\ \hline
-{JobId } & {integer } & {Points to Job record }
-\\ \hline
-{Time } & {datetime } & {Time/date log record created }
-\\ \hline
-{LogText } & {blob } & {Log text }
-\\ \hline
-
-\end{longtable}
-
-The {\bf Log} table contains a log of all Job output.
-
-\addcontentsline{lot}{table}{Location Table Layout}
-\begin{longtable}{|l|l|l|}
- \hline
-\multicolumn{3}{|l| }{\bf Location } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type
-} & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{LocationId } & {integer } & {Primary Key }
-\\ \hline
-{Location } & {tinyblob } & {Text defining location }
-\\ \hline
-{Cost } & {integer } & {Relative cost of obtaining Volume }
-\\ \hline
-{Enabled } & {tinyint } & {Whether or not Volume is enabled }
-\\ \hline
-
-\end{longtable}
-
-The {\bf Location} table defines where a Volume is physically.
-
-
-\addcontentsline{lot}{table}{Location Log Table Layout}
-\begin{longtable}{|l|l|l|}
- \hline
-\multicolumn{3}{|l| }{\bf LocationLog } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type
-} & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{locLogIdId } & {integer } & {Primary Key }
-\\ \hline
-{Date } & {datetime } & {Time/date log record created }
-\\ \hline
-{MediaId } & {integer } & {Points to Media record }
-\\ \hline
-{LocationId } & {integer } & {Points to Location record }
-\\ \hline
-{NewVolStatus } & {integer } & {enum: Full, Archive, Append, Recycle, Purged
- Read-only, Disabled, Error, Busy, Used, Cleaning }
-\\ \hline
-{Enabled } & {tinyint } & {Whether or not Volume is enabled }
-\\ \hline
-
-
-\end{longtable}
-
-The {\bf Log} table contains a log of all Job output.
-
-
-\addcontentsline{lot}{table}{Version Table Layout}
-\begin{longtable}{|l|l|l|}
- \hline
-\multicolumn{3}{|l| }{\bf Version } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type
-} & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{VersionId } & {integer } & {Primary Key }
-\\ \hline
-
-\end{longtable}
-
-The {\bf Version} table defines the Bacula database version number. Bacula
+%\addcontentsline{lot}{table}{Log Table Layout}
+\LTXtable{\linewidth}{table_dblog}
+
+The {\bf Log} table \bsysref{table:dblog} contains a log of all Job output.
+
+%\addcontentsline{lot}{table}{Location Table Layout}
+\LTXtable{\linewidth}{table_dblocation}
+
+The {\bf Location} table \bsysref{table:dblocation} defines where a Volume is physically located.
+
+
+%\addcontentsline{lot}{table}{Location Log Table Layout}
+\LTXtable{\linewidth}{table_dblocationlog}
+
+The {\bf LocationLog} table \bsysref{table:dblocationlog} keeps a log of each Volume's movements between Locations.
+
+
+%\addcontentsline{lot}{table}{Version Table Layout}
+\LTXtable{\linewidth}{table_dbversion}
+
+The {\bf Version} table \bsysref{table:dbversion} defines the Bacula database version number. Bacula
checks this number before reading the database to ensure that it is compatible
-with the Bacula binary file.
-
-
-\addcontentsline{lot}{table}{Base Files Table Layout}
-\begin{longtable}{|l|l|l|}
- \hline
-\multicolumn{3}{|l| }{\bf BaseFiles } \\
- \hline
-\multicolumn{1}{|c| }{\bf Column Name } & \multicolumn{1}{c| }{\bf Data Type
-} & \multicolumn{1}{c| }{\bf Remark } \\
- \hline
-{BaseId } & {integer } & {Primary Key } \\
- \hline
-{BaseJobId } & {integer } & {JobId of Base Job } \\
- \hline
-{JobId } & {integer } & {Reference to Job } \\
- \hline
-{FileId } & {integer } & {Reference to File } \\
- \hline
-{FileIndex } & {integer } & {File Index number }
-\\ \hline
-
-\end{longtable}
-
-The {\bf BaseFiles} table contains all the File references for a particular
+with the Bacula binary file.
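+
+For example, the schema version stored in the catalog can be checked with:
+
+\footnotesize
+\begin{lstlisting}
+SELECT VersionId FROM Version;
+\end{lstlisting}
+\normalsize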
+
+
+%\addcontentsline{lot}{table}{Base Files Table Layout}
+\LTXtable{\linewidth}{table_dbbasefiles}
+
+The {\bf BaseFiles} table \bsysref{table:dbbasefiles} contains all the File references for a particular
JobId that point to a Base file -- i.e. they were previously saved and hence
were not saved in the current JobId but in BaseJobId under FileId. FileIndex
is the index of the file, and is used for optimization of Restore jobs to
prevent the need to read the FileId record when creating the in-memory tree.
-This record is not yet implemented.
+This record is not yet implemented.
-\
+\
\subsection{MySQL Table Definition}
\index[general]{MySQL Table Definition }
\index[general]{Definition!MySQL Table }
\addcontentsline{toc}{subsubsection}{MySQL Table Definition}
-The commands used to create the MySQL tables are as follows:
+The commands used to create the MySQL tables are as follows:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
USE bacula;
CREATE TABLE Filename (
FilenameId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
WrapCounter TINYBLOB NOT NULL,
PRIMARY KEY (Counter(128))
);
-\end{verbatim}
+\end{lstlisting}
\normalsize
sending commands to another daemon (specifically, the Director to the Storage
daemon and the Director to the File daemon).
-\begin{itemize}
+\begin{bsysitemize}
\item Commands are always ASCII commands that are upper/lower case dependent
as well as space sensitive.
\item All binary data is converted into ASCII (either with printf statements
the range 0 to -999 will be standard daemon-wide signals, while -1000 to
-1999 will be for Director use, -2000 to -2999 for the File daemon, and
-3000 to -3999 for the Storage daemon.
-\end{itemize}
+\end{bsysitemize}
\section{The Protocol Used Between the Director and the Storage Daemon}
\index{Daemon!Protocol Used Between the Director and the Storage }
daemons.
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
SD: listens
DR: makes connection
DR: Hello <Director-name> calling <password>
DR: use device=<device-name> media_type=<media-type>
pool_name=<pool-name> pool_type=<pool_type>
SD: 3000 OK use device
-\end{verbatim}
+\end{lstlisting}
\normalsize
For the Director to be authorized, the \lt{}Director-name\gt{} and the
A typical conversation might look like the following:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
FD: listens
DR: makes connection
DR: Hello <Director-name> calling <password>
... additional Volume / Volume data pairs for volumes 2 .. n
FD: Null packet
FD: close socket
-\end{verbatim}
+\end{lstlisting}
\normalsize
\section{The Save Protocol Between the File Daemon and the Storage Daemon}
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
FD: listens
SD: makes connection
FD: append open session = <JobId> [<password>]
SD: 3000 OK ticket = <number>
FD: append data <ticket-number>
SD: 3000 OK data address = <IPaddress> port = <port>
-\end{verbatim}
+\end{lstlisting}
\normalsize
\subsection{Data Information}
following exchanges:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
FD: <1 1 0> (header packet)
FD: <data packet containing file-attributes>
FD: Null packet
SD: ... additional Volume / Volume data pairs for
volumes 2 .. n
FD: close socket
-\end{verbatim}
+\end{lstlisting}
\normalsize
The information returned to the File daemon by the Storage daemon in response
%%
%% # $ % & ~ _ ^ \ { }
%%
+\documentclass[10pt,bsyspaper,english,logo,titlepage]{bsysmanual}
-\documentclass[10pt,a4paper]{book}
+\renewcommand{\familydefault}{\sfdefault}
+\usepackage[utf8]{inputenc}
+\usepackage[toc,title,header,page]{appendix}
+\usepackage[T1]{fontenc}
+\usepackage{longtable,graphicx,fancyhdr,lastpage,eurosym,dcolumn,ltxtable}
+\usepackage{textcomp,varioref,lscape,pdfpages,ifthen,setspace,colortbl,diagbox}
+\usepackage{lmodern,minitoc}
+\usepackage{MnSymbol}
+\usepackage{bbding,multirow}
+\usepackage[hyphens]{url}
+\usepackage[plainpages=true,bookmarks=false,bookmarksopen=false,filecolor=black,linkcolor=black,urlcolor=bsysredtwo,filebordercolor={0. 0. 0.},menubordercolor={0. 0. 0.},urlbordercolor={0. 0. 0.},linkbordercolor={0. 0. 0.},hyperindex=false,colorlinks=true]{hyperref}
+\usepackage{babel,xr,xr-hyper}
+\usepackage[font={sf,bf},textfont=md]{caption}
+\usepackage[printonlyused]{acronym}
+\setlength\arrayrulewidth{0.4pt}
+\include{bsyscommondefs}
+\usepackage[left=4cm,right=3cm,bottom=2cm,top=2.5cm]{geometry}
+\usepackage{moreverb,fancyvrb}
+\usepackage{listings}
+\input{external-references}
+\pdfminorversion=4
-\topmargin -0.5in
-\oddsidemargin 0.0in
-\evensidemargin 0.0in
-\textheight 10in
-\textwidth 6.5in
-
-
-\usepackage{html}
\usepackage{float}
\usepackage{graphicx}
\usepackage{bacula}
\usepackage{makeidx}
\usepackage{index}
\usepackage{setspace}
-\usepackage{hyperref}
-% \usepackage[linkcolor=black,colorlinks=true]{hyperref}
\usepackage{url}
\makeindex
\newindex{general}{idx}{ind}{General Index}
\sloppy
-
+\def\bsystitle{Developer's Guide}
\begin{document}
+\lstset{escapechar=,breaklines=true,basicstyle=\ttfamily\scriptsize,backgroundcolor=\color{lightbsysgrey}}
\sloppy
-\include{coverpage}
-
-\clearpage
-\pagenumbering{roman}
+\input{coverpage}
+\frontmatter
\tableofcontents
-\clearpage
+\listoftables
+\listoffigures
-\pagestyle{myheadings}
-\markboth{Bacula Version \version}{Bacula Version \version}
-\pagenumbering{arabic}
+\mainmatter
+%\markboth{Bacula Version \version}{Bacula Version \version}
+%\pagenumbering{arabic}
\include{generaldevel}
\include{git}
\include{pluginAPI}
\include{mempool}
\include{netprotocol}
\include{smartall}
-\include{fdl}
% The following line tells link_resolver.pl to not include these files:
% nolinks developersi baculai-dir baculai-fd baculai-sd baculai-console baculai-main
% pull in the index
-\clearpage
+\begin{appendices}
+\begin{small}
+\include{fdl}
+\end{small}
+\end{appendices}
\printindex
\end{document}
indenting standard (see below) for source code. If you have checked out
the source with Git, you can get a diff using:
-\begin{verbatim}
+\begin{lstlisting}
git pull
git format-patch -M
-\end{verbatim}
+\end{lstlisting}
If you plan on doing significant development work over a period of time,
after having your first patch reviewed and approved, you will be eligible
It should be filled out, then sent to:
-\begin{verbatim}
+\begin{lstlisting}
Kern Sibbald
Cotes-de-Montmoiret 9
1012 Lausanne
Switzerland
-\end{verbatim}
+\end{lstlisting}
Please note that the above address is different from the officially
registered office mentioned in the document. When you send in such a
the implementation of accepted Feature Requests.
Feature Request format:
-\begin{verbatim}
+\begin{lstlisting}
============= Empty Feature Request form ===========
Item n: One line summary ...
Date: Date submitted
Notes: Additional notes or features (omit if not used)
============== End Feature Request form ==============
-\end{verbatim}
+\end{lstlisting}
-\begin{verbatim}
+\begin{lstlisting}
============= Example Completed Feature Request form ===========
Item 1: Implement a Migration job type that will move the job
data from one device to another.
Highwater size (keep total size)
Lowwater mark
=================================================
-\end{verbatim}
+\end{lstlisting}
\section{Bacula Code Submissions and Projects}
Getting code implemented in Bacula works roughly as follows:
-\begin{itemize}
+\begin{bsysitemize}
\item Kern is the project manager, but prefers not to be a ``gatekeeper''.
This means that the developers are expected to be self-motivated,
to avoid this, and ensure a continuation of the code and a sharing of
the development, debugging, documentation, and maintenance
responsibilities.
-\end{itemize}
+\end{bsysitemize}
\section{Patches for Released Versions}
\index{Patches for Released Versions}
2.2.4-poll-mount.patch. The best way to create the patch file is as
follows:
-\begin{verbatim}
+\begin{lstlisting}
(edit) 2.2.4-restore.patch
(input description)
(end edit)
git format-patch -M
mv 0001-xxx 2.2.4-restore.patch
-\end{verbatim}
+\end{lstlisting}
check to make sure no extra junk got put into the patch file (i.e.
it should have the patch for that bug only).
Then upload it to the 2.2.x release of bacula-patches.
So, in the end, the patch file is:
-\begin{itemize}
+\begin{bsysitemize}
\item Attached to the bug report
\item In Branch-2.2/bacula/patches/...
\item Loaded on Source Forge bacula-patches 2.2.x release. When
you add it, click on the check box to send an Email so that all the
users that are monitoring SF patches get notified.
-\end{itemize}
+\end{bsysitemize}
\section{Developing Bacula}
Please identify all incomplete code with a comment that contains
-\begin{verbatim}
+\begin{lstlisting}
***FIXME***
-\end{verbatim}
+\end{lstlisting}
where there are three asterisks (*) before and after the word
FIXME (in capitals) and no intervening spaces. This is important as it allows
Once you detar this file, you will have a directory structure as follows:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
|
Tar file:
|- depkgs
|- bimagemgr (Web application for burning CDROMs)
-\end{verbatim}
+\end{lstlisting}
\normalsize
\subsection{Header Files}
\index{Do Not Use}
\addcontentsline{toc}{subsubsection}{Do Not Use}
-\begin{itemize}
+\begin{bsysitemize}
\item STL -- it is totally incomprehensible.
-\end{itemize}
+\end{bsysitemize}
\subsection{Avoid if Possible}
\index{Possible!Avoid if}
\index{Avoid if Possible}
\addcontentsline{toc}{subsubsection}{Avoid if Possible}
-\begin{itemize}
+\begin{bsysitemize}
\item Using {\bf void *} because this generally means that one must
use casting, and in C++ casting is rather ugly. It is OK to use
void * to pass structure address where the structure is not known
\item Too much inheritance -- it can complicate the code, and make reading it
difficult (unless you are in love with colons)
-\end{itemize}
+\end{bsysitemize}
\subsection{Do Use Whenever Possible}
\index{Possible!Do Use Whenever}
\index{Do Use Whenever Possible}
\addcontentsline{toc}{subsubsection}{Do Use Whenever Possible}
-\begin{itemize}
+\begin{bsysitemize}
\item Locking and unlocking within a single subroutine.
\item A single point of exit from all subroutines. A goto is
\item When committing a fix for a bug, make the comment of the
following form:
-\begin{verbatim}
+\begin{lstlisting}
Reason for bug fix or other message. Fixes bug #1234
-\end{verbatim}
+\end{lstlisting}
It is important to write the {\bf bug \#1234} like
that because our program that automatically pulls messages
Providing the commit comment line has one of the following
keywords (or phrases), it will be ignored:
-\begin{verbatim}
+\begin{lstlisting}
tweak
typo
cleanup
update todo
update notes
update changelog
-\end{verbatim}
+\end{lstlisting}
\item Use the following keywords at the beginning of
a git commit message
-\end{itemize}
+\end{bsysitemize}
\subsection{Indenting Standards}
\index{Standards!Indenting}
produce reasonably indented code are:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
-nbad -bap -bbo -nbc -br -brs -c36 -cd36 -ncdb -ce -ci3 -cli0
-cp36 -d0 -di1 -ndj -nfc1 -nfca -hnl -i3 -ip0 -l85 -lp -npcs
-nprs -npsl -saf -sai -saw -nsob -nss -nbc -ncs -nbfda
-\end{verbatim}
+\end{lstlisting}
\normalsize
You can put the above in your .indent.pro file, and then just invoke indent on
(e.g. of an if), and the closing brace is on a line by itself. E.g.
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
if (abc) {
some_code;
}
-\end{verbatim}
+\end{lstlisting}
\normalsize
Just follow the convention in the code. For example, I prefer non-indented cases.
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
switch (code) {
case 'A':
do something
default:
break;
}
-\end{verbatim}
+\end{lstlisting}
\normalsize
Avoid using // style comments except for temporary code or turning off debug
Always put space around assignment and comparison operators.
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
a = 1;
if (b >= 2) {
cleanup();
}
-\end{verbatim}
+\end{lstlisting}
\normalsize
but you can compress things in a {\bf for} statement:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
for (i=0; i < del.num_ids; i++) {
...
-\end{verbatim}
+\end{lstlisting}
\normalsize
Don't overuse the inline if (?:). A full {\bf if} is preferred, except in a
print statement, e.g.:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
if (ua->verbose && del.num_del != 0) {
bsendmsg(ua, _("Pruned %d %s on Volume %s from catalog.\n"), del.num_del,
del.num_del == 1 ? "Job" : "Jobs", mr->VolumeName);
}
-\end{verbatim}
+\end{lstlisting}
\normalsize
Leave a certain amount of debug code (Dmsg) in code you submit, so that future
Please don't use:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
strcpy()
strcat()
strncpy()
strncat();
sprintf()
snprintf()
-\end{verbatim}
+\end{lstlisting}
\normalsize
They are system dependent and unsafe. These should be replaced by the Bacula
safe equivalents:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
char *bstrncpy(char *dest, char *source, int dest_size);
char *bstrncat(char *dest, char *source, int dest_size);
int bsnprintf(char *buf, int32_t buf_len, const char *fmt, ...);
int bvsnprintf(char *str, int32_t size, const char *format, va_list ap);
-\end{verbatim}
+\end{lstlisting}
\normalsize
See src/lib/bsys.c for more details on these routines.
edit\_uint64()}. For example:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
char buf[100];
uint64_t num = something;
char ed1[50];
bsnprintf(buf, sizeof(buf), "Num=%s\n", edit_uint64(num, ed1));
-\end{verbatim}
+\end{lstlisting}
\normalsize
Note: {\bf \%lld} is now permitted in Bacula code -- we have our
Job messages are messages that pertain to a particular job such as a file that
could not be saved, or the number of files and bytes that were saved. They
are coded as:
-\begin{verbatim}
+\begin{lstlisting}
Jmsg(jcr, M_FATAL, 0, "Text of message");
-\end{verbatim}
+\end{lstlisting}
A Jmsg with M\_FATAL will fail the job. Jmsg() takes varargs, so it can
have any number of arguments to be substituted in a printf-like format.
Output from the Jmsg() will go to the Job report.
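For instance, a non-fatal warning about a file that could not be saved might
look like this (a sketch; {\bf fname} is a hypothetical variable):
\begin{lstlisting}
Jmsg(jcr, M_WARNING, 0, "Could not open %s for backup.\n", fname);
\end{lstlisting}
The job continues, and the arguments are substituted printf-style into the
message that appears in the Job report.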
You can get a full copy of the Source Forge Bacula Git repository with the
following command:
-\begin{verbatim}
+\begin{lstlisting}
git clone http://git.bacula.org/bacula trunk
-\end{verbatim}
+\end{lstlisting}
This will put a read-only copy into the directory {\bf trunk}
in your current directory, and {\bf trunk} will contain
The above command needs to be done only once. Thereafter, you can:
-\begin{verbatim}
+\begin{lstlisting}
cd trunk
git pull # refresh my repo with the latest code
-\end{verbatim}
+\end{lstlisting}
As of August 2009, the size of the repository ({\bf trunk} in the above
example) will be approximately 55 Megabytes. However, if you build
\elink{http://book.git-scm.com/}{http://book.git-scm.com/}.
Some of the differences between Git and SVN are:
-\begin{itemize}
+\begin{bsysitemize}
\item Your main Git directory is a full Git repository to which you can
and must commit. In fact, we suggest you commit frequently.
\item When you commit, the commit goes into your local Git
apply to an older version of the repository you will probably
get an error message such as:
-\begin{verbatim}
+\begin{lstlisting}
git push
To git@github.com:bacula/bacula.git
! [rejected] master -> master (non-fast forward)
error: failed to push some refs to 'git@github.com:bacula/bacula.git'
-\end{verbatim}
+\end{lstlisting}
which is Git's way of telling you that the main repository has changed
and that if you push your changes, they will not be integrated properly.
will tell you and you must do conflict resolution, which is much
easier in Git than in SVN.
\item Resolving conflicts is described below in the {\bf github} section.
-\end{itemize}
+\end{bsysitemize}
\section{Step by Step Modifying Bacula Code}
Suppose you want to download Bacula source code, build it, make
a change, then submit your change to the Bacula developers. What
would you do?
-\begin{itemize}
+\begin{bsysitemize}
\item Tell git who you are:\\
-\begin{verbatim}
+\begin{lstlisting}
git config --global user.name "First-name Last-name"
git config --global user.email "email@address.com"
-\end{verbatim}
+\end{lstlisting}
Where you put your real name and your email address. Since
this is global, you only need to do it once on any given
machine regardless of how many git repos you work with.
\item Download the Source code:\\
-\begin{verbatim}
+\begin{lstlisting}
git clone http://git.bacula.org/bacula trunk
-\end{verbatim}
+\end{lstlisting}
\item Configure and Build Bacula:\\
-\begin{verbatim}
+\begin{lstlisting}
./configure (all-your-normal-options)
make
-\end{verbatim}
+\end{lstlisting}
\item Create a branch to work on:
-\begin{verbatim}
+\begin{lstlisting}
cd trunk/bacula
git checkout -b bugfix master
-\end{verbatim}
+\end{lstlisting}
\item Edit, build, Test, ...\\
-\begin{verbatim}
+\begin{lstlisting}
edit file jcr.h
make
test
-\end{verbatim}
+\end{lstlisting}
Note: if you forget to create a working branch prior to making
changes, and you make them on master, this is no problem providing
So assuming that you have edited master instead of your bugfix
branch, you can simply:
-\begin{verbatim}
+\begin{lstlisting}
git checkout -b bugfix master
-\end{verbatim}
+\end{lstlisting}
and a new bugfix branch will be created and checked out.
You can then proceed to committing to your bugfix branch as
described in the next step.
\item Commit your work:
-\begin{verbatim}
+\begin{lstlisting}
git commit -am "Short comment on what I did"
-\end{verbatim}
+\end{lstlisting}
\item Possibly repeat the above two items
\item Switch back to the master branch:\\
-\begin{verbatim}
+\begin{lstlisting}
git checkout master
-\end{verbatim}
+\end{lstlisting}
\item Pull the latest changes:\\
-\begin{verbatim}
+\begin{lstlisting}
git pull
-\end{verbatim}
+\end{lstlisting}
\item Get back on your bugfix branch:\\
-\begin{verbatim}
+\begin{lstlisting}
git checkout bugfix
-\end{verbatim}
+\end{lstlisting}
\item Merge your changes and correct any conflicts:\\
-\begin{verbatim}
+\begin{lstlisting}
git rebase master bugfix
-\end{verbatim}
+\end{lstlisting}
\item Fix any conflicts:\\
You will be notified if there are conflicts. The first
thing to do is:
-\begin{verbatim}
+\begin{lstlisting}
git diff
-\end{verbatim}
+\end{lstlisting}
This will produce a diff of only the files having a conflict.
Fix each file in turn. When it is fixed, the diff for that file
For each file fixed, you must, as with SVN, inform git with:
-\begin{verbatim}
+\begin{lstlisting}
git add (name-of-file-no-longer-in-conflict)
-\end{verbatim}
+\end{lstlisting}
\item When all files are fixed do:
-\begin{verbatim}
+\begin{lstlisting}
git rebase --continue
-\end{verbatim}
+\end{lstlisting}
\item If you find that it is impossible to reconcile the two
branches or you made a mistake in correcting and adding files,
before you enter the:
-\begin{verbatim}
+\begin{lstlisting}
git rebase --continue
-\end{verbatim}
+\end{lstlisting}
you can instead enter:
-\begin{verbatim}
+\begin{lstlisting}
git rebase --abort
-\end{verbatim}
+\end{lstlisting}
which will essentially cancel the original git rebase and reset
everything to the beginning with no changes to your bugfix branch.
\item When you have completed the rebase and
are ready to send a patch, do the following:\\
-\begin{verbatim}
+\begin{lstlisting}
git checkout bugfix
git format-patch -M master
-\end{verbatim}
+\end{lstlisting}
Look at the files produced. They should be numbered 0001-xxx.patch
where there is one file for each commit you did, numbered sequentially,
and the xxx is what you put in the commit comment.
to the developers.
-\end{itemize}
+\end{bsysitemize}
Assuming you call the Bacula repository {\bf trunk}, you might use the following
commands:
-\begin{verbatim}
+\begin{lstlisting}
cd trunk
git checkout master
git pull
git add <file-edited>
git commit -m "<comment about commit>"
...
-\end{verbatim}
+\end{lstlisting}
When you have completed working on your branch, you will do:
-\begin{verbatim}
+\begin{lstlisting}
cd trunk
git checkout newbranch # ensure I am on my branch
git pull # get latest source code
git rebase master # merge my code
-\end{verbatim}
+\end{lstlisting}
If you have completed your edits before anyone has modified the repository,
the {\bf git rebase master} will report that there was nothing to do. Otherwise,
If there are any conflicts, Git will tell you. Typically resolving conflicts with
Git is relatively easy. You simply make a diff:
-\begin{verbatim}
+\begin{lstlisting}
git diff
-\end{verbatim}
+\end{lstlisting}
Then edit each file that was listed in the {\bf git diff} to remove the
conflict, which will be indicated by lines of:
-\begin{verbatim}
+\begin{lstlisting}
<<<<<<< HEAD
text
=======
other text
>>>>>>> newbranch
-\end{verbatim}
+\end{lstlisting}
where {\bf text} is what is in the Bacula repository, and {\bf other text}
is what you have changed.
Once you have eliminated the conflict, the {\bf git diff} will show nothing,
and you must do a:
-\begin{verbatim}
+\begin{lstlisting}
git add <file-with-conflicts-fixed>
-\end{verbatim}
+\end{lstlisting}
Once you have fixed all the files with conflicts in the above manner, you enter:
-\begin{verbatim}
+\begin{lstlisting}
git rebase --continue
-\end{verbatim}
+\end{lstlisting}
and your rebase will be complete.
If for some reason, before doing the --continue, you want to abort the rebase and return to what you had, you enter:
-\begin{verbatim}
+\begin{lstlisting}
git rebase --abort
-\end{verbatim}
+\end{lstlisting}
Finally to make a set of patch files
-\begin{verbatim}
+\begin{lstlisting}
git format-patch -M master
-\end{verbatim}
+\end{lstlisting}
When you see your changes have been integrated and pushed to the
main repo, you can delete your branch with:
-\begin{verbatim}
+\begin{lstlisting}
git checkout master
git branch -D newbranch
-\end{verbatim}
+\end{lstlisting}
\section{Forcing Changes}
If you want to understand why it is not a good idea to force a
push to the repository, look at the following picture:
-\includegraphics[width=0.85\textwidth]{\idir git-edit-commit.eps}
+\bsysimageH{git-edit-commit}{Git Edit Commit}{}
The above graphic has three lines of circles. Each circle represents
a commit, and time runs from the left to the right. The top line
This can be next to impossible. There are a number of ways that Bacula is
designed to facilitate this:
-\begin{itemize}
+\begin{bsysitemize}
\item The Bacula network protocol is packet based, and thus pieces of
information sent can be ASCII or binary.
\item The packet interface permits knowing where the end of a list is.
more details on this.
\item Most console commands allow all the arguments to be specified on the
command line: e.g. {\bf run job=NightlyBackup level=Full}
-\end{itemize}
+\end{bsysitemize}
One of the first things to overcome is to be able to establish a conversation
with the Director. Although you can write all your own code, it is probably
Console program to begin a conversation.
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
static BSOCK *UA_sock = NULL;
static JCR *jcr;
...
bnet_close(UA_sock);
}
exit(0);
-\end{verbatim}
+\end{lstlisting}
\normalsize
Then the read\_and\_process\_input routine looks like the following:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
get-input-to-send-to-the-Director;
bnet_fsend(UA_sock, "%s", input);
stat = bnet_recv(UA_sock);
process-output-from-the-Director;
-\end{verbatim}
+\end{lstlisting}
\normalsize
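Slightly more concretely, the loop might look like this (a sketch, assuming
the usual BSOCK convention that the received message is left in
{\bf UA\_sock-\gt{}msg}; {\bf get\_next\_command()} is a hypothetical input
helper):
\footnotesize
\begin{lstlisting}
char input[1024];
while (get_next_command(input, sizeof(input))) { /* hypothetical helper */
   bnet_fsend(UA_sock, "%s", input);   /* send request to the Director */
   while (bnet_recv(UA_sock) > 0) {    /* read until end of data */
      printf("%s", UA_sock->msg);      /* show the Director's response */
   }
}
\end{lstlisting}
\normalsize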
For a GUI program things will be a bit more complicated. Basically in the very
Bat now has a bRestore panel that uses Bvfs to display files and
directories.
-\begin{figure}[htbp]
- \centering
- \includegraphics[width=12cm]{\idir bat-brestore}
- \label{fig:batbrestore}
- \caption{Bat Brestore Panel}
-\end{figure}
+\bsysimageH{bat-brestore}{Bat Brestore Panel}{fig:batbrestore}
+%% \begin{figure}[htbp]
+%% \centering
+%% \includegraphics[width=12cm]{\idir bat-brestore}
+%% \label{fig:batbrestore}
+%% \caption{Bat Brestore Panel}
+%% \end{figure}
The Bvfs module works correctly with BaseJobs, Copy and Migration jobs.
\subsection*{General notes}
-\begin{itemize}
+\begin{bsysitemize}
\item All fields are separated by a tab
\item You can specify \texttt{limit=} and \texttt{offset=} to list smoothly
records in very big directories
\item Due to potential encoding problems, it's advised to always use pathid in
queries.
-\end{itemize}
+\end{bsysitemize}
\subsection*{Get dependent jobs from a given JobId}
To get all JobId needed to restore a particular job, you can use the
\texttt{.bvfs\_get\_jobids} command.
-\begin{verbatim}
+\begin{lstlisting}
.bvfs_get_jobids jobid=num [all]
-\end{verbatim}
+\end{lstlisting}
-\begin{verbatim}
+\begin{lstlisting}
.bvfs_get_jobids jobid=10
1,2,5,10
.bvfs_get_jobids jobid=10 all
1,2,3,5,10
-\end{verbatim}
+\end{lstlisting}
In this example, a normal restore will need to use JobIds 1,2,5,10 to
compute a complete restore of the system.
The \texttt{.bvfs\_update} command computes the directory cache for jobs
specified in argument, or for all jobs if unspecified.
-\begin{verbatim}
+\begin{lstlisting}
.bvfs_update [jobid=numlist]
-\end{verbatim}
+\end{lstlisting}
Example:
-\begin{verbatim}
+\begin{lstlisting}
.bvfs_update jobid=1,2,3
-\end{verbatim}
+\end{lstlisting}
You can run the cache update process in a RunScript after the catalog backup.
function uses only PathId and FilenameId. The jobid argument is mandatory but
unused.
-\begin{verbatim}
+\begin{lstlisting}
.bvfs_versions client=filedaemon pathid=num filenameid=num jobid=1
PathId FilenameId FileId JobId LStat Md5 VolName Inchanger
PathId FilenameId FileId JobId LStat Md5 VolName Inchanger
...
-\end{verbatim}
+\end{lstlisting}
Example:
-\begin{verbatim}
+\begin{lstlisting}
.bvfs_versions client=localhost-fd pathid=1 fnid=47 jobid=1
1 47 52 12 gD HRid IGk D Po Po A P BAA I A /uPgWaxMgKZlnMti7LChyA Vol1 1
-\end{verbatim}
+\end{lstlisting}
\subsection*{List directories}
Bvfs allows you to list directories in a specific path.
-\begin{verbatim}
+\begin{lstlisting}
.bvfs_lsdirs pathid=num path=/apath jobid=numlist limit=num offset=num
PathId FilenameId FileId JobId LStat Path
PathId FilenameId FileId JobId LStat Path
PathId FilenameId FileId JobId LStat Path
...
-\end{verbatim}
+\end{lstlisting}
You need to specify \texttt{pathid} or \texttt{path}. Using \texttt{path=""} will list
``/'' on Unix and all drives on Windows. If FilenameId is 0, the record
listed is a directory.
-\begin{verbatim}
+\begin{lstlisting}
.bvfs_lsdirs pathid=4 jobid=1,11,12
4 0 0 0 A A A A A A A A A A A A A A .
5 0 0 0 A A A A A A A A A A A A A A ..
3 0 0 0 A A A A A A A A A A A A A A regress/
-\end{verbatim}
+\end{lstlisting}
In this example, to list directories present in \texttt{regress/}, you can use
-\begin{verbatim}
+\begin{lstlisting}
.bvfs_lsdirs pathid=3 jobid=1,11,12
3 0 0 0 A A A A A A A A A A A A A A .
4 0 0 0 A A A A A A A A A A A A A A ..
2 0 0 0 A A A A A A A A A A A A A A tmp/
-\end{verbatim}
+\end{lstlisting}
\subsection*{List files}
Bvfs allows you to list files in a specific path.
-\begin{verbatim}
+\begin{lstlisting}
.bvfs_lsfiles pathid=num path=/apath jobid=numlist limit=num offset=num
PathId FilenameId FileId JobId LStat Path
PathId FilenameId FileId JobId LStat Path
PathId FilenameId FileId JobId LStat Path
...
-\end{verbatim}
+\end{lstlisting}
You need to specify \texttt{pathid} or \texttt{path}. Using \texttt{path=""} will list
``/'' on Unix and all drives on Windows. If FilenameId is 0, the record listed
is a directory.
-\begin{verbatim}
+\begin{lstlisting}
.bvfs_lsfiles pathid=4 jobid=1,11,12
4 0 0 0 A A A A A A A A A A A A A A .
5 0 0 0 A A A A A A A A A A A A A A ..
1 0 0 0 A A A A A A A A A A A A A A regress/
-\end{verbatim}
+\end{lstlisting}
In this example, to list files present in \texttt{regress/}, you can use
-\begin{verbatim}
+\begin{lstlisting}
.bvfs_lsfiles pathid=1 jobid=1,11,12
1 47 52 12 gD HRid IGk BAA I BMqcPH BMqcPE BMqe+t A titi
1 49 53 12 gD HRid IGk BAA I BMqe/K BMqcPE BMqe+t B toto
1 48 54 12 gD HRie IGk BAA I BMqcPH BMqcPE BMqe+3 A tutu
1 45 55 12 gD HRid IGk BAA I BMqe/K BMqcPE BMqe+t B ficheriro1.txt
1 46 56 12 gD HRie IGk BAA I BMqe/K BMqcPE BMqe+3 D ficheriro2.txt
-\end{verbatim}
+\end{lstlisting}
\subsection*{Restore set of files}
Bvfs allows you to create a SQL table that contains files that you want to
restore. This table can be provided to a restore command with the file option.
-\begin{verbatim}
+\begin{lstlisting}
.bvfs_restore fileid=numlist dirid=numlist hardlink=numlist path=b2num
OK
restore file=?b2num ...
-\end{verbatim}
+\end{lstlisting}
To include a directory (with \texttt{dirid}), Bvfs needs to run a query to
select all files. This query could be time consuming.
Example:
-\begin{verbatim}
+\begin{lstlisting}
.bvfs_restore fileid=1,2,3,4 hardlink=10,15,10,20 jobid=10 path=b20001
OK
-\end{verbatim}
+\end{lstlisting}
\subsection*{Cleanup after Restore}
To drop the table used by the restore command, you can use the
\texttt{.bvfs\_cleanup} command.
-\begin{verbatim}
+\begin{lstlisting}
.bvfs_cleanup path=b20001
-\end{verbatim}
+\end{lstlisting}
\subsection*{Clearing the BVFS Cache}
To clear the BVFS cache, you can use the \texttt{.bvfs\_clear\_cache} command.
-\begin{verbatim}
+\begin{lstlisting}
.bvfs_clear_cache yes
OK
-\end{verbatim}
+\end{lstlisting}
\label{_ChapterStart9}
\index{Format!Storage Media Output}
\index{Storage Media Output Format}
-\addcontentsline{toc}{section}{Storage Media Output Format}
\section{General}
\index{General}
-\addcontentsline{toc}{subsection}{General}
This document describes the media format written by the Storage daemon. The
Storage daemon reads and writes in units of blocks. Blocks contain records.
Each block has a block header followed by records, and each record has a
-record header followed by record data.
+record header followed by record data.
This chapter is intended to be a technical discussion of the Media Format and
as such is not targeted at end users but rather at developers and system
administrators that want or need to know more of the working details of {\bf
-Bacula}.
+Bacula}.
\section{Definitions}
\index{Definitions}
-\addcontentsline{toc}{subsection}{Definitions}
\begin{description}
\index{Block}
A block represents the primitive unit of information that the Storage daemon
reads and writes to a physical device. Normally, for a tape device, it will
-be the same as a tape block. The Storage daemon always reads and writes
+be the same as a tape block. The Storage daemon always reads and writes
blocks. A block consists of block header information followed by records.
Clients of the Storage daemon (the File daemon) normally never see blocks.
However, some of the Storage tools (bls, bscan, bextract, ...) may use
all blocks currently written by Bacula are block level BB02, and a given
block contains records for only a single job. Different jobs simply have
their own private blocks that are intermingled with the other blocks from
-other jobs on the Volume (previously the records were intermingled within
+other jobs on the Volume (previously the records were intermingled within
the blocks). Having only records from a single job in any given block
permitted moving the VolumeSessionId and VolumeSessionTime (see below) from
each record heading to the Block header. This has two advantages: 1. a block
can be quickly rejected based on the contents of the header without reading
all the records. 2. because there is on the average more than one record per
-block, less data is written to the Volume for each job.
+block, less data is written to the Volume for each job.
\item [Record]
\index{Record}
A record consists of a Record Header, which is managed by the Storage daemon
and Record Data, which is the data received from the Client. A record is the
-primitive unit of information sent to and from the Storage daemon by the
-Client (File daemon) programs. The details are described below.
+primitive unit of information sent to and from the Storage daemon by the
+Client (File daemon) programs. The details are described below.
\item [JobId]
\index{JobId}
A number assigned by the Director daemon for a particular job. This number
-will be unique for that particular Director (Catalog). The daemons use this
+will be unique for that particular Director (Catalog). The daemons use this
number to keep track of individual jobs. Within the Storage daemon, the JobId
may not be unique if several Directors are accessing the Storage daemon
-simultaneously.
+simultaneously.
\item [Session]
\index{Session}
A Session is a concept used in the Storage daemon that corresponds one to one to a
Job with the exception that each session is uniquely identified within the
-Storage daemon by a unique SessionId/SessionTime pair (see below).
+Storage daemon by a unique SessionId/SessionTime pair (see below).
\item [VolSessionId]
\index{VolSessionId}
A unique number assigned by the Storage daemon to a particular session (Job)
-it is having with a File daemon. This number by itself is not unique to the
-given Volume, but with the VolSessionTime, it is unique.
+it is having with a File daemon. This number by itself is not unique to the
+given Volume, but with the VolSessionTime, it is unique.
\item [VolSessionTime]
\index{VolSessionTime}
execution. It is actually the Unix time\_t value of when the Storage daemon
began execution cast to a 32 bit unsigned integer. The combination of the
{\bf VolSessionId} and the {\bf VolSessionTime} for a given Storage daemon is
-guaranteed to be unique for each Job (or session).
+guaranteed to be unique for each Job (or session).
\item [FileIndex]
\index{FileIndex}
the Storage daemon uses negative FileIndexes to flag Session Start and End
Labels as well as End of Volume Labels. Thus, the combination of
VolSessionId, VolSessionTime, and FileIndex uniquely identifies the records
-for a single file written to a Volume.
+for a single file written to a Volume.
\item [Stream]
\index{Stream}
attributes, the file data, ... The Stream indicates what piece of data it
is, and it is an arbitrary number assigned by the File daemon to the parts
(Unix attributes, Win32 attributes, data, compressed data,\ ...) of a file
-that are sent to the Storage daemon. The Storage daemon has no knowledge of
+that are sent to the Storage daemon. The Storage daemon has no knowledge of
the details of a Stream; it simply represents a numbered stream of bytes. The
data for a given stream may be passed to the Storage daemon in single record,
-or in multiple records.
+or in multiple records.
\item [Block Header]
\index{Block Header}
in bytes (typically 64,512), a checksum, and a sequential block number. Each
block starts with a Block Header and is followed by Records. Current block
headers also contain the VolSessionId and VolSessionTime for the records
-written to that block.
+written to that block.
\item [Record Header]
\index{Record Header}
Record Header is always immediately followed by a Data Record if the size
given in the Header is greater than zero. Note, for Block headers of level
BB02 (version 1.27 and later), the Record header as written to tape does not
-contain the Volume Session Id and the Volume Session Time as these two
+contain the Volume Session Id and the Volume Session Time as these two
fields are stored in the BB02 Block header. The in-memory record header does
-have those fields for convenience.
+have those fields for convenience.
\item [Data Record]
\index{Data Record}
A data record consists of a binary stream of bytes and is always preceded by
a Record Header. The details of the meaning of the binary stream of bytes are
unknown to the Storage daemon, but the Client programs (File daemon) defines
-and thus knows the details of each record type.
+and thus knows the details of each record type.
\item [Volume Label]
\index{Volume Label}
A label placed by the Storage daemon at the beginning of each storage volume.
It contains general information about the volume. It is written in Record
format. The Storage daemon manages Volume Labels, and if the client wants, he
-may also read them.
+may also read them.
\item [Begin Session Label]
\index{Begin Session Label}
session (Job), since no records with the same VolSessionId and VolSessionTime
will precede this record. This record is not normally visible outside of the
Storage daemon. The Begin Session Label is similar to the Volume Label except
-that it contains additional information pertaining to the Session.
+that it contains additional information pertaining to the Session.
\item [End Session Label]
\index{End Session Label}
The End Session Label is a special record placed by the Storage daemon on the
storage medium as the last record of an append session job with a File
daemon. The End Session Record is distinguished by a FileIndex with a value
-of minus two (-2). This record is useful for detecting the end of a
-particular session since no records with the same VolSessionId and
+of minus two (-2). This record is useful for detecting the end of a
+particular session since no records with the same VolSessionId and
VolSessionTime will follow this record. This record is not normally visible
outside of the Storage daemon. The End Session Label is similar to the Volume
Label except that it contains additional information pertaining to the
-Session.
+Session.
\end{description}
\section{Storage Daemon File Output Format}
\index{Format!Storage Daemon File Output}
\index{Storage Daemon File Output Format}
-\addcontentsline{toc}{subsection}{Storage Daemon File Output Format}
The file storage and tape storage formats are identical except that tape
records are by default blocked into blocks of 64,512 bytes, except for the
The default block size of 64,512 bytes may be overridden by the user (some
older tape drives only support block sizes of 32K). Each Session written to
tape is terminated with an End of File mark (this will be removed later).
-Sessions written to file are simply appended to the end of the file.
+Sessions written to file are simply appended to the end of the file.
\section{Overall Format}
\index{Format!Overall}
\index{Overall Format}
-\addcontentsline{toc}{subsection}{Overall Format}
A Bacula output file consists of Blocks of data. Each block contains a block
header followed by records. Each record consists of a record header followed
by the record data. The first record on a tape will always be the Volume Label
-Record.
+Record.
No Record Header will be split across Bacula blocks. However, Record Data may
be split across any number of Bacula blocks. Obviously this will not be the
case for the Volume Label which will always be smaller than the Bacula Block
-size.
+size.
To simplify reading tapes, the Start of Session (SOS) and End of Session (EOS)
records are never split across blocks. If this is about to happen, Bacula will
write a short block before writing the session record (actually, the SOS
record should always be the first record in a block, excepting perhaps the
-Volume label).
+Volume label).
Due to hardware limitations, the last block written to the tape may not be
fully written. If your drive permits backspacing over records, Bacula will back up over
the last record written on the tape, re-read it and verify that it was
-correctly written.
+correctly written.
When a new tape is mounted Bacula will write the full contents of the
partially written block to the new tape ensuring that there is no loss of
data. When reading a tape, Bacula will discard any block that is not totally
written, thus ensuring that there is no duplication of data. In addition,
since Bacula blocks are sequentially numbered within a Job, it is easy to
-ensure that no block is missing or duplicated.
+ensure that no block is missing or duplicated.
\section{Serialization}
\index{Serialization}
-\addcontentsline{toc}{subsection}{Serialization}
All Block Headers, Record Headers, and Label Records are written using
Bacula's serialization routines. These routines guarantee that the data is
-written to the output volume in a machine independent format.
+written to the output volume in a machine independent format.
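To illustrate what machine independence means here (this is not the actual
Bacula code; the real routines live in the Bacula source under src/lib), each
integer field is stored in a fixed byte order, independent of the host CPU:
\footnotesize
\begin{lstlisting}
#include <stdint.h>

/* Illustrative only: store a uint32_t in big-endian order so that
 * it reads back identically on any architecture. */
static void put_uint32(uint8_t *p, uint32_t v)
{
   p[0] = (uint8_t)(v >> 24);
   p[1] = (uint8_t)(v >> 16);
   p[2] = (uint8_t)(v >> 8);
   p[3] = (uint8_t)v;
}
\end{lstlisting}
\normalsize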
\section{Block Header}
\index{Header!Block}
\index{Block Header}
-\addcontentsline{toc}{subsection}{Block Header}
-The format of the Block Header (version 1.27 and later) is:
+The format of the Block Header (version 1.27 and later) is:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
uint32_t CheckSum; /* Block check sum */
uint32_t BlockSize; /* Block byte size including the header */
uint32_t BlockNumber; /* Block number */
char ID[4] = "BB02"; /* Identification and block level */
uint32_t VolSessionId; /* Session Id for Job */
uint32_t VolSessionTime; /* Session Time for Job */
-\end{verbatim}
+\end{lstlisting}
\normalsize
The Block header is a fixed length and fixed format and is followed by Record
\section{Record Header}
\index{Header!Record}
\index{Record Header}
-\addcontentsline{toc}{subsection}{Record Header}
Each binary data record is preceded by a Record Header. The Record Header is
fixed length and fixed format, whereas the binary data record is of variable
length. The Record Header is written using the Bacula serialization routines
-and thus is guaranteed to be in machine independent format.
+and thus is guaranteed to be in machine independent format.
-The format of the Record Header (version 1.27 or later) is:
+The format of the Record Header (version 1.27 or later) is:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
int32_t FileIndex; /* File index supplied by File daemon */
int32_t Stream; /* Stream number supplied by File daemon */
uint32_t DataSize; /* size of following data record in bytes */
-\end{verbatim}
+\end{lstlisting}
\normalsize
This record is followed by the binary Stream data of DataSize bytes, followed
by another Record Header record and the binary stream data. For the definitive
-definition of this record, see record.h in the src/stored directory.
+definition of this record, see record.h in the src/stored directory.
-Additional notes on the above:
+Additional notes on the above:
\begin{description}
\index{VolSessionId}
is a unique sequential number that is assigned by the Storage Daemon to a
particular Job. This number is sequential since the start of execution of the
-daemon.
+daemon.
\item [The {\bf VolSessionTime} ]
\index{VolSessionTime}
is the time/date that the current execution of the Storage Daemon started. It
assures that the combination of VolSessionId and VolSessionTime is unique for
every job written to the tape, even if there was a machine crash between two
-writes.
+writes.
\item [The {\bf FileIndex} ]
\index{FileIndex}
index to be greater than zero and sequential. Note, however, that the File
daemon may send multiple Streams for the same FileIndex. In addition, the
Storage daemon uses negative FileIndices to hold the Begin Session Label, the
-End Session Label, and the End of Volume Label.
+End Session Label, and the End of Volume Label.
\item [The {\bf Stream} ]
\index{Stream}
is defined by the File daemon and is used to identify separate parts of the
data saved for each file (Unix attributes, Win32 attributes, file data,
-compressed file data, sparse file data, ...). The Storage Daemon has no idea
+compressed file data, sparse file data, ...). The Storage Daemon has no idea
of what a Stream is or what it contains except that the Stream is required to
be a positive integer. Negative Stream numbers are used internally by the
Storage daemon to indicate that the record is a continuation of the previous
-record (the previous record would not entirely fit in the block).
+record (the previous record would not entirely fit in the block).
-For Start Session and End Session Labels (where the FileIndex is negative),
+For Start Session and End Session Labels (where the FileIndex is negative),
the Storage daemon uses the Stream field to contain the JobId. The current
-stream definitions are:
+stream definitions are:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
#define STREAM_UNIX_ATTRIBUTES 1 /* Generic Unix attributes */
#define STREAM_FILE_DATA 2 /* Standard uncompressed data */
#define STREAM_MD5_SIGNATURE 3 /* MD5 signature for the file */
#define STREAM_HFSPLUS_ATTRIBUTES 14 /* Mac OS extra attributes */
#define STREAM_UNIX_ATTRIBUTES_ACCESS_ACL 15 /* Standard ACL attributes on UNIX */
#define STREAM_UNIX_ATTRIBUTES_DEFAULT_ACL 16 /* Default ACL attributes on UNIX */
-\end{verbatim}
+\end{lstlisting}
\normalsize
\item [The {\bf DataSize} ]
Record header. The Storage Daemon has no idea of the actual contents of the
binary data record. For standard Unix files, the data record typically
contains the file attributes or the file data. For a sparse file the first
-64 bits of the file data contains the storage address for the data block.
+64 bits of the file data contains the storage address for the data block.
\end{description}
The Record Header is never split across two blocks. If there is not enough
is always immediately preceded by a Record Header. When reading a record, if
Bacula finds only part of the data in the first record, it will automatically
read the next record and concatenate the data record to form a full data
-record.
+record.
\section{Version BB02 Block Header}
\index{Version BB02 Block Header}
\index{Header!Version BB02 Block}
-\addcontentsline{toc}{subsection}{Version BB02 Block Header}
Each session or Job has its own private block. As a consequence, the SessionId
and SessionTime are written once in each Block Header and not in the Record
-Header. So, the second and current version of the Block Header BB02 is:
+Header. So, the second and current version of the Block Header BB02 is:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
uint32_t CheckSum; /* Block check sum */
uint32_t BlockSize; /* Block byte size including the header */
uint32_t BlockNumber; /* Block number */
char ID[4] = "BB02"; /* Identification and block level */
uint32_t VolSessionId; /* Applies to all records */
uint32_t VolSessionTime; /* contained in this block */
-\end{verbatim}
+\end{lstlisting}
\normalsize
As with the previous version, the BB02 Block header is a fixed length and
by a Record Header. If the tape is damaged, a Bacula utility will be able to
recover as much information as possible from the tape by recovering blocks
which are valid. The Block header is written using the Bacula serialization
-routines and thus is guaranteed to be in machine independent format.
+routines and thus is guaranteed to be in machine independent format.
\section{Version 2 Record Header}
\index{Version 2 Record Header}
\index{Header!Version 2 Record}
-\addcontentsline{toc}{subsection}{Version 2 Record Header}
Version 2 Record Header is written to the medium when using Version BB02 Block
Headers. The memory representation of the record is identical to the old BB01
record) so that when the block is written, it will have the current and unique
VolSessionId and VolSessionTime. On reading each record from the Block, the
VolSessionId and VolSessionTime is filled in the Record Header from the Block
-Header.
+Header.
\section{Volume Label Format}
\index{Volume Label Format}
\index{Format!Volume Label}
-\addcontentsline{toc}{subsection}{Volume Label Format}
Tape volume labels are created by the Storage daemon in response to a {\bf
label} command given to the Console program, or alternatively by the {\bf
btape} program. Each volume is labeled with the following information
using the Bacula serialization routines, which guarantee machine byte order
-independence.
+independence.
-For Bacula versions 1.27 and later, the Volume Label Format is:
+For Bacula versions 1.27 and later, the Volume Label Format is:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
char Id[32]; /* Bacula 1.0 Immortal\n */
uint32_t VerNum; /* Label version number */
/* VerNum 11 and greater Bacula 1.27 and later */
char LabelProg[32]; /* Label program name */
char ProgVersion[32]; /* Program version */
char ProgDate[32]; /* Program build date/time */
-\end{verbatim}
+\end{lstlisting}
\normalsize
Note, the LabelType (Volume Label, Volume PreLabel, Session Start Label, ...)
is stored in the record FileIndex field of the Record Header and does not
-appear in the data part of the record.
+appear in the data part of the record.
\section{Session Label}
\index{Label!Session}
\index{Session Label}
-\addcontentsline{toc}{subsection}{Session Label}
The Session Label is written at the beginning and end of each session as well
as the last record on the physical medium. It has the following binary format:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
char Id[32]; /* Bacula Immortal ... */
uint32_t VerNum; /* Label version number */
uint32_t JobId; /* Job id */
char FileSetName[128]; /* FileSet name */
uint32_t JobType;
uint32_t JobLevel;
-\end{verbatim}
+\end{lstlisting}
\normalsize
-In addition, the EOS label contains:
+In addition, the EOS label contains:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
/* The remainder are part of EOS label only */
uint32_t JobFiles;
uint64_t JobBytes;
uint32_t start_file;
uint32_t end_file;
uint32_t JobErrors;
-\end{verbatim}
+\end{lstlisting}
\normalsize
In addition, for VerNum greater than 10, the EOS label contains (in addition
-to the above):
+to the above):
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
uint32_t JobStatus /* Job termination code */
-\end{verbatim}
+\end{lstlisting}
\normalsize
Note, the LabelType (Volume Label, Volume PreLabel, Session Start Label,
...) is stored in the record FileIndex field and does not appear in the data
part of the record. Also, the Stream field of the Record Header contains the
JobId. This permits quick filtering without actually reading all the session
-data in many cases.
+data in many cases.
\section{Overall Storage Format}
\index{Format!Overall Storage}
\index{Overall Storage Format}
-\addcontentsline{toc}{subsection}{Overall Storage Format}
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
Current Bacula Tape Format
6 June 2001
Version BB02 added 28 September 2002
| ProgDate (32 bytes) |
|-------------------------------------------------------|
:=======================================================:
-
+
Id: 32 byte Bacula identifier "Bacula 1.0 immortal\n"
(old version also recognized:)
Id: 32 byte Bacula identifier "Bacula 0.9 mortal\n"
MediaType: Media Type
ClientName: Name of File daemon or Client writing this session
Not used for EOM_LABEL.
-\end{verbatim}
+\end{lstlisting}
\normalsize
\section{Unix File Attributes}
\index{Unix File Attributes}
\index{Attributes!Unix File}
-\addcontentsline{toc}{subsection}{Unix File Attributes}
-The Unix File Attributes packet consists of the following:
+The Unix File Attributes packet consists of the following:
\lt{}File-Index\gt{} \lt{}Type\gt{}
\lt{}Filename\gt{}@\lt{}File-Attributes\gt{}@\lt{}Link\gt{}
-@\lt{}Extended-Attributes@\gt{} where
+@\lt{}Extended-Attributes\gt{}@ where
\begin{description}
\item [@]
- represents a byte containing a binary zero.
+ represents a byte containing a binary zero.
\item [FileIndex]
\index{FileIndex}
- is the sequential file index starting from one assigned by the File daemon.
+ is the sequential file index starting from one assigned by the File daemon.
\item [Type]
\index{Type}
- is one of the following:
+ is one of the following:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
#define FT_LNKSAVED 1 /* hard link to file already saved */
#define FT_REGE 2 /* Regular file but empty */
#define FT_REG 3 /* Regular file */
#define FT_NOOPEN 15 /* Could not open directory */
#define FT_RAW 16 /* Raw block device */
#define FT_FIFO 17 /* Raw fifo device */
-\end{verbatim}
+\end{lstlisting}
\normalsize
\item [Filename]
\index{Filename}
- is the fully qualified filename.
+ is the fully qualified filename.
\item [File-Attributes]
\index{File-Attributes}
consists of the 13 fields of the stat() buffer in ASCII base64 format
separated by spaces. These fields and their meanings are shown below. This
stat() packet is in Unix format, and MUST be provided (constructed) for ALL
-systems.
+systems.
\item [Link]
\index{Link}
when the FT code is FT\_LNK or FT\_LNKSAVED, the item in question is a Unix
link, and this field contains the fully qualified link name. When the FT code
-is not FT\_LNK or FT\_LNKSAVED, this field is null.
+is not FT\_LNK or FT\_LNKSAVED, this field is null.
\item [Extended-Attributes]
\index{Extended-Attributes}
The exact format of this field is operating system dependent. It contains
additional or extended attributes of a system dependent nature. Currently,
-this field is used only on WIN32 systems where it contains a ASCII base64
+this field is used only on WIN32 systems where it contains an ASCII base64
representation of the WIN32\_FILE\_ATTRIBUTE\_DATA structure as defined by
Windows. The fields in the base64 representation of this structure are like
-the File-Attributes separated by spaces.
+the File-Attributes separated by spaces.
\end{description}
-The File-attributes consist of the following:
-
-\addcontentsline{lot}{table}{File Attributes}
-\begin{longtable}{|p{0.6in}|p{0.7in}|p{1in}|p{1in}|p{1.4in}|}
- \hline
-\multicolumn{1}{|c|}{\bf Field No. } & \multicolumn{1}{c|}{\bf Stat Name }
-& \multicolumn{1}{c|}{\bf Unix } & \multicolumn{1}{c|}{\bf Win98/NT } &
-\multicolumn{1}{c|}{\bf MacOS } \\
- \hline
-\multicolumn{1}{|c|}{1 } & {st\_dev } & {Device number of filesystem } &
-{Drive number } & {vRefNum } \\
- \hline
-\multicolumn{1}{|c|}{2 } & {st\_ino } & {Inode number } & {Always 0 } &
-{fileID/dirID } \\
- \hline
-\multicolumn{1}{|c|}{3 } & {st\_mode } & {File mode } & {File mode } &
-{777 dirs/apps; 666 docs; 444 locked docs } \\
- \hline
-\multicolumn{1}{|c|}{4 } & {st\_nlink } & {Number of links to the file } &
-{Number of link (only on NTFS) } & {Always 1 } \\
- \hline
-\multicolumn{1}{|c|}{5 } & {st\_uid } & {Owner ID } & {Always 0 } &
-{Always 0 } \\
- \hline
-\multicolumn{1}{|c|}{6 } & {st\_gid } & {Group ID } & {Always 0 } &
-{Always 0 } \\
- \hline
-\multicolumn{1}{|c|}{7 } & {st\_rdev } & {Device ID for special files } &
-{Drive No. } & {Always 0 } \\
- \hline
-\multicolumn{1}{|c|}{8 } & {st\_size } & {File size in bytes } & {File
-size in bytes } & {Data fork file size in bytes } \\
- \hline
-\multicolumn{1}{|c|}{9 } & {st\_blksize } & {Preferred block size } &
-{Always 0 } & {Preferred block size } \\
- \hline
-\multicolumn{1}{|c|}{10 } & {st\_blocks } & {Number of blocks allocated }
-& {Always 0 } & {Number of blocks allocated } \\
- \hline
-\multicolumn{1}{|c|}{11 } & {st\_atime } & {Last access time since epoch }
-& {Last access time since epoch } & {Last access time -66 years } \\
- \hline
-\multicolumn{1}{|c|}{12 } & {st\_mtime } & {Last modify time since epoch }
-& {Last modify time since epoch } & {Last access time -66 years } \\
- \hline
-\multicolumn{1}{|c|}{13 } & {st\_ctime } & {Inode change time since epoch
-} & {File create time since epoch } & {File create time -66 years}
-\\ \hline
-
-\end{longtable}
+The File-attributes consist of the following:
+
+%\addcontentsline{lot}{table}{File Attributes}
+\LTXtable{\linewidth}{table_fileattributes}
\section{Old Deprecated Tape Format}
\index{Old Deprecated Tape Format}
\index{Format!Old Deprecated Tape}
-\addcontentsline{toc}{subsection}{Old Depreciated Tape Format}
-The format of the Block Header (version 1.26 and earlier) is:
+The format of the Block Header (version 1.26 and earlier) is:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
uint32_t CheckSum; /* Block check sum */
uint32_t BlockSize; /* Block byte size including the header */
uint32_t BlockNumber; /* Block number */
char ID[4] = "BB01"; /* Identification and block level */
-\end{verbatim}
+\end{lstlisting}
\normalsize
-The format of the Record Header (version 1.26 or earlier) is:
+The format of the Record Header (version 1.26 or earlier) is:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
uint32_t VolSessionId; /* Unique ID for this session */
uint32_t VolSessionTime; /* Start time/date of session */
int32_t FileIndex; /* File index supplied by File daemon */
int32_t Stream; /* Stream number supplied by File daemon */
uint32_t DataSize; /* size of following data record in bytes */
-\end{verbatim}
+\end{lstlisting}
\normalsize
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
Current Bacula Tape Format
6 June 2001
Version BB01 is the old deprecated format.
| ProgDate (32 bytes) |
|-------------------------------------------------------|
:=======================================================:
-
+
Id: 32 byte Bacula identifier "Bacula 1.0 immortal\n"
(old version also recognized:)
Id: 32 byte Bacula identifier "Bacula 0.9 mortal\n"
MediaType: Media Type
ClientName: Name of File daemon or Client writing this session
Not used for EOM_LABEL.
-\end{verbatim}
+\end{lstlisting}
\normalsize
possibilities for memory usage within {\bf Bacula}. Each will be described in
turn. They are:
-\begin{itemize}
+\begin{bsysitemize}
\item Statically allocated memory.
\item Dynamically allocated memory using malloc() and free().
\item Non-pooled memory.
\item Pooled memory.
- \end{itemize}
+ \end{bsysitemize}
\subsection{Statically Allocated Memory}
\index{Statically Allocated Memory}
Statically allocated memory is of the form:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
char buffer[MAXSTRING];
-\end{verbatim}
+\end{lstlisting}
\normalsize
The use of this kind of memory is discouraged except when you are 100\% sure
As in:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
char *buf;
buf = malloc(256);
-\end{verbatim}
+\end{lstlisting}
\normalsize
This kind of memory can be released with:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
free(buf);
-\end{verbatim}
+\end{lstlisting}
\normalsize
It is recommended to use this kind of memory only when you are sure that you
Currently there are three memory pool types:
-\begin{itemize}
+\begin{bsysitemize}
\item PM\_NOPOOL -- non-pooled memory.
\item PM\_FNAME -- a filename pool.
\item PM\_MESSAGE -- a message buffer pool.
\item PM\_EMSG -- error message buffer pool.
- \end{itemize}
+ \end{bsysitemize}
\paragraph*{Getting Memory:}
To get memory, one uses:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
void *get_pool_memory(pool);
-\end{verbatim}
+\end{lstlisting}
\normalsize
where {\bf pool} is one of the above mentioned pool names. The size of the
If you wish non-pooled memory, you may alternatively call:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
void *get_memory(size_t size);
-\end{verbatim}
+\end{lstlisting}
\normalsize
The buffer length will be set to the size specified, and it will be assigned
To free memory acquired by either of the above two calls, use:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
void free_pool_memory(void *buffer);
-\end{verbatim}
+\end{lstlisting}
\normalsize
where buffer is the memory buffer returned when the memory was acquired. If
To determine the memory buffer size, use:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
size_t sizeof_pool_memory(void *buffer);
-\end{verbatim}
+\end{lstlisting}
\normalsize
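Putting these calls together, a typical usage pattern is (a sketch; {\bf path}
is a hypothetical variable):
\footnotesize
\begin{lstlisting}
char *fname = (char *)get_pool_memory(PM_FNAME);  /* filename pool */
bstrncpy(fname, path, sizeof_pool_memory(fname)); /* bounded copy */
/* ... use fname ... */
free_pool_memory(fname);              /* return buffer to the pool */
\end{lstlisting}
\normalsize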
\paragraph*{Resizing Pool Memory:}
To resize pool memory, use:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
void *realloc_pool_memory(void *buffer, size_t new-size);
-\end{verbatim}
+\end{lstlisting}
\normalsize
The buffer will be reallocated, and the contents of the original buffer will
memory buffer, use:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
void *check_pool_memory_size(void *buffer, size_t new-size);
-\end{verbatim}
+\end{lstlisting}
\normalsize
where {\bf new-size} is the buffer length needed. Note, if the buffer is
use:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
void close_memory_pool();
-\end{verbatim}
+\end{lstlisting}
\normalsize
to free all unused memory retained in the Bacula memory pool. Note, any memory
the current memory pool statistics:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
void print_memory_pool_stats();
-\end{verbatim}
+\end{lstlisting}
\normalsize
an example output is:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
Pool Maxsize Maxused Inuse
0 256 0 0
1 256 1 0
2 256 1 0
-\end{verbatim}
+\end{lstlisting}
\normalsize
following data packet. It returns:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
Returns 0 on failure
Returns 1 on success
-\end{verbatim}
+\end{lstlisting}
\normalsize
In the case of a failure, an error message will be sent to the JCR contained
maxbytes is less than the record size sent. It returns:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
* Returns number of bytes read
* Returns 0 on end of file
* Returns -1 on hard end of file (i.e. network connection close)
* Returns -2 on error
-\end{verbatim}
+\end{lstlisting}
\normalsize
It should be noted that bnet\_recv() is a blocking read.
Thus the normal exchange between the server (S) and the client (C) are:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
S: wait for connection C: attempt connection
S: accept connection C: bnet_send() send request
S: bnet_recv() wait for request
S: act on request
S: bnet_send() send ack C: bnet_recv() wait for ack
-\end{verbatim}
+\end{lstlisting}
\normalsize
Thus a single command is sent, acted upon by the server, and then
enter a loop:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
while ((n=bnet_recv(bsock)) > 0) {
act on request
}
if (n < 0)
error
-\end{verbatim}
+\end{lstlisting}
\normalsize
The client will perform the following:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
bnet_send(bsock);
bnet_send(bsock);
...
bnet_sig(bsock, BNET_EOD);
-\end{verbatim}
+\end{lstlisting}
\normalsize
Thus the client will send multiple packets and signal to the server when all
must support POSIX Unix system calls. In addition, the following
requirements must be met:
-\begin{itemize}
+\begin{bsysitemize}
\item The principal developer (currently Kern) must have
non-root ssh access to a test machine running the platform.
\item The ideal requirements and minimum requirements
each Bacula release.
\item Ideally there are one or more developers who can respond to
and fix platform specific bugs.
-\end{itemize}
+\end{bsysitemize}
Ideal requirements for a test machine:
-\begin{itemize}
+\begin{bsysitemize}
\item The principal developer will have non-root ssh access to
the test machine at all times.
\item The principal developer will have a root password.
access for account "bacula" available.
\item The test machine will have sftp access.
\item The test machine will provide an smtp server.
-\end{itemize}
+\end{bsysitemize}
Minimum requirements for a test machine:
-\begin{itemize}
+\begin{bsysitemize}
\item The principal developer will have non-root ssh access to
the test machine when requested approximately once a month.
\item The principal developer will not have root access.
access.
\item The test machine will have no sftp access.
\item The test machine will provide no email access.
-\end{itemize}
+\end{bsysitemize}
Bare bones test machine requirements:
-\begin{itemize}
+\begin{bsysitemize}
\item The test machine is available only to a designated
test person (your own machine).
\item The designated test person runs the regression
tests on demand.
\item The test machine has a tape drive available.
-\end{itemize}
+\end{bsysitemize}
notified of important events as noted above (details described below), but in
addition, this kind of plugin can accept a command line, which is a:
-\begin{verbatim}
+\begin{lstlisting}
Plugin = <command-string>
-\end{verbatim}
+\end{lstlisting}
directive that is placed in the Include section of a FileSet and is very
similar to the "File = " directive. When this Plugin directive is encountered
filesystem.
The important features of the command plugin entry points are:
-\begin{itemize}
+\begin{bsysitemize}
\item It is triggered by a "Plugin =" directive in the FileSet
\item Only a single plugin is called that is named on the "Plugin =" directive.
\item The full command string after the "Plugin =" is passed to the plugin
so that it can be told what to backup/restore.
-\end{itemize}
+\end{bsysitemize}
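For example, the {\bf bpipe} plugin (described later in this chapter) receives
everything after the equal sign as its command string; an illustrative FileSet
entry is:
\begin{lstlisting}
Plugin = "bpipe:/MYSQL/dump.sql:mysqldump --all-databases:mysql"
\end{lstlisting}
Here the plugin is told a pseudo path under which to store the data, the
program that produces the data on backup, and the program that consumes it
on restore.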
The third type of plugin is the Options Plugin; this kind of plugin is useful
for implementing a custom filter on data. For example, you can implement a
important events as noted above (details described below), but in addition,
this kind of plugin can be placed in an Options group, which is a:
-\begin{verbatim}
+\begin{lstlisting}
FileSet {
Name = TestFS
Include {
File = /
}
}
-\end{verbatim}
+\end{lstlisting}
\section{Loading Plugins}
Once the File daemon loads the plugins, it asks the OS for the
The two entry points are:
-\begin{verbatim}
+\begin{lstlisting}
bRC loadPlugin(bInfo *lbinfo, bFuncs *lbfuncs, pInfo **pinfo, pFuncs **pfuncs)
and
bRC unloadPlugin()
-\end{verbatim}
+\end{lstlisting}
Both these external entry points to the shared object are defined as C entry
points to avoid name mangling complications with C++. However, the shared
plugin interface. Within this header file, it includes the following
files:
-\begin{verbatim}
+\begin{lstlisting}
#include <sys/types.h>
#include "config.h"
#include "bc_types.h"
#include "lib/plugins.h"
#include <sys/stat.h>
-\end{verbatim}
+\end{lstlisting}
Aside from the {\bf bc\_types.h} and {\bf config.h} headers, the plugin
definition uses the minimum code from Bacula. The bc\_types.h file is required
core code.
The return codes are defined as:
-\begin{verbatim}
+\begin{lstlisting}
typedef enum {
bRC_OK = 0, /* OK */
bRC_Stop = 1, /* Stop calling other plugins */
bRC_Core = 6, /* Let the Bacula core handle this file */
bRC_Skip = 7, /* Skip the proposed file */
} bRC;
-\end{verbatim}
+\end{lstlisting}
At a future point in time, we hope to make the Bacula libbac.a into a
is passed to the plugin, and the last two arguments are information
about the plugin that the plugin must return to Bacula. The call is:
-\begin{verbatim}
+\begin{lstlisting}
bRC loadPlugin(bInfo *lbinfo, bFuncs *lbfuncs, pInfo **pinfo, pFuncs **pfuncs)
-\end{verbatim}
+\end{lstlisting}
and the arguments are:
byte size of the structure. The exact definition of the bInfo structure
as of this writing is:
-\begin{verbatim}
+\begin{lstlisting}
typedef struct s_baculaInfo {
uint32_t size;
uint32_t version;
} bInfo;
-\end{verbatim}
+\end{lstlisting}
\item [lbfuncs]
The bFuncs structure defines the callback entry points within Bacula
Bacula values, and send messages to the Job output or debug output.
The exact definition as of this writing is:
-\begin{verbatim}
+\begin{lstlisting}
typedef struct s_baculaFuncs {
uint32_t size;
uint32_t version;
size_t size);
void (*baculaFree)(bpContext *ctx, const char *file, int line, void *mem);
} bFuncs;
-\end{verbatim}
+\end{lstlisting}
We will discuss these entry points and how to use them a bit later when
describing the plugin code.
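As a small foretaste, the two allocation callbacks visible above can be used
like this (a sketch; passing \_\_FILE\_\_ and \_\_LINE\_\_ lets Bacula
attribute the allocation to the caller):
\begin{lstlisting}
char *buf = (char *)bfuncs->baculaMalloc(ctx, __FILE__, __LINE__, 256);
/* ... use buf ... */
bfuncs->baculaFree(ctx, __FILE__, __LINE__, buf);
\end{lstlisting}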
The exact definition as of this writing is:
-\begin{verbatim}
+\begin{lstlisting}
typedef struct s_pluginInfo {
uint32_t size;
uint32_t version;
const char *plugin_version;
const char *plugin_description;
} pInfo;
-\end{verbatim}
+\end{lstlisting}
Where:
\begin{description}
The exact definition as of this writing is:
-\begin{verbatim}
+\begin{lstlisting}
typedef struct s_pluginFuncs {
uint32_t size;
uint32_t version;
bRC (*setFileAttributes)(bpContext *ctx, struct restore_pkt *rp);
bRC (*checkFile)(bpContext *ctx, char *fname);
} pFuncs;
-\end{verbatim}
+\end{lstlisting}
The details of the entry points will be presented in
separate sections below.
\end{description}
Sample code for loadPlugin:
-\begin{verbatim}
+\begin{lstlisting}
bfuncs = lbfuncs; /* set Bacula funct pointers */
binfo = lbinfo;
*pinfo = &pluginInfo; /* return pointer to our info */
*pfuncs = &pluginFuncs; /* return pointer to our functions */
return bRC_OK;
-\end{verbatim}
+\end{lstlisting}
where pluginInfo and pluginFuncs are statically defined structures.
See bpipe-fd.c for details.
same throughout the Job, so you can keep your private Job-specific
data in it ({\bf bContext}).
-\begin{verbatim}
+\begin{lstlisting}
typedef struct s_bpContext {
void *pContext; /* Plugin private context */
void *bContext; /* Bacula private context */
} bpContext;
-\end{verbatim}
+\end{lstlisting}
This context pointer will be passed as the first argument to all
the entry points that Bacula calls within the plugin. Needless
When the plugin is called, Bacula passes it the pointer to an event
structure (bEvent), which currently has one item, the eventType:
-\begin{verbatim}
+\begin{lstlisting}
typedef struct s_bEvent {
uint32_t eventType;
} bEvent;
-\end{verbatim}
+\end{lstlisting}
which defines what event has been triggered, and for each event,
Bacula will pass a pointer to a value associated with that event.
The current list of events are:
-\begin{verbatim}
+\begin{lstlisting}
typedef enum {
bEventJobStart = 1,
bEventJobEnd = 2,
} bEventType;
-\end{verbatim}
+\end{lstlisting}
Most of the above are self-explanatory.
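A plugin typically dispatches on the event type. A minimal sketch, assuming
the handlePluginEvent entry point with the signature used by bpipe-fd.c:
\begin{lstlisting}
static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value)
{
   switch (event->eventType) {
   case bEventJobStart:
      /* value points to event-specific data supplied by Bacula */
      break;
   case bEventJobEnd:
      break;
   default:
      break;
   }
   return bRC_OK;
}
\end{lstlisting}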
with a pointer to the {\bf save\_pkt} structure and you must fill in
this packet with the "attribute" data of the file.
-\begin{verbatim}
+\begin{lstlisting}
struct save_pkt {
int32_t pkt_size; /* size of this packet */
char *fname; /* Full path and filename */
int32_t index; /* restore object index */
int32_t pkt_end; /* end packet sentinel */
};
-\end{verbatim}
+\end{lstlisting}
The second argument is a pointer to the {\bf save\_pkt} structure for the file
to be backed up. The plugin is responsible for filling in all the fields
Example of filling in the save\_pkt as used in bpipe-fd.c:
-\begin{verbatim}
+\begin{lstlisting}
struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext;
time_t now = time(NULL);
sp->fname = p_ctx->fname;
sp->statp.st_blocks = 1;
p_ctx->backup = true;
return bRC_OK;
-\end{verbatim}
+\end{lstlisting}
Note: the filename to be created has already been created from the
command string previously sent to the plugin and is in the plugin
This call must return one of the following values:
-\begin{verbatim}
+\begin{lstlisting}
enum {
CF_SKIP = 1, /* skip file (not newer or something) */
CF_ERROR, /* error creating file */
CF_CREATED, /* file created, no data to extract */
CF_CORE /* let the Bacula core handle the file creation */
};
-\end{verbatim}
+\end{lstlisting}
in the restore\_pkt value {\bf create\_status}. For a normal file,
unless there is an error, you must return {\bf CF\_EXTRACT}.
-\begin{verbatim}
+\begin{lstlisting}
struct restore_pkt {
int32_t pkt_size; /* size of this packet */
int32_t pkt_end; /* end packet sentinel */
};
-\end{verbatim}
+\end{lstlisting}
Typical code to create a regular file would be the following:
-\begin{verbatim}
+\begin{lstlisting}
struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext;
time_t now = time(NULL);
sp->fname = p_ctx->fname; /* set the full path/filename I want to create */
sp->statp.st_blksize = 4096;
sp->statp.st_blocks = 1;
return bRC_OK;
-\end{verbatim}
+\end{lstlisting}
This will create a virtual file. If you are creating a file that actually
exists, you will most likely want to fill the statp packet using the
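A minimal sketch of filling {\bf statp} from an existing file via the {\bf
stat()} system call (assuming the plugin keeps the filename in its private
context, as in the examples above):
\begin{lstlisting}
struct stat st;                      /* from <sys/stat.h> */
if (stat(p_ctx->fname, &st) == 0) {
   memcpy(&sp->statp, &st, sizeof(sp->statp));
} else {
   /* fall back to synthesized values as in the example above */
}
\end{lstlisting}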
Creating a directory is similar, but requires a few extra steps:
-\begin{verbatim}
+\begin{lstlisting}
struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext;
time_t now = time(NULL);
sp->fname = p_ctx->fname; /* set the full path I want to create */
sp->statp.st_blksize = 4096;
sp->statp.st_blocks = 1;
return bRC_OK;
-\end{verbatim}
+\end{lstlisting}
The link field must be set with the full canonical path name, which always
ends with a forward slash. If you do not terminate it with a forward slash,
the return values are also placed in the packet. In addition for Win32 systems
the plugin must return two additional values (described below).
-\begin{verbatim}
+\begin{lstlisting}
enum {
IO_OPEN = 1,
IO_READ = 2,
bool win32; /* Win32 GetLastError returned */
int32_t pkt_end; /* end packet sentinel */
};
-\end{verbatim}
+\end{lstlisting}
The particular Unix function being simulated is indicated by the {\bf func},
which will have one of the IO\_OPEN, IO\_READ, ... codes listed above. The
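The overall shape of {\bf pluginIO()} is a dispatch on {\bf func}. The
following is a skeleton only (the write and close codes are elided from the
excerpt above, and real code must set the {\bf status} and {\bf io\_errno}
fields of the packet appropriately):
\begin{lstlisting}
static bRC pluginIO(bpContext *ctx, struct io_pkt *io)
{
   struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext;
   io->status = 0;
   io->io_errno = 0;
   switch (io->func) {
   case IO_OPEN:
      /* open the file or pipe; remember the descriptor in p_ctx */
      break;
   case IO_READ:
      /* fill io->buf with up to io->count bytes; set io->status */
      break;
   case IO_WRITE:
      /* write io->count bytes from io->buf; set io->status */
      break;
   case IO_CLOSE:
      /* close the descriptor and release resources */
      break;
   }
   return bRC_OK;
}
\end{lstlisting}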
\subsection{bRC getBaculaValue(bpContext *ctx, bVariable var, void *value)}
Calling this entrypoint, you can obtain specific values that are available
in Bacula. The following Variables can be referenced:
-\begin{itemize}
+\begin{bsysitemize}
\item bVarJobId returns an int
\item bVarFDName returns a char *
\item bVarLevel returns an int
\item bVarJobStatus returns an int
\item bVarSinceTime returns an int (time\_t)
\item bVarAccurate returns an int
-\end{itemize}
+\end{bsysitemize}
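For instance, to fetch the current JobId through the {\bf bfuncs} table
saved in {\bf loadPlugin()} (a minimal sketch):
\begin{lstlisting}
int jobid = 0;
bfuncs->getBaculaValue(ctx, bVarJobId, (void *)&jobid);
/* jobid now holds the JobId of the running Job */
\end{lstlisting}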
\subsection{bRC setBaculaValue(bpContext *ctx, bVariable var, void *value)}
Calling this entrypoint allows you to set particular values in
one working plugin {\bf bpipe-fd.c} that can be found in the Bacula
{\bf src/plugins/fd} directory. Both are built with the following:
-\begin{verbatim}
+\begin{lstlisting}
cd <bacula-source>
./configure <your-options>
make
cd src/plugins/fd
make
make test
-\end{verbatim}
+\end{lstlisting}
After building Bacula and changing into the src/plugins/fd directory,
the {\bf make} command will build the {\bf bpipe-fd.so} plugin, which
In general, the following holds true:
-\begin{itemize}
+\begin{bsysitemize}
\item {\bf Bacula} has been compiled and run on Linux RedHat, FreeBSD, and
Solaris systems.
\item In addition, clients exist on Win32 and Irix.
\item The source code has been written with portability in mind and is mostly
POSIX compatible. Thus porting to any POSIX compatible operating system
should be relatively easy.
-\end{itemize}
+\end{bsysitemize}
\section{Steps to Take for Porting}
\index{Porting!Steps to Take for}
\index{Steps to Take for Porting}
\addcontentsline{toc}{section}{Steps to Take for Porting}
-\begin{itemize}
+\begin{bsysitemize}
\item The first step is to ensure that you have version 2.13 or later of the
{\bf autoconf} tools loaded. You can skip this step, but making changes to
the configuration program will be difficult or impossible.
examine the output. It should look something like the following:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
Configuration on Mon Oct 28 11:42:27 CET 2002:
Host: i686-pc-linux-gnu -- redhat 7.3
Bacula version: 1.27 (26 October 2002)
enable-smartalloc: yes
enable-gnome: no
gmp support: yes
-\end{verbatim}
+\end{lstlisting}
\normalsize
The details depend on your system. The first thing to check is that it
\item The items in the case statement that corresponds to your system are
the following:
-\begin{itemize}
+\begin{bsysitemize}
\item DISTVER -- set to the version of your operating system. Typically some
form of {\bf uname} obtains it.
\item TAPEDRIVE -- the default tape drive. Not too important as the user can
\item PFILES -- set to add any files that you may define in your platform
  subdirectory. These files are used to install the automatic system
  startup scripts for the Bacula daemons.
-\end{itemize}
+\end{bsysitemize}
\item To rebuild a new version of {\bf configure} from a changed {\bf
autoconf/configure.in} you enter {\bf make configure} in the top level Bacula
\item If you are having problems with Bacula's type definitions, you might
look at {\bf src/bc\_types.h} where all the types such as {\bf uint32\_t},
{\bf uint64\_t}, etc. that Bacula uses are defined.
-\end{itemize}
+\end{bsysitemize}
are working in your home directory in a non-root account):
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
git clone http://git.bacula.org/bacula bacula
-\end{verbatim}
+\end{lstlisting}
\normalsize
This will create the directory {\bf bacula} and populate it with
once. Thereafter to update to the latest code, you do:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
cd bacula
git pull
-\end{verbatim}
+\end{lstlisting}
\normalsize
If you want to test with SQLite and it is not installed on your system,
unpack it into {\bf depkgs}, then simply:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
cd depkgs
make
-\end{verbatim}
+\end{lstlisting}
\normalsize
To begin:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
cd bacula/regress
-\end{verbatim}
+\end{lstlisting}
\normalsize
We suggest that you start by:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
cp prototype.conf config
-\end{verbatim}
+\end{lstlisting}
\normalsize
Then you can edit the {\bf config} file directly.
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
# Where to get the source to be tested
BACULA_SOURCE="${HOME}/bacula/bacula"
# non-networked machine
HOST="localhost"
-\end{verbatim}
+\end{lstlisting}
\normalsize
-\begin{itemize}
+\begin{bsysitemize}
\item {\bf BACULA\_SOURCE} should be the full path to the Bacula source code
  that you wish to test. It will be loaded, configured, compiled, and
installed with the "make setup" command, which needs to be done only
\item {\bf SQLITE\_DIR} should be the full path to the sqlite package, which
  must be built before running a Bacula regression, if you are using SQLite. This
variable is ignored if you are using MySQL or PostgreSQL. To use PostgreSQL,
- edit the Makefile and change (or add) WHICHDB?=``\verb{--{with-postgresql''. For
- MySQL use ``WHICHDB=''\verb{--{with-mysql``.
+ edit the Makefile and change (or add) \lstinline+WHICHDB?=--with-postgresql+. For
+ MySQL use \lstinline+WHICHDB=--with-mysql+ (see the example after this list).
The advantage of using SQLite is that it is totally independent of any
installation you may have running on your system, and there is no
\item {\bf scripts} is the Bacula scripts location (where we can find the
  database creation script, the autochanger handler, etc.)
-\end{itemize}
+\end{bsysitemize}
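For illustration, the relevant Makefile line might read:
\footnotesize
\begin{lstlisting}
# in regress/Makefile: select the catalog backend to test against
WHICHDB?=--with-postgresql
# or, for MySQL:
# WHICHDB=--with-mysql
\end{lstlisting}
\normalsize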
\subsection{Building the Test Bacula}
\index{Building the Test Bacula}
Once the above variables are set, you can build the setup by entering:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
make setup
-\end{verbatim}
+\end{lstlisting}
\normalsize
This will set up the regression testing and you should not need to
file which supplies the credentials by default to the MySQL commandline
utilities.
-\begin{verbatim}
+\begin{lstlisting}
[client]
host = localhost
user = regress
password = asecret
-\end{verbatim}
+\end{lstlisting}
A similar technique can be used for PostgreSQL regression testing where the
database is configured to require a password. The ~/.pgpass file should
contain a line with the database connection properties.
-\begin{verbatim}
+\begin{lstlisting}
hostname:port:database:username:password
-\end{verbatim}
+\end{lstlisting}
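For example, an entry matching the illustrative credentials above might be
(database name and port are hypothetical):
\begin{lstlisting}
localhost:5432:regress:regress:asecret
\end{lstlisting}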
\subsection{Running the Disk Only Regression}
\index{Regression!Running the Disk Only}
it, and run the tests is to use a helper script:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
./do_disk
-\end{verbatim}
+\end{lstlisting}
\normalsize
something similar to:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
Test results
===== auto-label-test OK 12:31:33 =====
===== backup-bacula-test OK 12:32:32 =====
===== scratch-pool-test OK 13:28:01 =====
Total time = 0:57:55 or 3475 secs
-\end{verbatim}
+\end{lstlisting}
\normalsize
and the working tape tests are run with
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
make full_test
-\end{verbatim}
+\end{lstlisting}
\normalsize
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
Test results
===== Bacula tape test OK =====
===== incremental-tape test OK =====
===== four-concurrent-jobs-tape OK =====
===== four-jobs-tape OK =====
-\end{verbatim}
+\end{lstlisting}
\normalsize
Each separate test is self-contained in that it initializes to run Bacula from
Alternatively, you can do the ./do\_disk work by hand with:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
make setup
-\end{verbatim}
+\end{lstlisting}
\normalsize
The above will then copy the source code within
by entering:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
make test
-\end{verbatim}
+\end{lstlisting}
\normalsize
If one or more tests fail, the line output will be similar to:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
!!!!! concurrent-jobs-test failed!!! !!!!!
-\end{verbatim}
+\end{lstlisting}
\normalsize
If you want to determine why the test failed, you will need to rerun the
script with the debug output turned on. You do so by defining the
environment variable {\bf REGRESS\_DEBUG} with commands such as:
-\begin{verbatim}
+\begin{lstlisting}
REGRESS_DEBUG=1
export REGRESS_DEBUG
-\end{verbatim}
+\end{lstlisting}
Then from the "regress" directory (all regression scripts assume that
you have "regress" as the current directory), enter:
-\begin{verbatim}
+\begin{lstlisting}
tests/test-name
-\end{verbatim}
+\end{lstlisting}
where test-name should be the name of a test script -- for example:
{\bf tests/backup-bacula-test}.
in your config file.
Example:
-\begin{verbatim}
+\begin{lstlisting}
bin=/opt/bacula/bin
scripts=/opt/bacula/scripts
-\end{verbatim}
+\end{lstlisting}
The \texttt{./scripts/prepare-other-loc} script will tweak the regress scripts to use
your binary location. You will need to run it manually once before you run any
regression tests.
-\begin{verbatim}
+\begin{lstlisting}
$ ./scripts/prepare-other-loc
$ ./tests/backup-bacula-test
...
-\end{verbatim}
+\end{lstlisting}
All regression scripts must be run by hand or by calling the test scripts.
These are principally scripts that begin with {\bf all\_...} such as {\bf all\_disk\_tests},
If you wish to run a single test, you can simply:
-\begin{verbatim}
+\begin{lstlisting}
cd regress
tests/<name-of-test>
-\end{verbatim}
+\end{lstlisting}
or, if the source code has been updated, you would do:
-\begin{verbatim}
+\begin{lstlisting}
cd bacula
git pull
cd regress
make setup
tests/backup-to-null
-\end{verbatim}
+\end{lstlisting}
\section{Writing a Regression Test}
directory and entering:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
tests/<test-name>
-\end{verbatim}
+\end{lstlisting}
\normalsize
\subsection{Directory Structure}
The directory structure of the regression tests is:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
regress - Makefile, scripts to start tests
|------ scripts - Scripts and conf files
|-------tests - All test scripts are here
|------ tmp - Most temp files go here
|------ working - Bacula working directory
|------ weird-files - Weird files used in two of the tests.
-\end{verbatim}
+\end{lstlisting}
\normalsize
\subsection{Adding a New Test}
under the debugger) by first setting the environment variable
{\bf REGRESS\_WAIT} with commands such as:
-\begin{verbatim}
+\begin{lstlisting}
REGRESS_WAIT=1
export REGRESS_WAIT
-\end{verbatim}
+\end{lstlisting}
Then execute the script. When the script prints the following line:
-\begin{verbatim}
+\begin{lstlisting}
Start Bacula under debugger and enter anything when ready ...
-\end{verbatim}
+\end{lstlisting}
You start the Bacula component you want to run under the debugger in a
different shell window. For example:
-\begin{verbatim}
+\begin{lstlisting}
cd .../regress/bin
gdb bacula-sd
(possibly set breakpoints, ...)
run -s -f
-\end{verbatim}
+\end{lstlisting}
Then enter any character in the window with the above message.
An error message will appear saying that the daemon you are debugging
%%
%%
-\addcontentsline{lof}{figure}{Smart Memory Allocation with Orphaned Buffer
-Detection}
-\includegraphics{\idir smartall.eps}
+%\addcontentsline{lof}{figure}{Smart Memory Allocation with Orphaned Buffer
\chapter{Smart Memory Allocation}
\label{_ChapterStart4}
\addcontentsline{toc}{section}{Smart Memory Allocation With Orphaned Buffer
Detection}
+\bsysimageH{smartall}{Smart Memory Allocation with Orphaned Buffer Detection}{}
Few things are as embarrassing as a program that leaks, yet few errors are so
easy to commit or as difficult to track down in a large, complicated program
as failure to release allocated memory. SMARTALLOC replaces the standard C
sm\_dump()} on stderr as follows:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
Orphaned buffer: 120 bytes allocated at line 50 of gutshot.c
-\end{verbatim}
+\end{lstlisting}
\normalsize
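Typically the report is requested once, at program exit; a minimal sketch
(assuming the boolean {\tt bufdump} argument declared in smartall.h, which
selects whether buffer contents are also dumped):
\footnotesize
\begin{lstlisting}
int main(int argc, char **argv)
{
   /* ... application code using malloc()/free() as usual ... */
   sm_dump(false);   /* report any orphaned buffers on stderr */
   return 0;
}
\end{lstlisting}
\normalsize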
\subsection{Squelching a SMARTALLOC}
function call:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
sm_static(1);
-\end{verbatim}
+\end{lstlisting}
\normalsize
you declare that subsequent storage allocated by {\tt malloc()}, {\tt
allocating unmonitored data this way, be sure to add a call to:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
sm_static(0);
-\end{verbatim}
+\end{lstlisting}
\normalsize
to resume normal monitoring of buffer allocations. Buffers allocated while
should call {\tt actuallyfree()}, as in this code fragment:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
struct image *ibuf = getimage("ratpack.img");
display_on_screen(ibuf);
actuallyfree(ibuf);
-\end{verbatim}
+\end{lstlisting}
\normalsize
Conversely, suppose we are to call a library function, ``{\tt putimage()}'',
a routine.
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
struct image *obuf =
(struct image *) actuallymalloc(sizeof(struct image));
dump_screen_to_image(obuf);
putimage("scrdump.img", obuf); /* putimage() releases obuf */
-\end{verbatim}
+\end{lstlisting}
\normalsize
It's unlikely you'll need any of the ``actually'' calls except under very odd
redefinitions, please refer to smartall.h.)
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
void *malloc(size_t size)
void *calloc(size_t nelem, size_t elsize)
void *realloc(void *ptr, size_t size)
void free(void *ptr)
void cfree(void *ptr)
-\end{verbatim}
+\end{lstlisting}
\normalsize
{\tt cfree()} is a historical artifact identical to {\tt free()}.
This chapter is intended to be a technical discussion of the Storage daemon
services and as such is not targeted at end users but rather at developers and
system administrators that want or need to know more of the working details of
-{\bf Bacula}.
+{\bf Bacula}.
This document is somewhat out of date.
device (for example, a tape drive, CD writer, tape changer or jukebox, etc.),
and may employ auxiliary storage resources (such as space on a hard disk file
system) to increase performance and/or optimize use of the permanent storage
-medium.
+medium.
Any number of storage daemons may be run on a given machine; each associated
with an individual storage device connected to it, and BACULA operations may
employ storage daemons on any number of hosts connected by a network, local or
remote. The ability to employ remote storage daemons (with appropriate
security measures) permits automatic off-site backup, possibly to publicly
-available backup repositories.
+available backup repositories.
\section{SD Development Outline}
\index{Outline!SD Development }
access to the storage server. Once a connection has been opened, the client
may make any number of Query requests, and/or initiate (if permitted), one or
more Append sessions (which transmit data to be stored by the storage daemon)
-and/or Read sessions (which retrieve data from the storage daemon).
+and/or Read sessions (which retrieve data from the storage daemon).
Most requests and replies sent across the connection are simple ASCII strings,
with status replies prefixed by a four digit status code for easier parsing.
pending}'', which indicates the client must send a ``Query notification''
request to retrieve one or more notifications posted to it. Once the
notifications have been returned, the client may then resubmit the request
-which resulted in the 3201 status.
+which resulted in the 3201 status.
The following descriptions omit common error codes, yet to be defined, which
can occur from most or many requests due to events like media errors,
restarting of the storage daemon, etc. These details will be filled in, along
with a comprehensive list of status codes along with which requests can
-produce them in an update to this document.
+produce them in an update to this document.
\subsection{SD Append Requests}
\index{Requests!SD Append }
session. If too many sessions are open, or a conflicting session (for
example, a read in progress when simultaneous read and append sessions are
not permitted), a status of ``{\tt 3502\ Volume\ busy}'' is returned. If no
-volume is mounted, or the volume mounted cannot be appended to, a status of
-``{\tt 3503\ Volume\ not\ mounted}'' is returned.
+volume is mounted, or the volume mounted cannot be appended to, a status of
+``{\tt 3503\ Volume\ not\ mounted}'' is returned.
\item [append data = \lt{}ticket-number\gt{} ]
\index{SPAN class }
IPaddress} and {\tt port} specify the IP address and port number of the data
channel. Error status codes are {\tt 3504\ Invalid\ ticket\ number} and {\tt
3505\ Session\ aborted}, the latter of which indicates the entire append
-session has failed due to a daemon or media error.
+session has failed due to a daemon or media error.
-Once the File daemon has established the connection to the data channel
+Once the File daemon has established the connection to the data channel
opened by the Storage daemon, it will transfer a header packet followed by
-any number of data packets. The header packet is of the form:
+any number of data packets. The header packet is of the form:
-{\tt \lt{}file-index\gt{} \lt{}stream-id\gt{} \lt{}info\gt{}}
+{\tt \lt{}file-index\gt{} \lt{}stream-id\gt{} \lt{}info\gt{}}
-The details are specified in the
+The details are specified in the
\ilink{Daemon Protocol}{_ChapterStart2} section of this
-document.
+document.
\item [*append abort session = \lt{}ticket-number\gt{} ]
\index{SPAN class }
The open append session with ticket {\it ticket-number} is aborted; any blocks
-not yet written to permanent media are discarded. Subsequent attempts to
+not yet written to permanent media are discarded. Subsequent attempts to
append data to the session will receive an error status of {\tt 3505\
-Session\ aborted}.
+Session\ aborted}.
\item [append end session = \lt{}ticket-number\gt{} ]
\index{SPAN class }
The open append session with ticket {\it ticket-number} is marked complete; no
further blocks may be appended. The storage daemon will give priority to
saving any buffered blocks from this session to permanent media as soon as
-possible.
+possible.
\item [append close session = \lt{}ticket-number\gt{} ]
\index{SPAN class }
list of volumes, from first to last, which contain blocks from the session,
along with the first and last file and block on each containing session data
and the volume session key identifying data from that session in lines with
-the following format:
+the following format:
{\tt Volume = \lt{}Volume-id\gt{} \lt{}start-file\gt{}
\lt{}start-block\gt{} \lt{}end-file\gt{} \lt{}end-block\gt{}
data from that session on the volume, {\it end-file} and {\it end-block} are
the file and block with the last data from the session on the volume and {\it
volume-session-id} is the volume session ID for blocks from the session
-stored on that volume.
+stored on that volume.
\end{description}
\subsection{SD Read Requests}
session on the volume, {\it end-file} and {\it end-block} are the file and
block with the last data from the session on the volume and {\it
volume-session-id} is the volume session ID for blocks from the session
-stored on that volume.
+stored on that volume.
-If the session is successfully opened, a status of
+If the session is successfully opened, a status of
-{\tt {\tt 3100\ OK Ticket\ =\ }{\it number}``}
+``{\tt 3100\ OK\ Ticket\ =\ }{\it number}''
is returned with a reply used to identify subsequent messages in the session.
If too many sessions are open, or a conflicting session (for example, an
append in progress when simultaneous read and append sessions are not
permitted), a status of ``{\tt 3502\ Volume\ busy}'' is returned. If no
-volume is mounted, or the volume mounted cannot be appended to, a status of
+volume is mounted, or the volume mounted cannot be appended to, a status of
``{\tt 3503\ Volume\ not\ mounted}'' is returned. If no block with the given
volume session ID and the correct client ID number appears in the given first
file and block for the volume, a status of ``{\tt 3505\ Session\ not\
-found}`` is returned.
+found}'' is returned.
\item [Read data = \lt{}Ticket\gt{} \gt{} \lt{}Block\gt{} ]
\index{SPAN class }
blocks may be skipped. If a block number greater than the largest stored on
the volume is requested, a status of ``{\tt 3201\ End\ of\ volume}'' is
returned. If a block number greater than the largest in the file is
-requested, a status of ''{\tt 3401\ End\ of\ file}`` is returned.
+requested, a status of ``{\tt 3401\ End\ of\ file}'' is returned.
\item [Read close session = \lt{}Ticket\gt{} ]
\index{SPAN class }
The read session with Ticket number is closed. A read session may be closed
-at any time; you needn't read all its blocks before closing it.
+at any time; you needn't read all its blocks before closing it.
\end{description}
-{\it by
+{\it by
\elink{John Walker}{http://www.fourmilab.ch/}
-January 30th, MM }
+January 30th, MM }
\section{SD Data Structures}
\index{SD Data Structures}
all the job specific data as well as a pointer to the Device resource
(DEVRES structure) and the physical DEVICE structure.
-Now if a job is writing to two devices (it could be writing two separate
-streams to the same device), it must have two DCRs. Today, the code only
+Now if a job is writing to two devices (it could be writing two separate
+streams to the same device), it must have two DCRs. Today, the code only
permits one. This won't be hard to change, but it is new code.
Today: three jobs (threads) and two physical devices, where each job
writes to only one device:
-\begin{verbatim}
+\begin{lstlisting}
Job1 -> DCR1 -> DEVICE1
Job2 -> DCR2 -> DEVICE1
Job3 -> DCR3 -> DEVICE2
-\end{verbatim}
+\end{lstlisting}
To be implemented: three jobs and three physical devices, where
job1 writes simultaneously to three devices:
-\begin{verbatim}
+\begin{lstlisting}
Job1 -> DCR1 -> DEVICE1
-> DCR4 -> DEVICE2
-> DCR5 -> DEVICE3
Job = job control record
DCR = Job control data for a specific device
DEVICE = Device only control data
-\end{verbatim}
+\end{lstlisting}
--- /dev/null
+\begin{longtable}{|l|l|X|}
+\caption{Filename table} \\
+\endlastfoot
+ \hline
+ \multicolumn{3}{|l|}{\bf Filename } \\
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark } \\
+ \hline
+ FilenameId & integer & Primary Key \\
+ \hline
+ Name & Blob & Filename \\
+ \hline
+\end{longtable}
--- /dev/null
+\begin{longtable}{|l|l|X|}
+ \caption{Base Files Table Layout}\label{table:dbbasefiles} \\
+ \endlastfoot
+ \multicolumn{3}{c}{Cont. on next page} \\
+ \endfoot
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark} \\
+ \endfirsthead
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark } \\
+ \endhead
+ \hline
+ BaseId & integer & Primary Key \\
+ \hline
+ BaseJobId & integer & JobId of Base Job \\
+ \hline
+ JobId & integer & Reference to Job \\
+ \hline
+ FileId & integer & Reference to File \\
+ \hline
+ FileIndex & integer & File Index number \\
+ \hline
+\end{longtable}
--- /dev/null
+\begin{longtable}{|l|l|X|}
+ \caption{Client Table Layout}\label{table:dbclient} \\
+ \endlastfoot
+ \multicolumn{3}{c}{Cont. on next page} \\
+ \endfoot
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark} \\
+ \endfirsthead
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark } \\
+ \endhead
+ \hline
+ ClientId & integer & Primary Key \\
+ \hline
+ Name & TinyBlob & File Services Name \\
+ \hline
+ UName & TinyBlob & uname -a from Client (not yet used) \\
+ \hline
+ AutoPrune & tinyint & yes|no for autopruning \\
+ \hline
+ FileRetention & bigint & 64 bit seconds to retain Files \\
+ \hline
+ JobRetention & bigint & 64 bit seconds to retain Job \\
+ \hline
+\end{longtable}
--- /dev/null
+\begin{longtable}{|l|l|X|}
+ \caption{Counter Table Layout}\label{table:dbcounter} \\
+ \endlastfoot
+ \multicolumn{3}{c}{Cont. on next page} \\
+ \endfoot
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark} \\
+ \endfirsthead
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark } \\
+ \endhead
+ \hline
+ Counter & tinyblob & Counter name \\
+ \hline
+ MinValue & integer & Start/Min value for counter \\
+ \hline
+ MaxValue & integer & Max value for counter \\
+ \hline
+ CurrentValue & integer & Current counter value \\
+ \hline
+ WrapCounter & tinyblob & Name of another counter \\
+ \hline
+\end{longtable}
--- /dev/null
+\begin{longtable}{|l|l|X|}
+\caption{File Table Layout}\label{table:dbfile} \\
+\endlastfoot
+\hline
+\multicolumn{1}{|c|}{\bf Column Name}
+& \multicolumn{1}{c|}{\bf Data Type}
+& \multicolumn{1}{c|}{\bf Remark} \\
+\hline
+FileId & integer & Primary Key \\
+\hline
+FileIndex & integer & The sequential file number in the Job \\
+\hline
+JobId & integer & Link to Job Record \\
+\hline
+PathId & integer & Link to Path Record \\
+\hline
+FilenameId & integer & Link to Filename Record \\
+\hline
+MarkId & integer & Used to mark files during Verify Jobs \\
+\hline
+LStat & tinyblob & File attributes in base64 encoding \\
+\hline
+MD5 & tinyblob & MD5/SHA1 signature in base64 encoding \\
+\hline
+\end{longtable}
--- /dev/null
+\begin{longtable}{|l|l|X|}
+ \caption{Filename table}\label{table:dbfilename} \\
+ \endlastfoot
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark } \\
+ \hline
+ FilenameId & integer & Primary Key \\
+ \hline
+ Name & Blob & Filename \\
+ \hline
+\end{longtable}
--- /dev/null
+\begin{longtable}{|l|l|X|}
+ \caption{File Sets Table Layout}\label{table:dbfileset} \\
+ \endlastfoot
+ \multicolumn{3}{c}{Cont. on next page} \\
+ \endfoot
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark} \\
+ \endfirsthead
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark } \\
+ \endhead
+ \hline
+ FileSetId & integer & Primary Key \\
+ \hline
+ FileSet & tinyblob & FileSet name \\
+ \hline
+ MD5 & tinyblob & MD5 checksum of FileSet \\
+ \hline
+ CreateTime & datetime & Time and date Fileset created \\
+ \hline
+\end{longtable}
--- /dev/null
+\begin{longtable}{|l|l|X|}
+ \caption{Job Table Layout}\label{table:dbjob} \\
+ \endlastfoot
+ \multicolumn{3}{c}{Cont. on next page} \\
+ \endfoot
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark} \\
+ \endfirsthead
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark} \\
+ \endhead
+ \hline
+ JobId & integer & Primary Key \\
+ \hline
+ Job & tinyblob & Unique Job Name \\
+ \hline
+ Name & tinyblob & Job Name \\
+ \hline
+ PurgedFiles & tinyint & Used by Bacula for purging/retention periods \\
+ \hline
+ Type & binary(1) & Job Type: Backup, Copy, Clone, Archive, Migration \\
+ \hline
+ Level & binary(1) & Job Level \\
+ \hline
+ ClientId & integer & Client index \\
+ \hline
+ JobStatus & binary(1) & Job Termination Status \\
+ \hline
+ SchedTime & datetime & Time/date when Job scheduled \\
+ \hline
+ StartTime & datetime & Time/date when Job started \\
+ \hline
+ EndTime & datetime & Time/date when Job ended \\
+ \hline
+ RealEndTime & datetime & Time/date when original Job ended \\
+ \hline
+ JobTDate & bigint & Start day in Unix format but 64 bits; used for Retention period. \\
+ \hline
+ VolSessionId & integer & Unique Volume Session ID \\
+ \hline
+ VolSessionTime & integer & Unique Volume Session Time \\
+ \hline
+ JobFiles & integer & Number of files saved in Job \\
+ \hline
+ JobBytes & bigint & Number of bytes saved in Job \\
+ \hline
+ JobErrors & integer & Number of errors during Job \\
+ \hline
+ JobMissingFiles & integer & Number of files not saved (not yet used) \\
+ \hline
+ PoolId & integer & Link to Pool Record \\
+ \hline
+ FileSetId & integer & Link to FileSet Record \\
+ \hline
+ PriorJobId & integer & Link to prior Job Record when migrated \\
+ \hline
+ PurgedFiles & tiny integer & Set when all File records purged \\
+ \hline
+ HasBase & tiny integer & Set when Base Job run \\
+ \hline
+\end{longtable}
--- /dev/null
+\begin{longtable}{|l|l|X|}
+ \caption{Job History Table Layout}\label{table:dbjobhistory} \\
+ \endlastfoot
+ \multicolumn{3}{c}{Cont. on next page} \\
+ \endfoot
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark} \\
+ \endfirsthead
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark } \\
+ \endhead
+ \hline
+JobId & integer & Primary Key \\
+ \hline
+Job & tinyblob & Unique Job Name \\
+ \hline
+Name & tinyblob & Job Name \\
+ \hline
+Type & binary(1) & Job Type: Backup, Copy, Clone, Archive, Migration
+ \\
+ \hline
+Level & binary(1) & Job Level \\
+ \hline
+ClientId & integer & Client index \\
+ \hline
+JobStatus & binary(1) & Job Termination Status \\
+ \hline
+SchedTime & datetime & Time/date when Job scheduled \\
+ \hline
+StartTime & datetime & Time/date when Job started \\
+ \hline
+EndTime & datetime & Time/date when Job ended \\
+ \hline
+RealEndTime & datetime & Time/date when original Job ended \\
+ \hline
+JobTDate & bigint & Start day in Unix format but 64 bits; used for
+Retention period. \\
+ \hline
+VolSessionId & integer & Unique Volume Session ID \\
+ \hline
+VolSessionTime & integer & Unique Volume Session Time \\
+ \hline
+JobFiles & integer & Number of files saved in Job \\
+ \hline
+JobBytes & bigint & Number of bytes saved in Job \\
+ \hline
+JobErrors & integer & Number of errors during Job \\
+ \hline
+JobMissingFiles & integer & Number of files not saved (not yet used)
+\\
+ \hline
+PoolId & integer & Link to Pool Record \\
+ \hline
+FileSetId & integer & Link to FileSet Record \\
+ \hline
+PriorJobId & integer & Link to prior Job Record when migrated \\
+ \hline
+PurgedFiles & tiny integer & Set when all File records purged \\
+ \hline
+HasBase & tiny integer & Set when Base Job run
+\\ \hline
+
+\end{longtable}
--- /dev/null
+\begin{longtable}{|l|l|X|}
+ \caption{JobMedia Table Layout}\label{table:dbjobmedia} \\
+ \endlastfoot
+ \multicolumn{3}{c}{Cont. on next page} \\
+ \endfoot
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark} \\
+ \endfirsthead
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark } \\
+ \endhead
+ \hline
+ JobMediaId & integer & Primary Key \\
+ \hline
+ JobId & integer & Link to Job Record \\
+ \hline
+ MediaId & integer & Link to Media Record \\
+ \hline
+ FirstIndex & integer & The index (sequence number) of the first file written for this Job to the Media \\
+ \hline
+ LastIndex & integer & The index of the last file written for this Job to the Media \\
+ \hline
+ StartFile & integer & The physical media (tape) file number of the first block written for this Job \\
+ \hline
+ EndFile & integer & The physical media (tape) file number of the last block written for this Job \\
+ \hline
+ StartBlock & integer & The number of the first block written for this Job \\
+ \hline
+ EndBlock & integer & The number of the last block written for this Job \\
+ \hline
+ VolIndex & integer & The Volume use sequence number within the Job \\
+ \hline
+\end{longtable}
--- /dev/null
+\begin{longtable}{|c|X|}
+ \caption{Job Statuses}\label{table:dbjobstatuses} \\
+ \endlastfoot
+ \multicolumn{2}{c}{Cont. on next page} \\
+ \endfoot
+ \hline
+ \multicolumn{1}{|c|}{\bf Value}
+ & \multicolumn{1}{c|}{\bf Meaning} \\
+ \endfirsthead
+ \hline
+ \multicolumn{1}{|c|}{\bf Value}
+ & \multicolumn{1}{c|}{\bf Meaning} \\
+ \endhead
+ C & Created but not yet running \\
+ \hline
+ R & Running \\
+ \hline
+ B & Blocked \\
+ \hline
+ T & Terminated normally \\
+ \hline
+ W & Terminated normally with warnings \\
+ \hline
+ E & Terminated in Error \\
+ \hline
+ e & Non-fatal error \\
+ \hline
+ f & Fatal error \\
+ \hline
+ D & Verify Differences \\
+ \hline
+ A & Canceled by the user \\
+ \hline
+ I & Incomplete Job \\
+ \hline
+ F & Waiting on the File daemon \\
+ \hline
+ S & Waiting on the Storage daemon \\
+ \hline
+ m & Waiting for a new Volume to be mounted \\
+ \hline
+ M & Waiting for a Mount \\
+ \hline
+ s & Waiting for Storage resource \\
+ \hline
+ j & Waiting for Job resource \\
+ \hline
+ c & Waiting for Client resource \\
+ \hline
+ d & Waiting for Maximum jobs \\
+ \hline
+ t & Waiting for Start Time \\
+ \hline
+ p & Waiting for higher priority job to finish \\
+ \hline
+ i & Doing batch insert file records \\
+ \hline
+ a & SD despooling attributes \\
+ \hline
+ l & Doing data despooling \\
+ \hline
+ L & Committing data (last despool) \\
+ \hline
+\end{longtable}
--- /dev/null
+\begin{longtable}{|c|X|}
+ \caption{Job Types}\label{table:dbjobtypes} \\
+ \endlastfoot
+ \multicolumn{2}{c}{Cont. on next page} \\
+ \endfoot
+ \hline
+ \multicolumn{1}{|c|}{\bf Value}
+ & \multicolumn{1}{c|}{\bf Meaning} \\
+ \endfirsthead
+ \hline
+ \multicolumn{1}{|c|}{\bf Value}
+ & \multicolumn{1}{c|}{\bf Meaning} \\
+ \endhead
+ \hline
+ B & Backup Job \\
+ \hline
+ M & Migrated Job \\
+ \hline
+ V & Verify Job \\
+ \hline
+ R & Restore Job \\
+ \hline
+ C & Console program (not in database) \\
+ \hline
+ I & Internal or system Job \\
+ \hline
+ D & Admin Job \\
+ \hline
+ A & Archive Job (not implemented) \\
+ \hline
+ C & Copy Job \\
+ \hline
+ g & Migration Job \\
+ \hline
+\end{longtable}
--- /dev/null
+\begin{longtable}{|l|l|X|}
+ \caption{Location Table Layout}\label{table:dblocation} \\
+ \endlastfoot
+ \multicolumn{3}{c}{Cont. on next page} \\
+ \endfoot
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark} \\
+ \endfirsthead
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark } \\
+ \endhead
+ \hline
+ LocationId & integer & Primary Key \\
+ \hline
+ Location & tinyblob & Text defining location \\
+ \hline
+ Cost & integer & Relative cost of obtaining Volume \\
+ \hline
+ Enabled & tinyint & Whether or not Volume is enabled \\
+ \hline
+\end{longtable}
--- /dev/null
+\begin{longtable}{|l|l|X|}
+ \caption{Location Log Table Layout}\label{table:dblocationlog} \\
+ \endlastfoot
+ \multicolumn{3}{c}{Cont. on next page} \\
+ \endfoot
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark} \\
+ \endfirsthead
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark } \\
+ \endhead
+ \hline
+ LocLogId & integer & Primary Key \\
+ \hline
+ Date & datetime & Time/date log record created \\
+ \hline
+ MediaId & integer & Points to Media record \\
+ \hline
+ LocationId & integer & Points to Location record \\
+ \hline
+ NewVolStatus & integer & enum: Full, Archive, Append, Recycle, Purged, Read-Only, Disabled, Error, Busy, Used, Cleaning \\
+ \hline
+ Enabled & tinyint & Whether or not Volume is enabled \\
+ \hline
+\end{longtable}
--- /dev/null
+\begin{longtable}{|l|l|X|}
+ \caption{Log Table Layout}\label{table:dblog} \\
+ \endlastfoot
+ \multicolumn{3}{c}{Cont. on next page} \\
+ \endfoot
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark} \\
+ \endfirsthead
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark } \\
+ \endhead
+ \hline
+ LogId & integer & Primary Key \\
+ \hline
+ JobId & integer & Points to Job record \\
+ \hline
+ Time & datetime & Time/date log record created \\
+ \hline
+ LogText & blob & Log text \\
+ \hline
+\end{longtable}
--- /dev/null
+\begin{longtable}{|l|l|X|}
+ \caption{Media Table Layout}\label{table:dbmedia} \\
+ \endlastfoot
+ \multicolumn{3}{c}{Cont. on next page} \\
+ \endfoot
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark} \\
+ \endfirsthead
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark } \\
+ \endhead
+ \hline
+ MediaId & integer & Primary Key \\
+ \hline
+ VolumeName & tinyblob & Volume name \\
+ \hline
+ Slot & integer & Autochanger Slot number or zero \\
+ \hline
+ PoolId & integer & Link to Pool Record \\
+ \hline
+ MediaType & tinyblob & The MediaType supplied by the user \\
+ \hline
+ MediaTypeId & integer & The MediaTypeId \\
+ \hline
+ LabelType & tinyint & The type of label on the Volume \\
+ \hline
+ FirstWritten & datetime & Time / date when first written \\
+ \hline
+ LastWritten & datetime & Time/date when last written \\
+ \hline
+ LabelDate & datetime & Time/date when tape labeled \\
+ \hline
+ VolJobs & integer & Number of jobs written to this media \\
+ \hline
+ VolFiles & integer & Number of files written to this media \\
+ \hline
+ VolBlocks & integer & Number of blocks written to this media \\
+ \hline
+ VolMounts & integer & Number of times media mounted \\
+ \hline
+ VolBytes & bigint & Number of bytes saved in Job \\
+ \hline
+ VolParts & integer & The number of parts for a Volume (DVD) \\
+ \hline
+ VolErrors & integer & Number of errors during Job \\
+ \hline
+ VolWrites & integer & Number of writes to media \\
+ \hline
+ MaxVolBytes & bigint & Maximum bytes to put on this media \\
+ \hline
+ VolCapacityBytes & bigint & Capacity estimate for this volume \\
+ \hline
+ VolStatus & enum & Status of media: Full, Archive, Append, Recycle, Read-Only, Disabled, Error, Busy \\
+ \hline
+ Enabled & tinyint & Whether or not Volume can be written \\
+ \hline
+ Recycle & tinyint & Whether or not Bacula can recycle the Volumes: Yes, No \\
+ \hline
+ ActionOnPurge & tinyint & What happens to a Volume after purging \\
+ \hline
+ VolRetention & bigint & 64 bit seconds until expiration \\
+ \hline
+ VolUseDuration & bigint & 64 bit seconds volume can be used \\
+ \hline
+ MaxVolJobs & integer & maximum jobs to put on Volume \\
+ \hline
+ MaxVolFiles & integer & maximum EOF marks to put on Volume \\
+ \hline
+ InChanger & tinyint & Whether or not Volume in autochanger \\
+ \hline
+ StorageId & integer & Storage record ID \\
+ \hline
+ DeviceId & integer & Device record ID \\
+ \hline
+ MediaAddressing & integer & Method of addressing media \\
+ \hline
+ VolReadTime & bigint & Time Reading Volume \\
+ \hline
+ VolWriteTime & bigint & Time Writing Volume \\
+ \hline
+ EndFile & integer & End File number of Volume \\
+ \hline
+ EndBlock & integer & End block number of Volume \\
+ \hline
+ LocationId & integer & Location record ID \\
+ \hline
+ RecycleCount & integer & Number of times recycled \\
+ \hline
+ InitialWrite & datetime & When Volume first written \\
+ \hline
+ ScratchPoolId & integer & Id of Scratch Pool \\
+ \hline
+ RecyclePoolId & integer & Pool ID where to recycle Volume \\
+ \hline
+ Comment & blob & User text field \\
+ \hline
+\end{longtable}
--- /dev/null
+\begin{longtable}{|l|l|X|}
+ \caption{Path Table Layout}\label{table:dbpath} \\
+ \endlastfoot
+ \multicolumn{3}{c}{Cont. on next page} \\
+ \endfoot
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark} \\
+ \endfirsthead
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark } \\
+ \endhead
+ \hline
+ PathId & integer & Primary Key \\
+ \hline
+ Path & Blob & Full Path \\
+ \hline
+\end{longtable}
--- /dev/null
+\begin{longtable}{|l|l|X|}
+ \caption{Pool Table Layout}\label{table:dbpool} \\
+ \endlastfoot
+ \multicolumn{3}{c}{Cont. on next page} \\
+ \endfoot
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark} \\
+ \endfirsthead
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark } \\
+ \endhead
+ \hline
+ PoolId & integer & Primary Key \\
+ \hline
+ Name & Tinyblob & Pool Name \\
+ \hline
+ NumVols & Integer & Number of Volumes in the Pool \\
+ \hline
+ MaxVols & Integer & Maximum Volumes in the Pool \\
+ \hline
+ UseOnce & tinyint & Use volume once \\
+ \hline
+ UseCatalog & tinyint & Set to use catalog \\
+ \hline
+ AcceptAnyVolume & tinyint & Accept any volume from Pool \\
+ \hline
+ VolRetention & bigint & 64 bit seconds to retain volume \\
+ \hline
+ VolUseDuration & bigint & 64 bit seconds volume can be used \\
+ \hline
+ MaxVolJobs & integer & max jobs on volume \\
+ \hline
+ MaxVolFiles & integer & max EOF marks to put on Volume \\
+ \hline
+ MaxVolBytes & bigint & max bytes to write on Volume \\
+ \hline
+ AutoPrune & tinyint & yes|no for autopruning \\
+ \hline
+ Recycle & tinyint & yes|no for allowing auto recycling of Volume \\
+ \hline
+ ActionOnPurge & tinyint & Default Volume ActionOnPurge \\
+ \hline
+ PoolType & enum & Backup, Copy, Cloned, Archive, Migration \\
+ \hline
+ LabelType & tinyint & Type of label ANSI / Bacula \\
+ \hline
+ LabelFormat & Tinyblob & Label format \\
+ \hline
+ Enabled & tinyint & Whether or not Volume can be written \\
+ \hline
+ ScratchPoolId & integer & Id of Scratch Pool \\
+ \hline
+ RecyclePoolId & integer & Pool ID where to recycle Volume \\
+ \hline
+ NextPoolId & integer & Pool ID of next Pool \\
+ \hline
+ MigrationHighBytes & bigint & High water mark for migration \\
+ \hline
+ MigrationLowBytes & bigint & Low water mark for migration \\
+ \hline
+ MigrationTime & bigint & Time before migration \\
+ \hline
+\end{longtable}
--- /dev/null
+\begin{longtable}{|l|l|X|}
+ \caption{Storage Table Layout}\label{table:dbstorage} \\
+ \endlastfoot
+ \multicolumn{3}{c}{Cont. on next page} \\
+ \endfoot
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark} \\
+ \endfirsthead
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark } \\
+ \endhead
+ \hline
+StorageId & integer & Unique Id \\
+ \hline
+Name & tinyblob & Resource name of Storage device \\
+ \hline
+AutoChanger & tinyint & Set if it is an autochanger \\
+ \hline
+
+\end{longtable}
--- /dev/null
+\begin{longtable}{|l|l|X|}
+ \caption{Version Table Layout}\label{table:dbversion} \\
+ \endlastfoot
+ \multicolumn{3}{c}{Cont. on next page} \\
+ \endfoot
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark} \\
+ \endfirsthead
+ \hline
+ \multicolumn{1}{|c|}{\bf Column Name}
+ & \multicolumn{1}{l|}{\bf Data Type}
+ & \multicolumn{1}{l|}{\bf Remark } \\
+ \endhead
+ \hline
+VersionId & integer & Primary Key \\
+\hline
+\end{longtable}
--- /dev/null
+\begin{longtable}{|c|c|p{2cm}|p{2cm}|p{2cm}|}
+ \caption{File Attributes}\label{table:fileattributes} \\
+ \endlastfoot
+ \multicolumn{5}{c}{Cont. on next page} \\
+ \endfoot
+ \hline
+ \multicolumn{1}{|c|}{\bf Field No.}
+ & \multicolumn{1}{c|}{\bf Stat Name}
+ & \multicolumn{1}{c|}{\bf Unix}
+ & \multicolumn{1}{c|}{\bf Win98 / NT}
+ & \multicolumn{1}{c|}{\bf MacOS } \\
+ \endfirsthead
+ \hline
+ \multicolumn{1}{|c|}{\bf Field No.}
+ & \multicolumn{1}{c|}{\bf Stat Name}
+ & \multicolumn{1}{c|}{\bf Unix}
+ & \multicolumn{1}{c|}{\bf Win98 / NT}
+ & \multicolumn{1}{c|}{\bf MacOS} \\
+ \endhead
+ \hline
+ 1 & st\_dev & Device number of filesystem & Drive number & vRefNum \\
+ \hline
+ 2 & st\_ino & Inode number & Always 0 & fileID / dirID \\
+ \hline
+ 3 & st\_mode & File mode & File mode & 777 dirs / apps; 666 docs; 444 locked docs \\
+ \hline
+ 4 & st\_nlink & Number of links to the file & Number of links (only on NTFS) & Always 1 \\
+ \hline
+ 5 & st\_uid & Owner ID & Always 0 & Always 0 \\
+ \hline
+ 6 & st\_gid & Group ID & Always 0 &Always 0 \\
+ \hline
+ 7 & st\_rdev & Device ID for special files & Drive No. & Always 0 \\
+ \hline
+ 8 & st\_size & File size in bytes & File size in bytes & Data fork file size in bytes \\
+ \hline
+ 9 & st\_blksize & Preferred block size & Always 0 & Preferred block size \\
+ \hline
+ 10 & st\_blocks & Number of blocks allocated & Always 0 & Number of blocks allocated \\
+ \hline
+ 11 & st\_atime & Last access time since epoch & Last access time since epoch & Last access time -66 years \\
+ \hline
+ 12 & st\_mtime & Last modify time since epoch & Last modify time since epoch & Last access time -66 years \\
+ \hline
+ 13 & st\_ctime & Inode change time since epoch & File create time since epoch & File create time -66 years\\
+ \hline
+\end{longtable}
Adding support for the remaining daemons will be straightforward.
Supported features of this patchset include:
-\begin{itemize}
+\begin{bsysitemize}
\item Client/Server TLS Requirement Negotiation
\item TLSv1 Connections with Server and Client Certificate
Validation
\item Forward Secrecy Support via Diffie-Hellman Ephemeral Keying
-\end{itemize}
+\end{bsysitemize}
This document will refer to both ``server'' and ``client'' contexts. These
terms refer to the accepting and initiating peer, respectively.
Additional configuration directives have been added to both the Console and
Director resources. These new directives are defined as follows:
-\begin{itemize}
+\begin{bsysitemize}
\item \underline{TLS Enable} \emph{(yes/no)}
Enable TLS support.
secrecy of communications. This directive is only valid within a server
context. To generate the parameter file, you may use openssl:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
openssl dhparam -out dh1024.pem -5 1024
-\end{verbatim}
+\end{lstlisting}
\normalsize
-\end{itemize}
+\end{bsysitemize}
\section{TLS API Implementation}
\index{TLS API Implementation}
\addcontentsline{toc}{subsection}{Library Initialization and Cleanup}
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
int init_tls (void);
-\end{verbatim}
+\end{lstlisting}
\normalsize
Performs TLS library initialization, including seeding of the PRNG. PRNG
seeding has not yet been implemented for win32.
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
int cleanup_tls (void);
-\end{verbatim}
+\end{lstlisting}
\normalsize
Performs TLS library cleanup.
\addcontentsline{toc}{subsection}{Manipulating TLS Contexts}
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
TLS_CONTEXT *new_tls_context (const char *ca_certfile,
const char *ca_certdir, const char *certfile,
const char *keyfile, const char *dhfile, bool verify_peer);
-\end{verbatim}
+\end{lstlisting}
\normalsize
Allocates and initializes a new opaque \emph{TLS\_CONTEXT} structure. The
client certificate validation is enabled.
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
void free_tls_context (TLS_CONTEXT *ctx);
-\end{verbatim}
+\end{lstlisting}
\normalsize
Deallocates a previously allocated \emph{TLS\_CONTEXT} structure.
\addcontentsline{toc}{subsection}{Performing Post-Connection Verification}
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
bool tls_postconnect_verify_host (TLS_CONNECTION *tls, const char *host);
-\end{verbatim}
+\end{lstlisting}
\normalsize
Performs post-connection verification of the peer-supplied x509
Returns \emph{true} if there is a match, \emph{false} otherwise.
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
bool tls_postconnect_verify_cn (TLS_CONNECTION *tls, alist *verify_list);
-\end{verbatim}
+\end{lstlisting}
\normalsize
Performs post-connection verification of the peer-supplied x509
\addcontentsline{toc}{subsection}{Manipulating TLS Connections}
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
TLS_CONNECTION *new_tls_connection (TLS_CONTEXT *ctx, int fd);
-\end{verbatim}
+\end{lstlisting}
\normalsize
Allocates and initializes a new \emph{TLS\_CONNECTION} structure with
context \emph{ctx} and file descriptor \emph{fd}.
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
void free_tls_connection (TLS_CONNECTION *tls);
-\end{verbatim}
+\end{lstlisting}
\normalsize
Deallocates memory associated with the \emph{tls} structure.
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
bool tls_bsock_connect (BSOCK *bsock);
-\end{verbatim}
+\end{lstlisting}
\normalsize
Negotiates a TLS client connection via \emph{bsock}. Returns \emph{true}
protocol error or an invalid certificate is presented.
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
bool tls_bsock_accept (BSOCK *bsock);
-\end{verbatim}
+\end{lstlisting}
\normalsize
Accepts a TLS client connection via \emph{bsock}. Returns \emph{true} if
error or an invalid certificate is presented.
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
bool tls_bsock_shutdown (BSOCK *bsock);
-\end{verbatim}
+\end{lstlisting}
\normalsize
Issues a blocking TLS shutdown request to the peer via \emph{bsock}. This function may not wait for the peer's reply.
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
int tls_bsock_writen (BSOCK *bsock, char *ptr, int32_t nbytes);
-\end{verbatim}
+\end{lstlisting}
\normalsize
Writes \emph{nbytes} from \emph{ptr} via the \emph{TLS\_CONNECTION}
bytes written will be returned.
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
int tls_bsock_readn (BSOCK *bsock, char *ptr, int32_t nbytes);
-\end{verbatim}
+\end{lstlisting}
\normalsize
Reads \emph{nbytes} from the \emph{TLS\_CONNECTION} associated with
implemented using the new TLS API as follows:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
int bnet_tls_client(TLS_CONTEXT *ctx, BSOCK * bsock);
-\end{verbatim}
+\end{lstlisting}
\normalsize
Negotiates a TLS session via \emph{bsock} using the settings from
\emph{ctx}. Returns 1 if successful, 0 otherwise.
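Putting these together, client-side session setup might look like the
following sketch (certificate paths are placeholders, {\bf bsock} is an
already-connected BSOCK, and error handling is elided):
\footnotesize
\begin{lstlisting}
/* Sketch: paths are placeholders; bsock is already connected */
TLS_CONTEXT *ctx = new_tls_context("ca.pem", NULL, "fd-cert.pem",
                                   "fd-key.pem", NULL, false);
if (ctx && bnet_tls_client(ctx, bsock)) {
   /* TLS session established on bsock */
}
\end{lstlisting}
\normalsize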
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
int bnet_tls_server(TLS_CONTEXT *ctx, BSOCK * bsock, alist *verify_list);
-\end{verbatim}
+\end{lstlisting}
\normalsize
Accepts a TLS client session via \emph{bsock} using the settings from
These functions are prototyped as follows:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
int bnet_set_nonblocking (BSOCK *bsock);
-\end{verbatim}
+\end{lstlisting}
\normalsize
Enables non-blocking I/O on the socket associated with \emph{bsock}.
Returns a copy of the socket flags prior to modification.
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
int bnet_set_blocking (BSOCK *bsock);
-\end{verbatim}
+\end{lstlisting}
\normalsize
Enables blocking I/O on the socket associated with \emph{bsock}. Returns a
copy of the socket flags prior to modification.
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
void bnet_restore_blocking (BSOCK *bsock, int flags);
-\end{verbatim}
+\end{lstlisting}
\normalsize
Restores blocking or non-blocking IO setting on the socket associated with
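These are intended for a save/modify/restore pattern, for example:
\footnotesize
\begin{lstlisting}
int flags = bnet_set_nonblocking(bsock);  /* save prior socket flags */
/* ... perform non-blocking I/O on bsock ... */
bnet_restore_blocking(bsock, flags);      /* restore original setting */
\end{lstlisting}
\normalsize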
client and server independently decide whether to continue:
\footnotesize
-\begin{verbatim}
+\begin{lstlisting}
if (!cram_md5_get_auth(dir, password, &tls_remote_need) ||
!cram_md5_auth(dir, password, tls_local_need)) {
[snip]
auth_success = false;
goto auth_done;
}
-\end{verbatim}
+\end{lstlisting}
\normalsize