+Version 1.36.1 released xxNov04:
+18Nov04
+- Increase authentication timeouts to 10 minutes.
+15Nov04
+- Fix cancel bug in the FD on /lib/tls where pthread_kill was
+ called with a zero pid.
+- Add date/time to all messages.
+- Make Qmsg use the time the message was queued rather than the
+ time it was printed.
+- Indent job output two spaces.
+13Nov04
+- Fix web page links for new manual.
+- Grant postgresql permission to cdimages.
+- Correct crash after "list nextvol" followed by "list media" -- bug 160.
+12Nov04
+- Fix scripts/Makefile.in missing ;\ -- thanks Martin
+- A bit of work on btape to keep it from going into infinite
+ loops when things do not work well, and to print a bit
+ better info.
+11Nov04
+- Fix bug 159: the JobDefs Storage resource completely overrode
+ anything specified in the Job resource.
+- Fix syntax of renaming postgresql per Dan.
+- Set the working_directory to /tmp for wx_console.
+10Nov04
+- Allow both a JobId and a filename or list of files to be
+ specified on a restore command line.
+- Save old mtx-changer before installing new one: bug 156
+- Fix errors in CDROM file pointed out by Scott.
+09Nov04
+- Fix exepath when Bacula is executed without a path.
+- Move test for socket libraries for Solaris in configure.in
+ before tcp wrapper tests so that linking works.
+- Add "make copy-static-fd" to makefile in CDROM file to
+ copy existing static fd to CDROM rather than building it.
+08Nov04
+- More doc updates
+- Fix ps command for OpenBSD
+- Rework the creation of indexes for SQL -- fall back
+ to the old code, but document what can be added.
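  For instance, an index such as the following can be added by hand to
  speed up pruning and restores (the index name and column choice here
  are illustrative, not a recommendation taken from the patch):
     CREATE INDEX file_jobid_idx ON File (JobId);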
+06Nov04
+- Add new Daemon message handler in default DIR conf.
+05Nov04
+- Fix Seg Fault with -D100 in bpipe.c.
+- Fix Seg Fault in run specifying a JobId.
+- Make mail sent from a daemon that has a Messages resource use
+ the MailCommand (with %-code editing) rather than the default
+ sendmail.
+- Replace Jmsg in dispatch_message() with Qmsg.
+- Make edit_job_codes handle NULL jcr.
+04Nov04
+- Add M_ALERT class and put tape alerts into it.
+- Fix the Verify count-versus-found mismatch by not double
+ counting files that are split across files/Volumes.
+30Oct04
+- Fix count returned from write_bsr_file() to handle multiple
+ volumes. This fixes most cases of the Verify VolumeToCatalog.
+- Clean up the "make clean" for the rescue CDROM a bit and remove
+ unneeded files from CVS.
+28Oct04
+- Fixed acquiring a tape so that it does not block all acquires
+ when operator intervention is needed.
+- Platform build script updates from Scott
+- Doc updates
+- Add a patch that forces Linux LD_ASSUME_KERNEL so that the new
+ /lib/tls is not used. This is done in the startup scripts (see
+ the sketch below).
+- Modify mtx-changer so that it checks for ONLINE while
+ waiting.
+- Modify make_postgresql_tables.in so that EndBlock is stored
+ as a bigint. This prevents job failures when a disk volume
+ grows beyond 2GB, where a 32-bit integer would overflow.
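  A minimal sketch of how the /lib/tls workaround might look in a start
  script (the 2.4.19 value and the directory test are assumptions based
  on the common LD_ASSUME_KERNEL workaround, not a copy of Bacula's
  scripts):
     # Use the old LinuxThreads library instead of the NPTL one in
     # /lib/tls, which triggers the pthread_kill/cancel problem above.
     if [ -d /lib/tls ]; then
        LD_ASSUME_KERNEL=2.4.19
        export LD_ASSUME_KERNEL
     fi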
+24Oct04
+- Add grep ONLINE to wait_for_drive() in mtx-changer.in (see
+ the sketch below).
+- More doc.
+- Rebuild Scott's new configure
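  A rough sketch of the ONLINE wait referenced above (the mt invocation,
  the drive argument, and the 300 second timeout are assumptions; the
  authoritative logic is in mtx-changer.in):
     wait_for_drive() {
        i=0
        while [ $i -le 300 ]; do               # give up after ~300 seconds
           if mt -f "$1" status 2>/dev/null | grep ONLINE >/dev/null 2>&1; then
              break                            # drive is loaded and ready
           fi
           sleep 1
           i=`expr $i + 1`
        done
     }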
+
Version 1.36.0 released 21Oct04:
Changes to 1.35.9:
19Oct04
Kern's ToDo List
- 26 October 2004
+ 18 November 2004
Major development:
Project Developer
Orphaned buffer: 24 bytes allocated at line 808 of rufus-dir job.c
Orphaned buffer: 40 bytes allocated at line 45 of rufus-dir alist.c
- Add dump of VolSessionId/Time and FileIndex with bls.
-- Add date/time to each Jmsg.
-1.37 Items:
-- Include within include
+1.37 Projects:
+#3 Migration (Move, Copy, Archive Jobs)
+#4 Embedded Python Scripting
+#5 Events that call a Python program
+#6 Select one from among Multiple Storage Devices for Job
+#7 Single Job Writing to Multiple Storage Devices
+
+
+1.37 Possibilities:
+- If two concurrent backups are attempted on the same tape
+ drive (autoloader) into different tape pools, one of them exits
+ fatally instead of waiting until the drive is idle.
+- Add seconds to start and end times in the Job report output.
+- Add disk seeking on restore.
+- Document that ChangerDevice is used for Alert command.
+- Include within include -- does it work?
- Implement a Pool of type Cleaning?
- Implement VolReadTime and VolWriteTime in SD
- Modify Backing up Your Database to include a bootstrap file.
 entirely upon the length. On a restore, the full and all incrementals
 since it will be applied in sequence to restore the file.
- Add a regression test for dbcheck.
-- Add disk seeking on restore. - Allow
- for optional cancelling of SD and FD in case DIR
+- Allow for optional cancelling of SD and FD in case DIR
gets a fatal error. Requested by Jesse Guardiani <jesse@wingnet.net>
- Add "limit=n" for "list jobs"
- Check new HAVE_WIN32 open bits.
- Add "Rerun failed levels = yes/no" to Job resource.
- Fix CDROM make script to permit picking up already installed
static FD.
+- Add date/time to each Jmsg.
sprintf(term_code, _("Inappropriate term code: %c\n"), jcr->JobStatus);
break;
}
- bstrftime(sdt, sizeof(sdt), jcr->jr.StartTime);
- bstrftime(edt, sizeof(edt), jcr->jr.EndTime);
+ bstrftimes(sdt, sizeof(sdt), jcr->jr.StartTime);
+ bstrftimes(edt, sizeof(edt), jcr->jr.EndTime);
RunTime = jcr->jr.EndTime - jcr->jr.StartTime;
if (RunTime <= 0) {
kbps = 0;
sprintf(term_code, _("Inappropriate term code: %c\n"), jcr->JobStatus);
break;
}
- bstrftime(sdt, sizeof(sdt), jcr->jr.StartTime);
- bstrftime(edt, sizeof(edt), jcr->jr.EndTime);
+ bstrftimes(sdt, sizeof(sdt), jcr->jr.StartTime);
+ bstrftimes(edt, sizeof(edt), jcr->jr.EndTime);
RunTime = jcr->jr.EndTime - jcr->jr.StartTime;
if (RunTime <= 0) {
kbps = 0;
sprintf(term_code, _("Inappropriate term code: %c\n"), TermCode);
break;
}
- bstrftime(sdt, sizeof(sdt), jcr->jr.StartTime);
- bstrftime(edt, sizeof(edt), jcr->jr.EndTime);
+ bstrftimes(sdt, sizeof(sdt), jcr->jr.StartTime);
+ bstrftimes(edt, sizeof(edt), jcr->jr.EndTime);
if (jcr->jr.EndTime - jcr->jr.StartTime > 0) {
kbps = (double)jcr->jr.JobBytes / (1000 * (jcr->jr.EndTime - jcr->jr.StartTime));
} else {
/* Select Jobs -- for counting */
Mmsg(query, select_job, edit_uint64(now - period, ed1), cr.ClientId);
- Dmsg1(050, "select sql=%s\n", query);
+ Dmsg1(500, "select sql=%s\n", query);
if (!db_sql_query(ua->db, query, file_count_handler, (void *)&del)) {
if (ua->verbose) {
bsendmsg(ua, "%s", db_strerror(ua->db));
}
- Dmsg0(050, "Count failed\n");
+ Dmsg0(500, "Count failed\n");
goto bail_out;
}
for (i=0; i < del.num_ids; i++) {
struct s_count_ctx cnt;
- Dmsg1(050, "Delete JobId=%d\n", del.JobId[i]);
+ Dmsg1(500, "Delete JobId=%d\n", del.JobId[i]);
Mmsg(query, cnt_File, del.JobId[i]);
cnt.count = 0;
db_sql_query(ua->db, query, count_handler, (void *)&cnt);
*/
Mmsg(query, upd_Purged, del.JobId[i]);
db_sql_query(ua->db, query, NULL, (void *)NULL);
- Dmsg1(050, "Del sql=%s\n", query);
+ Dmsg1(500, "Del sql=%s\n", query);
}
edit_uint64_with_commas(del.tot_ids, ed1);
edit_uint64_with_commas(del.num_ids, ed2);
for (i=0; create_deltabs[i]; i++) {
if (!db_sql_query(ua->db, create_deltabs[i], NULL, (void *)NULL)) {
bsendmsg(ua, "%s", db_strerror(ua->db));
- Dmsg0(050, "create DelTables table failed\n");
+ Dmsg0(500, "create DelTables table failed\n");
return 0;
}
}
if (ua->verbose) {
bsendmsg(ua, "%s", db_strerror(ua->db));
}
- Dmsg0(050, "insert delcand failed\n");
+ Dmsg0(500, "insert delcand failed\n");
goto bail_out;
}
/* Count Files to be deleted */
pm_strcpy(query, cnt_DelCand);
- Dmsg1(100, "select sql=%s\n", query);
+ Dmsg1(500, "select sql=%s\n", query);
cnt.count = 0;
if (!db_sql_query(ua->db, query, count_handler, (void *)&cnt)) {
bsendmsg(ua, "%s", db_strerror(ua->db));
- Dmsg0(050, "Count failed\n");
+ Dmsg0(500, "Count failed\n");
goto bail_out;
}
 * Then delete the Job entry, and finally any JobMedia records.
*/
for (i=0; i < del.num_ids; i++) {
- Dmsg1(050, "Delete JobId=%d\n", del.JobId[i]);
+ Dmsg1(500, "Delete JobId=%d\n", del.JobId[i]);
if (!del.PurgedFiles[i]) {
Mmsg(query, del_File, del.JobId[i]);
if (!db_sql_query(ua->db, query, NULL, (void *)NULL)) {
bsendmsg(ua, "%s", db_strerror(ua->db));
}
- Dmsg1(050, "Del sql=%s\n", query);
+ Dmsg1(500, "Del sql=%s\n", query);
}
Mmsg(query, del_Job, del.JobId[i]);
if (!db_sql_query(ua->db, query, NULL, (void *)NULL)) {
bsendmsg(ua, "%s", db_strerror(ua->db));
}
- Dmsg1(050, "Del sql=%s\n", query);
+ Dmsg1(500, "Del sql=%s\n", query);
Mmsg(query, del_JobMedia, del.JobId[i]);
if (!db_sql_query(ua->db, query, NULL, (void *)NULL)) {
bsendmsg(ua, "%s", db_strerror(ua->db));
}
- Dmsg1(050, "Del sql=%s\n", query);
+ Dmsg1(500, "Del sql=%s\n", query);
}
bsendmsg(ua, _("Pruned %d %s for client %s from catalog.\n"), del.num_ids,
del.num_ids==1?_("Job"):_("Jobs"), client->hdr.name);
Mmsg(query, cnt_JobMedia, mr->MediaId);
if (!db_sql_query(ua->db, query, count_handler, (void *)&cnt)) {
bsendmsg(ua, "%s", db_strerror(ua->db));
- Dmsg0(050, "Count failed\n");
+ Dmsg0(500, "Count failed\n");
goto bail_out;
}
if (ua->verbose) {
bsendmsg(ua, "%s", db_strerror(ua->db));
}
- Dmsg0(050, "Count failed\n");
+ Dmsg0(500, "Count failed\n");
goto bail_out;
}
period = mr->VolRetention;
now = (utime_t)time(NULL);
- Dmsg3(200, "Now=%d period=%d now-period=%d\n", (int)now, (int)period,
+ Dmsg3(500, "Now=%d period=%d now-period=%d\n", (int)now, (int)period,
(int)(now-period));
for (i=0; i < del.num_ids; i++) {
if (!db_get_job_record(ua->jcr, ua->db, &jr)) {
continue;
}
- Dmsg2(200, "Looking at %s JobTdate=%d\n", jr.Job, (int)jr.JobTDate);
+ Dmsg2(500, "Looking at %s JobTdate=%d\n", jr.Job, (int)jr.JobTDate);
if (jr.JobTDate >= (now - period)) {
continue;
}
- Dmsg2(200, "Delete JobId=%d Job=%s\n", del.JobId[i], jr.Job);
+ Dmsg2(500, "Delete JobId=%d Job=%s\n", del.JobId[i], jr.Job);
Mmsg(query, del_File, del.JobId[i]);
db_sql_query(ua->db, query, NULL, (void *)NULL);
Mmsg(query, del_Job, del.JobId[i]);
db_sql_query(ua->db, query, NULL, (void *)NULL);
Mmsg(query, del_JobMedia, del.JobId[i]);
db_sql_query(ua->db, query, NULL, (void *)NULL);
- Dmsg1(050, "Del sql=%s\n", query);
+ Dmsg1(500, "Del sql=%s\n", query);
del.num_del++;
}
if (del.JobId) {
/* If purged, mark it so */
if (del.num_ids == del.num_del) {
- Dmsg0(200, "Volume is purged.\n");
+ Dmsg0(500, "Volume is purged.\n");
stat = mark_media_purged(ua, mr);
}
_("Inappropriate term code: %d %c\n"), TermCode, TermCode);
break;
}
- bstrftime(sdt, sizeof(sdt), jcr->jr.StartTime);
- bstrftime(edt, sizeof(edt), jcr->jr.EndTime);
+ bstrftimes(sdt, sizeof(sdt), jcr->jr.StartTime);
+ bstrftimes(edt, sizeof(edt), jcr->jr.EndTime);
if (jcr->verify_job) {
Name = jcr->verify_job->hdr.name;
} else {
return dt;
}
+/* Formatted time for user display: dd-Mon-yyyy hh:mm:ss */
+char *bstrftimes(char *dt, int maxlen, utime_t tim)
+{
+ time_t ttime = (time_t)tim;
+ struct tm tm;
+
+ /* ***FIXME**** the format and localtime_r() should be user configurable */
+ localtime_r(&ttime, &tm);
+ strftime(dt, maxlen, "%d-%b-%Y %H:%M:%S", &tm);
+ return dt;
+}
+
+
/* Formatted time for user display: dd-Mon hh:mm */
char *bstrftime_ny(char *dt, int maxlen, utime_t tim)
{
char *bstrutime(char *dt, int maxlen, utime_t tim);
char *bstrftime(char *dt, int maxlen, utime_t tim);
+char *bstrftimes(char *dt, int maxlen, utime_t tim);
char *bstrftime_ny(char *dt, int maxlen, utime_t tim);
char *bstrftime_nc(char *dt, int maxlen, utime_t tim);
utime_t str_to_utime(char *str);
wid->wd->interval = wait;
register_watchdog(wid->wd);
- Dmsg3(50, "Start thread timer %p tip %p for %d secs.\n", wid, wid->tid, wait);
+ Dmsg3(50, "Start bsock timer %p tip %p for %d secs.\n", wid, wid->tid, wait);
return wid;
}
Dmsg0(400, "NicB-reworked watchdog thread entered\n");
while (!quit) {
- watchdog_t *p, *q;
+ watchdog_t *p;
/*
* We lock the jcr chain here because a good number of the
*/
lock_jcr_chain();
wd_lock();
+
+walk_list:
watchdog_time = time(NULL);
next_time = watchdog_time + watchdog_sleep_time;
-
foreach_dlist(p, wd_queue) {
if (p->next_fire <= watchdog_time) {
/* Run the callback */
/* Reschedule (or move to inactive list if it's a one-shot timer) */
if (p->one_shot) {
- /*
- * Note, when removing an item while walking the list
- * we must get the previous pointer (q) and set the
- * current pointer (p) to this previous pointer after
- * removing the current pointer, otherwise, we won't
- * walk the rest of the list.
- */
- q = (watchdog_t *)wd_queue->prev(p);
wd_queue->remove(p);
wd_inactive->append(p);
- p = q;
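+ /* p has just been moved off wd_queue, so the foreach_dlist cursor is
+  * stale; restart the walk from the top of the queue. */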
+ goto walk_list;
} else {
p->next_fire = watchdog_time + p->interval;
}
/* */
#undef VERSION
-#define VERSION "1.36.1"
-#define BDATE "20 November 2004"
-#define LSMDATE "20Nov04"
+#define VERSION "1.37.1"
+#define BDATE "21 November 2004"
+#define LSMDATE "21Nov04"
/* Debug flags */
#undef DEBUG
-#!/bin/sh
+#!/bin/bash
#
# Shell script to update MySQL tables from version 1.34 to 1.35.5
#
echo "Depending on the size of your database,"
echo "this script may take several minutes to run."
echo " "
-bindir=/home/kern/bacula/depkgs/sqlite
+bindir=/usr/bin
-if $bindir/mysql $* -f <<END-OF-DATA
+DB_VER="$bindir/mysql $* bacula -e 'select * from Version;'|tail -n 1 2>/dev/null"
+if [ -z "$DB_VER" ]; then
+ echo "Sorry, I can't seem to locate a bacula database."
+ exit 1
+fi
+
+if [ -n "$DB_VER" ]; then
+
+ if [ "$DB_VER" = "8" ]; then
+ echo "The Catalog is already at version 8. Nothing to do!"
+ exit 0
+ elif [ "$DB_VER" -ne "7" ]; then
+ echo "Sorry, this script is designed to update a version 7 database"
+ echo "and you have a version $DB_VER database."
+ exit 1
+ fi
+fi
+
+if $bindir/mysql $* -f << END_OF_DATA
USE bacula;
ALTER TABLE Media ADD COLUMN EndFile INTEGER UNSIGNED NOT NULL DEFAULT 0;
DELETE FROM Version;
INSERT INTO Version (VersionId) VALUES (8);
-END-OF-DATA
+END_OF_DATA
then
echo "Update of Bacula MySQL tables succeeded."
else
echo "Update of Bacula MySQL tables failed."
fi
+
exit 0
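Since the script passes its command line straight through to the mysql
client via $*, connection options can be supplied when invoking it, for
example (the installed script name may differ):

   ./update_mysql_tables -u bacula -p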
echo "Depending on the size of your database,"
echo "this script may take several minutes to run."
echo " "
-bindir=/home/kern/bacula/depkgs/sqlite
+bindir=/usr/bin
-if $bindir/psql $* -f - <<END-OF-DATA
+DB_VER="`echo -e '\\c bacula\nselect * from Version;' | $bindir/psql $* bacula -f - | tail -n 1 2>/dev/null`"
+if [ -z "$DB_VER" ]; then
+ echo "Sorry, I can't seem to locate a bacula database."
+ exit 1
+fi
+
+if [ -n "$DB_VER" ]; then
+
+ if [ "$DB_VER" = "8" ]; then
+ echo "The Catalog is already at version 8. Nothing to do!"
+ exit 0
+ elif [ "$DB_VER" -ne "7" ]; then
+ echo "Sorry, this script is designed to update a version 7 database"
+ echo "and you have a version $DB_VER database."
+ exit 1
+ fi
+fi
+
+
+if $bindir/psql $* -f - <<END_OF_DATA
\c bacula
ALTER TABLE media ADD COLUMN EndFile integer;
UPDATE media SET EndFile=0;
ALTER TABLE media ALTER COLUMN EndFile SET NOT NULL;
-ALTER TABLE media ADD COLUMN EndBlock integer;
+ALTER TABLE media ADD COLUMN EndBlock bigint;
UPDATE media SET EndBlock=0;
ALTER TABLE media ALTER COLUMN EndBlock SET NOT NULL;
vacuum;
-END-OF-DATA
+END_OF_DATA
then
echo "Update of Bacula PostgreSQL tables succeeded."
else
echo "this script may take several minutes to run."
echo " "
-bindir=/home/kern/bacula/depkgs/sqlite
-cd /home/kern/bacula/working
+bindir=/usr/bin
-$bindir/sqlite $* bacula.db <<END-OF-DATA
+# The location of your bacula working directory
+workdir=/var/lib/bacula
+
+
+cd $workdir
+
+if [ ! -r bacula.db -o ! -s bacula.db ]; then
+ echo "Sorry, can't find a Bacula DB. Aborting."
+ exit 1
+fi
+
+DB_VER="`echo "select * from Version;" | $bindir/sqlite bacula.db | tail -1 2>/dev/null`"
+if [ -n "$DB_VER" ]; then
+
+ if [ "$DB_VER" = "8" ]; then
+ echo "The Catalog is already at version 8. Nothing to do!"
+ exit 0
+ elif [ "$DB_VER" -ne "7" ]; then
+ echo "Sorry, this script is designed to update a version 7 database"
+ echo "and you have a version $DB_VER database."
+ exit 1
+ fi
+
+else
+ echo "Sorry, I can't seem to locate a bacula database."
+ exit 1
+fi
+
+
+$bindir/sqlite $* bacula.db <<END_OF_DATA
BEGIN TRANSACTION;
CREATE TEMPORARY TABLE Media_backup (
DELETE FROM Version;
INSERT INTO Version (VersionId) VALUES (8);
-END-OF-DATA
+END_OF_DATA