From: Kern Sibbald
Date: Sat, 19 Aug 2006 10:10:21 +0000 (+0000)
Subject: kes Apply Martin's fix to src/win32/Makefile.
X-Git-Tag: Release-2.0.0~604
X-Git-Url: https://git.sur5r.net/?a=commitdiff_plain;h=dc73b35ec79d9852ebb77e08ecd331eb8b777fa9;p=bacula%2Fbacula

kes  Apply Martin's fix to src/win32/Makefile.
kes  Apply Martin's fix to configure.in for pthreads on FreeBSD 4.x
kes  Implement pruning for Migration and migrated jobs.
kes  Implement PoolOccupancy migration.
kes  Implement PoolTime migration.

git-svn-id: https://bacula.svn.sourceforge.net/svnroot/bacula/trunk@3304 91ce42f0-d328-0410-95d8-f526ca767f89
---

diff --git a/bacula/ReleaseNotes b/bacula/ReleaseNotes
index c2e7b20203..6aa779a1a2 100644
--- a/bacula/ReleaseNotes
+++ b/bacula/ReleaseNotes
@@ -172,26 +172,18 @@ A Lot of New features for 1.39.18:
 - Fixes to reloading the Dir conf file from Eric Bollengier and
   Christopher Hull.
 - Modify LICENSE to correct some problems pointed out by Debian.
-- Implement patch submitted by cesarb in bug #606 to implement O_NOATIME
-  support.
+- Apply a patch submitted by cesarb in bug #606 to implement O_NOATIME support.
   O_NOATIME is an open() flag which makes it possible to read a file
   without updating the inode atime (and also without the inode ctime
   update which happens if you try to set the atime back to its previous
   value).  It also prevents a race condition when two programs are
   reading the same file, but only one does not want to change the atime.
   It's most useful for backup programs and file integrity checkers
   (and bacula can fit on both
-  categories).
-
-  Recent versions of the Linux kernel and glibc have support for it (the
-  glibc support being mostly copying the O_NOATIME definition to
-  bits/fcntl.h). If there's no support for it on the kernel, trying to use
-  it does nothing (since the kernel ignores unknown flags).
-
-  If the kernel has support for it, trying to use it either works, fails
-  silently (mostly in remote filesystems), or returns errno=EPERM (if you
-  are not either the owner of the file or root). A simple way to prevent the
-  failure is to open the file without the flag and set it later with
-  fcntl(F_SETFL), ignoring any EPERM errors.
+  categories).
+  You enable it in the Bacula FileSet Options resource by setting:
+        noatime = yes
+  The effect of this option is similar to the keepatime option except
+  it is more efficient and avoids modifying ctime.
 - Implement a pile of new man pages contributed by Jose Tallon.
 - Implement a number of user supplied patches for DVD writing.
 - Modify the database format for handling Migration jobs:
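The removed paragraph above describes a portable fallback for O_NOATIME: open
the file without the flag, then try to add the flag afterwards with
fcntl(F_SETFL) and ignore EPERM.  The following is a minimal standalone sketch
of that technique, written for this note only; the helper name open_noatime is
made up here and this is not the code Bacula itself uses.

#define _GNU_SOURCE              /* O_NOATIME is a Linux/glibc extension */
#include <fcntl.h>
#include <errno.h>

/* Open path read-only, then try to set O_NOATIME with fcntl(F_SETFL),
 * ignoring EPERM (raised when we are neither the file's owner nor root).
 * On systems without O_NOATIME the #ifdef compiles the attempt away. */
static int open_noatime(const char *path)
{
   int fd = open(path, O_RDONLY);
   if (fd < 0) {
      return -1;                          /* the open itself failed */
   }
#ifdef O_NOATIME
   int flags = fcntl(fd, F_GETFL);
   if (flags >= 0 && fcntl(fd, F_SETFL, flags | O_NOATIME) < 0 && errno != EPERM) {
      /* Unexpected failure: keep the descriptor and simply accept
       * normal atime updates rather than failing the read. */
   }
#endif
   return fd;
}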
diff --git a/bacula/autoconf/configure.in b/bacula/autoconf/configure.in
index bd853a3fa0..b61da9e8b9 100644
--- a/bacula/autoconf/configure.in
+++ b/bacula/autoconf/configure.in
@@ -613,26 +613,26 @@ AC_ARG_WITH(python,
           if test -f $python_root/include/python2.2/Python.h; then
              PYTHON_INCDIR=-I$python_root/include/python2.2
              if test -d $python_root/lib64/python2.2/config; then
-                PYTHON_LIBS="-L$python_root/lib64/python2.2/config -lpython2.2"
-             else
-                PYTHON_LIBS="-L$python_root/lib/python2.2/config -lpython2.2"
-             fi
+                PYTHON_LIBS="-L$python_root/lib64/python2.2/config -lpython2.2"
+             else
+                PYTHON_LIBS="-L$python_root/lib/python2.2/config -lpython2.2"
+             fi
              break
           elif test -f $python_root/include/python2.3/Python.h; then
              PYTHON_INCDIR=-I$python_root/include/python2.3
              if test -d $python_root/lib64/python2.3/config; then
-                PYTHON_LIBS="-L$python_root/lib64/python2.3/config -lpython2.3"
-             else
-                PYTHON_LIBS="-L$python_root/lib/python2.3/config -lpython2.3"
-             fi
+                PYTHON_LIBS="-L$python_root/lib64/python2.3/config -lpython2.3"
+             else
+                PYTHON_LIBS="-L$python_root/lib/python2.3/config -lpython2.3"
+             fi
              break
           elif test -f $python_root/include/python2.4/Python.h; then
              PYTHON_INCDIR=-I$python_root/include/python2.4
              if test -d $python_root/lib64/python2.4/config; then
-                PYTHON_LIBS="-L$python_root/lib64/python2.4/config -lpython2.4"
-             else
-                PYTHON_LIBS="-L$python_root/lib/python2.4/config -lpython2.4"
-             fi
+                PYTHON_LIBS="-L$python_root/lib64/python2.4/config -lpython2.4"
+             else
+                PYTHON_LIBS="-L$python_root/lib/python2.4/config -lpython2.4"
+             fi
              break
           fi
        done
@@ -640,10 +640,10 @@ AC_ARG_WITH(python,
        if test -f $prefix/include/Python.h; then
           PYTHON_INCDIR=-I$prefix/include
           if test -d $prefix/lib64/config; then
-             PYTHON_LIBS="-L$prefix/lib64/config -lpython"
-          else
-             PYTHON_LIBS="-L$prefix/lib/config -lpython"
-          fi
+             PYTHON_LIBS="-L$prefix/lib64/config -lpython"
+          else
+             PYTHON_LIBS="-L$prefix/lib/config -lpython"
+          fi
        else
           AC_MSG_RESULT(no)
           AC_MSG_ERROR(Unable to find Python.h in standard locations)
@@ -1762,8 +1762,8 @@ freebsd)
   DISTVER=`uname -a |awk '{print $3}'`
   VER=`echo $DISTVER | cut -c 1`
   if test x$VER = x4 ; then
-     PTHREAD_LIB="${PTHREAD_LIBS}"
-     CFLAGS="${CFLAGS} ${PTHREAD_CFLAGS}"
+     PTHREAD_LIB="${PTHREAD_LIBS:--pthread}"
+     CFLAGS="${CFLAGS} ${PTHREAD_CFLAGS:--pthread}"
   fi
   lld="qd"
   llu="qu"
diff --git a/bacula/kernstodo b/bacula/kernstodo
index c8774d6921..3ad63c9fff 100644
--- a/bacula/kernstodo
+++ b/bacula/kernstodo
@@ -1,5 +1,5 @@
                     Kern's ToDo List
-                     16 August 2006
+                     19 August 2006
 
 Major development:
 Project                     Developer
@@ -46,7 +46,6 @@ For 1.39:
   daemon it was trying to connect to.
 - Try turning on disk seek code.
 - Possibly turn on St. Bernard code.
-- Fix bscan to report the JobType when restoring a job.
 - Fix bextract to restore ACLs, or better yet, use common routines.
 - Do we migrate appendable Volumes?
@@ -1648,4 +1647,5 @@ Block Position: 0
 - Update dbcheck to include Log table
 - Update llist to include new fields.
 - Make unmount unload autochanger.  Make mount load slot.
+- Fix bscan to report the JobType when restoring a job.
diff --git a/bacula/src/cats/sql_cmds.c b/bacula/src/cats/sql_cmds.c
index 1525fffb75..e4e273025a 100644
--- a/bacula/src/cats/sql_cmds.c
+++ b/bacula/src/cats/sql_cmds.c
@@ -101,7 +101,8 @@ const char *insert_delcand =
    "AND JobTDate<%s "
    "AND ClientId=%s";
 
-/* Select Jobs from the DelCandidates table that have a
+/*
+ * Select Jobs from the DelCandidates table that have a
  * more recent backup -- i.e. are not the only backup.
  * This is the list of Jobs to delete for a Backup Job.
  * At the same time, we select "orphaned" jobs
@@ -114,7 +115,7 @@ const char *select_backup_del =
    "(DelCandidates.JobStatus!='T'))) OR "
    "(Job.JobTDate>%s "
    "AND Job.ClientId=%s "
-   "AND Job.Level='F' AND Job.JobStatus='T' AND Job.Type='B' "
+   "AND Job.Level='F' AND Job.JobStatus='T' AND Job.Type IN ('B','M') "
    "AND Job.FileSetId=DelCandidates.FileSetId)";
 
 /* Select Jobs from the DelCandidates table that have a
@@ -153,6 +154,17 @@ const char *select_admin_del =
    "AND Job.ClientId=%s "
    "AND Job.Type='D')";
 
+/*
+ * Select Jobs from the DelCandidates table.
+ * This is the list of Jobs to delete for a Migrate Job.
+ */
+const char *select_migrate_del =
+   "SELECT DISTINCT DelCandidates.JobId,DelCandidates.PurgedFiles "
+   "FROM Job,DelCandidates "
+   "WHERE (Job.JobTdate<%s AND DelCandidates.JobStatus!='T') OR "
+   "(Job.JobTDate>%s "
+   "AND Job.ClientId=%s "
+   "AND Job.Type='g')";
 
 /* ======= ua_restore.c */
 const char *uar_count_files =
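For reference, the three %s placeholders in the new select_migrate_del query
are filled in by prune_jobs() (see the ua_prune.c hunk further below) with the
JobTDate cutoff twice and then the ClientId.  A small standalone sketch of
that substitution, using made-up values and nothing beyond the C standard
library:

#include <stdio.h>

int main(void)
{
   /* Same text as select_migrate_del in sql_cmds.c above */
   const char *select_migrate_del =
      "SELECT DISTINCT DelCandidates.JobId,DelCandidates.PurgedFiles "
      "FROM Job,DelCandidates "
      "WHERE (Job.JobTdate<%s AND DelCandidates.JobStatus!='T') OR "
      "(Job.JobTDate>%s "
      "AND Job.ClientId=%s "
      "AND Job.Type='g')";
   const char *tdate_cutoff = "1155980000";   /* example JobTDate cutoff */
   const char *client_id    = "1";            /* example ClientId */
   char query[512];

   /* Mirrors Mmsg(query, select_migrate_del, ed1, ed1, ed2) in prune_jobs() */
   snprintf(query, sizeof(query), select_migrate_del,
            tdate_cutoff, tdate_cutoff, client_id);
   printf("%s\n", query);
   return 0;
}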
diff --git a/bacula/src/cats/sql_cmds.h b/bacula/src/cats/sql_cmds.h
index 40b8498370..cbe57d4438 100644
--- a/bacula/src/cats/sql_cmds.h
+++ b/bacula/src/cats/sql_cmds.h
@@ -22,6 +22,7 @@ extern const char CATS_IMP_EXP *select_backup_del;
 extern const char CATS_IMP_EXP *select_verify_del;
 extern const char CATS_IMP_EXP *select_restore_del;
 extern const char CATS_IMP_EXP *select_admin_del;
+extern const char CATS_IMP_EXP *select_migrate_del;
 extern const char CATS_IMP_EXP *select_job;
 extern const char CATS_IMP_EXP *del_File;
 extern const char CATS_IMP_EXP *cnt_File;
diff --git a/bacula/src/dird/migrate.c b/bacula/src/dird/migrate.c
index d487415af7..38f51f31a8 100644
--- a/bacula/src/dird/migrate.c
+++ b/bacula/src/dird/migrate.c
@@ -49,6 +49,7 @@ static bool find_mediaid_then_jobids(JCR *jcr, idpkt *ids, const char *query1,
                                      const char *type);
 static bool find_jobids_from_mediaid_list(JCR *jcr, idpkt *ids, const char *type);
 static void start_migration_job(JCR *jcr);
+static int get_next_dbid_from_list(char **p, DBId_t *DBId);
 
 /*
  * Called here before the job is run to do the job
@@ -56,7 +57,7 @@ static void start_migration_job(JCR *jcr);
  */
 bool do_migration_init(JCR *jcr)
 {
-   /* If we find a job to migrate it is previous_jr.JobId */
+   /* If we find a job or jobs to migrate it is previous_jr.JobId */
    if (!get_job_to_migrate(jcr)) {
      return false;
    }
@@ -409,26 +410,53 @@ const char *sql_jobids_from_mediaid =
    " WHERE JobMedia.JobId=Job.JobId AND JobMedia.MediaId=%s"
    " ORDER by Job.StartTime";
 
+/* Get the number of bytes in the pool */
 const char *sql_pool_bytes =
    "SELECT SUM(VolBytes) FROM Media,Pool WHERE"
    " VolStatus in ('Full','Used','Error','Append') AND Media.Enabled=1 AND"
    " Media.PoolId=Pool.PoolId AND Pool.Name='%s'";
 
-const char *sql_vol_bytes =
+/* Get the number of bytes in the Jobs */
+const char *sql_job_bytes =
+   "SELECT SUM(JobBytes) FROM Job WHERE JobId IN (%s)";
+
+
+/* Get Media Ids in Pool */
+const char *sql_mediaids =
    "SELECT MediaId FROM Media,Pool WHERE"
    " VolStatus in ('Full','Used','Error') AND Media.Enabled=1 AND"
-   " Media.PoolId=Pool.PoolId AND Pool.Name='%s' AND"
-   " VolBytes<%s ORDER BY LastWritten ASC LIMIT 1";
+   " Media.PoolId=Pool.PoolId AND Pool.Name='%s' ORDER BY LastWritten ASC";
 
+/* Get JobIds in Pool longer than specified time */
+const char *sql_pool_time =
+   "SELECT DISTINCT Job.JobId from Pool,Job,Media,JobMedia WHERE"
+   " Pool.Name='%s' AND Media.PoolId=Pool.PoolId AND"
+   " VolStatus in ('Full','Used','Error') AND Media.Enabled=1 AND"
+   " JobMedia.JobId=Job.JobId AND Job.PoolId=Media.PoolId"
+   " AND Job.RealEndTime<='%s'";
 
-const char *sql_ujobid =
-   "SELECT DISTINCT Job.Job from Client,Pool,Media,Job,JobMedia "
-   " WHERE Media.PoolId=Pool.PoolId AND Pool.Name='%s' AND"
-   " JobMedia.JobId=Job.JobId AND Job.PoolId=Media.PoolId";
+/*
+* const char *sql_ujobid =
+*   "SELECT DISTINCT Job.Job from Client,Pool,Media,Job,JobMedia "
+*   " WHERE Media.PoolId=Pool.PoolId AND Pool.Name='%s' AND"
+*   " JobMedia.JobId=Job.JobId AND Job.PoolId=Media.PoolId";
+*/
 
 /*
+ *
+ * This is the central piece of code that finds a job or jobs
+ * (actually JobIds) to migrate.  It first looks to see if one
+ * has been "manually" specified in jcr->MigrateJobId, and if
+ * so, it returns that JobId to be run.  Otherwise, it
+ * examines the Selection Type to see what kind of migration
+ * we are doing (Volume, Job, Client, ...) and applies any
+ * Selection Pattern if appropriate to obtain a list of JobIds.
+ * Finally, it will loop over all the JobIds found, except the last
+ * one, starting a new job with MigrationJobId set to that JobId;
+ * it then returns the last JobId to the caller.
+ *
  * Returns: false on error
  *          true if OK and jcr->previous_jr filled in
  */
@@ -437,14 +465,27 @@ static bool get_job_to_migrate(JCR *jcr)
    char ed1[30];
    POOL_MEM query(PM_MESSAGE);
    JobId_t JobId;
+   DBId_t MediaId = 0;
    int stat;
    char *p;
-   idpkt ids;
+   idpkt ids, mid, jids;
+   db_int64_ctx ctx;
+   int64_t pool_bytes;
+   bool ok;
+   time_t ttime;
+   struct tm tm;
+   char dt[MAX_TIME_LENGTH];
 
    ids.list = get_pool_memory(PM_MESSAGE);
-   Dmsg1(dbglevel, "list=%p\n", ids.list);
    ids.list[0] = 0;
    ids.count = 0;
+   mid.list = get_pool_memory(PM_MESSAGE);
+   mid.list[0] = 0;
+   mid.count = 0;
+   jids.list = get_pool_memory(PM_MESSAGE);
+   jids.list[0] = 0;
+   jids.count = 0;
+
 
    /*
    * If MigrateJobId is set, then we migrate only that Job,
@@ -496,11 +537,9 @@ static bool get_job_to_migrate(JCR *jcr)
       }
       break;
 
-/***** Below not implemented yet *********/
    case MT_POOL_OCCUPANCY:
-      db_int64_ctx ctx;
-
       ctx.count = 0;
+      /* Find count of bytes in pool */
       Mmsg(query, sql_pool_bytes, jcr->pool->hdr.name);
       if (!db_sql_query(jcr->db, query.c_str(), db_int64_handler, (void *)&ctx)) {
          Jmsg(jcr, M_FATAL, 0, _("SQL failed. ERR=%s\n"), db_strerror(jcr->db));
@@ -508,17 +547,98 @@ static bool get_job_to_migrate(JCR *jcr)
       }
      if (ctx.count == 0) {
          Jmsg(jcr, M_INFO, 0, _("No Volumes found to migrate.\n"));
+         goto ok_out;
+      }
+      pool_bytes = ctx.value;
+      Dmsg2(dbglevel, "highbytes=%d pool=%d\n", (int)jcr->pool->MigrationHighBytes,
+         (int)pool_bytes);
+      if (pool_bytes < (int64_t)jcr->pool->MigrationHighBytes) {
+         Jmsg(jcr, M_INFO, 0, _("No Volumes found to migrate.\n"));
+         goto ok_out;
+      }
+      Dmsg0(dbglevel, "We should do Occupancy migration.\n");
+
+      ids.count = 0;
+      /* Find a list of MediaIds that could be migrated */
+      Mmsg(query, sql_mediaids, jcr->pool->hdr.name);
+//    Dmsg1(dbglevel, "query=%s\n", query.c_str());
+      if (!db_sql_query(jcr->db, query.c_str(), dbid_handler, (void *)&ids)) {
+         Jmsg(jcr, M_FATAL, 0, _("SQL failed. ERR=%s\n"), db_strerror(jcr->db));
         goto bail_out;
      }
-      if (ctx.value > (int64_t)jcr->pool->MigrationHighBytes) {
-         Dmsg2(dbglevel, "highbytes=%d pool=%d\n", (int)jcr->pool->MigrationHighBytes,
-            (int)ctx.value);
+      if (ids.count == 0) {
+         Jmsg(jcr, M_INFO, 0, _("No Volumes found to migrate.\n"));
+         goto ok_out;
      }
-      goto bail_out;
+      Dmsg2(dbglevel, "Pool Occupancy ids=%d MediaIds=%s\n", ids.count, ids.list);
+
+      /*
+       * Now loop over MediaIds getting more JobIds to migrate until
+       * we reduce the pool occupancy below the low water mark.
+       */
+      p = ids.list;
+      for (int i=0; i < (int)ids.count; i++) {
+         stat = get_next_dbid_from_list(&p, &MediaId);
+         Dmsg2(dbglevel, "get_next_dbid stat=%d MediaId=%u\n", stat, MediaId);
+         if (stat < 0) {
+            Jmsg(jcr, M_FATAL, 0, _("Invalid MediaId found.\n"));
+            goto bail_out;
+         } else if (stat == 0) {
+            break;
+         }
+         mid.count = 1;
+         Mmsg(mid.list, "%s", edit_int64(MediaId, ed1));
+         ok = find_jobids_from_mediaid_list(jcr, &mid, "Volumes");
+         if (!ok) {
+            continue;
+         }
+         if (i != 0) {
+            pm_strcat(jids.list, ",");
+         }
+         pm_strcat(jids.list, mid.list);
+         jids.count += mid.count;
+
+         /* Now get the count of bytes added */
+         ctx.count = 0;
+         /* Find count of bytes from Jobs */
+         Mmsg(query, sql_job_bytes, mid.list);
+         if (!db_sql_query(jcr->db, query.c_str(), db_int64_handler, (void *)&ctx)) {
+            Jmsg(jcr, M_FATAL, 0, _("SQL failed. ERR=%s\n"), db_strerror(jcr->db));
+            goto bail_out;
+         }
+         pool_bytes -= ctx.value;
+         Dmsg1(dbglevel, "Job bytes=%d\n", (int)ctx.value);
+         Dmsg2(dbglevel, "lowbytes=%d pool=%d\n", (int)jcr->pool->MigrationLowBytes,
+            (int)pool_bytes);
+         if (pool_bytes <= (int64_t)jcr->pool->MigrationLowBytes) {
+            Dmsg0(dbglevel, "We should be done.\n");
+            break;
+         }
+
+      }
+      Dmsg2(dbglevel, "Pool Occupancy ids=%d JobIds=%s\n", jids.count, jids.list);
+      break;
+
   case MT_POOL_TIME:
-      Dmsg0(dbglevel, "Pool time not implemented\n");
+      ttime = time(NULL) - (time_t)jcr->pool->MigrationTime;
+      (void)localtime_r(&ttime, &tm);
+      strftime(dt, sizeof(dt), "%Y-%m-%d %H:%M:%S", &tm);
+
+      ids.count = 0;
+      Mmsg(query, sql_pool_time, jcr->pool->hdr.name, dt);
+//    Dmsg1(000, "query=%s\n", query.c_str());
+      if (!db_sql_query(jcr->db, query.c_str(), dbid_handler, (void *)&ids)) {
+         Jmsg(jcr, M_FATAL, 0, _("SQL failed. ERR=%s\n"), db_strerror(jcr->db));
+         goto bail_out;
+      }
+      if (ids.count == 0) {
+         Jmsg(jcr, M_INFO, 0, _("No Volumes found to migrate.\n"));
+         goto ok_out;
+      }
+      Dmsg2(dbglevel, "PoolTime ids=%d JobIds=%s\n", ids.count, ids.list);
      break;
+
   default:
      Jmsg(jcr, M_FATAL, 0, _("Unknown Migration Selection Type.\n"));
      goto bail_out;
@@ -572,10 +692,14 @@ static bool get_job_to_migrate(JCR *jcr)
 
 ok_out:
    free_pool_memory(ids.list);
+   free_pool_memory(mid.list);
+   free_pool_memory(jids.list);
    return true;
 
 bail_out:
    free_pool_memory(ids.list);
+   free_pool_memory(mid.list);
+   free_pool_memory(jids.list);
    return false;
 }
@@ -920,3 +1044,37 @@ void migration_cleanup(JCR *jcr, int TermCode)
    }
    Dmsg0(100, "Leave migrate_cleanup()\n");
 }
+
+/*
+ * Return next DBId from comma separated list
+ *
+ * Returns:
+ *   1 if next DBId returned
+ *   0 if no more DBIds are in list
+ *  -1 if there is an error
+ */
+static int get_next_dbid_from_list(char **p, DBId_t *DBId)
+{
+   char id[30];
+   char *q = *p;
+
+   id[0] = 0;
+   for (int i=0; i<(int)sizeof(id); i++) {
+      if (*q == 0) {
+         break;
+      } else if (*q == ',') {
+         q++;
+         break;
+      }
+      id[i] = *q++;
+      id[i+1] = 0;
+   }
+   if (id[0] == 0) {
+      return 0;
+   } else if (!is_a_number(id)) {
+      return -1;                    /* error */
+   }
+   *p = q;
+   *DBId = str_to_int64(id);
+   return 1;
+}
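The MT_POOL_OCCUPANCY branch added above implements a high/low water-mark
scheme: nothing is selected until the pool's byte count exceeds
MigrationHighBytes, and JobIds are then accumulated volume by volume (oldest
LastWritten first) until the projected occupancy drops to MigrationLowBytes or
below.  The condensed sketch below shows only that control flow; the types,
the helper name and the per-volume byte counts are hypothetical stand-ins for
this note, not Bacula structures or APIs.

#include <stdint.h>
#include <stddef.h>

/* Hypothetical limits taken from the Pool configuration. */
typedef struct {
   int64_t high_bytes;      /* start migrating above this occupancy */
   int64_t low_bytes;       /* stop once occupancy falls to this */
} pool_limits_t;

/* Return how many of the n oldest volumes must be migrated so that
 * pool_bytes falls to the low water mark; 0 means no migration needed.
 * vol_job_bytes[i] is the total JobBytes stored on volume i. */
static size_t volumes_to_migrate(int64_t pool_bytes,
                                 const int64_t *vol_job_bytes, size_t n,
                                 pool_limits_t lim)
{
   if (pool_bytes < lim.high_bytes) {
      return 0;                            /* below the high water mark */
   }
   size_t used = 0;
   for (size_t i = 0; i < n; i++) {
      pool_bytes -= vol_job_bytes[i];      /* bytes freed by migrating it */
      used++;
      if (pool_bytes <= lim.low_bytes) {
         break;                            /* low water mark reached */
      }
   }
   return used;
}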
diff --git a/bacula/src/dird/ua_prune.c b/bacula/src/dird/ua_prune.c
index 39fc3b3989..dd0b8aa869 100644
--- a/bacula/src/dird/ua_prune.c
+++ b/bacula/src/dird/ua_prune.c
@@ -291,7 +291,7 @@ static bool create_temp_tables(UAContext *ua)
 
 /*
- * Purging Jobs is a bit more complicated than purging Files
+ * Pruning Jobs is a bit more complicated than purging Files
  * because we delete Job records only if there is a more current
  * backup of the FileSet. Otherwise, we keep the Job record.
  * In other words, we never delete the only Job record that
@@ -387,6 +387,9 @@ int prune_jobs(UAContext *ua, CLIENT *client, int JobType)
    case JT_ADMIN:
       Mmsg(query, select_admin_del, ed1, ed1, ed2);
       break;
+   case JT_MIGRATE:
+      Mmsg(query, select_migrate_del, ed1, ed1, ed2);
+      break;
    }
    if (!db_sql_query(ua->db, query, job_delete_handler, (void *)&del)) {
       bsendmsg(ua, "%s", db_strerror(ua->db));
diff --git a/bacula/src/version.h b/bacula/src/version.h
index e929806785..5345295d52 100644
--- a/bacula/src/version.h
+++ b/bacula/src/version.h
@@ -4,8 +4,8 @@
 
 #undef  VERSION
 #define VERSION "1.39.20"
-#define BDATE   "16 August 2006"
-#define LSMDATE "16Aug06"
+#define BDATE   "19 August 2006"
+#define LSMDATE "19Aug06"
 
 #define BYEAR "2006"       /* year for copyright messages in progs */
 
 /* Debug flags */
diff --git a/bacula/src/win32/Makefile b/bacula/src/win32/Makefile
index 9e8f133c45..03745bcc2b 100644
--- a/bacula/src/win32/Makefile
+++ b/bacula/src/win32/Makefile
@@ -18,10 +18,12 @@ clean: $(DIRS)
 	$(ECHO_CMD)-rm -rf release
 
 $(DIRS):
-	@if $(MAKE) -C $@ $(MAKECMDGOALS); then \
+	@if test -f Makefile.inc; then \
+	   if $(MAKE) -C $@ $(MAKECMDGOALS); then \
 		echo -e "\n===== Make of $@ succeeded =====\n\n" ; \
-	else \
+	   else \
 		echo -e "\n!!!!! Make of $@ failed !!!!!\n\n" ; \
+	   fi ; \
 	fi
 
 Makefile.inc: Makefile.inc.in
diff --git a/bacula/technotes-1.39 b/bacula/technotes-1.39
index 117310cf7f..001893a803 100644
--- a/bacula/technotes-1.39
+++ b/bacula/technotes-1.39
@@ -1,6 +1,12 @@
               Technical notes on version 1.39
 
 General:
+19Aug06
+kes  Apply Martin's fix to src/win32/Makefile.
+kes  Apply Martin's fix to configure.in for pthreads on FreeBSD 4.x
+kes  Implement pruning for Migration and migrated jobs.
+kes  Implement PoolOccupancy migration.
+kes  Implement PoolTime migration.
 16Aug06
 kes  If doing a mount, look for a slot, and if specified pass it to
      the SD so that it can load the autochanger.
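As a closing illustration of the new PoolTime selection: the value substituted
for the second %s in sql_pool_time is simply "now minus MigrationTime",
formatted the same way the MT_POOL_TIME branch above does it.  A standalone
sketch follows; the 30-day value is an arbitrary example, not a Bacula default.

#define _POSIX_C_SOURCE 200112L     /* for localtime_r() */
#include <stdio.h>
#include <time.h>

int main(void)
{
   const time_t migration_time = 30 * 24 * 3600;   /* example: 30 days */
   struct tm tm;
   char dt[64];

   time_t ttime = time(NULL) - migration_time;     /* RealEndTime cutoff */
   localtime_r(&ttime, &tm);
   strftime(dt, sizeof(dt), "%Y-%m-%d %H:%M:%S", &tm);

   /* Jobs in the pool whose RealEndTime is older than dt are candidates. */
   printf("migrate jobs with RealEndTime <= %s\n", dt);
   return 0;
}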