From: Kern Sibbald
Date: Wed, 5 Jul 2006 20:23:33 +0000 (+0000)
Subject: Migration fixes
X-Git-Tag: Release-2.0.0~773
X-Git-Url: https://git.sur5r.net/?a=commitdiff_plain;h=695905cab98b74c56460868b2ffa8fdefbe9955b;p=bacula%2Fbacula

Migration fixes

git-svn-id: https://bacula.svn.sourceforge.net/svnroot/bacula/trunk@3124 91ce42f0-d328-0410-95d8-f526ca767f89
---

diff --git a/bacula/kernstodo b/bacula/kernstodo
index 627e25cd94..858368f0d9 100644
--- a/bacula/kernstodo
+++ b/bacula/kernstodo
@@ -51,6 +51,33 @@ For 1.39:
   target slot. The catalog should be updated accordingly.
  .move transfer device=xxx fromslot=yyy toslot=zzz
 
+- Given all the problems with FIFOs, I think the solution is to do something a
+  little different, though I will look at the code and see if there is not some
+  simple solution (i.e. some bug that was introduced).  What might be a better
+  solution would be to use a FIFO as a sort of "key" to tell Bacula to read and
+  write data to a program rather than the FIFO.  For example, suppose you
+  create a FIFO named:
+
+     /home/kern/my-fifo
+
+  Then, instead of backing up and restoring this file with a direct reference
+  as is currently done for FIFOs, I could imagine that during backup Bacula
+  will execute:
+
+     /home/kern/my-fifo.backup
+
+  and read the data that my-fifo.backup writes to stdout.  For restore, Bacula
+  will execute:
+
+     /home/kern/my-fifo.restore
+
+  and send the backed up data to its stdin.  These programs can either be an
+  executable or a shell script, and they need only read/write to stdin/stdout.
+
+  I think this would give a lot of flexibility to the user without making any
+  significant changes to Bacula.
+
+
 ====
 SQL
 # get null file
 select FilenameId from Filename where Name='';
diff --git a/bacula/kes-1.39 b/bacula/kes-1.39
index 8df7d379a6..3934e27f4a 100644
--- a/bacula/kes-1.39
+++ b/bacula/kes-1.39
@@ -2,6 +2,8 @@
                    Kern Sibbald
 
 General:
+05Jul06
+- Migration fixes
 04Jul06
 ======================= Warning ==========================
 All hash codes in the database are now kept in world
diff --git a/bacula/src/cats/sql_create.c b/bacula/src/cats/sql_create.c
index 7b1cc909d7..43d22aabd9 100644
--- a/bacula/src/cats/sql_create.c
+++ b/bacula/src/cats/sql_create.c
@@ -28,6 +28,8 @@
 #include "bacula.h"
 #include "cats.h"
 
+static const int dbglevel = 500;
+
 #if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL
 
 /* -----------------------------------------------------------------------
@@ -673,8 +675,8 @@ int db_create_file_attributes_record(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
 {
    db_lock(mdb);
-   Dmsg1(300, "Fname=%s\n", ar->fname);
-   Dmsg0(500, "put_file_into_catalog\n");
+   Dmsg1(dbglevel, "Fname=%s\n", ar->fname);
+   Dmsg0(dbglevel, "put_file_into_catalog\n");
 
    /*
    * Make sure we have an acceptable attributes record.
   */
@@ -692,21 +694,21 @@ int db_create_file_attributes_record(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
    if (!db_create_filename_record(jcr, mdb, ar)) {
       goto bail_out;
    }
-   Dmsg1(500, "db_create_filename_record: %s\n", mdb->esc_name);
+   Dmsg1(dbglevel, "db_create_filename_record: %s\n", mdb->esc_name);
 
    if (!db_create_path_record(jcr, mdb, ar)) {
       goto bail_out;
    }
-   Dmsg1(500, "db_create_path_record: %s\n", mdb->esc_name);
+   Dmsg1(dbglevel, "db_create_path_record: %s\n", mdb->esc_name);
 
    /* Now create master File record */
    if (!db_create_file_record(jcr, mdb, ar)) {
       goto bail_out;
    }
-   Dmsg0(500, "db_create_file_record OK\n");
+   Dmsg0(dbglevel, "db_create_file_record OK\n");
 
-   Dmsg3(300, "CreateAttributes Path=%s File=%s FilenameId=%d\n", mdb->path, mdb->fname, ar->FilenameId);
+   Dmsg3(dbglevel, "CreateAttributes Path=%s File=%s FilenameId=%d\n", mdb->path, mdb->fname, ar->FilenameId);
 
    db_unlock(mdb);
    return 1;
diff --git a/bacula/src/cats/sql_list.c b/bacula/src/cats/sql_list.c
index cbc9413738..1f6cbb6376 100644
--- a/bacula/src/cats/sql_list.c
+++ b/bacula/src/cats/sql_list.c
@@ -252,10 +252,11 @@ db_list_job_records(JCR *jcr, B_DB *mdb, JOB_DBR *jr, DB_LIST_HANDLER *sendit,
    if (jr->JobId == 0 && jr->Job[0] == 0) {
       Mmsg(mdb->cmd,
          "SELECT JobId,Job,Job.Name,PurgedFiles,Type,Level,"
-         "Job.ClientId,Client.Name,JobStatus,SchedTime,"
-         "StartTime,EndTime,JobTDate,"
+         "Job.ClientId,Client.Name as ClientName,JobStatus,SchedTime,"
+         "StartTime,EndTime,RealEndTime,JobTDate,"
          "VolSessionId,VolSessionTime,JobFiles,JobErrors,"
-         "JobMissingFiles,Job.PoolId,Pool.Name,Job.FileSetId,FileSet.FileSet "
+         "JobMissingFiles,Job.PoolId,Pool.Name as PooLname,PriorJobId,"
+         "Job.FileSetId,FileSet.FileSet "
          "FROM Job,Client,Pool,FileSet WHERE "
         "Client.ClientId=Job.ClientId AND Pool.PoolId=Job.PoolId "
         "AND FileSet.FileSetId=Job.FileSetId ORDER BY StartTime%s", limit);
@@ -263,9 +264,10 @@ db_list_job_records(JCR *jcr, B_DB *mdb, JOB_DBR *jr, DB_LIST_HANDLER *sendit,
       Mmsg(mdb->cmd,
          "SELECT JobId,Job,Job.Name,PurgedFiles,Type,Level,"
          "Job.ClientId,Client.Name,JobStatus,SchedTime,"
-         "StartTime,EndTime,JobTDate,"
+         "StartTime,EndTime,RealEndTime,JobTDate,"
          "VolSessionId,VolSessionTime,JobFiles,JobErrors,"
-         "JobMissingFiles,Job.PoolId,Pool.Name,Job.FileSetId,FileSet.FileSet "
+         "JobMissingFiles,Job.PoolId,Pool.Name as PooLname,PriorJobId,"
+         "Job.FileSetId,FileSet.FileSet "
          "FROM Job,Client,Pool,FileSet WHERE Job.JobId=%s AND "
         "Client.ClientId=Job.ClientId AND Pool.PoolId=Job.PoolId "
         "AND FileSet.FileSetId=Job.FileSetId",
diff --git a/bacula/src/dird/migrate.c b/bacula/src/dird/migrate.c
index f37138bf72..1172638831 100644
--- a/bacula/src/dird/migrate.c
+++ b/bacula/src/dird/migrate.c
@@ -98,7 +98,7 @@ bool do_migration(JCR *jcr)
    char ed1[100];
    BSOCK *sd;
    JOB *job, *prev_job;
-   JCR *prev_jcr;
+   JCR *prev_jcr;                     /* newly migrated job */
 
    if (jcr->previous_jr.JobId == 0) {
       set_jcr_job_status(jcr, JS_Terminated);
@@ -106,7 +106,7 @@ bool do_migration(JCR *jcr)
       return true;                    /* no work */
    }
 
-   Dmsg4(000, "Previous:: Name=%s JobId=%d Type=%c Level=%c\n",
+   Dmsg4(000, "Previous: Name=%s JobId=%d Type=%c Level=%c\n",
       jcr->previous_jr.Name, jcr->previous_jr.JobId,
       jcr->previous_jr.JobType, jcr->previous_jr.JobLevel);
 
@@ -137,9 +137,17 @@ bool do_migration(JCR *jcr)
    if (!setup_job(prev_jcr)) {
       return false;
    }
-   /* Set output PoolId and FileSetId. */
+
+   /* Now reset the job record from the previous job */
+   memcpy(&prev_jcr->jr, &jcr->previous_jr, sizeof(prev_jcr->jr));
+   /* Update the jr to reflect the new values of PoolId, FileSetId, and JobId. */
    prev_jcr->jr.PoolId = jcr->jr.PoolId;
    prev_jcr->jr.FileSetId = jcr->jr.FileSetId;
+   prev_jcr->jr.JobId = prev_jcr->JobId;
+
+   Dmsg4(000, "Prev_jcr: Name=%s JobId=%d Type=%c Level=%c\n",
+      prev_jcr->jr.Name, prev_jcr->jr.JobId,
+      prev_jcr->jr.JobType, prev_jcr->jr.JobLevel);
 
    /*
    * Get the PoolId used with the original job. Then
@@ -192,11 +200,18 @@ bool do_migration(JCR *jcr)
    set_jcr_job_status(jcr, JS_Running);
    set_jcr_job_status(prev_jcr, JS_Running);
    Dmsg2(000, "JobId=%d JobLevel=%c\n", jcr->jr.JobId, jcr->jr.JobLevel);
+
+   /* Update job start record for this migration job */
    if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) {
       Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db));
       return false;
    }
 
+   Dmsg4(000, "Prev_jcr: Name=%s JobId=%d Type=%c Level=%c\n",
+      prev_jcr->jr.Name, prev_jcr->jr.JobId,
+      prev_jcr->jr.JobType, prev_jcr->jr.JobLevel);
+
+   /* Update job start record for migrated job */
    if (!db_update_job_start_record(prev_jcr, prev_jcr->db, &prev_jcr->jr)) {
       Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(prev_jcr->db));
       return false;
@@ -654,7 +669,8 @@ void migration_cleanup(JCR *jcr, int TermCode)
 
       update_job_end_record(prev_jcr);
 
-
+
+      /* Update final items to set them to the previous job's values */
       Mmsg(query, "UPDATE Job SET StartTime='%s',EndTime='%s',"
          "JobTDate=%s WHERE JobId=%s",
          jcr->previous_jr.cStartTime, jcr->previous_jr.cEndTime,
@@ -662,6 +678,11 @@ void migration_cleanup(JCR *jcr, int TermCode)
          edit_uint64(prev_jcr->jr.JobId, ec2));
       db_sql_query(prev_jcr->db, query.c_str(), NULL, NULL);
 
+      /* Now mark the previous job as migrated */
+      Mmsg(query, "UPDATE Job SET Type='%c' WHERE JobId=%s",
+           (char)JT_MIGRATED_JOB, edit_uint64(jcr->previous_jr.JobId, ec1));
+      db_sql_query(prev_jcr->db, query.c_str(), NULL, NULL);
+
       if (!db_get_job_record(jcr, jcr->db, &jcr->jr)) {
          Jmsg(jcr, M_WARNING, 0, _("Error getting job record for stats: %s"),
             db_strerror(jcr->db));
diff --git a/bacula/src/dird/sql_cmds.c b/bacula/src/dird/sql_cmds.c
index de34bead73..e0f97192e5 100644
--- a/bacula/src/dird/sql_cmds.c
+++ b/bacula/src/dird/sql_cmds.c
@@ -35,7 +35,7 @@ const char *client_backups =
    " WHERE Client.Name='%s'"
    " AND FileSet='%s'"
    " AND Client.ClientId=Job.ClientId"
-   " AND JobStatus='T'"
+   " AND JobStatus='T' AND Type='B'"
    " AND JobMedia.JobId=Job.JobId AND JobMedia.MediaId=Media.MediaId"
    " AND Job.FileSetId=FileSet.FileSetId"
    " ORDER BY Job.StartTime";
@@ -114,9 +114,7 @@ const char *select_backup_del =
    "(DelCandidates.JobStatus!='T'))) OR "
    "(Job.JobTDate>%s "
    "AND Job.ClientId=%s "
-   "AND Job.Type='B' "
-   "AND Job.Level='F' "
-   "AND Job.JobStatus='T' "
+   "AND Job.Level='F' AND Job.JobStatus='T' AND Job.Type='B' "
    "AND Job.FileSetId=DelCandidates.FileSetId)";
 
 /* Select Jobs from the DelCandidates table that have a
@@ -129,9 +127,7 @@ const char *select_verify_del =
    "WHERE (Job.JobTdate<%s AND DelCandidates.JobStatus!='T') OR "
    "(Job.JobTDate>%s "
    "AND Job.ClientId=%s "
-   "AND Job.Type='V' "
-   "AND Job.Level='V' "
-   "AND Job.JobStatus='T' "
+   "AND Job.Type='V' AND Job.Level='V' AND Job.JobStatus='T' "
    "AND Job.FileSetId=DelCandidates.FileSetId)";
 
 
@@ -251,7 +247,7 @@ const char *uar_last_full =
    "FROM Client,Job,JobMedia,Media,FileSet WHERE Client.ClientId=%s "
    "AND Job.ClientId=%s "
    "AND Job.StartTime<'%s' "
-   "AND Level='F' AND JobStatus='T' "
+   "AND Level='F' AND JobStatus='T' AND Type='B' "
   "AND JobMedia.JobId=Job.JobId "
   "AND JobMedia.MediaId=Media.MediaId "
   "AND Job.FileSetId=FileSet.FileSetId "
@@ -264,7 +260,7 @@ const char *uar_full =
    "Job.ClientId,Job.Level,Job.JobFiles,Job.JobBytes,"
    "StartTime,VolumeName,JobMedia.StartFile,VolSessionId,VolSessionTime "
    "FROM temp1,Job,JobMedia,Media WHERE temp1.JobId=Job.JobId "
-   "AND Level='F' AND JobStatus='T' "
+   "AND Level='F' AND JobStatus='T' AND Type='B' "
    "AND JobMedia.JobId=Job.JobId "
    "AND JobMedia.MediaId=Media.MediaId";
 
@@ -278,7 +274,7 @@ const char *uar_dif =
    "AND Job.ClientId=%s "
    "AND JobMedia.JobId=Job.JobId "
    "AND JobMedia.MediaId=Media.MediaId "
-   "AND Job.Level='D' AND JobStatus='T' "
+   "AND Job.Level='D' AND JobStatus='T' AND Type='B' "
    "AND Job.FileSetId=FileSet.FileSetId "
    "AND FileSet.FileSet='%s' "
    "%s"
@@ -294,7 +290,7 @@ const char *uar_inc =
    "AND Job.ClientId=%s "
    "AND JobMedia.JobId=Job.JobId "
    "AND JobMedia.MediaId=Media.MediaId "
-   "AND Job.Level='I' AND JobStatus='T' "
+   "AND Job.Level='I' AND JobStatus='T' AND Type='B' "
    "AND Job.FileSetId=FileSet.FileSetId "
    "AND FileSet.FileSet='%s' "
    "%s";
diff --git a/bacula/src/dird/ua_run.c b/bacula/src/dird/ua_run.c
index 5aa292d101..334fe079c1 100644
--- a/bacula/src/dird/ua_run.c
+++ b/bacula/src/dird/ua_run.c
@@ -666,11 +666,9 @@ try_again:
       break;
    case JT_MIGRATE:
       jcr->JobLevel = L_FULL;      /* default level */
-      bsendmsg(ua, _("Run Restore job\n"
+      bsendmsg(ua, _("Run Migration job\n"
                      "JobName: %s\n"
                      "Bootstrap: %s\n"
-                     "Where: %s\n"
-                     "Replace: %s\n"
                      "FileSet: %s\n"
                      "Client: %s\n"
                      "Storage: %s\n"
@@ -680,8 +678,6 @@ try_again:
                      "Priority: %d\n"),
                  job->hdr.name,
                  NPRT(jcr->RestoreBootstrap),
-                 jcr->where?jcr->where:NPRT(job->RestoreWhere),
-                 replace,
                  jcr->fileset->hdr.name,
                  jcr->client->hdr.name,
                  jcr->store->hdr.name,
diff --git a/bacula/src/jcr.h b/bacula/src/jcr.h
index f5f45ad74e..5e2a69c828 100644
--- a/bacula/src/jcr.h
+++ b/bacula/src/jcr.h
@@ -43,6 +43,7 @@
 
 /* Job Types. These are stored in the DB */
 #define JT_BACKUP         'B'  /* Backup Job */
+#define JT_MIGRATED_JOB   'M'  /* A previous backup job that was migrated */
 #define JT_VERIFY         'V'  /* Verify Job */
 #define JT_RESTORE        'R'  /* Restore Job */
 #define JT_CONSOLE        'c'  /* console program */
@@ -50,7 +51,7 @@
 #define JT_ADMIN          'D'  /* admin job */
 #define JT_ARCHIVE        'A'  /* Archive Job */
 #define JT_COPY           'C'  /* Copy Job */
-#define JT_MIGRATE        'M'  /* Migration Job */
+#define JT_MIGRATE        'g'  /* Migration Job */
 #define JT_SCAN           'S'  /* Scan Job */
 
 /* Job Status. Some of these are stored in the DB */
diff --git a/bacula/src/stored/mac.c b/bacula/src/stored/mac.c
index 9199e627e3..eda7a3e925 100644
--- a/bacula/src/stored/mac.c
+++ b/bacula/src/stored/mac.c
@@ -75,7 +75,7 @@ bool do_mac(JCR *jcr)
       goto bail_out;
    }
 
-   Dmsg3(200, "Found %d volumes names for %s. First=%s\n", jcr->NumVolumes,
+   Dmsg3(000, "Found %d volumes names for %s. First=%s\n", jcr->NumVolumes,
First=%s\n", jcr->NumVolumes, jcr->VolList->VolumeName, Type); /* Ready devices for reading and writing */ @@ -161,10 +161,12 @@ static bool record_cb(DCR *dcr, DEV_RECORD *rec) char buf1[100], buf2[100]; int32_t stream; + /* We want to write SOS_LABEL and EOS_LABEL */ switch (rec->FileIndex) { case PRE_LABEL: case VOL_LABEL: case EOT_LABEL: + case EOM_LABEL: return true; /* don't write vol labels */ } rec->VolSessionId = jcr->VolSessionId; @@ -189,6 +191,8 @@ static bool record_cb(DCR *dcr, DEV_RECORD *rec) jcr->JobBytes += rec->data_len; /* increment bytes this job */ if (rec->FileIndex > 0) { jcr->JobFiles = rec->FileIndex; + } else { + return ok; /* don't send LABELs to Dir */ } Dmsg4(850, "write_record FI=%s SessId=%d Strm=%s len=%d\n", FI_to_ascii(buf1, rec->FileIndex), rec->VolSessionId, diff --git a/bacula/src/version.h b/bacula/src/version.h index 73cf373f5c..63b6e1f781 100644 --- a/bacula/src/version.h +++ b/bacula/src/version.h @@ -4,8 +4,8 @@ #undef VERSION #define VERSION "1.39.15" -#define BDATE "4 July 2006" -#define LSMDATE "04Jul06" +#define BDATE "5 July 2006" +#define LSMDATE "05Jul06" /* Debug flags */ #undef DEBUG