From: Kern Sibbald Date: Thu, 28 Aug 2008 20:07:54 +0000 (+0000) Subject: kes Fix problem of Virtual backup not writing a sequential FileIndex. X-Git-Tag: Release-3.0.0~1052 X-Git-Url: https://git.sur5r.net/?a=commitdiff_plain;h=24881d910937c448c4bf00d68643ffb68ecf643a;p=bacula%2Fbacula kes Fix problem of Virtual backup not writing a sequential FileIndex. kes Reset Virtual backup time/date to the value from the last backup. kes Ensure that storage name is passed to SD on read. git-svn-id: https://bacula.svn.sourceforge.net/svnroot/bacula/trunk@7521 91ce42f0-d328-0410-95d8-f526ca767f89 --- diff --git a/bacula/src/dird/msgchan.c b/bacula/src/dird/msgchan.c index 5ab6a08125..35da13ba74 100644 --- a/bacula/src/dird/msgchan.c +++ b/bacula/src/dird/msgchan.c @@ -218,8 +218,9 @@ bool start_storage_daemon_job(JCR *jcr, alist *rstore, alist *wstore) */ /* Do read side of storage daemon */ if (ok && rstore) { - /* For the moment, only migrate and copy have rpool */ - if (jcr->get_JobType() == JT_MIGRATE || jcr->get_JobType() == JT_COPY) { + /* For the moment, only migrate, copy and vbackup have rpool */ + if (jcr->get_JobType() == JT_MIGRATE || jcr->get_JobType() == JT_COPY || + (jcr->get_JobType() == JT_BACKUP && jcr->get_JobLevel() == L_VIRTUAL_FULL)) { pm_strcpy(pool_type, jcr->rpool->pool_type); pm_strcpy(pool_name, jcr->rpool->name()); } else { @@ -230,6 +231,7 @@ bool start_storage_daemon_job(JCR *jcr, alist *rstore, alist *wstore) bash_spaces(pool_name); foreach_alist(storage, rstore) { Dmsg1(100, "Rstore=%s\n", storage->name()); + pm_strcpy(store_name, storage->name()); bash_spaces(store_name); pm_strcpy(media_type, storage->media_type); bash_spaces(media_type); diff --git a/bacula/src/dird/vbackup.c b/bacula/src/dird/vbackup.c index 2a41178c60..c8c424fa62 100644 --- a/bacula/src/dird/vbackup.c +++ b/bacula/src/dird/vbackup.c @@ -61,6 +61,8 @@ void vbackup_cleanup(JCR *jcr, int TermCode); */ bool do_vbackup_init(JCR *jcr) { + char *p; + if 
(!get_or_create_fileset_record(jcr)) { Dmsg1(dbglevel, "JobId=%d no FileSet\n", (int)jcr->JobId); return false; } @@ -72,6 +74,12 @@ bool do_vbackup_init(JCR *jcr) return false; } + jcr->jr.PoolId = get_or_create_pool_record(jcr, jcr->pool->name()); + if (jcr->jr.PoolId == 0) { + Dmsg1(dbglevel, "JobId=%d no PoolId\n", (int)jcr->JobId); + Jmsg(jcr, M_FATAL, 0, _("Could not get or create a Pool record.\n")); + return false; + } /* * Note, at this point, pool is the pool for this job. We * transfer it to rpool (read pool), and a bit later, @@ -81,6 +89,8 @@ bool do_vbackup_init(JCR *jcr) jcr->rpool = jcr->pool; /* save read pool */ pm_strcpy(jcr->rpool_source, jcr->pool_source); + /* If pool storage specified, use it for restore */ + copy_rstorage(jcr, jcr->pool->storage, _("Pool resource")); Dmsg2(dbglevel, "Read pool=%s (From %s)\n", jcr->rpool->name(), jcr->rpool_source); @@ -102,6 +112,26 @@ bool do_vbackup_init(JCR *jcr) return false; } + /* + * Now we find the last job that ran and store its info in + the previous_jr record. We will set our times to the + values from that job so that anything changed after that + time will be picked up on the next backup. 
+ */ + p = strrchr(jobids, ','); /* find last jobid */ + if (p != NULL) { + p++; + } else { + p = jobids; + } + memset(&jcr->previous_jr, 0, sizeof(jcr->previous_jr)); + jcr->previous_jr.JobId = str_to_int64(p); + if (!db_get_job_record(jcr, jcr->db, &jcr->previous_jr)) { + Jmsg(jcr, M_FATAL, 0, _("Error getting Job record for previous Job: ERR=%s"), + db_strerror(jcr->db)); + return false; + } + if (!create_bootstrap_file(jcr, jobids)) { Jmsg(jcr, M_FATAL, 0, _("Could not get or create the FileSet record.\n")); free_pool_memory(jobids); @@ -120,7 +150,6 @@ bool do_vbackup_init(JCR *jcr) return false; } } - if (!set_migration_wstorage(jcr, jcr->pool)) { return false; } @@ -128,13 +157,14 @@ bool do_vbackup_init(JCR *jcr) Dmsg2(dbglevel, "Write pool=%s read rpool=%s\n", jcr->pool->name(), jcr->rpool->name()); - create_clones(jcr); +// create_clones(jcr); return true; } /* - * Do a backup of the specified FileSet + * Do a virtual backup, which consolidates all previous backups into + * a sort of synthetic Full. 
* * Returns: false on failure * true on success @@ -144,8 +174,19 @@ bool do_vbackup(JCR *jcr) char ed1[100]; BSOCK *sd; + Dmsg2(100, "rstorage=%p wstorage=%p\n", jcr->rstorage, jcr->wstorage); + Dmsg2(100, "Read store=%s, write store=%s\n", + ((STORE *)jcr->rstorage->first())->name(), + ((STORE *)jcr->wstorage->first())->name()); + /* ***FIXME*** we really should simply verify that the pools are different */ + if (((STORE *)jcr->rstorage->first())->name() == ((STORE *)jcr->wstorage->first())->name()) { + Jmsg(jcr, M_FATAL, 0, _("Read storage \"%s\" same as write storage.\n"), + ((STORE *)jcr->rstorage->first())->name()); + return false; + } + /* Print Job Start message */ - Jmsg(jcr, M_INFO, 0, _("Start Vbackup JobId %s, Job=%s\n"), + Jmsg(jcr, M_INFO, 0, _("Start Virtual Backup JobId %s, Job=%s\n"), edit_uint64(jcr->JobId, ed1), jcr->Job); /* @@ -163,18 +204,10 @@ bool do_vbackup(JCR *jcr) return false; } sd = jcr->store_bsock; + /* * Now start a job with the Storage daemon */ - Dmsg2(100, "rstorage=%p wstorage=%p\n", jcr->rstorage, jcr->wstorage); - Dmsg2(100, "Read store=%s, write store=%s\n", - ((STORE *)jcr->rstorage->first())->name(), - ((STORE *)jcr->wstorage->first())->name()); - if (((STORE *)jcr->rstorage->first())->name() == ((STORE *)jcr->wstorage->first())->name()) { - Jmsg(jcr, M_FATAL, 0, _("Read storage \"%s\" same as write storage.\n"), - ((STORE *)jcr->rstorage->first())->name()); - return false; - } if (!start_storage_daemon_job(jcr, jcr->rstorage, jcr->wstorage)) { return false; } @@ -200,7 +233,7 @@ bool do_vbackup(JCR *jcr) jcr->jr.JobTDate = jcr->start_time; set_jcr_job_status(jcr, JS_Running); - /* Update job start record for this migration control job */ + /* Update job start record */ if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) { Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db)); return false; @@ -253,6 +286,7 @@ void vbackup_cleanup(JCR *jcr, int TermCode) CLIENT_DBR cr; double kbps, compression; utime_t RunTime; + POOL_MEM 
query(PM_MESSAGE); Dmsg2(100, "Enter backup_cleanup %d %c\n", TermCode, TermCode); memset(&mr, 0, sizeof(mr)); @@ -264,16 +298,13 @@ void vbackup_cleanup(JCR *jcr, int TermCode) jcr->JobBytes = jcr->SDJobBytes; update_job_end(jcr, TermCode); -#ifdef xxx - /* ***FIXME*** set to time of last incremental */ /* Update final items to set them to the previous job's values */ Mmsg(query, "UPDATE Job SET StartTime='%s',EndTime='%s'," "JobTDate=%s WHERE JobId=%s", jcr->previous_jr.cStartTime, jcr->previous_jr.cEndTime, edit_uint64(jcr->previous_jr.JobTDate, ec1), - edit_uint64(mig_jcr->jr.JobId, ec2)); - db_sql_query(mig_jcr->db, query.c_str(), NULL, NULL); -#endif + edit_uint64(jcr->JobId, ec3)); + db_sql_query(jcr->db, query.c_str(), NULL, NULL); if (!db_get_job_record(jcr, jcr->db, &jcr->jr)) { Jmsg(jcr, M_WARNING, 0, _("Error getting Job record for Job report: ERR=%s"), diff --git a/bacula/src/stored/mac.c b/bacula/src/stored/mac.c index f2435e7838..d988114b3b 100644 --- a/bacula/src/stored/mac.c +++ b/bacula/src/stored/mac.c @@ -122,6 +122,7 @@ bail_out: ok_out: if (jcr->dcr) { dev = jcr->dcr->dev; + Dmsg1(100, "ok=%d\n", ok); if (ok || dev->can_write()) { /* Flush out final partial block of this session */ if (!write_block_to_device(jcr->dcr)) { @@ -173,7 +174,7 @@ ok_out: generate_daemon_event(jcr, "JobEnd"); dir->fsend(Job_end, jcr->Job, jcr->JobStatus, jcr->JobFiles, edit_uint64(jcr->JobBytes, ec1)); - Dmsg4(200, Job_end, jcr->Job, jcr->JobStatus, jcr->JobFiles, ec1); + Dmsg4(100, Job_end, jcr->Job, jcr->JobStatus, jcr->JobFiles, ec1); dir->signal(BNET_EOD); /* send EOD to Director daemon */ @@ -191,6 +192,9 @@ static bool record_cb(DCR *dcr, DEV_RECORD *rec) DEVICE *dev = jcr->dcr->dev; char buf1[100], buf2[100]; int32_t stream; + uint32_t last_VolSessionId = 0; + uint32_t last_VolSessionTime = 0; + int32_t last_FileIndex = 0; /* If label and not for us, discard it */ if (rec->FileIndex < 0 && rec->match_stat <= 0) { @@ -204,6 +208,23 @@ static bool record_cb(DCR 
*dcr, DEV_RECORD *rec) case EOM_LABEL: return true; /* don't write vol labels */ } + /* + * For normal migration jobs, FileIndex values are sequential because + * we are dealing with one job. However, for Vbackup (consolidation), + * we will be getting records from multiple jobs and writing them back + * out, so we need to ensure that the output FileIndex is sequential. + * We do so by detecting a FileIndex change and incrementing the + * JobFiles, which we then use as the output FileIndex. + */ + if (rec->VolSessionId != last_VolSessionId || + rec->VolSessionTime != last_VolSessionTime || + (rec->FileIndex > 0 && rec->FileIndex != last_FileIndex)) { + jcr->JobFiles++; + last_VolSessionId = rec->VolSessionId; + last_VolSessionTime = rec->VolSessionTime; + last_FileIndex = rec->FileIndex; + rec->FileIndex = jcr->JobFiles; /* set sequential output FileIndex */ + } /* * Modify record SessionId and SessionTime to correspond to * output. @@ -228,9 +249,7 @@ static bool record_cb(DCR *dcr, DEV_RECORD *rec) Dmsg2(200, "===== Wrote block new pos %u:%u\n", dev->file, dev->block_num); } jcr->JobBytes += rec->data_len; /* increment bytes this job */ - if (rec->FileIndex > 0) { - jcr->JobFiles = rec->FileIndex; - } else { + if (rec->FileIndex <= 0) { return true; /* don't send LABELs to Dir */ } Dmsg5(500, "wrote_record JobId=%d FI=%s SessId=%d Strm=%s len=%d\n", diff --git a/bacula/technotes-2.5 b/bacula/technotes-2.5 index 3cd1b27dd5..e06dec0029 100644 --- a/bacula/technotes-2.5 +++ b/bacula/technotes-2.5 @@ -33,6 +33,9 @@ separator in console (!$%&'()*+,-/:;<>?[]^`{|}~) General: 28Aug08 +kes Fix problem of Virtual backup not writing a sequential FileIndex. +kes Reset Virtual backup time/date to the value from the last backup. +kes Ensure that storage name is passed to SD on read. kes Correct a problem with Level and Virtual backup, rework how bsrs are printed in debug to use standard routine. kes Apply patch from Chris in bug #1133 that provides alternate db port