/*
Bacula® - The Network Backup Solution
- Copyright (C) 2004-2008 Free Software Foundation Europe e.V.
+ Copyright (C) 2004-2012 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
This program is Free Software; you can redistribute it and/or
- modify it under the terms of version two of the GNU General Public
+ modify it under the terms of version three of the GNU Affero General Public
License as published by the Free Software Foundation and included
in the file LICENSE.
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
- You should have received a copy of the GNU General Public License
+ You should have received a copy of the GNU Affero General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
* to do the backup.
* When the Storage daemon finishes the job, update the DB.
*
- * Version $Id$
*/
#include "bacula.h"
static const int dbglevel = 10;
-static char OKbootstrap[] = "3000 OK bootstrap\n";
-static int get_job_to_migrate(JCR *jcr);
+static int getJob_to_migrate(JCR *jcr);
struct idpkt;
static bool regex_find_jobids(JCR *jcr, idpkt *ids, const char *query1,
const char *query2, const char *type);
static bool find_jobids_of_pool_uncopied_jobs(JCR *jcr, idpkt *ids);
static void start_migration_job(JCR *jcr);
static int get_next_dbid_from_list(char **p, DBId_t *DBId);
+static bool set_migration_next_pool(JCR *jcr, POOL **pool);
/*
* Called here before the job is run to do the job
*/
bool do_migration_init(JCR *jcr)
{
- POOL_DBR pr;
- POOL *pool;
- char ed1[100];
+ POOL *pool = NULL;
JOB *job, *prev_job;
JCR *mig_jcr; /* newly migrated job */
int count;
Dmsg2(dbglevel, "Read pool=%s (From %s)\n", jcr->rpool->name(), jcr->rpool_source);
+ if (!get_or_create_fileset_record(jcr)) {
+ Dmsg1(dbglevel, "JobId=%d no FileSet\n", (int)jcr->JobId);
+ Jmsg(jcr, M_FATAL, 0, _("Could not get or create the FileSet record.\n"));
+ return false;
+ }
+
/* If we find a job or jobs to migrate it is previous_jr.JobId */
- count = get_job_to_migrate(jcr);
+ count = getJob_to_migrate(jcr);
if (count < 0) {
return false;
}
if (count == 0) {
- return true;
+ set_migration_next_pool(jcr, &pool);
+ return true; /* no work */
}
- Dmsg1(dbglevel, "Back from get_job_to_migrate JobId=%d\n", (int)jcr->JobId);
+ Dmsg1(dbglevel, "Back from getJob_to_migrate JobId=%d\n", (int)jcr->JobId);
if (jcr->previous_jr.JobId == 0) {
Dmsg1(dbglevel, "JobId=%d no previous JobId\n", (int)jcr->JobId);
Jmsg(jcr, M_INFO, 0, _("No previous Job found to %s.\n"), jcr->get_ActionName(0));
+ set_migration_next_pool(jcr, &pool);
return true; /* no work */
}
- if (!get_or_create_fileset_record(jcr)) {
- Dmsg1(dbglevel, "JobId=%d no FileSet\n", (int)jcr->JobId);
- Jmsg(jcr, M_FATAL, 0, _("Could not get or create the FileSet record.\n"));
+ if (create_restore_bootstrap_file(jcr) < 0) {
+ Jmsg(jcr, M_FATAL, 0, _("Create bootstrap file failed.\n"));
return false;
}
- create_restore_bootstrap_file(jcr);
-
if (jcr->previous_jr.JobId == 0 || jcr->ExpectedFiles == 0) {
- set_jcr_job_status(jcr, JS_Terminated);
+ jcr->setJobStatus(JS_Terminated);
Dmsg1(dbglevel, "JobId=%d expected files == 0\n", (int)jcr->JobId);
if (jcr->previous_jr.JobId == 0) {
Jmsg(jcr, M_INFO, 0, _("No previous Job found to %s.\n"), jcr->get_ActionName(0));
} else {
Jmsg(jcr, M_INFO, 0, _("Previous Job has no data to %s.\n"), jcr->get_ActionName(0));
}
+ set_migration_next_pool(jcr, &pool);
return true; /* no work */
}
jcr->spool_data = job->spool_data; /* turn on spooling if requested in job */
- /* Create a migation jcr */
+ /* Create a migration jcr */
mig_jcr = jcr->mig_jcr = new_jcr(sizeof(JCR), dird_free_jcr);
memcpy(&mig_jcr->previous_jr, &jcr->previous_jr, sizeof(mig_jcr->previous_jr));
mig_jcr->jr.PoolId = jcr->jr.PoolId;
mig_jcr->jr.JobId = mig_jcr->JobId;
+ /* Don't let the watchdog check the Max*Time values on this Job */
+ mig_jcr->no_maxtime = true;
+
+ /*
+ * Don't check for duplicates on migration and copy jobs
+ */
+ mig_jcr->job->IgnoreDuplicateJobChecking = true;
+
Dmsg4(dbglevel, "mig_jcr: Name=%s JobId=%d Type=%c Level=%c\n",
mig_jcr->jr.Name, (int)mig_jcr->jr.JobId,
mig_jcr->jr.JobType, mig_jcr->jr.JobLevel);
+ if (set_migration_next_pool(jcr, &pool)) {
+ /* If pool storage specified, use it for restore */
+ copy_rstorage(mig_jcr, pool->storage, _("Pool resource"));
+ copy_rstorage(jcr, pool->storage, _("Pool resource"));
+
+ mig_jcr->pool = jcr->pool;
+ mig_jcr->jr.PoolId = jcr->jr.PoolId;
+ }
+
+ return true;
+}
+
+
+/*
+ * set_migration_next_pool() is called by do_migration_init()
+ * at different stages.
+ * The idea here is to make a common subroutine for the
+ * NextPool search code and to permit do_migration_init()
+ * to return with NextPool set in the jcr struct.
+ */
+static bool set_migration_next_pool(JCR *jcr, POOL **retpool)
+{
+ POOL_DBR pr;
+ POOL *pool;
+ char ed1[100];
+
/*
* Get the PoolId used with the original job. Then
* find the pool name from the database record.
*/
memset(&pr, 0, sizeof(pr));
- pr.PoolId = mig_jcr->previous_jr.PoolId;
+ pr.PoolId = jcr->jr.PoolId;
if (!db_get_pool_record(jcr, jcr->db, &pr)) {
Jmsg(jcr, M_FATAL, 0, _("Pool for JobId %s not in database. ERR=%s\n"),
edit_int64(pr.PoolId, ed1), db_strerror(jcr->db));
}
/* Get the pool resource corresponding to the original job */
pool = (POOL *)GetResWithName(R_POOL, pr.Name);
+ *retpool = pool;
if (!pool) {
Jmsg(jcr, M_FATAL, 0, _("Pool resource \"%s\" not found.\n"), pr.Name);
return false;
}
- /* If pool storage specified, use it for restore */
- copy_rstorage(mig_jcr, pool->storage, _("Pool resource"));
- copy_rstorage(jcr, pool->storage, _("Pool resource"));
-
/*
* If the original backup pool has a NextPool, make sure a
* record exists in the database. Note, in this case, we
if (!set_migration_wstorage(jcr, pool)) {
return false;
}
- mig_jcr->pool = jcr->pool = pool->NextPool;
+ jcr->pool = pool->NextPool;
pm_strcpy(jcr->pool_source, _("Job Pool's NextPool resource"));
- mig_jcr->jr.PoolId = jcr->jr.PoolId;
Dmsg2(dbglevel, "Write pool=%s read rpool=%s\n", jcr->pool->name(), jcr->rpool->name());
+
return true;
}
+
/*
* Do a Migration of a previous job
*
* so set a normal status, cleanup and return OK.
*/
if (!mig_jcr) {
- set_jcr_job_status(jcr, JS_Terminated);
+ jcr->setJobStatus(JS_Terminated);
migration_cleanup(jcr, jcr->JobStatus);
return true;
}
edit_int64(jcr->previous_jr.JobId, ed1),
jcr->get_ActionName(0),
db_strerror(jcr->db));
- set_jcr_job_status(jcr, JS_Terminated);
+ jcr->setJobStatus(JS_Terminated);
migration_cleanup(jcr, jcr->JobStatus);
return true;
}
/* Make sure this job was not already migrated */
- if (jcr->previous_jr.JobType != JT_BACKUP) {
+ if (jcr->previous_jr.JobType != JT_BACKUP &&
+ jcr->previous_jr.JobType != JT_JOB_COPY) {
Jmsg(jcr, M_INFO, 0, _("JobId %s already %s probably by another Job. %s stopped.\n"),
edit_int64(jcr->previous_jr.JobId, ed1),
jcr->get_ActionName(1),
jcr->get_OperationName());
- set_jcr_job_status(jcr, JS_Terminated);
+ jcr->setJobStatus(JS_Terminated);
migration_cleanup(jcr, jcr->JobStatus);
return true;
}
*
*/
Dmsg0(110, "Open connection with storage daemon\n");
- set_jcr_job_status(jcr, JS_WaitSD);
- set_jcr_job_status(mig_jcr, JS_WaitSD);
+ jcr->setJobStatus(JS_WaitSD);
+ mig_jcr->setJobStatus(JS_WaitSD);
/*
* Start conversation with Storage daemon
*/
Dmsg2(dbglevel, "Read store=%s, write store=%s\n",
((STORE *)jcr->rstorage->first())->name(),
((STORE *)jcr->wstorage->first())->name());
- if (((STORE *)jcr->rstorage->first())->name() == ((STORE *)jcr->wstorage->first())->name()) {
- Jmsg(jcr, M_FATAL, 0, _("Read storage \"%s\" same as write storage.\n"),
- ((STORE *)jcr->rstorage->first())->name());
- return false;
- }
- if (!start_storage_daemon_job(jcr, jcr->rstorage, jcr->wstorage)) {
+
+ if (!start_storage_daemon_job(jcr, jcr->rstorage, jcr->wstorage, /*send_bsr*/true)) {
return false;
}
Dmsg0(150, "Storage daemon connection OK\n");
- if (!send_bootstrap_file(jcr, sd) ||
- !response(jcr, sd, OKbootstrap, "Bootstrap", DISPLAY_ERROR)) {
- return false;
- }
/*
* We re-update the job start record so that the start
jcr->start_time = time(NULL);
jcr->jr.StartTime = jcr->start_time;
jcr->jr.JobTDate = jcr->start_time;
- set_jcr_job_status(jcr, JS_Running);
+ jcr->setJobStatus(JS_Running);
/* Update job start record for this migration control job */
if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) {
mig_jcr->start_time = time(NULL);
mig_jcr->jr.StartTime = mig_jcr->start_time;
mig_jcr->jr.JobTDate = mig_jcr->start_time;
- set_jcr_job_status(mig_jcr, JS_Running);
+ mig_jcr->setJobStatus(JS_Running);
/* Update job start record for the real migration backup job */
if (!db_update_job_start_record(mig_jcr, mig_jcr->db, &mig_jcr->jr)) {
}
- set_jcr_job_status(jcr, JS_Running);
- set_jcr_job_status(mig_jcr, JS_Running);
+ jcr->setJobStatus(JS_Running);
+ mig_jcr->setJobStatus(JS_Running);
/* Pickup Job termination data */
- /* Note, the SD stores in jcr->JobFiles/ReadBytes/JobBytes/Errors */
+ /* Note, the SD stores in jcr->JobFiles/ReadBytes/JobBytes/JobErrors */
wait_for_storage_daemon_termination(jcr);
- set_jcr_job_status(jcr, jcr->SDJobStatus);
+ jcr->setJobStatus(jcr->SDJobStatus);
db_write_batch_file_records(jcr); /* used by bulk batch file insert */
if (jcr->JobStatus != JS_Terminated) {
return false;
}
migration_cleanup(jcr, jcr->JobStatus);
- if (jcr->get_JobType() == JT_MIGRATE && mig_jcr) {
- char jobid[50];
- UAContext *ua = new_ua_context(jcr);
- edit_uint64(jcr->previous_jr.JobId, jobid);
- /* Purge all old file records, but leave Job record */
- purge_files_from_jobs(ua, jobid);
- free_ua_context(ua);
- }
+
return true;
}
{
idpkt *ids = (idpkt *)ctx;
+ /* Sanity check */
+ if (!row || !row[0]) {
+ Dmsg0(dbglevel, "dbid_hdlr error empty row\n");
+ return 1; /* stop calling us */
+ }
+
add_unique_id(ids, row[0]);
Dmsg3(dbglevel, "dbid_hdlr count=%d Ids=%p %s\n", ids->count, ids->list, ids->list);
return 0;
const char *sql_jobids_from_client =
"SELECT DISTINCT Job.JobId,Job.StartTime FROM Job,Pool,Client"
" WHERE Client.Name='%s' AND Pool.Name='%s' AND Job.PoolId=Pool.PoolId"
- " AND Job.ClientId=Client.ClientId AND Job.Type='B'"
- " AND Job.JobStatus = 'T'"
+ " AND Job.ClientId=Client.ClientId AND Job.Type IN ('B','C')"
+ " AND Job.JobStatus IN ('T','W')"
" ORDER by Job.StartTime";
/* Get Volume names in Pool */
const char *sql_jobids_from_vol =
"SELECT DISTINCT Job.JobId,Job.StartTime FROM Media,JobMedia,Job"
" WHERE Media.VolumeName='%s' AND Media.MediaId=JobMedia.MediaId"
- " AND JobMedia.JobId=Job.JobId AND Job.Type='B'"
- " AND Job.JobStatus = 'T' AND Media.Enabled=1"
+ " AND JobMedia.JobId=Job.JobId AND Job.Type IN ('B','C')"
+ " AND Job.JobStatus IN ('T','W') AND Media.Enabled=1"
" ORDER by Job.StartTime";
const char *sql_smallest_vol =
const char *sql_jobids_from_mediaid =
"SELECT DISTINCT Job.JobId,Job.StartTime FROM JobMedia,Job"
" WHERE JobMedia.JobId=Job.JobId AND JobMedia.MediaId IN (%s)"
- " AND Job.Type='B' AND Job.JobStatus = 'T'"
+ " AND Job.Type IN ('B','C') AND Job.JobStatus IN ('T','W')"
" ORDER by Job.StartTime";
/* Get the number of bytes in the pool */
" (SELECT DISTINCT Job.JobId from Pool,Job,Media,JobMedia WHERE"
" Pool.Name='%s' AND Media.PoolId=Pool.PoolId AND"
" VolStatus in ('Full','Used','Error','Append') AND Media.Enabled=1 AND"
- " Job.Type='B' AND Job.JobStatus = 'T' AND"
+ " Job.Type IN ('B','C') AND Job.JobStatus IN ('T','W') AND"
" JobMedia.JobId=Job.JobId AND Job.PoolId=Media.PoolId)";
/* Get the number of bytes in the Jobs */
const char *sql_pool_time =
"SELECT DISTINCT Job.JobId FROM Pool,Job,Media,JobMedia WHERE"
" Pool.Name='%s' AND Media.PoolId=Pool.PoolId AND"
- " VolStatus in ('Full','Used','Error') AND Media.Enabled=1 AND"
- " Job.Type='B' AND Job.JobStatus = 'T' AND"
+ " VolStatus IN ('Full','Used','Error') AND Media.Enabled=1 AND"
+ " Job.Type IN ('B','C') AND Job.JobStatus IN ('T','W') AND"
" JobMedia.JobId=Job.JobId AND Job.PoolId=Media.PoolId"
" AND Job.RealEndTime<='%s'";
const char *sql_jobids_of_pool_uncopied_jobs =
"SELECT DISTINCT Job.JobId,Job.StartTime FROM Job,Pool"
" WHERE Pool.Name = '%s' AND Pool.PoolId = Job.PoolId"
- " AND Job.Type = 'B' AND Job.JobStatus = 'T'"
+ " AND Job.Type = 'B' AND Job.JobStatus IN ('T','W')"
+ " AND Job.jobBytes > 0"
" AND Job.JobId NOT IN"
" (SELECT PriorJobId FROM Job WHERE"
- " Type = 'B' AND Job.JobStatus = 'T'"
+ " Type IN ('B','C') AND Job.JobStatus IN ('T','W')"
" AND PriorJobId != 0)"
" ORDER by Job.StartTime";
* 0 if no jobs to migrate
* 1 if OK and jcr->previous_jr filled in
*/
-static int get_job_to_migrate(JCR *jcr)
+static int getJob_to_migrate(JCR *jcr)
{
char ed1[30], ed2[30];
POOL_MEM query(PM_MESSAGE);
struct tm tm;
char dt[MAX_TIME_LENGTH];
int count = 0;
+ int limit = 99; /* limit + 1 is max jobs to start */
ids.list = get_pool_memory(PM_MESSAGE);
ids.list[0] = 0;
jids.list[0] = 0;
jids.count = 0;
-
/*
* If MigrateJobId is set, then we migrate only that Job,
* otherwise, we go through the full selection of jobs to
*/
if (jcr->MigrateJobId != 0) {
Dmsg1(dbglevel, "At Job start previous jobid=%u\n", jcr->MigrateJobId);
- edit_uint64(jcr->MigrateJobId, ids.list);
- ids.count = 1;
+ JobId = jcr->MigrateJobId;
} else {
switch (jcr->job->selection_type) {
case MT_JOB:
Jmsg(jcr, M_FATAL, 0, _("Unknown %s Selection Type.\n"), jcr->get_OperationName());
goto bail_out;
}
- }
-
- /*
- * Loop over all jobids except the last one, sending
- * them to start_migration_job(), which will start a job
- * for each of them. For the last JobId, we handle it below.
- */
- p = ids.list;
- if (ids.count == 0) {
- Jmsg(jcr, M_INFO, 0, _("No JobIds found to %s.\n"), jcr->get_ActionName(0));
- goto ok_out;
- }
- Jmsg(jcr, M_INFO, 0, _("The following %u JobId%s chosen to be %s: %s\n"),
- ids.count, (ids.count < 2) ? _(" was") : _("s were"),
- jcr->get_ActionName(1), ids.list);
+ /*
+ * Loop over all jobids except the last one, sending
+ * them to start_migration_job(), which will start a job
+ * for each of them. For the last JobId, we handle it below.
+ */
+ p = ids.list;
+ if (ids.count == 0) {
+ Jmsg(jcr, M_INFO, 0, _("No JobIds found to %s.\n"), jcr->get_ActionName(0));
+ goto ok_out;
+ }
- Dmsg2(dbglevel, "Before loop count=%d ids=%s\n", ids.count, ids.list);
- for (int i=1; i < (int)ids.count; i++) {
+ Jmsg(jcr, M_INFO, 0, _("The following %u JobId%s chosen to be %s: %s\n"),
+ ids.count, (ids.count < 2) ? _(" was") : _("s were"),
+ jcr->get_ActionName(1), ids.list);
+
+ Dmsg2(dbglevel, "Before loop count=%d ids=%s\n", ids.count, ids.list);
+ /*
+ * Note: to avoid overloading the system, limit the number
+ * of new jobs started to 100 (see limit above)
+ */
+ for (int i=1; i < (int)ids.count; i++) {
+ JobId = 0;
+ stat = get_next_jobid_from_list(&p, &JobId);
+ Dmsg3(dbglevel, "getJobid_no=%d stat=%d JobId=%u\n", i, stat, JobId);
+ if (stat < 0) {
+ Jmsg(jcr, M_FATAL, 0, _("Invalid JobId found.\n"));
+ goto bail_out;
+ } else if (stat == 0) {
+ Jmsg(jcr, M_INFO, 0, _("No JobIds found to %s.\n"), jcr->get_ActionName(0));
+ goto ok_out;
+ }
+ jcr->MigrateJobId = JobId;
+ /* Don't start any more when limit reaches zero */
+ limit--;
+ if (limit > 0) {
+ start_migration_job(jcr);
+ Dmsg0(dbglevel, "Back from start_migration_job\n");
+ }
+ }
+
+ /* Now get the last JobId and handle it in the current job */
JobId = 0;
stat = get_next_jobid_from_list(&p, &JobId);
- Dmsg3(dbglevel, "get_jobid_no=%d stat=%d JobId=%u\n", i, stat, JobId);
- jcr->MigrateJobId = JobId;
- start_migration_job(jcr);
- Dmsg0(dbglevel, "Back from start_migration_job\n");
+ Dmsg2(dbglevel, "Last get_next_jobid stat=%d JobId=%u\n", stat, (int)JobId);
if (stat < 0) {
Jmsg(jcr, M_FATAL, 0, _("Invalid JobId found.\n"));
goto bail_out;
goto ok_out;
}
}
-
- /* Now get the last JobId and handle it in the current job */
- JobId = 0;
- stat = get_next_jobid_from_list(&p, &JobId);
- Dmsg2(dbglevel, "Last get_next_jobid stat=%d JobId=%u\n", stat, (int)JobId);
- if (stat < 0) {
- Jmsg(jcr, M_FATAL, 0, _("Invalid JobId found.\n"));
- goto bail_out;
- } else if (stat == 0) {
- Jmsg(jcr, M_INFO, 0, _("No JobIds found to %s.\n"), jcr->get_ActionName(0));
- goto ok_out;
- }
jcr->previous_jr.JobId = JobId;
Dmsg1(dbglevel, "Previous jobid=%d\n", (int)jcr->previous_jr.JobId);
db_strerror(jcr->db));
goto bail_out;
}
+
Jmsg(jcr, M_INFO, 0, _("%s using JobId=%s Job=%s\n"),
jcr->get_OperationName(),
edit_int64(jcr->previous_jr.JobId, ed1), jcr->previous_jr.Job);
UAContext *ua = new_ua_context(jcr);
char ed1[50];
ua->batch = true;
- Mmsg(ua->cmd, "run %s jobid=%s", jcr->job->hdr.name,
- edit_uint64(jcr->MigrateJobId, ed1));
+ Mmsg(ua->cmd, "run job=\"%s\" jobid=%s ignoreduplicatecheck=yes pool=\"%s\"",
+ jcr->job->name(), edit_uint64(jcr->MigrateJobId, ed1),
+ jcr->pool->name());
Dmsg2(dbglevel, "=============== %s cmd=%s\n", jcr->get_OperationName(), ua->cmd);
parse_ua_args(ua); /* parse command */
JobId_t jobid = run_cmd(ua, ua->cmd);
POOL_MEM query(PM_MESSAGE);
/* Only a copy job is allowed */
- if (jcr->get_JobType() != JT_COPY) {
+ if (jcr->getJobType() != JT_COPY) {
Jmsg(jcr, M_FATAL, 0,
_("Selection Type 'pooluncopiedjobs' only applies to Copy Jobs"));
goto bail_out;
Dmsg2(100, "Enter migrate_cleanup %d %c\n", TermCode, TermCode);
update_job_end(jcr, TermCode);
- memset(&mr, 0, sizeof(mr));
/*
* Check if we actually did something.
* mig_jcr is jcr of the newly migrated job.
*/
if (mig_jcr) {
+ char old_jobid[50], new_jobid[50];
+
+ edit_uint64(jcr->previous_jr.JobId, old_jobid);
+ edit_uint64(mig_jcr->jr.JobId, new_jobid);
+
mig_jcr->JobFiles = jcr->JobFiles = jcr->SDJobFiles;
mig_jcr->JobBytes = jcr->JobBytes = jcr->SDJobBytes;
mig_jcr->VolSessionId = jcr->VolSessionId;
"JobTDate=%s WHERE JobId=%s",
jcr->previous_jr.cStartTime, jcr->previous_jr.cEndTime,
edit_uint64(jcr->previous_jr.JobTDate, ec1),
- edit_uint64(mig_jcr->jr.JobId, ec2));
+ new_jobid);
db_sql_query(mig_jcr->db, query.c_str(), NULL, NULL);
- /* Now mark the previous job as migrated if it terminated normally */
- if (jcr->get_JobType() == JT_MIGRATE && jcr->JobStatus == JS_Terminated) {
+ /*
+ * If we terminated a migration normally:
+ * - mark the previous job as migrated
+ * - move any Log records to the new JobId
+ * - Purge the File records from the previous job
+ */
+ if (jcr->getJobType() == JT_MIGRATE && jcr->JobStatus == JS_Terminated) {
Mmsg(query, "UPDATE Job SET Type='%c' WHERE JobId=%s",
- (char)JT_MIGRATED_JOB, edit_uint64(jcr->previous_jr.JobId, ec1));
+ (char)JT_MIGRATED_JOB, old_jobid);
+ db_sql_query(mig_jcr->db, query.c_str(), NULL, NULL);
+ UAContext *ua = new_ua_context(jcr);
+ /* Move JobLog to new JobId */
+ Mmsg(query, "UPDATE Log SET JobId=%s WHERE JobId=%s",
+ new_jobid, old_jobid);
+ db_sql_query(mig_jcr->db, query.c_str(), NULL, NULL);
+
+ if (jcr->job->PurgeMigrateJob) {
+ /* Purge old Job record */
+ purge_jobs_from_catalog(ua, old_jobid);
+ } else {
+ /* Purge all old file records, but leave Job record */
+ purge_files_from_jobs(ua, old_jobid);
+ }
+
+ free_ua_context(ua);
+ }
+
+ /*
+ * If we terminated a Copy (rather than a Migration) normally:
+ * - copy any Log records to the new JobId
+ * - set type="Job Copy" for the new job
+ */
+ if (jcr->getJobType() == JT_COPY && jcr->JobStatus == JS_Terminated) {
+ /* Copy JobLog to new JobId */
+ Mmsg(query, "INSERT INTO Log (JobId, Time, LogText ) "
+ "SELECT %s, Time, LogText FROM Log WHERE JobId=%s",
+ new_jobid, old_jobid);
+ db_sql_query(mig_jcr->db, query.c_str(), NULL, NULL);
+ Mmsg(query, "UPDATE Job SET Type='%c' WHERE JobId=%s",
+ (char)JT_JOB_COPY, new_jobid);
db_sql_query(mig_jcr->db, query.c_str(), NULL, NULL);
}
if (!db_get_job_record(jcr, jcr->db, &jcr->jr)) {
Jmsg(jcr, M_WARNING, 0, _("Error getting Job record for Job report: ERR=%s"),
db_strerror(jcr->db));
- set_jcr_job_status(jcr, JS_ErrorTerminated);
+ jcr->setJobStatus(JS_ErrorTerminated);
}
update_bootstrap_file(mig_jcr);
switch (jcr->JobStatus) {
case JS_Terminated:
- if (jcr->Errors || jcr->SDErrors) {
+ if (jcr->JobErrors || jcr->SDErrors) {
term_msg = _("%s OK -- with warnings");
} else {
term_msg = _("%s OK");
break;
}
} else {
- if (jcr->get_JobType() == JT_MIGRATE && jcr->previous_jr.JobId != 0) {
+ if (jcr->getJobType() == JT_MIGRATE && jcr->previous_jr.JobId != 0) {
/* Mark previous job as migrated */
Mmsg(query, "UPDATE Job SET Type='%c' WHERE JobId=%s",
(char)JT_MIGRATED_JOB, edit_uint64(jcr->previous_jr.JobId, ec1));
jobstatus_to_ascii(jcr->SDJobStatus, sd_term_msg, sizeof(sd_term_msg));
- Jmsg(jcr, msg_type, 0, _("%s %s %s (%s): %s\n"
+ Jmsg(jcr, msg_type, 0, _("%s %s %s (%s):\n"
" Build OS: %s %s %s\n"
" Prev Backup JobId: %s\n"
+" Prev Backup Job: %s\n"
" New Backup JobId: %s\n"
" Current JobId: %s\n"
" Current Job: %s\n"
" SD Errors: %d\n"
" SD termination status: %s\n"
" Termination: %s\n\n"),
- BACULA, my_name, VERSION, LSMDATE, edt,
+ BACULA, my_name, VERSION, LSMDATE,
HOST_OS, DISTNAME, DISTVER,
edit_uint64(jcr->previous_jr.JobId, ec6),
+ jcr->previous_jr.Job,
mig_jcr ? edit_uint64(mig_jcr->jr.JobId, ec7) : "0",
edit_uint64(jcr->jr.JobId, ec8),
jcr->jr.Job,
- level_to_str(jcr->get_JobLevel()), jcr->since,
+ level_to_str(jcr->getJobLevel()), jcr->since,
jcr->client->name(),
jcr->fileset->name(), jcr->FSCreateTime,
jcr->rpool->name(), jcr->rpool_source,