/*
Bacula® - The Network Backup Solution
- Copyright (C) 2004-2009 Free Software Foundation Europe e.V.
+ Copyright (C) 2004-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
This program is Free Software; you can redistribute it and/or
- modify it under the terms of version two of the GNU General Public
+ modify it under the terms of version three of the GNU Affero General Public
License as published by the Free Software Foundation and included
in the file LICENSE.
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
- You should have received a copy of the GNU General Public License
+ You should have received a copy of the GNU Affero General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
* to do the backup.
* When the Storage daemon finishes the job, update the DB.
*
- * Version $Id$
*/
#include "bacula.h"
static const int dbglevel = 10;
-static char OKbootstrap[] = "3000 OK bootstrap\n";
-static int get_job_to_migrate(JCR *jcr);
+static int getJob_to_migrate(JCR *jcr);
struct idpkt;
static bool regex_find_jobids(JCR *jcr, idpkt *ids, const char *query1,
const char *query2, const char *type);
}
/* If we find a job or jobs to migrate, the selected JobId is stored in previous_jr.JobId */
- count = get_job_to_migrate(jcr);
+ count = getJob_to_migrate(jcr);
if (count < 0) {
return false;
}
return true; /* no work */
}
- Dmsg1(dbglevel, "Back from get_job_to_migrate JobId=%d\n", (int)jcr->JobId);
+ Dmsg1(dbglevel, "Back from getJob_to_migrate JobId=%d\n", (int)jcr->JobId);
if (jcr->previous_jr.JobId == 0) {
Dmsg1(dbglevel, "JobId=%d no previous JobId\n", (int)jcr->JobId);
return true; /* no work */
}
- create_restore_bootstrap_file(jcr);
+ if (create_restore_bootstrap_file(jcr) < 0) {
+ Jmsg(jcr, M_FATAL, 0, _("Create bootstrap file failed.\n"));
+ return false;
+ }
if (jcr->previous_jr.JobId == 0 || jcr->ExpectedFiles == 0) {
- set_jcr_job_status(jcr, JS_Terminated);
+ jcr->setJobStatus(JS_Terminated);
Dmsg1(dbglevel, "JobId=%d expected files == 0\n", (int)jcr->JobId);
if (jcr->previous_jr.JobId == 0) {
Jmsg(jcr, M_INFO, 0, _("No previous Job found to %s.\n"), jcr->get_ActionName(0));
mig_jcr->jr.PoolId = jcr->jr.PoolId;
mig_jcr->jr.JobId = mig_jcr->JobId;
+ /* Don't let the watchdog check Max*Time values on this Job */
+ mig_jcr->no_maxtime = true;
+
+ /*
+ * Don't check for duplicates on migration and copy jobs
+ */
+ mig_jcr->job->IgnoreDuplicateJobChecking = true;
+
Dmsg4(dbglevel, "mig_jcr: Name=%s JobId=%d Type=%c Level=%c\n",
mig_jcr->jr.Name, (int)mig_jcr->jr.JobId,
mig_jcr->jr.JobType, mig_jcr->jr.JobLevel);
* so set a normal status, cleanup and return OK.
*/
if (!mig_jcr) {
- set_jcr_job_status(jcr, JS_Terminated);
+ jcr->setJobStatus(JS_Terminated);
migration_cleanup(jcr, jcr->JobStatus);
return true;
}
edit_int64(jcr->previous_jr.JobId, ed1),
jcr->get_ActionName(0),
db_strerror(jcr->db));
- set_jcr_job_status(jcr, JS_Terminated);
+ jcr->setJobStatus(JS_Terminated);
migration_cleanup(jcr, jcr->JobStatus);
return true;
}
/* Make sure this job was not already migrated */
- if (jcr->previous_jr.JobType != JT_BACKUP) {
+ if (jcr->previous_jr.JobType != JT_BACKUP &&
+ jcr->previous_jr.JobType != JT_JOB_COPY) {
Jmsg(jcr, M_INFO, 0, _("JobId %s already %s probably by another Job. %s stopped.\n"),
edit_int64(jcr->previous_jr.JobId, ed1),
jcr->get_ActionName(1),
jcr->get_OperationName());
- set_jcr_job_status(jcr, JS_Terminated);
+ jcr->setJobStatus(JS_Terminated);
migration_cleanup(jcr, jcr->JobStatus);
return true;
}
*
*/
Dmsg0(110, "Open connection with storage daemon\n");
- set_jcr_job_status(jcr, JS_WaitSD);
- set_jcr_job_status(mig_jcr, JS_WaitSD);
+ jcr->setJobStatus(JS_WaitSD);
+ mig_jcr->setJobStatus(JS_WaitSD);
/*
* Start conversation with Storage daemon
*/
Dmsg2(dbglevel, "Read store=%s, write store=%s\n",
((STORE *)jcr->rstorage->first())->name(),
((STORE *)jcr->wstorage->first())->name());
- if (((STORE *)jcr->rstorage->first())->name() == ((STORE *)jcr->wstorage->first())->name()) {
- Jmsg(jcr, M_FATAL, 0, _("Read storage \"%s\" same as write storage.\n"),
- ((STORE *)jcr->rstorage->first())->name());
- return false;
- }
- if (!start_storage_daemon_job(jcr, jcr->rstorage, jcr->wstorage)) {
+
+ if (!start_storage_daemon_job(jcr, jcr->rstorage, jcr->wstorage, /*send_bsr*/true)) {
return false;
}
Dmsg0(150, "Storage daemon connection OK\n");
- if (!send_bootstrap_file(jcr, sd) ||
- !response(jcr, sd, OKbootstrap, "Bootstrap", DISPLAY_ERROR)) {
- return false;
- }
/*
* We re-update the job start record so that the start
jcr->start_time = time(NULL);
jcr->jr.StartTime = jcr->start_time;
jcr->jr.JobTDate = jcr->start_time;
- set_jcr_job_status(jcr, JS_Running);
+ jcr->setJobStatus(JS_Running);
/* Update job start record for this migration control job */
if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) {
mig_jcr->start_time = time(NULL);
mig_jcr->jr.StartTime = mig_jcr->start_time;
mig_jcr->jr.JobTDate = mig_jcr->start_time;
- set_jcr_job_status(mig_jcr, JS_Running);
+ mig_jcr->setJobStatus(JS_Running);
/* Update job start record for the real migration backup job */
if (!db_update_job_start_record(mig_jcr, mig_jcr->db, &mig_jcr->jr)) {
}
- set_jcr_job_status(jcr, JS_Running);
- set_jcr_job_status(mig_jcr, JS_Running);
+ jcr->setJobStatus(JS_Running);
+ mig_jcr->setJobStatus(JS_Running);
/* Pickup Job termination data */
/* Note, the SD stores in jcr->JobFiles/ReadBytes/JobBytes/JobErrors */
wait_for_storage_daemon_termination(jcr);
- set_jcr_job_status(jcr, jcr->SDJobStatus);
+ jcr->setJobStatus(jcr->SDJobStatus);
db_write_batch_file_records(jcr); /* used by bulk batch file insert */
if (jcr->JobStatus != JS_Terminated) {
return false;
{
idpkt *ids = (idpkt *)ctx;
+ /* Sanity check */
+ if (!row || !row[0]) {
+ Dmsg0(dbglevel, "dbid_hdlr error empty row\n");
+ return 1; /* stop calling us */
+ }
+
add_unique_id(ids, row[0]);
Dmsg3(dbglevel, "dbid_hdlr count=%d Ids=%p %s\n", ids->count, ids->list, ids->list);
return 0;
const char *sql_jobids_from_client =
"SELECT DISTINCT Job.JobId,Job.StartTime FROM Job,Pool,Client"
" WHERE Client.Name='%s' AND Pool.Name='%s' AND Job.PoolId=Pool.PoolId"
- " AND Job.ClientId=Client.ClientId AND Job.Type='B'"
+ " AND Job.ClientId=Client.ClientId AND Job.Type IN ('B','C')"
" AND Job.JobStatus IN ('T','W')"
" ORDER by Job.StartTime";
const char *sql_jobids_from_vol =
"SELECT DISTINCT Job.JobId,Job.StartTime FROM Media,JobMedia,Job"
" WHERE Media.VolumeName='%s' AND Media.MediaId=JobMedia.MediaId"
- " AND JobMedia.JobId=Job.JobId AND Job.Type='B'"
+ " AND JobMedia.JobId=Job.JobId AND Job.Type IN ('B','C')"
" AND Job.JobStatus IN ('T','W') AND Media.Enabled=1"
" ORDER by Job.StartTime";
const char *sql_jobids_from_mediaid =
"SELECT DISTINCT Job.JobId,Job.StartTime FROM JobMedia,Job"
" WHERE JobMedia.JobId=Job.JobId AND JobMedia.MediaId IN (%s)"
- " AND Job.Type='B' AND Job.JobStatus IN ('T','W')"
+ " AND Job.Type IN ('B','C') AND Job.JobStatus IN ('T','W')"
" ORDER by Job.StartTime";
/* Get the number of bytes in the pool */
" (SELECT DISTINCT Job.JobId from Pool,Job,Media,JobMedia WHERE"
" Pool.Name='%s' AND Media.PoolId=Pool.PoolId AND"
" VolStatus in ('Full','Used','Error','Append') AND Media.Enabled=1 AND"
- " Job.Type='B' AND Job.JobStatus IN ('T','W') AND"
+ " Job.Type IN ('B','C') AND Job.JobStatus IN ('T','W') AND"
" JobMedia.JobId=Job.JobId AND Job.PoolId=Media.PoolId)";
/* Get the number of bytes in the Jobs */
"SELECT DISTINCT Job.JobId FROM Pool,Job,Media,JobMedia WHERE"
" Pool.Name='%s' AND Media.PoolId=Pool.PoolId AND"
" VolStatus IN ('Full','Used','Error') AND Media.Enabled=1 AND"
- " Job.Type='B' AND Job.JobStatus IN ('T','W') AND"
+ " Job.Type IN ('B','C') AND Job.JobStatus IN ('T','W') AND"
" JobMedia.JobId=Job.JobId AND Job.PoolId=Media.PoolId"
" AND Job.RealEndTime<='%s'";
* 0 if no jobs to migrate
* 1 if OK and jcr->previous_jr filled in
*/
-static int get_job_to_migrate(JCR *jcr)
+static int getJob_to_migrate(JCR *jcr)
{
char ed1[30], ed2[30];
POOL_MEM query(PM_MESSAGE);
jids.list[0] = 0;
jids.count = 0;
-
/*
* If MigrateJobId is set, then we migrate only that Job,
* otherwise, we go through the full selection of jobs to
*/
if (jcr->MigrateJobId != 0) {
Dmsg1(dbglevel, "At Job start previous jobid=%u\n", jcr->MigrateJobId);
- edit_uint64(jcr->MigrateJobId, ids.list);
- ids.count = 1;
+ JobId = jcr->MigrateJobId;
} else {
switch (jcr->job->selection_type) {
case MT_JOB:
Jmsg(jcr, M_FATAL, 0, _("Unknown %s Selection Type.\n"), jcr->get_OperationName());
goto bail_out;
}
- }
-
- /*
- * Loop over all jobids except the last one, sending
- * them to start_migration_job(), which will start a job
- * for each of them. For the last JobId, we handle it below.
- */
- p = ids.list;
- if (ids.count == 0) {
- Jmsg(jcr, M_INFO, 0, _("No JobIds found to %s.\n"), jcr->get_ActionName(0));
- goto ok_out;
- }
- Jmsg(jcr, M_INFO, 0, _("The following %u JobId%s chosen to be %s: %s\n"),
- ids.count, (ids.count < 2) ? _(" was") : _("s were"),
- jcr->get_ActionName(1), ids.list);
+ /*
+ * Loop over all jobids except the last one, sending
+ * them to start_migration_job(), which will start a job
+ * for each of them. For the last JobId, we handle it below.
+ */
+ p = ids.list;
+ if (ids.count == 0) {
+ Jmsg(jcr, M_INFO, 0, _("No JobIds found to %s.\n"), jcr->get_ActionName(0));
+ goto ok_out;
+ }
- Dmsg2(dbglevel, "Before loop count=%d ids=%s\n", ids.count, ids.list);
- for (int i=1; i < (int)ids.count; i++) {
+ Jmsg(jcr, M_INFO, 0, _("The following %u JobId%s chosen to be %s: %s\n"),
+ ids.count, (ids.count < 2) ? _(" was") : _("s were"),
+ jcr->get_ActionName(1), ids.list);
+
+ Dmsg2(dbglevel, "Before loop count=%d ids=%s\n", ids.count, ids.list);
+ for (int i=1; i < (int)ids.count; i++) {
+ JobId = 0;
+ stat = get_next_jobid_from_list(&p, &JobId);
+ Dmsg3(dbglevel, "getJobid_no=%d stat=%d JobId=%u\n", i, stat, JobId);
+ if (stat < 0) {
+ Jmsg(jcr, M_FATAL, 0, _("Invalid JobId found.\n"));
+ goto bail_out;
+ } else if (stat == 0) {
+ Jmsg(jcr, M_INFO, 0, _("No JobIds found to %s.\n"), jcr->get_ActionName(0));
+ goto ok_out;
+ }
+ jcr->MigrateJobId = JobId;
+ start_migration_job(jcr);
+ Dmsg0(dbglevel, "Back from start_migration_job\n");
+ }
+
+ /* Now get the last JobId and handle it in the current job */
JobId = 0;
stat = get_next_jobid_from_list(&p, &JobId);
- Dmsg3(dbglevel, "get_jobid_no=%d stat=%d JobId=%u\n", i, stat, JobId);
- jcr->MigrateJobId = JobId;
- start_migration_job(jcr);
- Dmsg0(dbglevel, "Back from start_migration_job\n");
+ Dmsg2(dbglevel, "Last get_next_jobid stat=%d JobId=%u\n", stat, (int)JobId);
if (stat < 0) {
Jmsg(jcr, M_FATAL, 0, _("Invalid JobId found.\n"));
goto bail_out;
goto ok_out;
}
}
-
- /* Now get the last JobId and handle it in the current job */
- JobId = 0;
- stat = get_next_jobid_from_list(&p, &JobId);
- Dmsg2(dbglevel, "Last get_next_jobid stat=%d JobId=%u\n", stat, (int)JobId);
- if (stat < 0) {
- Jmsg(jcr, M_FATAL, 0, _("Invalid JobId found.\n"));
- goto bail_out;
- } else if (stat == 0) {
- Jmsg(jcr, M_INFO, 0, _("No JobIds found to %s.\n"), jcr->get_ActionName(0));
- goto ok_out;
- }
jcr->previous_jr.JobId = JobId;
Dmsg1(dbglevel, "Previous jobid=%d\n", (int)jcr->previous_jr.JobId);
db_strerror(jcr->db));
goto bail_out;
}
+
Jmsg(jcr, M_INFO, 0, _("%s using JobId=%s Job=%s\n"),
jcr->get_OperationName(),
edit_int64(jcr->previous_jr.JobId, ed1), jcr->previous_jr.Job);
UAContext *ua = new_ua_context(jcr);
char ed1[50];
ua->batch = true;
- Mmsg(ua->cmd, "run %s jobid=%s", jcr->job->hdr.name,
+ Mmsg(ua->cmd, "run job=\"%s\" jobid=%s ignoreduplicatecheck=yes", jcr->job->name(),
edit_uint64(jcr->MigrateJobId, ed1));
Dmsg2(dbglevel, "=============== %s cmd=%s\n", jcr->get_OperationName(), ua->cmd);
parse_ua_args(ua); /* parse command */
POOL_MEM query(PM_MESSAGE);
/* Only a copy job is allowed */
- if (jcr->get_JobType() != JT_COPY) {
+ if (jcr->getJobType() != JT_COPY) {
Jmsg(jcr, M_FATAL, 0,
_("Selection Type 'pooluncopiedjobs' only applies to Copy Jobs"));
goto bail_out;
* - move any Log records to the new JobId
* - Purge the File records from the previous job
*/
- if (jcr->get_JobType() == JT_MIGRATE && jcr->JobStatus == JS_Terminated) {
+ if (jcr->getJobType() == JT_MIGRATE && jcr->JobStatus == JS_Terminated) {
Mmsg(query, "UPDATE Job SET Type='%c' WHERE JobId=%s",
(char)JT_MIGRATED_JOB, old_jobid);
db_sql_query(mig_jcr->db, query.c_str(), NULL, NULL);
Mmsg(query, "UPDATE Log SET JobId=%s WHERE JobId=%s",
new_jobid, old_jobid);
db_sql_query(mig_jcr->db, query.c_str(), NULL, NULL);
- /* Purge all old file records, but leave Job record */
- purge_files_from_jobs(ua, old_jobid);
+
+ if (jcr->job->PurgeMigrateJob) {
+ /* Purge old Job record */
+ purge_jobs_from_catalog(ua, old_jobid);
+ } else {
+ /* Purge all old file records, but leave Job record */
+ purge_files_from_jobs(ua, old_jobid);
+ }
+
free_ua_context(ua);
}
/*
- * If we terminated a copy normally:
+ * If we terminated a Copy (rather than a Migration) normally:
* - copy any Log records to the new JobId
* - set type="Job Copy" for the new job
*/
- if (jcr->get_JobType() == JT_COPY && jcr->JobStatus == JS_Terminated) {
+ if (jcr->getJobType() == JT_COPY && jcr->JobStatus == JS_Terminated) {
/* Copy JobLog to new JobId */
Mmsg(query, "INSERT INTO Log (JobId, Time, LogText ) "
"SELECT %s, Time, LogText FROM Log WHERE JobId=%s",
if (!db_get_job_record(jcr, jcr->db, &jcr->jr)) {
Jmsg(jcr, M_WARNING, 0, _("Error getting Job record for Job report: ERR=%s"),
db_strerror(jcr->db));
- set_jcr_job_status(jcr, JS_ErrorTerminated);
+ jcr->setJobStatus(JS_ErrorTerminated);
}
update_bootstrap_file(mig_jcr);
break;
}
} else {
- if (jcr->get_JobType() == JT_MIGRATE && jcr->previous_jr.JobId != 0) {
+ if (jcr->getJobType() == JT_MIGRATE && jcr->previous_jr.JobId != 0) {
/* Mark previous job as migrated */
Mmsg(query, "UPDATE Job SET Type='%c' WHERE JobId=%s",
(char)JT_MIGRATED_JOB, edit_uint64(jcr->previous_jr.JobId, ec1));
jobstatus_to_ascii(jcr->SDJobStatus, sd_term_msg, sizeof(sd_term_msg));
- Jmsg(jcr, msg_type, 0, _("%s %s %s (%s): %s\n"
+ Jmsg(jcr, msg_type, 0, _("%s %s %s (%s):\n"
" Build OS: %s %s %s\n"
" Prev Backup JobId: %s\n"
+" Prev Backup Job: %s\n"
" New Backup JobId: %s\n"
" Current JobId: %s\n"
" Current Job: %s\n"
" SD Errors: %d\n"
" SD termination status: %s\n"
" Termination: %s\n\n"),
- BACULA, my_name, VERSION, LSMDATE, edt,
+ BACULA, my_name, VERSION, LSMDATE,
HOST_OS, DISTNAME, DISTVER,
edit_uint64(jcr->previous_jr.JobId, ec6),
+ jcr->previous_jr.Job,
mig_jcr ? edit_uint64(mig_jcr->jr.JobId, ec7) : "0",
edit_uint64(jcr->jr.JobId, ec8),
jcr->jr.Job,
- level_to_str(jcr->get_JobLevel()), jcr->since,
+ level_to_str(jcr->getJobLevel()), jcr->since,
jcr->client->name(),
jcr->fileset->name(), jcr->FSCreateTime,
jcr->rpool->name(), jcr->rpool_source,