/*
Bacula® - The Network Backup Solution
- Copyright (C) 2004-2007 Free Software Foundation Europe e.V.
+ Copyright (C) 2004-2012 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
This program is Free Software; you can redistribute it and/or
- modify it under the terms of version two of the GNU General Public
- License as published by the Free Software Foundation plus additions
- that are listed in the file LICENSE.
+ modify it under the terms of version three of the GNU Affero General Public
+ License as published by the Free Software Foundation and included
+ in the file LICENSE.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
- You should have received a copy of the GNU General Public License
+ You should have received a copy of the GNU Affero General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
- Bacula® is a registered trademark of John Walker.
+ Bacula® is a registered trademark of Kern Sibbald.
The licensor of Bacula is the Free Software Foundation Europe
(FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
Switzerland, email:ftf@fsfeurope.org.
/*
*
* Bacula Director -- migrate.c -- responsible for doing
- * migration jobs.
+ * migration and copy jobs.
+ *
+ * Also handles Copy jobs (March MMVIII)
*
* Kern Sibbald, September MMIV
*
* to do the backup.
* When the Storage daemon finishes the job, update the DB.
*
- * Version $Id$
*/
#include "bacula.h"
static const int dbglevel = 10;
-static char OKbootstrap[] = "3000 OK bootstrap\n";
-static int get_job_to_migrate(JCR *jcr);
+static int getJob_to_migrate(JCR *jcr);
struct idpkt;
static bool regex_find_jobids(JCR *jcr, idpkt *ids, const char *query1,
const char *query2, const char *type);
static bool find_mediaid_then_jobids(JCR *jcr, idpkt *ids, const char *query1,
const char *type);
static bool find_jobids_from_mediaid_list(JCR *jcr, idpkt *ids, const char *type);
+static bool find_jobids_of_pool_uncopied_jobs(JCR *jcr, idpkt *ids);
static void start_migration_job(JCR *jcr);
static int get_next_dbid_from_list(char **p, DBId_t *DBId);
+static bool set_migration_next_pool(JCR *jcr, POOL **pool);
/*
* Called here before the job is run to do the job
*/
bool do_migration_init(JCR *jcr)
{
- POOL_DBR pr;
- POOL *pool;
- char ed1[100];
+ POOL *pool = NULL;
JOB *job, *prev_job;
JCR *mig_jcr; /* newly migrated job */
int count;
apply_pool_overrides(jcr);
+ if (!allow_duplicate_job(jcr)) {
+ return false;
+ }
+
jcr->jr.PoolId = get_or_create_pool_record(jcr, jcr->pool->name());
if (jcr->jr.PoolId == 0) {
Dmsg1(dbglevel, "JobId=%d no PoolId\n", (int)jcr->JobId);
Dmsg2(dbglevel, "Read pool=%s (From %s)\n", jcr->rpool->name(), jcr->rpool_source);
+ if (!get_or_create_fileset_record(jcr)) {
+ Dmsg1(dbglevel, "JobId=%d no FileSet\n", (int)jcr->JobId);
+ Jmsg(jcr, M_FATAL, 0, _("Could not get or create the FileSet record.\n"));
+ return false;
+ }
+
/* If we find a job or jobs to migrate it is previous_jr.JobId */
- count = get_job_to_migrate(jcr);
+ count = getJob_to_migrate(jcr);
if (count < 0) {
return false;
}
if (count == 0) {
- return true;
+ set_migration_next_pool(jcr, &pool);
+ return true; /* no work */
}
- Dmsg1(dbglevel, "Back from get_job_to_migrate JobId=%d\n", (int)jcr->JobId);
+ Dmsg1(dbglevel, "Back from getJob_to_migrate JobId=%d\n", (int)jcr->JobId);
if (jcr->previous_jr.JobId == 0) {
Dmsg1(dbglevel, "JobId=%d no previous JobId\n", (int)jcr->JobId);
- Jmsg(jcr, M_INFO, 0, _("No previous Job found to migrate.\n"));
+ Jmsg(jcr, M_INFO, 0, _("No previous Job found to %s.\n"), jcr->get_ActionName(0));
+ set_migration_next_pool(jcr, &pool);
return true; /* no work */
}
- if (!get_or_create_fileset_record(jcr)) {
- Dmsg1(dbglevel, "JobId=%d no FileSet\n", (int)jcr->JobId);
- Jmsg(jcr, M_FATAL, 0, _("Could not get or create the FileSet record.\n"));
+ if (create_restore_bootstrap_file(jcr) < 0) {
+ Jmsg(jcr, M_FATAL, 0, _("Create bootstrap file failed.\n"));
return false;
}
- create_restore_bootstrap_file(jcr);
-
if (jcr->previous_jr.JobId == 0 || jcr->ExpectedFiles == 0) {
- set_jcr_job_status(jcr, JS_Terminated);
+ jcr->setJobStatus(JS_Terminated);
Dmsg1(dbglevel, "JobId=%d expected files == 0\n", (int)jcr->JobId);
if (jcr->previous_jr.JobId == 0) {
- Jmsg(jcr, M_INFO, 0, _("No previous Job found to migrate.\n"));
+ Jmsg(jcr, M_INFO, 0, _("No previous Job found to %s.\n"), jcr->get_ActionName(0));
} else {
- Jmsg(jcr, M_INFO, 0, _("Previous Job has no data to migrate.\n"));
+ Jmsg(jcr, M_INFO, 0, _("Previous Job has no data to %s.\n"), jcr->get_ActionName(0));
}
+ set_migration_next_pool(jcr, &pool);
return true; /* no work */
}
- Dmsg5(dbglevel, "JobId=%d: Previous: Name=%s JobId=%d Type=%c Level=%c\n",
- (int)jcr->JobId,
- jcr->previous_jr.Name, (int)jcr->previous_jr.JobId,
- jcr->previous_jr.JobType, jcr->previous_jr.JobLevel);
Dmsg5(dbglevel, "JobId=%d: Current: Name=%s JobId=%d Type=%c Level=%c\n",
(int)jcr->JobId,
jcr->spool_data = job->spool_data; /* turn on spooling if requested in job */
- /* Create a migation jcr */
+ /* Create a migration jcr */
mig_jcr = jcr->mig_jcr = new_jcr(sizeof(JCR), dird_free_jcr);
memcpy(&mig_jcr->previous_jr, &jcr->previous_jr, sizeof(mig_jcr->previous_jr));
mig_jcr->jr.PoolId = jcr->jr.PoolId;
mig_jcr->jr.JobId = mig_jcr->JobId;
+ /* Don't let WatchDog check Max*Time values on this Job */
+ mig_jcr->no_maxtime = true;
+
+ /*
+ * Don't check for duplicates on migration and copy jobs
+ */
+ mig_jcr->job->IgnoreDuplicateJobChecking = true;
+
Dmsg4(dbglevel, "mig_jcr: Name=%s JobId=%d Type=%c Level=%c\n",
mig_jcr->jr.Name, (int)mig_jcr->jr.JobId,
mig_jcr->jr.JobType, mig_jcr->jr.JobLevel);
+ if (set_migration_next_pool(jcr, &pool)) {
+ /* If pool storage specified, use it for restore */
+ copy_rstorage(mig_jcr, pool->storage, _("Pool resource"));
+ copy_rstorage(jcr, pool->storage, _("Pool resource"));
+
+ mig_jcr->pool = jcr->pool;
+ mig_jcr->jr.PoolId = jcr->jr.PoolId;
+ }
+
+ return true;
+}
+
+
+/*
+ * set_migration_next_pool() called by do_migration_init()
+ * at different stages.
+ * The idea here is to make a common subroutine for the
+ * NextPool's search code and to permit do_migration_init()
+ * to return with the NextPool set in the jcr struct.
+ */
+static bool set_migration_next_pool(JCR *jcr, POOL **retpool)
+{
+ POOL_DBR pr;
+ POOL *pool;
+ char ed1[100];
+
/*
* Get the PoolId used with the original job. Then
* find the pool name from the database record.
*/
memset(&pr, 0, sizeof(pr));
- pr.PoolId = mig_jcr->previous_jr.PoolId;
+ pr.PoolId = jcr->jr.PoolId;
if (!db_get_pool_record(jcr, jcr->db, &pr)) {
Jmsg(jcr, M_FATAL, 0, _("Pool for JobId %s not in database. ERR=%s\n"),
edit_int64(pr.PoolId, ed1), db_strerror(jcr->db));
}
/* Get the pool resource corresponding to the original job */
pool = (POOL *)GetResWithName(R_POOL, pr.Name);
+ *retpool = pool;
if (!pool) {
Jmsg(jcr, M_FATAL, 0, _("Pool resource \"%s\" not found.\n"), pr.Name);
return false;
}
- /* If pool storage specified, use it for restore */
- copy_rstorage(mig_jcr, pool->storage, _("Pool resource"));
- copy_rstorage(jcr, pool->storage, _("Pool resource"));
-
/*
* If the original backup pool has a NextPool, make sure a
* record exists in the database. Note, in this case, we
if (!set_migration_wstorage(jcr, pool)) {
return false;
}
- mig_jcr->pool = jcr->pool = pool->NextPool;
+ jcr->pool = pool->NextPool;
pm_strcpy(jcr->pool_source, _("Job Pool's NextPool resource"));
- mig_jcr->jr.PoolId = jcr->jr.PoolId;
Dmsg2(dbglevel, "Write pool=%s read rpool=%s\n", jcr->pool->name(), jcr->rpool->name());
+
return true;
}
+
/*
* Do a Migration of a previous job
*
* so set a normal status, cleanup and return OK.
*/
if (!mig_jcr) {
- set_jcr_job_status(jcr, JS_Terminated);
+ jcr->setJobStatus(JS_Terminated);
migration_cleanup(jcr, jcr->JobStatus);
return true;
}
- /* Print Job Start message */
- Jmsg(jcr, M_INFO, 0, _("Start Migration JobId %s, Job=%s\n"),
- edit_uint64(jcr->JobId, ed1), jcr->Job);
-
+ if (!db_get_job_record(jcr, jcr->db, &jcr->previous_jr)) {
+ Jmsg(jcr, M_FATAL, 0, _("Could not get job record for JobId %s to %s. ERR=%s"),
+ edit_int64(jcr->previous_jr.JobId, ed1),
+ jcr->get_ActionName(0),
+ db_strerror(jcr->db));
+ jcr->setJobStatus(JS_Terminated);
+ migration_cleanup(jcr, jcr->JobStatus);
+ return true;
+ }
+ /* Make sure this job was not already migrated */
+ if (jcr->previous_jr.JobType != JT_BACKUP &&
+ jcr->previous_jr.JobType != JT_JOB_COPY) {
+ Jmsg(jcr, M_INFO, 0, _("JobId %s already %s probably by another Job. %s stopped.\n"),
+ edit_int64(jcr->previous_jr.JobId, ed1),
+ jcr->get_ActionName(1),
+ jcr->get_OperationName());
+ jcr->setJobStatus(JS_Terminated);
+ migration_cleanup(jcr, jcr->JobStatus);
+ return true;
+ }
+ /* Print Job Start message */
+ Jmsg(jcr, M_INFO, 0, _("Start %s JobId %s, Job=%s\n"),
+ jcr->get_OperationName(), edit_uint64(jcr->JobId, ed1), jcr->Job);
/*
* Open a message channel connection with the Storage
*
*/
Dmsg0(110, "Open connection with storage daemon\n");
- set_jcr_job_status(jcr, JS_WaitSD);
- set_jcr_job_status(mig_jcr, JS_WaitSD);
+ jcr->setJobStatus(JS_WaitSD);
+ mig_jcr->setJobStatus(JS_WaitSD);
/*
* Start conversation with Storage daemon
*/
Dmsg2(dbglevel, "Read store=%s, write store=%s\n",
((STORE *)jcr->rstorage->first())->name(),
((STORE *)jcr->wstorage->first())->name());
- if (((STORE *)jcr->rstorage->first())->name() == ((STORE *)jcr->wstorage->first())->name()) {
- Jmsg(jcr, M_FATAL, 0, _("Read storage \"%s\" same as write storage.\n"),
- ((STORE *)jcr->rstorage->first())->name());
- return false;
- }
- if (!start_storage_daemon_job(jcr, jcr->rstorage, jcr->wstorage)) {
+
+ if (!start_storage_daemon_job(jcr, jcr->rstorage, jcr->wstorage, /*send_bsr*/true)) {
return false;
}
Dmsg0(150, "Storage daemon connection OK\n");
- if (!send_bootstrap_file(jcr, sd) ||
- !response(jcr, sd, OKbootstrap, "Bootstrap", DISPLAY_ERROR)) {
- return false;
- }
/*
* We re-update the job start record so that the start
jcr->start_time = time(NULL);
jcr->jr.StartTime = jcr->start_time;
jcr->jr.JobTDate = jcr->start_time;
- set_jcr_job_status(jcr, JS_Running);
+ jcr->setJobStatus(JS_Running);
/* Update job start record for this migration control job */
if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) {
mig_jcr->start_time = time(NULL);
mig_jcr->jr.StartTime = mig_jcr->start_time;
mig_jcr->jr.JobTDate = mig_jcr->start_time;
- set_jcr_job_status(mig_jcr, JS_Running);
+ mig_jcr->setJobStatus(JS_Running);
/* Update job start record for the real migration backup job */
if (!db_update_job_start_record(mig_jcr, mig_jcr->db, &mig_jcr->jr)) {
* to avoid two threads from using the BSOCK structure at
* the same time.
*/
- if (!bnet_fsend(sd, "run")) {
+ if (!sd->fsend("run")) {
return false;
}
}
- set_jcr_job_status(jcr, JS_Running);
- set_jcr_job_status(mig_jcr, JS_Running);
+ jcr->setJobStatus(JS_Running);
+ mig_jcr->setJobStatus(JS_Running);
/* Pickup Job termination data */
- /* Note, the SD stores in jcr->JobFiles/ReadBytes/JobBytes/Errors */
+ /* Note, the SD stores in jcr->JobFiles/ReadBytes/JobBytes/JobErrors */
wait_for_storage_daemon_termination(jcr);
- set_jcr_job_status(jcr, jcr->SDJobStatus);
+ jcr->setJobStatus(jcr->SDJobStatus);
db_write_batch_file_records(jcr); /* used by bulk batch file insert */
if (jcr->JobStatus != JS_Terminated) {
return false;
}
migration_cleanup(jcr, jcr->JobStatus);
- if (mig_jcr) {
- char jobid[50];
- UAContext *ua = new_ua_context(jcr);
- edit_uint64(jcr->previous_jr.JobId, jobid);
- /* Purge all old file records, but leave Job record */
- purge_files_from_jobs(ua, jobid);
- free_ua_context(ua);
- }
+
return true;
}
/* Add an item to the list if it is unique */
static void add_unique_id(idpkt *ids, char *item)
{
- char id[30];
+ const int maxlen = 30;
+ char id[maxlen+1];
char *q = ids->list;
/* Walk through current list to see if each item is the same as item */
for ( ; *q; ) {
id[0] = 0;
- for (int i=0; i<(int)sizeof(id); i++) {
+ for (int i=0; i<maxlen; i++) {
if (*q == 0) {
break;
} else if (*q == ',') {
{
idpkt *ids = (idpkt *)ctx;
+ /* Sanity check */
+ if (!row || !row[0]) {
+ Dmsg0(dbglevel, "dbid_hdlr error empty row\n");
+ return 1; /* stop calling us */
+ }
+
add_unique_id(ids, row[0]);
Dmsg3(dbglevel, "dbid_hdlr count=%d Ids=%p %s\n", ids->count, ids->list, ids->list);
return 0;
const char *sql_jobids_from_client =
"SELECT DISTINCT Job.JobId,Job.StartTime FROM Job,Pool,Client"
" WHERE Client.Name='%s' AND Pool.Name='%s' AND Job.PoolId=Pool.PoolId"
- " AND Job.ClientId=Client.ClientId AND Job.Type='B'"
+ " AND Job.ClientId=Client.ClientId AND Job.Type IN ('B','C')"
+ " AND Job.JobStatus IN ('T','W')"
" ORDER by Job.StartTime";
/* Get Volume names in Pool */
const char *sql_jobids_from_vol =
"SELECT DISTINCT Job.JobId,Job.StartTime FROM Media,JobMedia,Job"
" WHERE Media.VolumeName='%s' AND Media.MediaId=JobMedia.MediaId"
- " AND JobMedia.JobId=Job.JobId AND Job.Type='B'"
+ " AND JobMedia.JobId=Job.JobId AND Job.Type IN ('B','C')"
+ " AND Job.JobStatus IN ('T','W') AND Media.Enabled=1"
" ORDER by Job.StartTime";
-
const char *sql_smallest_vol =
- "SELECT MediaId FROM Media,Pool WHERE"
- " VolStatus in ('Full','Used','Error') AND Media.Enabled=1 AND"
+ "SELECT Media.MediaId FROM Media,Pool,JobMedia WHERE"
+ " Media.MediaId in (SELECT DISTINCT MediaId from JobMedia) AND"
+ " Media.VolStatus in ('Full','Used','Error') AND Media.Enabled=1 AND"
" Media.PoolId=Pool.PoolId AND Pool.Name='%s'"
" ORDER BY VolBytes ASC LIMIT 1";
const char *sql_oldest_vol =
- "SELECT MediaId FROM Media,Pool WHERE"
- " VolStatus in ('Full','Used','Error') AND Media.Enabled=1 AND"
+ "SELECT Media.MediaId FROM Media,Pool,JobMedia WHERE"
+ " Media.MediaId in (SELECT DISTINCT MediaId from JobMedia) AND"
+ " Media.VolStatus in ('Full','Used','Error') AND Media.Enabled=1 AND"
" Media.PoolId=Pool.PoolId AND Pool.Name='%s'"
" ORDER BY LastWritten ASC LIMIT 1";
/* Get JobIds when we have selected MediaId */
const char *sql_jobids_from_mediaid =
"SELECT DISTINCT Job.JobId,Job.StartTime FROM JobMedia,Job"
- " WHERE JobMedia.JobId=Job.JobId AND JobMedia.MediaId=%s"
- " AND Job.Type='B'"
+ " WHERE JobMedia.JobId=Job.JobId AND JobMedia.MediaId IN (%s)"
+ " AND Job.Type IN ('B','C') AND Job.JobStatus IN ('T','W')"
" ORDER by Job.StartTime";
-/* Get tne number of bytes in the pool */
+/* Get the number of bytes in the pool */
const char *sql_pool_bytes =
- "SELECT SUM(VolBytes) FROM Media,Pool WHERE"
+ "SELECT SUM(JobBytes) FROM Job WHERE JobId IN"
+ " (SELECT DISTINCT Job.JobId from Pool,Job,Media,JobMedia WHERE"
+ " Pool.Name='%s' AND Media.PoolId=Pool.PoolId AND"
" VolStatus in ('Full','Used','Error','Append') AND Media.Enabled=1 AND"
- " Media.PoolId=Pool.PoolId AND Pool.Name='%s'";
+ " Job.Type IN ('B','C') AND Job.JobStatus IN ('T','W') AND"
+ " JobMedia.JobId=Job.JobId AND Job.PoolId=Media.PoolId)";
-/* Get tne number of bytes in the Jobs */
+/* Get the number of bytes in the Jobs */
const char *sql_job_bytes =
"SELECT SUM(JobBytes) FROM Job WHERE JobId IN (%s)";
-
/* Get Media Ids in Pool */
const char *sql_mediaids =
"SELECT MediaId FROM Media,Pool WHERE"
/* Get JobIds in Pool longer than specified time */
const char *sql_pool_time =
- "SELECT DISTINCT Job.JobId from Pool,Job,Media,JobMedia WHERE"
+ "SELECT DISTINCT Job.JobId FROM Pool,Job,Media,JobMedia WHERE"
" Pool.Name='%s' AND Media.PoolId=Pool.PoolId AND"
- " VolStatus in ('Full','Used','Error') AND Media.Enabled=1 AND"
- " Job.Type='B' AND"
+ " VolStatus IN ('Full','Used','Error') AND Media.Enabled=1 AND"
+ " Job.Type IN ('B','C') AND Job.JobStatus IN ('T','W') AND"
" JobMedia.JobId=Job.JobId AND Job.PoolId=Media.PoolId"
" AND Job.RealEndTime<='%s'";
+/* Get JobIds from successfully completed backup jobs which have not been copied before */
+const char *sql_jobids_of_pool_uncopied_jobs =
+ "SELECT DISTINCT Job.JobId,Job.StartTime FROM Job,Pool"
+ " WHERE Pool.Name = '%s' AND Pool.PoolId = Job.PoolId"
+ " AND Job.Type = 'B' AND Job.JobStatus IN ('T','W')"
+ " AND Job.jobBytes > 0"
+ " AND Job.JobId NOT IN"
+ " (SELECT PriorJobId FROM Job WHERE"
+ " Type IN ('B','C') AND Job.JobStatus IN ('T','W')"
+ " AND PriorJobId != 0)"
+ " ORDER by Job.StartTime";
+
/*
* const char *sql_ujobid =
* "SELECT DISTINCT Job.Job from Client,Pool,Media,Job,JobMedia "
* " JobMedia.JobId=Job.JobId AND Job.PoolId=Media.PoolId";
*/
-
-
/*
*
* This is the central piece of code that finds a job or jobs
* 0 if no jobs to migrate
* 1 if OK and jcr->previous_jr filled in
*/
-static int get_job_to_migrate(JCR *jcr)
+static int getJob_to_migrate(JCR *jcr)
{
char ed1[30], ed2[30];
POOL_MEM query(PM_MESSAGE);
JobId_t JobId;
- DBId_t MediaId = 0;
+ DBId_t DBId = 0;
int stat;
char *p;
idpkt ids, mid, jids;
struct tm tm;
char dt[MAX_TIME_LENGTH];
int count = 0;
+ int limit = 99; /* limit + 1 is max jobs to start */
ids.list = get_pool_memory(PM_MESSAGE);
ids.list[0] = 0;
jids.list[0] = 0;
jids.count = 0;
-
/*
* If MigrateJobId is set, then we migrate only that Job,
* otherwise, we go through the full selection of jobs to
*/
if (jcr->MigrateJobId != 0) {
Dmsg1(dbglevel, "At Job start previous jobid=%u\n", jcr->MigrateJobId);
- edit_uint64(jcr->MigrateJobId, ids.list);
- ids.count = 1;
+ JobId = jcr->MigrateJobId;
} else {
switch (jcr->job->selection_type) {
case MT_JOB:
break;
case MT_SQLQUERY:
if (!jcr->job->selection_pattern) {
- Jmsg(jcr, M_FATAL, 0, _("No Migration SQL selection pattern specified.\n"));
+ Jmsg(jcr, M_FATAL, 0, _("No %s SQL selection pattern specified.\n"), jcr->get_OperationName());
goto bail_out;
}
Dmsg1(dbglevel, "SQL=%s\n", jcr->job->selection_pattern);
goto bail_out;
}
break;
-
case MT_POOL_OCCUPANCY:
ctx.count = 0;
/* Find count of bytes in pool */
goto bail_out;
}
if (ctx.count == 0) {
- Jmsg(jcr, M_INFO, 0, _("No Volumes found to migrate.\n"));
+ Jmsg(jcr, M_INFO, 0, _("No Volumes found to %s.\n"), jcr->get_ActionName(0));
goto ok_out;
}
pool_bytes = ctx.value;
- Dmsg2(dbglevel, "highbytes=%d pool=%d\n", (int)jcr->rpool->MigrationHighBytes,
- (int)pool_bytes);
+ Dmsg2(dbglevel, "highbytes=%lld pool=%lld\n", jcr->rpool->MigrationHighBytes,
+ pool_bytes);
if (pool_bytes < (int64_t)jcr->rpool->MigrationHighBytes) {
- Jmsg(jcr, M_INFO, 0, _("No Volumes found to migrate.\n"));
+ Jmsg(jcr, M_INFO, 0, _("No Volumes found to %s.\n"), jcr->get_ActionName(0));
goto ok_out;
}
Dmsg0(dbglevel, "We should do Occupation migration.\n");
goto bail_out;
}
if (ids.count == 0) {
- Jmsg(jcr, M_INFO, 0, _("No Volumes found to migrate.\n"));
+ Jmsg(jcr, M_INFO, 0, _("No Volumes found to %s.\n"), jcr->get_ActionName(0));
goto ok_out;
}
Dmsg2(dbglevel, "Pool Occupancy ids=%d MediaIds=%s\n", ids.count, ids.list);
- /*
- * Now loop over MediaIds getting more JobIds to migrate until
- * we reduce the pool occupancy below the low water mark.
- */
+ if (!find_jobids_from_mediaid_list(jcr, &ids, "Volume")) {
+ goto bail_out;
+ }
+ /* ids == list of jobs */
p = ids.list;
for (int i=0; i < (int)ids.count; i++) {
- stat = get_next_dbid_from_list(&p, &MediaId);
- Dmsg2(dbglevel, "get_next_dbid stat=%d MediaId=%u\n", stat, MediaId);
+ stat = get_next_dbid_from_list(&p, &DBId);
+ Dmsg2(dbglevel, "get_next_dbid stat=%d JobId=%u\n", stat, (uint32_t)DBId);
if (stat < 0) {
- Jmsg(jcr, M_FATAL, 0, _("Invalid MediaId found.\n"));
+ Jmsg(jcr, M_FATAL, 0, _("Invalid JobId found.\n"));
goto bail_out;
} else if (stat == 0) {
break;
}
+
mid.count = 1;
- Mmsg(mid.list, "%s", edit_int64(MediaId, ed1));
- if (!find_jobids_from_mediaid_list(jcr, &mid, "Volumes")) {
- continue;
- }
- if (i != 0) {
+ Mmsg(mid.list, "%s", edit_int64(DBId, ed1));
+ if (jids.count > 0) {
pm_strcat(jids.list, ",");
}
pm_strcat(jids.list, mid.list);
jids.count += mid.count;
- /* Now get the count of bytes added */
- ctx.count = 0;
/* Find count of bytes from Jobs */
Mmsg(query, sql_job_bytes, mid.list);
Dmsg1(dbglevel, "Jobbytes query: %s\n", query.c_str());
goto bail_out;
}
pool_bytes -= ctx.value;
- Dmsg1(dbglevel, "Total migrate Job bytes=%s\n", edit_int64(ctx.value, ed1));
+ Dmsg2(dbglevel, "Total %s Job bytes=%s\n", jcr->get_ActionName(0), edit_int64_with_commas(ctx.value, ed1));
Dmsg2(dbglevel, "lowbytes=%s poolafter=%s\n",
- edit_int64(jcr->rpool->MigrationLowBytes, ed1),
- edit_int64(pool_bytes, ed2));
+ edit_int64_with_commas(jcr->rpool->MigrationLowBytes, ed1),
+ edit_int64_with_commas(pool_bytes, ed2));
if (pool_bytes <= (int64_t)jcr->rpool->MigrationLowBytes) {
Dmsg0(dbglevel, "We should be done.\n");
break;
}
-
}
/* Transfer jids to ids, where the jobs list is expected */
ids.count = jids.count;
pm_strcpy(ids.list, jids.list);
Dmsg2(dbglevel, "Pool Occupancy ids=%d JobIds=%s\n", ids.count, ids.list);
break;
-
case MT_POOL_TIME:
ttime = time(NULL) - (time_t)jcr->rpool->MigrationTime;
(void)localtime_r(&ttime, &tm);
goto bail_out;
}
if (ids.count == 0) {
- Jmsg(jcr, M_INFO, 0, _("No Volumes found to migrate.\n"));
+ Jmsg(jcr, M_INFO, 0, _("No Volumes found to %s.\n"), jcr->get_ActionName(0));
goto ok_out;
}
Dmsg2(dbglevel, "PoolTime ids=%d JobIds=%s\n", ids.count, ids.list);
break;
-
+ case MT_POOL_UNCOPIED_JOBS:
+ if (!find_jobids_of_pool_uncopied_jobs(jcr, &ids)) {
+ goto bail_out;
+ }
+ break;
default:
- Jmsg(jcr, M_FATAL, 0, _("Unknown Migration Selection Type.\n"));
+ Jmsg(jcr, M_FATAL, 0, _("Unknown %s Selection Type.\n"), jcr->get_OperationName());
goto bail_out;
}
- }
-
- /*
- * Loop over all jobids except the last one, sending
- * them to start_migration_job(), which will start a job
- * for each of them. For the last JobId, we handle it below.
- */
- p = ids.list;
- if (ids.count == 0) {
- Jmsg(jcr, M_INFO, 0, _("No JobIds found to migrate.\n"));
- goto ok_out;
- }
- Jmsg(jcr, M_INFO, 0, _("The following %u JobId%s were chosen to be migrated: %s\n"),
- ids.count, ids.count==0?"":"s", ids.list);
+ /*
+ * Loop over all jobids except the last one, sending
+ * them to start_migration_job(), which will start a job
+ * for each of them. For the last JobId, we handle it below.
+ */
+ p = ids.list;
+ if (ids.count == 0) {
+ Jmsg(jcr, M_INFO, 0, _("No JobIds found to %s.\n"), jcr->get_ActionName(0));
+ goto ok_out;
+ }
- Dmsg2(dbglevel, "Before loop count=%d ids=%s\n", ids.count, ids.list);
- for (int i=1; i < (int)ids.count; i++) {
+ Jmsg(jcr, M_INFO, 0, _("The following %u JobId%s chosen to be %s: %s\n"),
+ ids.count, (ids.count < 2) ? _(" was") : _("s were"),
+ jcr->get_ActionName(1), ids.list);
+
+ Dmsg2(dbglevel, "Before loop count=%d ids=%s\n", ids.count, ids.list);
+ /*
+ * Note: to not overload the system, limit the number
+ * of new jobs started to 100 (see limit above)
+ */
+ for (int i=1; i < (int)ids.count; i++) {
+ JobId = 0;
+ stat = get_next_jobid_from_list(&p, &JobId);
+ Dmsg3(dbglevel, "getJobid_no=%d stat=%d JobId=%u\n", i, stat, JobId);
+ if (stat < 0) {
+ Jmsg(jcr, M_FATAL, 0, _("Invalid JobId found.\n"));
+ goto bail_out;
+ } else if (stat == 0) {
+ Jmsg(jcr, M_INFO, 0, _("No JobIds found to %s.\n"), jcr->get_ActionName(0));
+ goto ok_out;
+ }
+ jcr->MigrateJobId = JobId;
+ /* Don't start any more when limit reaches zero */
+ limit--;
+ if (limit > 0) {
+ start_migration_job(jcr);
+ Dmsg0(dbglevel, "Back from start_migration_job\n");
+ }
+ }
+
+ /* Now get the last JobId and handle it in the current job */
JobId = 0;
stat = get_next_jobid_from_list(&p, &JobId);
- Dmsg3(dbglevel, "get_jobid_no=%d stat=%d JobId=%u\n", i, stat, JobId);
- jcr->MigrateJobId = JobId;
- start_migration_job(jcr);
- Dmsg0(dbglevel, "Back from start_migration_job\n");
+ Dmsg2(dbglevel, "Last get_next_jobid stat=%d JobId=%u\n", stat, (int)JobId);
if (stat < 0) {
Jmsg(jcr, M_FATAL, 0, _("Invalid JobId found.\n"));
goto bail_out;
} else if (stat == 0) {
- Jmsg(jcr, M_INFO, 0, _("No JobIds found to migrate.\n"));
+ Jmsg(jcr, M_INFO, 0, _("No JobIds found to %s.\n"), jcr->get_ActionName(0));
goto ok_out;
}
}
-
- /* Now get the last JobId and handle it in the current job */
- JobId = 0;
- stat = get_next_jobid_from_list(&p, &JobId);
- Dmsg2(dbglevel, "Last get_next_jobid stat=%d JobId=%u\n", stat, (int)JobId);
- if (stat < 0) {
- Jmsg(jcr, M_FATAL, 0, _("Invalid JobId found.\n"));
- goto bail_out;
- } else if (stat == 0) {
- Jmsg(jcr, M_INFO, 0, _("No JobIds found to migrate.\n"));
- goto ok_out;
- }
jcr->previous_jr.JobId = JobId;
Dmsg1(dbglevel, "Previous jobid=%d\n", (int)jcr->previous_jr.JobId);
if (!db_get_job_record(jcr, jcr->db, &jcr->previous_jr)) {
- Jmsg(jcr, M_FATAL, 0, _("Could not get job record for JobId %s to migrate. ERR=%s"),
+ Jmsg(jcr, M_FATAL, 0, _("Could not get job record for JobId %s to %s. ERR=%s"),
edit_int64(jcr->previous_jr.JobId, ed1),
+ jcr->get_ActionName(0),
db_strerror(jcr->db));
goto bail_out;
}
- Jmsg(jcr, M_INFO, 0, _("Migration using JobId=%s Job=%s\n"),
+
+ Jmsg(jcr, M_INFO, 0, _("%s using JobId=%s Job=%s\n"),
+ jcr->get_OperationName(),
edit_int64(jcr->previous_jr.JobId, ed1), jcr->previous_jr.Job);
- Dmsg3(dbglevel, "Migration JobId=%d using JobId=%s Job=%s\n",
+ Dmsg4(dbglevel, "%s JobId=%d using JobId=%s Job=%s\n",
+ jcr->get_OperationName(),
jcr->JobId,
edit_int64(jcr->previous_jr.JobId, ed1), jcr->previous_jr.Job);
count = 1;
UAContext *ua = new_ua_context(jcr);
char ed1[50];
ua->batch = true;
- Mmsg(ua->cmd, "run %s jobid=%s", jcr->job->hdr.name,
- edit_uint64(jcr->MigrateJobId, ed1));
- Dmsg1(dbglevel, "=============== Migration cmd=%s\n", ua->cmd);
+ Mmsg(ua->cmd, "run job=\"%s\" jobid=%s ignoreduplicatecheck=yes pool=\"%s\"",
+ jcr->job->name(), edit_uint64(jcr->MigrateJobId, ed1),
+ jcr->pool->name());
+ Dmsg2(dbglevel, "=============== %s cmd=%s\n", jcr->get_OperationName(), ua->cmd);
parse_ua_args(ua); /* parse command */
- int stat = run_cmd(ua, ua->cmd);
- if (stat == 0) {
+ JobId_t jobid = run_cmd(ua, ua->cmd);
+ if (jobid == 0) {
Jmsg(jcr, M_ERROR, 0, _("Could not start migration job.\n"));
} else {
- Jmsg(jcr, M_INFO, 0, _("Migration JobId %d started.\n"), stat);
+ Jmsg(jcr, M_INFO, 0, _("%s JobId %d started.\n"), jcr->get_OperationName(), (int)jobid);
}
free_ua_context(ua);
}
goto bail_out;
}
if (ids->count == 0) {
- Jmsg(jcr, M_INFO, 0, _("No %ss found to migrate.\n"), type);
+ Jmsg(jcr, M_INFO, 0, _("No %s found to %s.\n"), type, jcr->get_ActionName(0));
ok = true; /* Not an error */
goto bail_out;
} else if (ids->count != 1) {
- Jmsg(jcr, M_FATAL, 0, _("SQL error. Expected 1 MediaId got %d\n"),
- ids->count);
+ Jmsg(jcr, M_FATAL, 0, _("SQL error. Expected 1 MediaId got %d\n"), ids->count);
goto bail_out;
}
- Dmsg1(dbglevel, "Smallest Vol Jobids=%s\n", ids->list);
+ Dmsg2(dbglevel, "%s MediaIds=%s\n", type, ids->list);
ok = find_jobids_from_mediaid_list(jcr, ids, type);
return ok;
}
+/*
+ * This routine returns:
+ * false if an error occurred
+ * true otherwise
+ * ids.count number of jobids found (may be zero)
+ */
static bool find_jobids_from_mediaid_list(JCR *jcr, idpkt *ids, const char *type)
{
bool ok = false;
goto bail_out;
}
if (ids->count == 0) {
- Jmsg(jcr, M_INFO, 0, _("No %ss found to migrate.\n"), type);
+ Jmsg(jcr, M_INFO, 0, _("No %ss found to %s.\n"), type, jcr->get_ActionName(0));
}
ok = true;
+
+bail_out:
+ return ok;
+}
+
+/*
+ * This routine returns:
+ * false if an error occurred
+ * true otherwise
+ * ids.count number of jobids found (may be zero)
+ */
+static bool find_jobids_of_pool_uncopied_jobs(JCR *jcr, idpkt *ids)
+{
+ bool ok = false;
+ POOL_MEM query(PM_MESSAGE);
+
+ /* Only a copy job is allowed */
+ if (jcr->getJobType() != JT_COPY) {
+ Jmsg(jcr, M_FATAL, 0,
+ _("Selection Type 'pooluncopiedjobs' only applies to Copy Jobs"));
+ goto bail_out;
+ }
+
+ Dmsg1(dbglevel, "copy selection pattern=%s\n", jcr->rpool->name());
+ Mmsg(query, sql_jobids_of_pool_uncopied_jobs, jcr->rpool->name());
+ Dmsg1(dbglevel, "get uncopied jobs query=%s\n", query.c_str());
+ if (!db_sql_query(jcr->db, query.c_str(), unique_dbid_handler, (void *)ids)) {
+ Jmsg(jcr, M_FATAL, 0,
+ _("SQL to get uncopied jobs failed. ERR=%s\n"), db_strerror(jcr->db));
+ goto bail_out;
+ }
+ ok = true;
+
bail_out:
return ok;
}
item_chain = New(dlist(item, &item->link));
if (!jcr->job->selection_pattern) {
- Jmsg(jcr, M_FATAL, 0, _("No Migration %s selection pattern specified.\n"),
- type);
+ Jmsg(jcr, M_FATAL, 0, _("No %s %s selection pattern specified.\n"),
+ jcr->get_OperationName(), type);
goto bail_out;
}
Dmsg1(dbglevel, "regex-sel-pattern=%s\n", jcr->job->selection_pattern);
}
Dmsg1(dbglevel, "query1 returned %d names\n", item_chain->size());
if (item_chain->size() == 0) {
- Jmsg(jcr, M_INFO, 0, _("Query of Pool \"%s\" returned no Jobs to migrate.\n"),
- jcr->rpool->name());
+ Jmsg(jcr, M_INFO, 0, _("Query of Pool \"%s\" returned no Jobs to %s.\n"),
+ jcr->rpool->name(), jcr->get_ActionName(0));
ok = true;
goto bail_out; /* skip regex match */
} else {
regfree(&preg);
}
if (item_chain->size() == 0) {
- Jmsg(jcr, M_INFO, 0, _("Regex pattern matched no Jobs to migrate.\n"));
+ Jmsg(jcr, M_INFO, 0, _("Regex pattern matched no Jobs to %s.\n"), jcr->get_ActionName(0));
ok = true;
goto bail_out; /* skip regex match */
}
}
}
if (ids->count == 0) {
- Jmsg(jcr, M_INFO, 0, _("No %ss found to migrate.\n"), type);
+ Jmsg(jcr, M_INFO, 0, _("No %ss found to %s.\n"), type, jcr->get_ActionName(0));
}
ok = true;
return ok;
}
-
/*
* Release resources allocated during backup.
*/
Dmsg2(100, "Enter migrate_cleanup %d %c\n", TermCode, TermCode);
update_job_end(jcr, TermCode);
- memset(&mr, 0, sizeof(mr));
/*
* Check if we actually did something.
* mig_jcr is jcr of the newly migrated job.
*/
if (mig_jcr) {
+ char old_jobid[50], new_jobid[50];
+
+ edit_uint64(jcr->previous_jr.JobId, old_jobid);
+ edit_uint64(mig_jcr->jr.JobId, new_jobid);
+
mig_jcr->JobFiles = jcr->JobFiles = jcr->SDJobFiles;
mig_jcr->JobBytes = jcr->JobBytes = jcr->SDJobBytes;
mig_jcr->VolSessionId = jcr->VolSessionId;
"JobTDate=%s WHERE JobId=%s",
jcr->previous_jr.cStartTime, jcr->previous_jr.cEndTime,
edit_uint64(jcr->previous_jr.JobTDate, ec1),
- edit_uint64(mig_jcr->jr.JobId, ec2));
+ new_jobid);
db_sql_query(mig_jcr->db, query.c_str(), NULL, NULL);
- /* Now mark the previous job as migrated if it terminated normally */
- if (jcr->JobStatus == JS_Terminated) {
+ /*
+ * If we terminated a migration normally:
+ * - mark the previous job as migrated
+ * - move any Log records to the new JobId
+ * - Purge the File records from the previous job
+ */
+ if (jcr->getJobType() == JT_MIGRATE && jcr->JobStatus == JS_Terminated) {
Mmsg(query, "UPDATE Job SET Type='%c' WHERE JobId=%s",
- (char)JT_MIGRATED_JOB, edit_uint64(jcr->previous_jr.JobId, ec1));
+ (char)JT_MIGRATED_JOB, old_jobid);
+ db_sql_query(mig_jcr->db, query.c_str(), NULL, NULL);
+ UAContext *ua = new_ua_context(jcr);
+ /* Move JobLog to new JobId */
+ Mmsg(query, "UPDATE Log SET JobId=%s WHERE JobId=%s",
+ new_jobid, old_jobid);
+ db_sql_query(mig_jcr->db, query.c_str(), NULL, NULL);
+
+ if (jcr->job->PurgeMigrateJob) {
+ /* Purge old Job record */
+ purge_jobs_from_catalog(ua, old_jobid);
+ } else {
+ /* Purge all old file records, but leave Job record */
+ purge_files_from_jobs(ua, old_jobid);
+ }
+
+ free_ua_context(ua);
+ }
+
+ /*
+ * If we terminated a Copy (rather than a Migration) normally:
+ * - copy any Log records to the new JobId
+ * - set type="Job Copy" for the new job
+ */
+ if (jcr->getJobType() == JT_COPY && jcr->JobStatus == JS_Terminated) {
+ /* Copy JobLog to new JobId */
+ Mmsg(query, "INSERT INTO Log (JobId, Time, LogText ) "
+ "SELECT %s, Time, LogText FROM Log WHERE JobId=%s",
+ new_jobid, old_jobid);
+ db_sql_query(mig_jcr->db, query.c_str(), NULL, NULL);
+ Mmsg(query, "UPDATE Job SET Type='%c' WHERE JobId=%s",
+ (char)JT_JOB_COPY, new_jobid);
db_sql_query(mig_jcr->db, query.c_str(), NULL, NULL);
}
if (!db_get_job_record(jcr, jcr->db, &jcr->jr)) {
Jmsg(jcr, M_WARNING, 0, _("Error getting Job record for Job report: ERR=%s"),
db_strerror(jcr->db));
- set_jcr_job_status(jcr, JS_ErrorTerminated);
- }
-
- bstrncpy(mr.VolumeName, jcr->VolumeName, sizeof(mr.VolumeName));
- if (!db_get_media_record(jcr, jcr->db, &mr)) {
- Jmsg(jcr, M_WARNING, 0, _("Error getting Media record for Volume \"%s\": ERR=%s"),
- mr.VolumeName, db_strerror(jcr->db));
- set_jcr_job_status(jcr, JS_ErrorTerminated);
+ jcr->setJobStatus(JS_ErrorTerminated);
}
update_bootstrap_file(mig_jcr);
}
mig_jcr->VolumeName[0] = 0; /* none */
}
+
+ if (mig_jcr->VolumeName[0]) {
+ /* Find last volume name. Multiple vols are separated by | */
+ char *p = strrchr(mig_jcr->VolumeName, '|');
+ if (p) {
+ p++; /* skip | */
+ } else {
+ p = mig_jcr->VolumeName; /* no |, take full name */
+ }
+ bstrncpy(mr.VolumeName, p, sizeof(mr.VolumeName));
+ if (!db_get_media_record(jcr, jcr->db, &mr)) {
+ Jmsg(jcr, M_WARNING, 0, _("Error getting Media record for Volume \"%s\": ERR=%s"),
+ mr.VolumeName, db_strerror(jcr->db));
+ }
+ }
+
switch (jcr->JobStatus) {
case JS_Terminated:
- if (jcr->Errors || jcr->SDErrors) {
+ if (jcr->JobErrors || jcr->SDErrors) {
term_msg = _("%s OK -- with warnings");
} else {
term_msg = _("%s OK");
term_msg = _("Inappropriate %s term code");
break;
}
- } else {
- if (jcr->previous_jr.JobId != 0) {
- /* Mark previous job as migrated */
- Mmsg(query, "UPDATE Job SET Type='%c' WHERE JobId=%s",
- (char)JT_MIGRATED_JOB, edit_uint64(jcr->previous_jr.JobId, ec1));
- Dmsg1(000, "Mark: %s\n", query.c_str());
- db_sql_query(jcr->db, query.c_str(), NULL, NULL);
- }
- term_msg = _("%s -- no files to migrate");
- }
-
- bsnprintf(term_code, sizeof(term_code), term_msg, "Migration");
+ } else {
+ if (jcr->getJobType() == JT_MIGRATE && jcr->previous_jr.JobId != 0) {
+ /* Mark previous job as migrated */
+ Mmsg(query, "UPDATE Job SET Type='%c' WHERE JobId=%s",
+ (char)JT_MIGRATED_JOB, edit_uint64(jcr->previous_jr.JobId, ec1));
+ db_sql_query(jcr->db, query.c_str(), NULL, NULL);
+ }
+ term_msg = _("%s -- no files to %s");
+ }
+
+ bsnprintf(term_code, sizeof(term_code), term_msg, jcr->get_OperationName(), jcr->get_ActionName(0));
bstrftimes(sdt, sizeof(sdt), jcr->jr.StartTime);
bstrftimes(edt, sizeof(edt), jcr->jr.EndTime);
RunTime = jcr->jr.EndTime - jcr->jr.StartTime;
kbps = (double)jcr->SDJobBytes / (1000 * RunTime);
}
-
jobstatus_to_ascii(jcr->SDJobStatus, sd_term_msg, sizeof(sd_term_msg));
- Jmsg(jcr, msg_type, 0, _("Bacula %s %s (%s): %s\n"
+ Jmsg(jcr, msg_type, 0, _("%s %s %s (%s):\n"
" Build OS: %s %s %s\n"
" Prev Backup JobId: %s\n"
+" Prev Backup Job: %s\n"
" New Backup JobId: %s\n"
-" Migration JobId: %s\n"
-" Migration Job: %s\n"
+" Current JobId: %s\n"
+" Current Job: %s\n"
" Backup Level: %s%s\n"
" Client: %s\n"
" FileSet: \"%s\" %s\n"
" Read Storage: \"%s\" (From %s)\n"
" Write Pool: \"%s\" (From %s)\n"
" Write Storage: \"%s\" (From %s)\n"
+" Catalog: \"%s\" (From %s)\n"
" Start time: %s\n"
" End time: %s\n"
" Elapsed time: %s\n"
" SD Errors: %d\n"
" SD termination status: %s\n"
" Termination: %s\n\n"),
- my_name, VERSION, LSMDATE, edt,
+ BACULA, my_name, VERSION, LSMDATE,
HOST_OS, DISTNAME, DISTVER,
edit_uint64(jcr->previous_jr.JobId, ec6),
+ jcr->previous_jr.Job,
mig_jcr ? edit_uint64(mig_jcr->jr.JobId, ec7) : "0",
edit_uint64(jcr->jr.JobId, ec8),
jcr->jr.Job,
- level_to_str(jcr->JobLevel), jcr->since,
+ level_to_str(jcr->getJobLevel()), jcr->since,
jcr->client->name(),
jcr->fileset->name(), jcr->FSCreateTime,
jcr->rpool->name(), jcr->rpool_source,
jcr->pool->name(), jcr->pool_source,
jcr->wstore?jcr->wstore->name():"*None*",
NPRT(jcr->wstore_source),
+ jcr->catalog->name(), jcr->catalog_source,
sdt,
edt,
edit_utime(RunTime, elapsed, sizeof(elapsed)),
*/
static int get_next_dbid_from_list(char **p, DBId_t *DBId)
{
- char id[30];
+ const int maxlen = 30;
+ char id[maxlen+1];
char *q = *p;
id[0] = 0;
- for (int i=0; i<(int)sizeof(id); i++) {
+ for (int i=0; i<maxlen; i++) {
if (*q == 0) {
break;
} else if (*q == ',') {