#include "bacula.h"
#include "dird.h"
#include "ua.h"
+#ifndef HAVE_REGEX_H
+#include "lib/bregex.h"
+#else
#include <regex.h>
+#endif
static char OKbootstrap[] = "3000 OK bootstrap\n";
static bool get_job_to_migrate(JCR *jcr);
{
POOL_DBR pr;
+   /* If we find a job to migrate, its JobId is stored in previous_jr.JobId */
if (!get_job_to_migrate(jcr)) {
return false;
}
POOL *pool;
char ed1[100];
BSOCK *sd;
- JOB *job, *tjob;
- JCR *tjcr;
+ JOB *job, *prev_job;
+ JCR *prev_jcr;
if (jcr->previous_jr.JobId == 0) {
- jcr->JobStatus = JS_Terminated;
+ set_jcr_job_status(jcr, JS_Terminated);
migration_cleanup(jcr, jcr->JobStatus);
return true; /* no work */
}
- Dmsg4(100, "Target: Name=%s JobId=%d Type=%c Level=%c\n",
+
+ Dmsg4(000, "Previous:: Name=%s JobId=%d Type=%c Level=%c\n",
jcr->previous_jr.Name, jcr->previous_jr.JobId,
jcr->previous_jr.JobType, jcr->previous_jr.JobLevel);
- Dmsg4(100, "Current: Name=%s JobId=%d Type=%c Level=%c\n",
+ Dmsg4(000, "Current: Name=%s JobId=%d Type=%c Level=%c\n",
jcr->jr.Name, jcr->jr.JobId,
jcr->jr.JobType, jcr->jr.JobLevel);
LockRes();
job = (JOB *)GetResWithName(R_JOB, jcr->jr.Name);
- tjob = (JOB *)GetResWithName(R_JOB, jcr->previous_jr.Name);
+ prev_job = (JOB *)GetResWithName(R_JOB, jcr->previous_jr.Name);
UnlockRes();
- if (!job || !tjob) {
+ if (!job || !prev_job) {
return false;
}
/*
- * Target jcr is the new Job that corresponds to the original
- * target job. It "runs" at the same time as the current
+ * prev_jcr is the new Job that corresponds to the original
+ * job. It "runs" at the same time as the current
* migration job and becomes a new backup job that replaces
* the original backup job. Most operations on the current
- * migration jcr are also done on the target jcr.
+ * migration jcr are also done on the prev_jcr.
*/
- tjcr = jcr->previous_jcr = new_jcr(sizeof(JCR), dird_free_jcr);
- memcpy(&tjcr->previous_jr, &jcr->previous_jr, sizeof(tjcr->previous_jr));
+ prev_jcr = jcr->previous_jcr = new_jcr(sizeof(JCR), dird_free_jcr);
+ memcpy(&prev_jcr->previous_jr, &jcr->previous_jr, sizeof(prev_jcr->previous_jr));
- /* Turn the tjcr into a "real" job */
- set_jcr_defaults(tjcr, tjob);
- if (!setup_job(tjcr)) {
+ /* Turn the prev_jcr into a "real" job */
+ set_jcr_defaults(prev_jcr, prev_job);
+ if (!setup_job(prev_jcr)) {
return false;
}
/* Set output PoolId and FileSetId. */
- tjcr->jr.PoolId = jcr->jr.PoolId;
- tjcr->jr.FileSetId = jcr->jr.FileSetId;
+ prev_jcr->jr.PoolId = jcr->jr.PoolId;
+ prev_jcr->jr.FileSetId = jcr->jr.FileSetId;
/*
* Get the PoolId used with the original job. Then
* find the pool name from the database record.
*/
memset(&pr, 0, sizeof(pr));
- pr.PoolId = tjcr->previous_jr.PoolId;
+ pr.PoolId = prev_jcr->previous_jr.PoolId;
if (!db_get_pool_record(jcr, jcr->db, &pr)) {
Jmsg(jcr, M_FATAL, 0, _("Pool for JobId %s not in database. ERR=%s\n"),
edit_int64(pr.PoolId, ed1), db_strerror(jcr->db));
/* ***FIXME*** */
/* If pool storage specified, use it for restore */
- copy_storage(tjcr, pool->storage);
+ copy_storage(prev_jcr, pool->storage);
/* If the original backup pool has a NextPool, make sure a
* record exists in the database.
* put the "NextPool" resource pointer in our jcr so that we
* can pull the Storage reference from it.
*/
- tjcr->pool = jcr->pool = pool->NextPool;
- tjcr->jr.PoolId = jcr->jr.PoolId = pr.PoolId;
+ prev_jcr->pool = jcr->pool = pool->NextPool;
+ prev_jcr->jr.PoolId = jcr->jr.PoolId = pr.PoolId;
}
/* If pool storage specified, use it instead of job storage for backup */
edit_uint64(jcr->JobId, ed1), jcr->Job);
set_jcr_job_status(jcr, JS_Running);
- set_jcr_job_status(tjcr, JS_Running);
- Dmsg2(100, "JobId=%d JobLevel=%c\n", jcr->jr.JobId, jcr->jr.JobLevel);
+ set_jcr_job_status(prev_jcr, JS_Running);
+ Dmsg2(000, "JobId=%d JobLevel=%c\n", jcr->jr.JobId, jcr->jr.JobLevel);
if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) {
Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db));
return false;
}
- if (!db_update_job_start_record(tjcr, tjcr->db, &tjcr->jr)) {
- Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(tjcr->db));
+ if (!db_update_job_start_record(prev_jcr, prev_jcr->db, &prev_jcr->jr)) {
+ Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(prev_jcr->db));
return false;
}
*/
Dmsg0(110, "Open connection with storage daemon\n");
set_jcr_job_status(jcr, JS_WaitSD);
- set_jcr_job_status(tjcr, JS_WaitSD);
+ set_jcr_job_status(prev_jcr, JS_WaitSD);
/*
* Start conversation with Storage daemon
*/
* Now start a job with the Storage daemon
*/
Dmsg2(000, "Read store=%s, write store=%s\n",
- ((STORE *)tjcr->storage->first())->hdr.name,
+ ((STORE *)prev_jcr->storage->first())->hdr.name,
((STORE *)jcr->storage->first())->hdr.name);
- if (!start_storage_daemon_job(jcr, tjcr->storage, jcr->storage)) {
+ if (!start_storage_daemon_job(jcr, prev_jcr->storage, jcr->storage)) {
return false;
}
Dmsg0(150, "Storage daemon connection OK\n");
}
set_jcr_job_status(jcr, JS_Running);
- set_jcr_job_status(tjcr, JS_Running);
+ set_jcr_job_status(prev_jcr, JS_Running);
/* Pickup Job termination data */
/* Note, the SD stores in jcr->JobFiles/ReadBytes/JobBytes/Errors */
wait_for_storage_daemon_termination(jcr);
- jcr->JobStatus = jcr->SDJobStatus;
+ set_jcr_job_status(jcr, jcr->SDJobStatus);
if (jcr->JobStatus == JS_Terminated) {
migration_cleanup(jcr, jcr->JobStatus);
return true;
memset(new_item, 0, sizeof(uitem));
new_item->item = bstrdup(row[0]);
-
+ Dmsg1(000, "Item=%s\n", row[0]);
item = (uitem *)list->binary_insert((void *)new_item, item_compare);
if (item != new_item) { /* already in list */
free(new_item->item);
" Job.PoolId=Media.PoolId";
const char *sql_job =
- "SELECT DISTINCT Job.Name from Pool,Media,Job,JobMedia "
- " WHERE Media.PoolId=Pool.PoolId AND Pool.Name='%s' AND"
- " JobMedia.JobId=Job.JobId AND Job.PoolId=Media.PoolId";
+ "SELECT DISTINCT Job.Name from Job,Pool"
+ " WHERE Pool.Name='%s' AND Job.PoolId=Pool.PoolId";
+
+const char *sql_jobids_from_job =
+ "SELECT DISTINCT Job.JobId FROM Job,Pool"
+ " WHERE Job.Name='%s' AND Pool.Name='%s' AND Job.PoolId=Pool.PoolId"
+ " ORDER by Job.StartTime";
+
const char *sql_ujobid =
"SELECT DISTINCT Job.Job from Client,Pool,Media,Job,JobMedia "
int stat, rc;
char *p;
dlist *item_chain;
- uitem *item;
+ uitem *item = NULL;
+ uitem *last_item = NULL;
char prbuf[500];
regex_t preg;
+ JobIds[0] = 0;
if (jcr->MigrateJobId != 0) {
jcr->previous_jr.JobId = jcr->MigrateJobId;
+ Dmsg1(000, "previous jobid=%u\n", jcr->MigrateJobId);
} else {
switch (jcr->job->selection_type) {
+ case MT_JOB:
+ if (!jcr->job->selection_pattern) {
+ Jmsg(jcr, M_FATAL, 0, _("No Migration Job selection pattern specified.\n"));
+ goto bail_out;
+ }
+ Dmsg1(000, "Job regex=%s\n", jcr->job->selection_pattern);
+      /* Compile regex expression */
+ rc = regcomp(&preg, jcr->job->selection_pattern, REG_EXTENDED);
+ if (rc != 0) {
+ regerror(rc, &preg, prbuf, sizeof(prbuf));
+ Jmsg(jcr, M_FATAL, 0, _("Could not compile regex pattern \"%s\" ERR=%s\n"),
+ jcr->job->selection_pattern, prbuf);
+ goto bail_out;
+ }
+ item_chain = New(dlist(item, &item->link));
+ /* Basic query for Job names */
+ Mmsg(query, sql_job, jcr->pool->hdr.name);
+ Dmsg1(000, "query=%s\n", query.c_str());
+ if (!db_sql_query(jcr->db, query.c_str(), unique_name_handler,
+ (void *)item_chain)) {
+ Jmsg(jcr, M_FATAL, 0,
+ _("SQL to get Job failed. ERR=%s\n"), db_strerror(jcr->db));
+ goto bail_out;
+ }
+ /* Now apply the regex to the job names and remove any item not matched */
+ foreach_dlist(item, item_chain) {
+ const int nmatch = 30;
+ regmatch_t pmatch[nmatch];
+ if (last_item) {
+ Dmsg1(000, "Remove item %s\n", last_item->item);
+ free(last_item->item);
+ item_chain->remove(last_item);
+ }
+ Dmsg1(000, "Jobitem=%s\n", item->item);
+ rc = regexec(&preg, item->item, nmatch, pmatch, 0);
+ if (rc == 0) {
+ last_item = NULL; /* keep this one */
+ } else {
+ last_item = item;
+ }
+ }
+ if (last_item) {
+ free(last_item->item);
+ Dmsg1(000, "Remove item %s\n", last_item->item);
+ item_chain->remove(last_item);
+ }
+ regfree(&preg);
+ /*
+ * At this point, we have a list of items in item_chain
+ * that have been matched by the regex, so now we need
+ * to look up their jobids.
+ */
+ JobIds[0] = 0;
+ foreach_dlist(item, item_chain) {
+ Dmsg1(000, "Got Job: %s\n", item->item);
+ Mmsg(query, sql_jobids_from_job, item->item, jcr->pool->hdr.name);
+ if (!db_sql_query(jcr->db, query.c_str(), jobid_handler, (void *)JobIds)) {
+ Jmsg(jcr, M_FATAL, 0,
+ _("SQL failed. ERR=%s\n"), db_strerror(jcr->db));
+ goto bail_out;
+ }
+ }
+ if (JobIds[0] == 0) {
+ Jmsg(jcr, M_INFO, 0, _("No jobs found to migrate.\n"));
+ goto ok_out;
+ }
+ Dmsg1(000, "Job Jobids=%s\n", JobIds);
+ delete item_chain;
+ break;
case MT_SMALLEST_VOL:
Mmsg(query, sql_smallest_vol, jcr->pool->hdr.name);
- JobIds = get_pool_memory(PM_MESSAGE);
JobIds[0] = 0;
if (!db_sql_query(jcr->db, query.c_str(), jobid_handler, (void *)JobIds)) {
Jmsg(jcr, M_FATAL, 0,
_("SQL to get Volume failed. ERR=%s\n"), db_strerror(jcr->db));
goto bail_out;
}
- Dmsg1(000, "Jobids=%s\n", JobIds);
+ Dmsg1(000, "Smallest Vol Jobids=%s\n", JobIds);
break;
case MT_OLDEST_VOL:
Mmsg(query, sql_oldest_vol, jcr->pool->hdr.name);
- JobIds = get_pool_memory(PM_MESSAGE);
JobIds[0] = 0;
if (!db_sql_query(jcr->db, query.c_str(), jobid_handler, (void *)JobIds)) {
Jmsg(jcr, M_FATAL, 0,
_("SQL to get Volume failed. ERR=%s\n"), db_strerror(jcr->db));
goto bail_out;
}
- Dmsg1(000, "Jobids=%s\n", JobIds);
+ Dmsg1(000, "Oldest Vol Jobids=%s\n", JobIds);
break;
case MT_POOL_OCCUPANCY:
Mmsg(query, sql_pool_bytes, jcr->pool->hdr.name);
- JobIds = get_pool_memory(PM_MESSAGE);
JobIds[0] = 0;
if (!db_sql_query(jcr->db, query.c_str(), jobid_handler, (void *)JobIds)) {
Jmsg(jcr, M_FATAL, 0,
Jmsg(jcr, M_INFO, 0, _("No jobs found to migrate.\n"));
goto ok_out;
}
+ Dmsg1(000, "Pool Occupancy Jobids=%s\n", JobIds);
break;
case MT_POOL_TIME:
+ Dmsg0(000, "Pool time not implemented\n");
break;
case MT_CLIENT:
if (!jcr->job->selection_pattern) {
- Jmsg(jcr, M_FATAL, 0, _("No selection pattern specified.\n"));
+ Jmsg(jcr, M_FATAL, 0, _("No Migration Client selection pattern specified.\n"));
goto bail_out;
}
+ Dmsg1(000, "Client regex=%s\n", jcr->job->selection_pattern);
rc = regcomp(&preg, jcr->job->selection_pattern, REG_EXTENDED);
if (rc != 0) {
regerror(rc, &preg, prbuf, sizeof(prbuf));
break;
case MT_VOLUME:
if (!jcr->job->selection_pattern) {
- Jmsg(jcr, M_FATAL, 0, _("No selection pattern specified.\n"));
+ Jmsg(jcr, M_FATAL, 0, _("No Migration Volume selection pattern specified.\n"));
goto bail_out;
}
+ Dmsg1(000, "Volume regex=%s\n", jcr->job->selection_pattern);
rc = regcomp(&preg, jcr->job->selection_pattern, REG_EXTENDED);
if (rc != 0) {
regerror(rc, &preg, prbuf, sizeof(prbuf));
regfree(&preg);
delete item_chain;
break;
- case MT_JOB:
- if (!jcr->job->selection_pattern) {
- Jmsg(jcr, M_FATAL, 0, _("No selection pattern specified.\n"));
- goto bail_out;
- }
- rc = regcomp(&preg, jcr->job->selection_pattern, REG_EXTENDED);
- if (rc != 0) {
- regerror(rc, &preg, prbuf, sizeof(prbuf));
- Jmsg(jcr, M_FATAL, 0, _("Could not compile regex pattern \"%s\" ERR=%s\n"),
- jcr->job->selection_pattern, prbuf);
- }
- item_chain = New(dlist(item, &item->link));
- Mmsg(query, sql_job, jcr->pool->hdr.name);
- Dmsg1(100, "query=%s\n", query.c_str());
- if (!db_sql_query(jcr->db, query.c_str(), unique_name_handler,
- (void *)item_chain)) {
- Jmsg(jcr, M_FATAL, 0,
- _("SQL to get Job failed. ERR=%s\n"), db_strerror(jcr->db));
- goto bail_out;
- }
- /* Now apply the regex and create the jobs */
- foreach_dlist(item, item_chain) {
- const int nmatch = 30;
- regmatch_t pmatch[nmatch];
- rc = regexec(&preg, item->item, nmatch, pmatch, 0);
- if (rc == 0) {
- Dmsg1(000, "Do Job=%s\n", item->item);
- }
- free(item->item);
- }
- regfree(&preg);
- delete item_chain;
- break;
case MT_SQLQUERY:
JobIds[0] = 0;
if (!jcr->job->selection_pattern) {
- Jmsg(jcr, M_FATAL, 0, _("No selection pattern specified.\n"));
+ Jmsg(jcr, M_FATAL, 0, _("No Migration SQL selection pattern specified.\n"));
goto bail_out;
}
+ Dmsg1(000, "SQL=%s\n", jcr->job->selection_pattern);
if (!db_sql_query(jcr->db, query.c_str(), jobid_handler, (void *)JobIds)) {
Jmsg(jcr, M_FATAL, 0,
_("SQL to get Volume failed. ERR=%s\n"), db_strerror(jcr->db));
}
p = JobIds;
+ JobId = 0;
stat = get_next_jobid_from_list(&p, &JobId);
Dmsg2(000, "get_next_jobid stat=%d JobId=%u\n", stat, JobId);
if (stat < 0) {
MEDIA_DBR mr;
double kbps;
utime_t RunTime;
- JCR *tjcr = jcr->previous_jcr;
+ JCR *prev_jcr = jcr->previous_jcr;
POOL_MEM query(PM_MESSAGE);
- /* Ensure target is defined to avoid a lot of testing */
- if (!tjcr) {
- tjcr = jcr;
- }
- tjcr->JobFiles = jcr->JobFiles = jcr->SDJobFiles;
- tjcr->JobBytes = jcr->JobBytes = jcr->SDJobBytes;
- tjcr->VolSessionId = jcr->VolSessionId;
- tjcr->VolSessionTime = jcr->VolSessionTime;
-
Dmsg2(100, "Enter migrate_cleanup %d %c\n", TermCode, TermCode);
dequeue_messages(jcr); /* display any queued messages */
memset(&mr, 0, sizeof(mr));
set_jcr_job_status(jcr, TermCode);
- set_jcr_job_status(tjcr, TermCode);
+ update_job_end_record(jcr); /* update database */
+ /* Check if we actually did something */
+ if (prev_jcr) {
+ prev_jcr->JobFiles = jcr->JobFiles = jcr->SDJobFiles;
+ prev_jcr->JobBytes = jcr->JobBytes = jcr->SDJobBytes;
+ prev_jcr->VolSessionId = jcr->VolSessionId;
+ prev_jcr->VolSessionTime = jcr->VolSessionTime;
- update_job_end_record(jcr); /* update database */
- update_job_end_record(tjcr);
-
- Mmsg(query, "UPDATE Job SET StartTime='%s',EndTime='%s',"
- "JobTDate=%s WHERE JobId=%s",
- jcr->previous_jr.cStartTime, jcr->previous_jr.cEndTime,
- edit_uint64(jcr->previous_jr.JobTDate, ec1),
- edit_uint64(tjcr->jr.JobId, ec2));
- db_sql_query(tjcr->db, query.c_str(), NULL, NULL);
-
- if (!db_get_job_record(jcr, jcr->db, &jcr->jr)) {
- Jmsg(jcr, M_WARNING, 0, _("Error getting job record for stats: %s"),
- db_strerror(jcr->db));
- set_jcr_job_status(jcr, JS_ErrorTerminated);
- }
+ set_jcr_job_status(prev_jcr, TermCode);
- bstrncpy(mr.VolumeName, jcr->VolumeName, sizeof(mr.VolumeName));
- if (!db_get_media_record(jcr, jcr->db, &mr)) {
- Jmsg(jcr, M_WARNING, 0, _("Error getting Media record for Volume \"%s\": ERR=%s"),
- mr.VolumeName, db_strerror(jcr->db));
- set_jcr_job_status(jcr, JS_ErrorTerminated);
- }
+ update_job_end_record(prev_jcr);
+
+ Mmsg(query, "UPDATE Job SET StartTime='%s',EndTime='%s',"
+ "JobTDate=%s WHERE JobId=%s",
+ jcr->previous_jr.cStartTime, jcr->previous_jr.cEndTime,
+ edit_uint64(jcr->previous_jr.JobTDate, ec1),
+ edit_uint64(prev_jcr->jr.JobId, ec2));
+ db_sql_query(prev_jcr->db, query.c_str(), NULL, NULL);
- update_bootstrap_file(tjcr);
+ if (!db_get_job_record(jcr, jcr->db, &jcr->jr)) {
+ Jmsg(jcr, M_WARNING, 0, _("Error getting job record for stats: %s"),
+ db_strerror(jcr->db));
+ set_jcr_job_status(jcr, JS_ErrorTerminated);
+ }
+
+ bstrncpy(mr.VolumeName, jcr->VolumeName, sizeof(mr.VolumeName));
+ if (!db_get_media_record(jcr, jcr->db, &mr)) {
+ Jmsg(jcr, M_WARNING, 0, _("Error getting Media record for Volume \"%s\": ERR=%s"),
+ mr.VolumeName, db_strerror(jcr->db));
+ set_jcr_job_status(jcr, JS_ErrorTerminated);
+ }
+
+ update_bootstrap_file(prev_jcr);
+
+ if (!db_get_job_volume_names(prev_jcr, prev_jcr->db, prev_jcr->jr.JobId, &prev_jcr->VolumeName)) {
+ /*
+ * Note, if the job has erred, most likely it did not write any
+ * tape, so suppress this "error" message since in that case
+ * it is normal. Or look at it the other way, only for a
+ * normal exit should we complain about this error.
+ */
+ if (jcr->JobStatus == JS_Terminated && jcr->jr.JobBytes) {
+ Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(prev_jcr->db));
+ }
+ prev_jcr->VolumeName[0] = 0; /* none */
+ }
+ }
msg_type = M_INFO; /* by default INFO message */
switch (jcr->JobStatus) {
if (RunTime <= 0) {
kbps = 0;
} else {
- kbps = (double)jcr->jr.JobBytes / (1000 * RunTime);
- }
- if (!db_get_job_volume_names(tjcr, tjcr->db, tjcr->jr.JobId, &tjcr->VolumeName)) {
- /*
- * Note, if the job has erred, most likely it did not write any
- * tape, so suppress this "error" message since in that case
- * it is normal. Or look at it the other way, only for a
- * normal exit should we complain about this error.
- */
- if (jcr->JobStatus == JS_Terminated && jcr->jr.JobBytes) {
- Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(tjcr->db));
- }
- tjcr->VolumeName[0] = 0; /* none */
+ kbps = (double)jcr->SDJobBytes / (1000 * RunTime);
}
+
jobstatus_to_ascii(jcr->SDJobStatus, sd_term_msg, sizeof(sd_term_msg));
// bmicrosleep(15, 0); /* for debugging SIGHUP */
VERSION,
LSMDATE,
edt,
- jcr->previous_jr.JobId,
- tjcr->jr.JobId,
+ prev_jcr ? jcr->previous_jr.JobId : 0,
+ prev_jcr ? prev_jcr->jr.JobId : 0,
jcr->jr.JobId,
jcr->jr.Job,
level_to_str(jcr->JobLevel), jcr->since,
jcr->JobPriority,
edit_uint64_with_commas(jcr->SDJobFiles, ec1),
edit_uint64_with_commas(jcr->SDJobBytes, ec2),
- edit_uint64_with_suffix(jcr->jr.JobBytes, ec3),
+ edit_uint64_with_suffix(jcr->SDJobBytes, ec3),
(float)kbps,
- tjcr->VolumeName,
+ prev_jcr ? prev_jcr->VolumeName : "",
jcr->VolSessionId,
jcr->VolSessionTime,
edit_uint64_with_commas(mr.VolBytes, ec4),
sd_term_msg,
term_code);
- Dmsg1(100, "Leave migrate_cleanup() previous_jcr=0x%x\n", jcr->previous_jcr);
+ Dmsg1(000, "migrate_cleanup() previous_jcr=0x%x\n", jcr->previous_jcr);
if (jcr->previous_jcr) {
- free_jcr(jcr->previous_jcr);
+// free_jcr(jcr->previous_jcr);
+// jcr->previous_jcr = NULL;
}
+ Dmsg0(000, "Leave migrate_cleanup()\n");
}