bool CancelQueuedDuplicates; /* Cancel queued jobs */
bool CancelRunningDuplicates; /* Cancel Running jobs */
bool PurgeMigrateJob; /* Purges source job on completion */
+ bool IgnoreDuplicateJobChecking; /* Ignore Duplicate Job Checking */
alist *base; /* Base jobs */
{
JOB *job = jcr->job;
JCR *djcr; /* possible duplicate job */
+ bool cancel_dup = false;
+ bool cancel_me = false;
- if (jcr->no_check_duplicates || job->AllowDuplicateJobs) {
+ /*
+ * See if AllowDuplicateJobs is set or
+ * if duplicate checking is disabled for this job.
+ */
+ if (job->AllowDuplicateJobs || job->IgnoreDuplicateJobChecking) {
return true;
}
+
Dmsg0(800, "Enter allow_duplicate_job\n");
+
/*
* After this point, we do not want to allow any duplicate
* job to run.
if (jcr == djcr || djcr->JobId == 0) {
continue; /* do not cancel this job or consoles */
}
+
+
+      /*
+       * If this Job has the IgnoreDuplicateJobChecking flag set, skip it
+       * when checking against other jobs.
+       */
+ if (djcr->job && djcr->job->IgnoreDuplicateJobChecking) {
+ continue;
+ }
+
if (strcmp(job->name(), djcr->job->name()) == 0) {
- bool cancel_dup = false;
- bool cancel_me = false;
if (job->DuplicateJobProximity > 0) {
utime_t now = (utime_t)time(NULL);
if ((now - djcr->start_time) > job->DuplicateJobProximity) {
djcr->JobId);
break; /* get out of foreach_jcr */
}
- }
- /* Cancel one of the two jobs (me or dup) */
- /* If CancelQueuedDuplicates is set do so only if job is queued */
+ }
+
+ /*
+ * Cancel one of the two jobs (me or dup)
+ * If CancelQueuedDuplicates is set do so only if job is queued.
+ */
if (job->CancelQueuedDuplicates) {
switch (djcr->JobStatus) {
case JS_Created:
break;
}
}
+
if (cancel_dup || job->CancelRunningDuplicates) {
- /* Zap the duplicated job djcr */
+ /*
+ * Zap the duplicated job djcr
+ */
UAContext *ua = new_ua_context(jcr);
Jmsg(jcr, M_INFO, 0, _("Cancelling duplicate JobId=%d.\n"), djcr->JobId);
cancel_job(ua, djcr);
free_ua_context(ua);
Dmsg2(800, "Cancel dup %p JobId=%d\n", djcr, djcr->JobId);
} else {
- /* Zap current job */
+ /*
+ * Zap current job
+ */
Jmsg(jcr, M_FATAL, 0, _("JobId %d already running. Duplicate job not allowed.\n"),
djcr->JobId);
Dmsg2(800, "Cancel me %p JobId=%d\n", jcr, jcr->JobId);
/* Don't let WatchDog checks Max*Time value on this Job */
mig_jcr->no_maxtime = true;
- /* Don't check for duplicates on migration and copy jobs */
- mig_jcr->no_check_duplicates = true;
+ /*
+ * Don't check for duplicates on migration and copy jobs
+ */
+ mig_jcr->job->IgnoreDuplicateJobChecking = true;
Dmsg4(dbglevel, "mig_jcr: Name=%s JobId=%d Type=%c Level=%c\n",
mig_jcr->jr.Name, (int)mig_jcr->jr.JobId,
UAContext *ua = new_ua_context(jcr);
char ed1[50];
ua->batch = true;
- Mmsg(ua->cmd, "run job=\"%s\" jobid=%s allowduplicates=yes", jcr->job->name(),
+ Mmsg(ua->cmd, "run job=\"%s\" jobid=%s ignoreduplicatecheck=yes", jcr->job->name(),
edit_uint64(jcr->MigrateJobId, ed1));
Dmsg2(dbglevel, "=============== %s cmd=%s\n", jcr->get_OperationName(), ua->cmd);
parse_ua_args(ua); /* parse command */
bool mod;
int spool_data;
bool spool_data_set;
- int allow_duplicates;
- bool allow_duplicates_set;
+ int ignoreduplicatecheck;
+ bool ignoreduplicatecheck_set;
/* Methods */
run_ctx() { memset(this, 0, sizeof(run_ctx));
"pluginoptions", /* 25 */
"spooldata", /* 26 */
"comment", /* 27 */
- "allowduplicates", /* 28 */
+ "ignoreduplicatecheck", /* 28 */
NULL
};
rc.verify_job_name = NULL;
rc.previous_job_name = NULL;
rc.spool_data_set = false;
- rc.allow_duplicates_set = false;
+   rc.ignoreduplicatecheck_set = false;
rc.comment = NULL;
for (i=1; i<ua->argc; i++) {
rc.comment = ua->argv[i];
kw_ok = true;
break;
- case 28: /* allowduplicates */
- if (rc.allow_duplicates_set) {
- ua->send_msg(_("AllowDuplicates flag specified twice.\n"));
+ case 28: /* ignoreduplicatecheck */
+ if (rc.ignoreduplicatecheck_set) {
+ ua->send_msg(_("IgnoreDuplicateCheck flag specified twice.\n"));
return false;
}
- if (is_yesno(ua->argv[i], &rc.allow_duplicates)) {
- rc.allow_duplicates_set = true;
+ if (is_yesno(ua->argv[i], &rc.ignoreduplicatecheck)) {
+ rc.ignoreduplicatecheck_set = true;
kw_ok = true;
} else {
- ua->send_msg(_("Invalid allowduplicates flag.\n"));
+ ua->send_msg(_("Invalid ignoreduplicatecheck flag.\n"));
}
break;
default:
}
Dmsg1(900, "Spooling data: %s\n", (rc.job->spool_data ? "Yes" : "No"));
- if (rc.allow_duplicates_set) {
- rc.job->AllowDuplicateJobs = rc.allow_duplicates;
+   if (rc.ignoreduplicatecheck_set) {
+ rc.job->IgnoreDuplicateJobChecking = rc.ignoreduplicatecheck;
}
- Dmsg1(900, "Allow Duplicate Jobs: %s\n", (rc.job->AllowDuplicateJobs ? "Yes" : "No"));
+ Dmsg1(900, "Ignore Duplicate Job Check: %s\n", (rc.job->IgnoreDuplicateJobChecking ? "Yes" : "No"));
if (rc.store_name) {
rc.store->store = GetStoreResWithName(rc.store_name);
bool Encrypt; /* Encryption used by FD */
bool stats_enabled; /* Keep all job records in a table for long term statistics */
bool no_maxtime; /* Don't check Max*Time for this JCR */
- bool no_check_duplicates; /* Don't check duplicates for this JCR */
bool keep_sd_auth_key; /* Clear or not the SD auth key after connection*/
bool use_accurate_chksum; /* Use or not checksum option in accurate code */
bool run_pool_override;