pool = jcr->pool;
if (jcr->job->PruneJobs || jcr->client->AutoPrune) {
- prune_jobs(ua, client, pool, jcr->get_JobType());
+ prune_jobs(ua, client, pool, jcr->getJobType());
pruned = true;
} else {
pruned = false;
bool do_backup_init(JCR *jcr)
{
- if (jcr->get_JobLevel() == L_VIRTUAL_FULL) {
+ if (jcr->getJobLevel() == L_VIRTUAL_FULL) {
return do_vbackup_init(jcr);
}
free_rstorage(jcr); /* we don't read so release */
for (char *k=fopts->opts; *k ; k++) { /* Try to find one request */
switch (*k) {
case 'V': /* verify */
- in_block = (jcr->get_JobType() == JT_VERIFY); /* not used now */
+ in_block = (jcr->getJobType() == JT_VERIFY); /* not used now */
break;
case 'J': /* Basejob keyword */
have_basejob_option = in_block = jcr->HasBase;
break;
case 'C': /* Accurate keyword */
- in_block = (jcr->get_JobLevel() != L_FULL);
+ in_block = (jcr->getJobLevel() != L_FULL);
break;
case ':': /* End of keyword */
in_block = false;
return true;
}
/* In base level, no previous job is used */
- if (jcr->get_JobLevel() == L_BASE) {
+ if (jcr->getJobLevel() == L_BASE) {
return true;
}
- if (jcr->get_JobLevel() == L_FULL) {
+ if (jcr->getJobLevel() == L_FULL) {
/* On Full mode, if no previous base job, no accurate things */
if (!get_base_jobids(jcr, &jobids)) {
goto bail_out;
STORE *store;
char ed1[100];
- if (jcr->get_JobLevel() == L_VIRTUAL_FULL) {
+ if (jcr->getJobLevel() == L_VIRTUAL_FULL) {
return do_vbackup(jcr);
}
Jmsg(jcr, M_INFO, 0, _("Start Backup JobId %s, Job=%s\n"),
edit_uint64(jcr->JobId, ed1), jcr->Job);
- set_jcr_job_status(jcr, JS_Running);
+ jcr->setJobStatus(JS_Running);
Dmsg2(100, "JobId=%d JobLevel=%c\n", jcr->jr.JobId, jcr->jr.JobLevel);
if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) {
Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db));
*
*/
Dmsg0(110, "Open connection with storage daemon\n");
- set_jcr_job_status(jcr, JS_WaitSD);
+ jcr->setJobStatus(JS_WaitSD);
/*
* Start conversation with Storage daemon
*/
}
Dmsg0(150, "Storage daemon connection OK\n");
- set_jcr_job_status(jcr, JS_WaitFD);
+ jcr->setJobStatus(JS_WaitFD);
if (!connect_to_file_daemon(jcr, 10, FDConnectTimeout, 1)) {
goto bail_out;
}
- set_jcr_job_status(jcr, JS_Running);
+ jcr->setJobStatus(JS_Running);
fd = jcr->file_bsock;
if (!send_include_list(jcr)) {
/* Come here only after starting SD thread */
bail_out:
- set_jcr_job_status(jcr, JS_ErrorTerminated);
+ jcr->setJobStatus(JS_ErrorTerminated);
Dmsg1(400, "wait for sd. use=%d\n", jcr->use_count());
/* Cancel SD */
wait_for_job_termination(jcr, FDConnectTimeout);
int Encrypt = 0;
btimer_t *tid=NULL;
- set_jcr_job_status(jcr, JS_Running);
+ jcr->setJobStatus(JS_Running);
if (fd) {
if (timeout) {
sscanf(fd->msg, OldEndJob, &jcr->FDJobStatus, &JobFiles,
&ReadBytes, &JobBytes, &JobErrors) == 5)) {
fd_ok = true;
- set_jcr_job_status(jcr, jcr->FDJobStatus);
+ jcr->setJobStatus(jcr->FDJobStatus);
Dmsg1(100, "FDStatus=%c\n", (char)jcr->JobStatus);
} else {
Jmsg(jcr, M_WARNING, 0, _("Unexpected Client Job message: %s\n"),
if (is_bnet_error(fd)) {
Jmsg(jcr, M_FATAL, 0, _("Network error with FD during %s: ERR=%s\n"),
- job_type_to_str(jcr->get_JobType()), fd->bstrerror());
+ job_type_to_str(jcr->getJobType()), fd->bstrerror());
}
fd->signal(BNET_TERMINATE); /* tell Client we are terminating */
}
utime_t RunTime;
POOL_MEM base_info;
- if (jcr->get_JobLevel() == L_VIRTUAL_FULL) {
+ if (jcr->getJobLevel() == L_VIRTUAL_FULL) {
vbackup_cleanup(jcr, TermCode);
return;
}
memset(&mr, 0, sizeof(mr));
memset(&cr, 0, sizeof(cr));
+#ifdef xxxx
+ if (jcr->getJobStatus() == JS_Terminated &&
+ (jcr->JobErrors || jcr->SDErrors || jcr->JobWarnings)) {
+ TermCode = JS_Warnings;
+ }
+#endif
+
update_job_end(jcr, TermCode);
if (!db_get_job_record(jcr, jcr->db, &jcr->jr)) {
Jmsg(jcr, M_WARNING, 0, _("Error getting Job record for Job report: ERR=%s"),
db_strerror(jcr->db));
- set_jcr_job_status(jcr, JS_ErrorTerminated);
+ jcr->setJobStatus(JS_ErrorTerminated);
}
bstrncpy(cr.Name, jcr->client->name(), sizeof(cr.Name));
if (!db_get_media_record(jcr, jcr->db, &mr)) {
Jmsg(jcr, M_WARNING, 0, _("Error getting Media record for Volume \"%s\": ERR=%s"),
mr.VolumeName, db_strerror(jcr->db));
- set_jcr_job_status(jcr, JS_ErrorTerminated);
+ jcr->setJobStatus(JS_ErrorTerminated);
}
update_bootstrap_file(jcr);
HOST_OS, DISTNAME, DISTVER,
jcr->jr.JobId,
jcr->jr.Job,
- level_to_str(jcr->get_JobLevel()), jcr->since,
+ level_to_str(jcr->getJobLevel()), jcr->since,
jcr->client->name(), cr.Uname,
jcr->fileset->name(), jcr->FSCreateTime,
jcr->pool->name(), jcr->pool_source,
fd = bpipe ? bpipe->wfd : NULL;
} else {
/* ***FIXME*** handle BASE */
- fd = fopen(fname, jcr->get_JobLevel()==L_FULL?"w+b":"a+b");
+ fd = fopen(fname, jcr->getJobLevel()==L_FULL?"w+b":"a+b");
}
if (fd) {
VolCount = db_get_job_volume_parameters(jcr, jcr->db, jcr->JobId,
Jmsg(jcr, M_ERROR, 0, _("Could not get Job Volume Parameters to "
"update Bootstrap file. ERR=%s\n"), db_strerror(jcr->db));
if (jcr->SDJobFiles != 0) {
- set_jcr_job_status(jcr, JS_ErrorTerminated);
+ jcr->setJobStatus(JS_ErrorTerminated);
}
}
/* Start output with when and who wrote it */
bstrftimes(edt, sizeof(edt), time(NULL));
fprintf(fd, "# %s - %s - %s%s\n", edt, jcr->jr.Job,
- level_to_str(jcr->get_JobLevel()), jcr->since);
+ level_to_str(jcr->getJobLevel()), jcr->since);
for (int i=0; i < VolCount; i++) {
/* Write the record */
fprintf(fd, "Volume=\"%s\"\n", VolParams[i].VolumeName);
berrno be;
Jmsg(jcr, M_ERROR, 0, _("Could not open WriteBootstrap file:\n"
"%s: ERR=%s\n"), fname, be.bstrerror());
- set_jcr_job_status(jcr, JS_ErrorTerminated);
+ jcr->setJobStatus(JS_ErrorTerminated);
}
free_pool_memory(fname);
}
Dmsg1(dbglvl, "Bacula: return bVarJob=%s\n", jcr->job->hdr.name);
break;
case bVarLevel:
- *((int *)value) = jcr->get_JobLevel();
- Dmsg1(dbglvl, "Bacula: return bVarLevel=%c\n", jcr->get_JobLevel());
+ *((int *)value) = jcr->getJobLevel();
+ Dmsg1(dbglvl, "Bacula: return bVarLevel=%c\n", jcr->getJobLevel());
break;
case bVarType:
- *((int *)value) = jcr->get_JobType();
- Dmsg1(dbglvl, "Bacula: return bVarType=%c\n", jcr->get_JobType());
+ *((int *)value) = jcr->getJobType();
+ Dmsg1(dbglvl, "Bacula: return bVarType=%c\n", jcr->getJobType());
break;
case bVarClient:
*((char **)value) = jcr->client->hdr.name;
if (jcr->eventType == bEventJobInit) {
for (int i=0; ok && joblevels[i].level_name; i++) {
if (strcasecmp(strval, joblevels[i].level_name) == 0) {
- if (joblevels[i].job_type == jcr->get_JobType()) {
+ if (joblevels[i].job_type == jcr->getJobType()) {
jcr->set_JobLevel(joblevels[i].level);
- jcr->jr.JobLevel = jcr->get_JobLevel();
+ jcr->jr.JobLevel = jcr->getJobLevel();
ok = false;
}
}
str = my_name;
break;
case 3: /* level */
- str = job_level_to_str(jcr->get_JobLevel());
+ str = job_level_to_str(jcr->getJobLevel());
break;
case 4: /* type */
- str = job_type_to_str(jcr->get_JobType());
+ str = job_type_to_str(jcr->getJobType());
break;
case 5: /* JobId */
bsnprintf(buf, sizeof(buf), "%d", jcr->JobId);
* Lookup the last FULL backup job to get the time/date for a
* differential or incremental save.
*/
- switch (jcr->get_JobLevel()) {
+ switch (jcr->getJobLevel()) {
case L_DIFFERENTIAL:
case L_INCREMENTAL:
POOLMEM *stime = get_pool_memory(PM_MESSAGE);
Dmsg4(50, "have_full=%d do_full=%d now=%lld full_time=%lld\n", have_full,
do_full, now, last_full_time);
/* Make sure the last diff is recent enough */
- if (have_full && jcr->get_JobLevel() == L_INCREMENTAL && jcr->job->MaxDiffInterval > 0) {
+ if (have_full && jcr->getJobLevel() == L_INCREMENTAL && jcr->job->MaxDiffInterval > 0) {
/* Lookup last diff job */
if (db_find_last_job_start_time(jcr, jcr->db, &jcr->jr, &stime, L_DIFFERENTIAL)) {
last_diff_time = str_to_utime(stime);
Jmsg(jcr, M_INFO, 0, "%s", db_strerror(jcr->db));
Jmsg(jcr, M_INFO, 0, _("No prior or suitable Full backup found in catalog. Doing FULL backup.\n"));
bsnprintf(since, since_len, _(" (upgraded from %s)"),
- level_to_str(jcr->get_JobLevel()));
+ level_to_str(jcr->getJobLevel()));
jcr->set_JobLevel(jcr->jr.JobLevel = L_FULL);
} else if (do_diff) {
/* No recent diff job found, so upgrade this one to Diff */
Jmsg(jcr, M_INFO, 0, _("No prior or suitable Differential backup found in catalog. Doing Differential backup.\n"));
bsnprintf(since, since_len, _(" (upgraded from %s)"),
- level_to_str(jcr->get_JobLevel()));
+ level_to_str(jcr->getJobLevel()));
jcr->set_JobLevel(jcr->jr.JobLevel = L_DIFFERENTIAL);
} else {
if (jcr->job->rerun_failed_levels) {
Jmsg(jcr, M_INFO, 0, _("Prior failed job found in catalog. Upgrading to %s.\n"),
level_to_str(JobLevel));
bsnprintf(since, since_len, _(" (upgraded from %s)"),
- level_to_str(jcr->get_JobLevel()));
+ level_to_str(jcr->getJobLevel()));
jcr->set_JobLevel(jcr->jr.JobLevel = JobLevel);
jcr->jr.JobId = jcr->JobId;
break;
jcr->jr.JobId = jcr->JobId;
break;
}
- Dmsg2(100, "Level=%c last start time=%s\n", jcr->get_JobLevel(), jcr->stime);
+ Dmsg2(100, "Level=%c last start time=%s\n", jcr->getJobLevel(), jcr->stime);
}
static void send_since_time(JCR *jcr)
/*
* Send Level command to File daemon
*/
- switch (jcr->get_JobLevel()) {
+ switch (jcr->getJobLevel()) {
case L_BASE:
fd->fsend(levelcmd, not_accurate, "base", " ", 0);
break;
case L_SINCE:
default:
Jmsg2(jcr, M_FATAL, 0, _("Unimplemented backup level %d %c\n"),
- jcr->get_JobLevel(), jcr->get_JobLevel());
+ jcr->getJobLevel(), jcr->getJobLevel());
return 0;
}
Dmsg1(120, ">filed: %s", fd->msg);
Dmsg0(120, "bdird: sending runscripts to fd\n");
foreach_alist(cmd, jcr->job->RunScripts) {
- if (cmd->can_run_at_level(jcr->get_JobLevel()) && cmd->target) {
+ if (cmd->can_run_at_level(jcr->getJobLevel()) && cmd->target) {
ehost = edit_job_codes(jcr, ehost, cmd->target, "");
Dmsg2(200, "bdird: runscript %s -> %s\n", cmd->target, ehost);
* this allows us to setup a proper job start record for restarting
* in case of later errors.
*/
- switch (jcr->get_JobType()) {
+ switch (jcr->getJobType()) {
case JT_BACKUP:
if (!do_backup_init(jcr)) {
backup_cleanup(jcr, JS_ErrorTerminated);
}
break;
default:
- Pmsg1(0, _("Unimplemented job type: %d\n"), jcr->get_JobType());
- set_jcr_job_status(jcr, JS_ErrorTerminated);
+ Pmsg1(0, _("Unimplemented job type: %d\n"), jcr->getJobType());
+ jcr->setJobStatus(JS_ErrorTerminated);
goto bail_out;
}
void update_job_end(JCR *jcr, int TermCode)
{
dequeue_messages(jcr); /* display any queued messages */
- set_jcr_job_status(jcr, TermCode);
+ jcr->setJobStatus(TermCode);
update_job_end_record(jcr);
}
generate_job_event(jcr, "JobRun");
generate_plugin_event(jcr, bEventJobRun);
- switch (jcr->get_JobType()) {
+ switch (jcr->getJobType()) {
case JT_BACKUP:
if (!job_canceled(jcr) && do_backup(jcr)) {
do_autoprune(jcr);
}
break;
default:
- Pmsg1(0, _("Unimplemented job type: %d\n"), jcr->get_JobType());
+ Pmsg1(0, _("Unimplemented job type: %d\n"), jcr->getJobType());
break;
}
char ed1[50];
int32_t old_status = jcr->JobStatus;
- set_jcr_job_status(jcr, JS_Canceled);
+ jcr->setJobStatus(JS_Canceled);
switch (old_status) {
case JS_Created:
/* check MaxWaitTime */
if (job_check_maxwaittime(jcr)) {
- set_jcr_job_status(jcr, JS_Canceled);
+ jcr->setJobStatus(JS_Canceled);
Qmsg(jcr, M_FATAL, 0, _("Max wait time exceeded. Job canceled.\n"));
cancel = true;
/* check MaxRunTime */
} else if (job_check_maxruntime(jcr)) {
- set_jcr_job_status(jcr, JS_Canceled);
+ jcr->setJobStatus(JS_Canceled);
Qmsg(jcr, M_FATAL, 0, _("Max run time exceeded. Job canceled.\n"));
cancel = true;
/* check MaxRunSchedTime */
} else if (job_check_maxschedruntime(jcr)) {
- set_jcr_job_status(jcr, JS_Canceled);
+ jcr->setJobStatus(JS_Canceled);
Qmsg(jcr, M_FATAL, 0, _("Max sched run time exceeded. Job canceled.\n"));
cancel = true;
}
watchdog_time, jcr->start_time, run_time, job->MaxRunTime, job->FullMaxRunTime,
job->IncMaxRunTime, job->DiffMaxRunTime);
- if (jcr->get_JobLevel() == L_FULL && job->FullMaxRunTime != 0 &&
+ if (jcr->getJobLevel() == L_FULL && job->FullMaxRunTime != 0 &&
run_time >= job->FullMaxRunTime) {
Dmsg0(200, "check_maxwaittime: FullMaxcancel\n");
cancel = true;
- } else if (jcr->get_JobLevel() == L_DIFFERENTIAL && job->DiffMaxRunTime != 0 &&
+ } else if (jcr->getJobLevel() == L_DIFFERENTIAL && job->DiffMaxRunTime != 0 &&
run_time >= job->DiffMaxRunTime) {
Dmsg0(200, "check_maxwaittime: DiffMaxcancel\n");
cancel = true;
- } else if (jcr->get_JobLevel() == L_INCREMENTAL && job->IncMaxRunTime != 0 &&
+ } else if (jcr->getJobLevel() == L_INCREMENTAL && job->IncMaxRunTime != 0 &&
run_time >= job->IncMaxRunTime) {
Dmsg0(200, "check_maxwaittime: IncMaxcancel\n");
cancel = true;
/*
* Apply any level related Pool selections
*/
- switch (jcr->get_JobLevel()) {
+ switch (jcr->getJobLevel()) {
case L_FULL:
if (jcr->full_pool) {
jcr->pool = jcr->full_pool;
jcr->jr.SchedTime = jcr->sched_time;
jcr->jr.StartTime = jcr->start_time;
jcr->jr.EndTime = 0; /* perhaps rescheduled, clear it */
- jcr->jr.JobType = jcr->get_JobType();
- jcr->jr.JobLevel = jcr->get_JobLevel();
+ jcr->jr.JobType = jcr->getJobType();
+ jcr->jr.JobLevel = jcr->getJobLevel();
jcr->jr.JobStatus = jcr->JobStatus;
jcr->jr.JobId = jcr->JobId;
bstrncpy(jcr->jr.Name, jcr->job->name(), sizeof(jcr->jr.Name));
jcr->set_JobType(job->JobType);
jcr->JobStatus = JS_Created;
- switch (jcr->get_JobType()) {
+ switch (jcr->getJobType()) {
case JT_ADMIN:
jcr->set_JobLevel(L_NONE);
break;
/* This can be overridden by Console program */
jcr->verify_job = job->verify_job;
/* If no default level given, set one */
- if (jcr->get_JobLevel() == 0) {
- switch (jcr->get_JobType()) {
+ if (jcr->getJobLevel() == 0) {
+ switch (jcr->getJobType()) {
case JT_VERIFY:
jcr->set_JobLevel(L_VERIFY_CATALOG);
break;
if (jcr->job->RescheduleOnError &&
jcr->JobStatus != JS_Terminated &&
jcr->JobStatus != JS_Canceled &&
- jcr->get_JobType() == JT_BACKUP &&
+ jcr->getJobType() == JT_BACKUP &&
(jcr->job->RescheduleTimes == 0 ||
jcr->reschedule_count < jcr->job->RescheduleTimes)) {
char dt[50], dt2[50];
set_jcr_defaults(njcr, jcr->job);
njcr->reschedule_count = jcr->reschedule_count;
njcr->sched_time = jcr->sched_time;
- njcr->set_JobLevel(jcr->get_JobLevel());
+ njcr->set_JobLevel(jcr->getJobLevel());
njcr->pool = jcr->pool;
njcr->run_pool_override = jcr->run_pool_override;
njcr->full_pool = jcr->full_pool;
static const int dbglevel = 10;
-static int get_job_to_migrate(JCR *jcr);
+static int getJob_to_migrate(JCR *jcr);
struct idpkt;
static bool regex_find_jobids(JCR *jcr, idpkt *ids, const char *query1,
const char *query2, const char *type);
}
/* If we find a job or jobs to migrate it is previous_jr.JobId */
- count = get_job_to_migrate(jcr);
+ count = getJob_to_migrate(jcr);
if (count < 0) {
return false;
}
return true; /* no work */
}
- Dmsg1(dbglevel, "Back from get_job_to_migrate JobId=%d\n", (int)jcr->JobId);
+ Dmsg1(dbglevel, "Back from getJob_to_migrate JobId=%d\n", (int)jcr->JobId);
if (jcr->previous_jr.JobId == 0) {
Dmsg1(dbglevel, "JobId=%d no previous JobId\n", (int)jcr->JobId);
* 0 if no jobs to migrate
* 1 if OK and jcr->previous_jr filled in
*/
-static int get_job_to_migrate(JCR *jcr)
+static int getJob_to_migrate(JCR *jcr)
{
char ed1[30], ed2[30];
POOL_MEM query(PM_MESSAGE);
for (int i=1; i < (int)ids.count; i++) {
JobId = 0;
stat = get_next_jobid_from_list(&p, &JobId);
- Dmsg3(dbglevel, "get_jobid_no=%d stat=%d JobId=%u\n", i, stat, JobId);
+ Dmsg3(dbglevel, "getJobid_no=%d stat=%d JobId=%u\n", i, stat, JobId);
if (stat < 0) {
Jmsg(jcr, M_FATAL, 0, _("Invalid JobId found.\n"));
goto bail_out;
POOL_MEM query(PM_MESSAGE);
/* Only a copy job is allowed */
- if (jcr->get_JobType() != JT_COPY) {
+ if (jcr->getJobType() != JT_COPY) {
Jmsg(jcr, M_FATAL, 0,
_("Selection Type 'pooluncopiedjobs' only applies to Copy Jobs"));
goto bail_out;
* - move any Log records to the new JobId
* - Purge the File records from the previous job
*/
- if (jcr->get_JobType() == JT_MIGRATE && jcr->JobStatus == JS_Terminated) {
+ if (jcr->getJobType() == JT_MIGRATE && jcr->JobStatus == JS_Terminated) {
Mmsg(query, "UPDATE Job SET Type='%c' WHERE JobId=%s",
(char)JT_MIGRATED_JOB, old_jobid);
db_sql_query(mig_jcr->db, query.c_str(), NULL, NULL);
* - copy any Log records to the new JobId
* - set type="Job Copy" for the new job
*/
- if (jcr->get_JobType() == JT_COPY && jcr->JobStatus == JS_Terminated) {
+ if (jcr->getJobType() == JT_COPY && jcr->JobStatus == JS_Terminated) {
/* Copy JobLog to new JobId */
Mmsg(query, "INSERT INTO Log (JobId, Time, LogText ) "
"SELECT %s, Time, LogText FROM Log WHERE JobId=%s",
break;
}
} else {
- if (jcr->get_JobType() == JT_MIGRATE && jcr->previous_jr.JobId != 0) {
+ if (jcr->getJobType() == JT_MIGRATE && jcr->previous_jr.JobId != 0) {
/* Mark previous job as migrated */
Mmsg(query, "UPDATE Job SET Type='%c' WHERE JobId=%s",
(char)JT_MIGRATED_JOB, edit_uint64(jcr->previous_jr.JobId, ec1));
mig_jcr ? edit_uint64(mig_jcr->jr.JobId, ec7) : "0",
edit_uint64(jcr->jr.JobId, ec8),
jcr->jr.Job,
- level_to_str(jcr->get_JobLevel()), jcr->since,
+ level_to_str(jcr->getJobLevel()), jcr->since,
jcr->client->name(),
jcr->fileset->name(), jcr->FSCreateTime,
jcr->rpool->name(), jcr->rpool_source,
}
sd->fsend(jobcmd, edit_int64(jcr->JobId, ed1), jcr->Job,
job_name.c_str(), client_name.c_str(),
- jcr->get_JobType(), jcr->get_JobLevel(),
+ jcr->getJobType(), jcr->getJobLevel(),
fileset_name.c_str(), !jcr->pool->catalog_files,
jcr->job->SpoolAttributes, jcr->fileset->MD5, jcr->spool_data,
jcr->write_part_after_job, jcr->job->PreferMountedVolumes,
/* Do read side of storage daemon */
if (ok && rstore) {
/* For the moment, only migrate, copy and vbackup have rpool */
- if (jcr->get_JobType() == JT_MIGRATE || jcr->get_JobType() == JT_COPY ||
- (jcr->get_JobType() == JT_BACKUP && jcr->get_JobLevel() == L_VIRTUAL_FULL)) {
+ if (jcr->getJobType() == JT_MIGRATE || jcr->getJobType() == JT_COPY ||
+ (jcr->getJobType() == JT_BACKUP && jcr->getJobLevel() == L_VIRTUAL_FULL)) {
pm_strcpy(pool_type, jcr->rpool->pool_type);
pm_strcpy(pool_name, jcr->rpool->name());
} else {
case 0: /* Job */
return Py_BuildValue((char *)getvars[i].fmt, jcr->job->hdr.name);
case 1: /* level */
- return Py_BuildValue((char *)getvars[i].fmt, job_level_to_str(jcr->get_JobLevel()));
+ return Py_BuildValue((char *)getvars[i].fmt, job_level_to_str(jcr->getJobLevel()));
case 2: /* type */
- return Py_BuildValue((char *)getvars[i].fmt, job_type_to_str(jcr->get_JobType()));
+ return Py_BuildValue((char *)getvars[i].fmt, job_type_to_str(jcr->getJobType()));
case 3: /* JobId */
return Py_BuildValue((char *)getvars[i].fmt, jcr->JobId);
case 4: /* Client */
if (strval != NULL) {
for (i=0; joblevels[i].level_name; i++) {
if (strcmp(strval, joblevels[i].level_name) == 0) {
- if (joblevels[i].job_type == jcr->get_JobType()) {
+ if (joblevels[i].job_type == jcr->getJobType()) {
jcr->set_JobLevel(joblevels[i].level);
- jcr->jr.JobLevel = jcr->get_JobLevel();
+ jcr->jr.JobLevel = jcr->getJobLevel();
return 0;
}
}
add_prompt(ua, _("Storage")); /* 1 */
add_prompt(ua, _("Job")); /* 2 */
add_prompt(ua, _("FileSet")); /* 3 */
- if (jcr->get_JobType() == JT_RESTORE) {
+ if (jcr->getJobType() == JT_RESTORE) {
add_prompt(ua, _("Restore Client")); /* 4 */
} else {
add_prompt(ua, _("Client")); /* 4 */
}
add_prompt(ua, _("When")); /* 5 */
add_prompt(ua, _("Priority")); /* 6 */
- if (jcr->get_JobType() == JT_BACKUP ||
- jcr->get_JobType() == JT_COPY ||
- jcr->get_JobType() == JT_MIGRATE ||
- jcr->get_JobType() == JT_VERIFY) {
+ if (jcr->getJobType() == JT_BACKUP ||
+ jcr->getJobType() == JT_COPY ||
+ jcr->getJobType() == JT_MIGRATE ||
+ jcr->getJobType() == JT_VERIFY) {
add_prompt(ua, _("Pool")); /* 7 */
- if (jcr->get_JobType() == JT_VERIFY) {
+ if (jcr->getJobType() == JT_VERIFY) {
add_prompt(ua, _("Verify Job")); /* 8 */
}
- } else if (jcr->get_JobType() == JT_RESTORE) {
+ } else if (jcr->getJobType() == JT_RESTORE) {
add_prompt(ua, _("Bootstrap")); /* 7 */
add_prompt(ua, _("Where")); /* 8 */
add_prompt(ua, _("File Relocation"));/* 9 */
add_prompt(ua, _("Replace")); /* 10 */
add_prompt(ua, _("JobId")); /* 11 */
}
- if (jcr->get_JobType() == JT_BACKUP || jcr->get_JobType() == JT_RESTORE) {
+ if (jcr->getJobType() == JT_BACKUP || jcr->getJobType() == JT_RESTORE) {
add_prompt(ua, _("Plugin Options")); /* 12 */
}
switch (do_prompt(ua, "", _("Select parameter to modify"), NULL, 0)) {
goto try_again;
case 7:
/* Pool or Bootstrap depending on JobType */
- if (jcr->get_JobType() == JT_BACKUP ||
- jcr->get_JobType() == JT_COPY ||
- jcr->get_JobType() == JT_MIGRATE ||
- jcr->get_JobType() == JT_VERIFY) { /* Pool */
+ if (jcr->getJobType() == JT_BACKUP ||
+ jcr->getJobType() == JT_COPY ||
+ jcr->getJobType() == JT_MIGRATE ||
+ jcr->getJobType() == JT_VERIFY) { /* Pool */
rc.pool = select_pool_resource(ua);
if (rc.pool) {
jcr->pool = rc.pool;
goto try_again;
case 8:
/* Verify Job */
- if (jcr->get_JobType() == JT_VERIFY) {
+ if (jcr->getJobType() == JT_VERIFY) {
rc.verify_job = select_job_resource(ua);
if (rc.verify_job) {
jcr->verify_job = rc.verify_job;
/* If pool changed, update migration write storage */
- if (jcr->get_JobType() == JT_MIGRATE || jcr->get_JobType() == JT_COPY ||
- (jcr->get_JobType() == JT_BACKUP && jcr->get_JobLevel() == L_VIRTUAL_FULL)) {
+ if (jcr->getJobType() == JT_MIGRATE || jcr->getJobType() == JT_COPY ||
+ (jcr->getJobType() == JT_BACKUP && jcr->getJobLevel() == L_VIRTUAL_FULL)) {
if (!set_migration_wstorage(jcr, rc.pool)) {
return false;
}
static void select_job_level(UAContext *ua, JCR *jcr)
{
- if (jcr->get_JobType() == JT_BACKUP) {
+ if (jcr->getJobType() == JT_BACKUP) {
start_prompt(ua, _("Levels:\n"));
// add_prompt(ua, _("Base"));
add_prompt(ua, _("Full"));
default:
break;
}
- } else if (jcr->get_JobType() == JT_VERIFY) {
+ } else if (jcr->getJobType() == JT_VERIFY) {
start_prompt(ua, _("Levels:\n"));
add_prompt(ua, _("Initialize Catalog"));
add_prompt(ua, _("Verify Catalog"));
static bool display_job_parameters(UAContext *ua, JCR *jcr, JOB *job, const char *verify_list,
char *jid, const char *replace, char *client_name)
{
- Dmsg1(800, "JobType=%c\n", jcr->get_JobType());
- switch (jcr->get_JobType()) {
+ Dmsg1(800, "JobType=%c\n", jcr->getJobType());
+ switch (jcr->getJobType()) {
char ec1[30];
char dt[MAX_TIME_LENGTH];
case JT_ADMIN:
break;
case JT_BACKUP:
case JT_VERIFY:
- if (jcr->get_JobType() == JT_BACKUP) {
+ if (jcr->getJobType() == JT_BACKUP) {
if (ua->api) ua->signal(BNET_RUN_CMD);
ua->send_msg(_("Run %s job\n"
"JobName: %s\n"
"%s%s%s"),
_("Backup"),
job->name(),
- level_to_str(jcr->get_JobLevel()),
+ level_to_str(jcr->getJobLevel()),
jcr->client->name(),
jcr->fileset->name(),
NPRT(jcr->pool->name()), jcr->pool_source,
"Priority: %d\n"),
_("Verify"),
job->name(),
- level_to_str(jcr->get_JobLevel()),
+ level_to_str(jcr->getJobLevel()),
jcr->client->name(),
jcr->fileset->name(),
NPRT(jcr->pool->name()), jcr->pool_source,
case JT_COPY:
case JT_MIGRATE:
char *prt_type;
- if (jcr->get_JobType() == JT_COPY) {
+ if (jcr->getJobType() == JT_COPY) {
prt_type = _("Run Copy job\n");
} else {
prt_type = _("Run Migration job\n");
jcr->JobPriority);
break;
default:
- ua->error_msg(_("Unknown Job Type=%d\n"), jcr->get_JobType());
+ ua->error_msg(_("Unknown Job Type=%d\n"), jcr->getJobType());
return false;
}
return true;
MEDIA_DBR mr;
int orig_jobtype;
- orig_jobtype = jcr->get_JobType();
+ orig_jobtype = jcr->getJobType();
memset(&mr, 0, sizeof(mr));
if (sp->job->JobType == JT_BACKUP) {
jcr->db = NULL;
/* this is a console or other control job. We only show console
* jobs in the status output.
*/
- if (jcr->get_JobType() == JT_CONSOLE && !ua->api) {
+ if (jcr->getJobType() == JT_CONSOLE && !ua->api) {
bstrftime_nc(dt, sizeof(dt), jcr->start_time);
ua->send_msg(_("Console connected at %s\n"), dt);
}
msg = _("Dir inserting Attributes");
break;
}
- switch (jcr->get_JobType()) {
+ switch (jcr->getJobType()) {
case JT_ADMIN:
case JT_RESTORE:
bstrncpy(level, " ", sizeof(level));
break;
default:
- bstrncpy(level, level_to_str(jcr->get_JobLevel()), sizeof(level));
+ bstrncpy(level, level_to_str(jcr->getJobLevel()), sizeof(level));
level[7] = 0;
break;
}
if (!allow_duplicate_job(jcr)) {
return false;
}
- switch (jcr->get_JobLevel()) {
+ switch (jcr->getJobLevel()) {
case L_VERIFY_INIT:
case L_VERIFY_CATALOG:
case L_VERIFY_DISK_TO_CATALOG:
case L_VERIFY_DATA:
break;
default:
- Jmsg2(jcr, M_FATAL, 0, _("Unimplemented Verify level %d(%c)\n"), jcr->get_JobLevel(),
- jcr->get_JobLevel());
+ Jmsg2(jcr, M_FATAL, 0, _("Unimplemented Verify level %d(%c)\n"), jcr->getJobLevel(),
+ jcr->getJobLevel());
return false;
}
return true;
* For VERIFY_VOLUME_TO_CATALOG, we want the JobId of the
* last backup Job.
*/
- if (jcr->get_JobLevel() == L_VERIFY_CATALOG ||
- jcr->get_JobLevel() == L_VERIFY_VOLUME_TO_CATALOG ||
- jcr->get_JobLevel() == L_VERIFY_DISK_TO_CATALOG) {
+ if (jcr->getJobLevel() == L_VERIFY_CATALOG ||
+ jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG ||
+ jcr->getJobLevel() == L_VERIFY_DISK_TO_CATALOG) {
memcpy(&jr, &jcr->jr, sizeof(jr));
if (jcr->verify_job &&
- (jcr->get_JobLevel() == L_VERIFY_VOLUME_TO_CATALOG ||
- jcr->get_JobLevel() == L_VERIFY_DISK_TO_CATALOG)) {
+ (jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG ||
+ jcr->getJobLevel() == L_VERIFY_DISK_TO_CATALOG)) {
Name = jcr->verify_job->name();
} else {
Name = NULL;
}
Dmsg1(100, "find last jobid for: %s\n", NPRT(Name));
if (!db_find_last_jobid(jcr, jcr->db, Name, &jr)) {
- if (jcr->get_JobLevel() == L_VERIFY_CATALOG) {
+ if (jcr->getJobLevel() == L_VERIFY_CATALOG) {
Jmsg(jcr, M_FATAL, 0, _(
"Unable to find JobId of previous InitCatalog Job.\n"
"Please run a Verify with Level=InitCatalog before\n"
* Now get the job record for the previous backup that interests
* us. We use the verify_jobid that we found above.
*/
- if (jcr->get_JobLevel() == L_VERIFY_CATALOG ||
- jcr->get_JobLevel() == L_VERIFY_VOLUME_TO_CATALOG ||
- jcr->get_JobLevel() == L_VERIFY_DISK_TO_CATALOG) {
+ if (jcr->getJobLevel() == L_VERIFY_CATALOG ||
+ jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG ||
+ jcr->getJobLevel() == L_VERIFY_DISK_TO_CATALOG) {
jcr->previous_jr.JobId = verify_jobid;
if (!db_get_job_record(jcr, jcr->db, &jcr->previous_jr)) {
Jmsg(jcr, M_FATAL, 0, _("Could not get job record for previous Job. ERR=%s"),
* create a dummy authorization key (passed to
* File daemon but not used).
*/
- if (jcr->get_JobLevel() == L_VERIFY_VOLUME_TO_CATALOG) {
+ if (jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG) {
int stat;
/*
* Note: negative status is an error, zero status, means
jcr->sd_auth_key = bstrdup("dummy"); /* dummy Storage daemon key */
}
- if (jcr->get_JobLevel() == L_VERIFY_DISK_TO_CATALOG && jcr->verify_job) {
+ if (jcr->getJobLevel() == L_VERIFY_DISK_TO_CATALOG && jcr->verify_job) {
jcr->fileset = jcr->verify_job->fileset;
}
- Dmsg2(100, "ClientId=%u JobLevel=%c\n", jcr->previous_jr.ClientId, jcr->get_JobLevel());
+ Dmsg2(100, "ClientId=%u JobLevel=%c\n", jcr->previous_jr.ClientId, jcr->getJobLevel());
if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) {
Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db));
/* Print Job Start message */
Jmsg(jcr, M_INFO, 0, _("Start Verify JobId=%s Level=%s Job=%s\n"),
- edit_uint64(jcr->JobId, ed1), level_to_str(jcr->get_JobLevel()), jcr->Job);
+ edit_uint64(jcr->JobId, ed1), level_to_str(jcr->getJobLevel()), jcr->Job);
- if (jcr->get_JobLevel() == L_VERIFY_VOLUME_TO_CATALOG) {
+ if (jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG) {
BSOCK *sd;
/*
* Start conversation with Storage daemon
* Send Level command to File daemon, as well
* as the Storage address if appropriate.
*/
- switch (jcr->get_JobLevel()) {
+ switch (jcr->getJobLevel()) {
case L_VERIFY_INIT:
level = "init";
break;
level="disk_to_catalog";
break;
default:
- Jmsg2(jcr, M_FATAL, 0, _("Unimplemented Verify level %d(%c)\n"), jcr->get_JobLevel(),
- jcr->get_JobLevel());
+ Jmsg2(jcr, M_FATAL, 0, _("Unimplemented Verify level %d(%c)\n"), jcr->getJobLevel(),
+ jcr->getJobLevel());
goto bail_out;
}
* catalog depending on the run type.
*/
/* Compare to catalog */
- switch (jcr->get_JobLevel()) {
+ switch (jcr->getJobLevel()) {
case L_VERIFY_CATALOG:
Dmsg0(10, "Verify level=catalog\n");
jcr->sd_msg_thread_done = true; /* no SD msg thread, so it is done */
break;
default:
- Jmsg1(jcr, M_FATAL, 0, _("Unimplemented verify level %d\n"), jcr->get_JobLevel());
+ Jmsg1(jcr, M_FATAL, 0, _("Unimplemented verify level %d\n"), jcr->getJobLevel());
goto bail_out;
}
// Dmsg1(100, "Enter verify_cleanup() TermCod=%d\n", TermCode);
- Dmsg3(900, "JobLevel=%c Expected=%u JobFiles=%u\n", jcr->get_JobLevel(),
+ Dmsg3(900, "JobLevel=%c Expected=%u JobFiles=%u\n", jcr->getJobLevel(),
jcr->ExpectedFiles, jcr->JobFiles);
- if (jcr->get_JobLevel() == L_VERIFY_VOLUME_TO_CATALOG &&
+ if (jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG &&
jcr->ExpectedFiles != jcr->JobFiles) {
TermCode = JS_ErrorTerminated;
}
}
jobstatus_to_ascii(jcr->FDJobStatus, fd_term_msg, sizeof(fd_term_msg));
- if (jcr->get_JobLevel() == L_VERIFY_VOLUME_TO_CATALOG) {
+ if (jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG) {
jobstatus_to_ascii(jcr->SDJobStatus, sd_term_msg, sizeof(sd_term_msg));
Jmsg(jcr, msg_type, 0, _("%s %s %s (%s): %s\n"
" Build OS: %s %s %s\n"
jcr->jr.JobId,
jcr->jr.Job,
jcr->fileset->hdr.name,
- level_to_str(jcr->get_JobLevel()),
+ level_to_str(jcr->getJobLevel()),
jcr->client->hdr.name,
jcr->previous_jr.JobId,
Name,
jcr->jr.JobId,
jcr->jr.Job,
jcr->fileset->hdr.name,
- level_to_str(jcr->get_JobLevel()),
+ level_to_str(jcr->getJobLevel()),
jcr->client->name(),
jcr->previous_jr.JobId,
Name,
}
if (!jcr->fn_printed) {
Qmsg(jcr, M_WARNING, 0, _("The following files are in the Catalog but not on %s:\n"),
- jcr->get_JobLevel() == L_VERIFY_VOLUME_TO_CATALOG ? "the Volume(s)" : "disk");
+ jcr->getJobLevel() == L_VERIFY_VOLUME_TO_CATALOG ? "the Volume(s)" : "disk");
jcr->fn_printed = true;
}
Qmsg(jcr, M_INFO, 0, " %s%s\n", row[0]?row[0]:"", row[1]?row[1]:"");
FF_PKT *ff_pkt;
int stream = STREAM_UNIX_ATTRIBUTES;
- if (!jcr->accurate || jcr->get_JobLevel() != L_FULL) {
+ if (!jcr->accurate || jcr->getJobLevel() != L_FULL) {
return true;
}
{
bool ret=true;
if (jcr->accurate) {
- if (jcr->get_JobLevel() == L_FULL) {
+ if (jcr->getJobLevel() == L_FULL) {
ret = accurate_send_base_file_list(jcr);
} else {
ret = accurate_send_deleted_list(jcr);
}
accurate_free(jcr);
- if (jcr->get_JobLevel() == L_FULL) {
+ if (jcr->getJobLevel() == L_FULL) {
Jmsg(jcr, M_INFO, 0, _("Space saved with Base jobs: %lld MB\n"),
jcr->base_size/(1024*1024));
}
decode_stat(elt.lstat, &statc, &LinkFIc); /* decode catalog stat */
- if (jcr->get_JobLevel() == L_FULL) {
+ if (jcr->getJobLevel() == L_FULL) {
opts = ff_pkt->BaseJobOpts;
} else {
opts = ff_pkt->AccurateOpts;
/* In Incr/Diff accurate mode, we mark all files as seen
* When in Full+Base mode, we mark only if the file match exactly
*/
- if (jcr->get_JobLevel() == L_FULL) {
+ if (jcr->getJobLevel() == L_FULL) {
if (!stat) {
/* compute space saved with basefile */
jcr->base_size += ff_pkt->statp.st_size;
Dmsg1(dbglvl, "Bacula: return my_name=%s\n", my_name);
break;
case bVarLevel:
- *((int *)value) = jcr->get_JobLevel();
- Dmsg1(dbglvl, "Bacula: return bVarJobLevel=%d\n", jcr->get_JobLevel());
+ *((int *)value) = jcr->getJobLevel();
+ Dmsg1(dbglvl, "Bacula: return bVarJobLevel=%d\n", jcr->getJobLevel());
break;
case bVarType:
- *((int *)value) = jcr->get_JobType();
- Dmsg1(dbglvl, "Bacula: return bVarJobType=%d\n", jcr->get_JobType());
+ *((int *)value) = jcr->getJobType();
+ Dmsg1(dbglvl, "Bacula: return bVarJobType=%d\n", jcr->getJobType());
break;
case bVarClient:
*((char **)value) = jcr->client_name;
buf = get_memory(dir->msglen+1);
utime_t since_time, adj;
btime_t his_time, bt_start, rt=0, bt_adj=0;
- if (jcr->get_JobLevel() == L_NONE) {
+ if (jcr->getJobLevel() == L_NONE) {
jcr->set_JobLevel(L_SINCE); /* if no other job level set, do it now */
}
if (sscanf(dir->msg, "level = since_utime %s mtime_only=%d",
if (buf) {
free_memory(buf);
}
- generate_plugin_event(jcr, bEventLevel, (void *)jcr->get_JobLevel());
+ generate_plugin_event(jcr, bEventLevel, (void *)jcr->getJobLevel());
return dir->fsend(OKlevel);
bail_out:
dir->fsend(OKverify);
generate_daemon_event(jcr, "JobStart");
- generate_plugin_event(jcr, bEventLevel, (void *)jcr->get_JobLevel());
+ generate_plugin_event(jcr, bEventLevel, (void *)jcr->getJobLevel());
generate_plugin_event(jcr, bEventStartVerifyJob);
Dmsg1(110, "filed>dird: %s", dir->msg);
- switch (jcr->get_JobLevel()) {
+ switch (jcr->getJobLevel()) {
case L_VERIFY_INIT:
case L_VERIFY_CATALOG:
do_verify(jcr);
njcr->JobId, njcr->Job);
sendit(msg.c_str(), len, sp);
len = Mmsg(msg, _(" %s%s Job started: %s\n"),
- vss, job_type_to_str(njcr->get_JobType()), dt);
+ vss, job_type_to_str(njcr->getJobType()), dt);
}
sendit(msg.c_str(), len, sp);
if (njcr->JobId == 0) {
crypto_digest_update(digest, (uint8_t *)buf, n);
/* Can be used by BaseJobs, update only for Verify jobs */
- if (jcr->get_JobLevel() != L_FULL) {
+ if (jcr->getJobLevel() != L_FULL) {
jcr->JobBytes += n;
jcr->ReadBytes += n;
}
void unlock() {V(mutex); };
void inc_use_count(void) {lock(); _use_count++; unlock(); };
void dec_use_count(void) {lock(); _use_count--; unlock(); };
- int32_t use_count() { return _use_count; };
+ int32_t use_count() const { return _use_count; };
void init_mutex(void) {pthread_mutex_init(&mutex, NULL); };
void destroy_mutex(void) {pthread_mutex_destroy(&mutex); };
bool is_job_canceled() {return job_canceled(this); };
void setJobLevel(int32_t JobLevel) { m_JobLevel = JobLevel; };
void set_JobType(int32_t JobType) { m_JobType = JobType; };
void setJobType(int32_t JobType) { m_JobType = JobType; };
- int32_t get_JobType() { return m_JobType; };
- int32_t getJobType() { return m_JobType; };
- int32_t get_JobLevel() { return m_JobLevel; };
- int32_t getJobLevel() { return m_JobLevel; };
- bool no_client_used() {
+ int32_t getJobType() const { return m_JobType; };
+ int32_t getJobLevel() const { return m_JobLevel; };
+ int32_t getJobStatus() const { return JobStatus; };
+ bool no_client_used() const {
return (m_JobType == JT_MIGRATE || m_JobType == JT_COPY ||
m_JobLevel == L_VIRTUAL_FULL);
};
};
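/*
 * Editor's note -- a minimal illustrative sketch, not part of the patch:
 * it shows how call sites read once the const camelCase accessors above
 * replace the old snake_case ones and setJobStatus() replaces the free
 * function set_jcr_job_status().  It assumes the usual Bacula headers
 * (jcr.h and the message macros) and an initialized JCR; the function
 * name itself is hypothetical.
 */
static void illustrate_jcr_accessors(JCR *jcr)
{
   jcr->setJobStatus(JS_Running);                /* was: set_jcr_job_status(jcr, JS_Running) */
   Dmsg2(100, "type=%c level=%c\n",
         jcr->getJobType(), jcr->getJobLevel()); /* were: get_JobType() / get_JobLevel() */
}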
extern struct s_last_job last_job;
-extern DLL_IMP_EXP dlist * last_jobs;
+extern DLL_IMP_EXP dlist *last_jobs;
/* The following routines are found in lib/jcr.c */
Dmsg1(dbglvl, "End job=%d\n", jcr->JobId);
/* Keep some statistics */
- switch (jcr->get_JobType()) {
+ switch (jcr->getJobType()) {
case JT_BACKUP:
case JT_VERIFY:
case JT_RESTORE:
je = (struct s_last_job *)malloc(sizeof(struct s_last_job));
memset(je, 0, sizeof(struct s_last_job)); /* zero in case unset fields */
je->Errors = jcr->JobErrors;
- je->JobType = jcr->get_JobType();
+ je->JobType = jcr->getJobType();
je->JobId = jcr->JobId;
je->VolSessionId = jcr->VolSessionId;
je->VolSessionTime = jcr->VolSessionTime;
je->JobFiles = jcr->JobFiles;
je->JobBytes = jcr->JobBytes;
je->JobStatus = jcr->JobStatus;
- je->JobLevel = jcr->get_JobLevel();
+ je->JobLevel = jcr->getJobLevel();
je->start_time = jcr->start_time;
je->end_time = time(NULL);
jcr, (int)jcr->JobId, jcr->Job, jcr->JobStatus);
fprintf(fp, "\tuse_count=%i\n", jcr->use_count());
fprintf(fp, "\tJobType=%c JobLevel=%c\n",
- jcr->get_JobType(), jcr->get_JobLevel());
+ jcr->getJobType(), jcr->getJobLevel());
bstrftime(buf1, sizeof(buf1), jcr->sched_time);
bstrftime(buf2, sizeof(buf2), jcr->start_time);
bstrftime(buf3, sizeof(buf3), jcr->end_time);
break;
case 'l':
if (jcr) {
- str = job_level_to_str(jcr->get_JobLevel());
+ str = job_level_to_str(jcr->getJobLevel());
} else {
str = _("*none*");
}
break;
case 't':
if (jcr) {
- str = job_type_to_str(jcr->get_JobType());
+ str = job_type_to_str(jcr->getJobType());
} else {
str = _("*none*");
}
dev = dcr->dev;
jcr = dcr->jcr;
if (jcr) Dmsg1(500, "JobId=%u enter attach_dcr_to_dev\n", (uint32_t)jcr->JobId);
- if (!dcr->attached_to_dev && dev->initiated && jcr && jcr->get_JobType() != JT_SYSTEM) {
+ if (!dcr->attached_to_dev && dev->initiated && jcr && jcr->getJobType() != JT_SYSTEM) {
dev->attached_dcrs->append(dcr); /* attach dcr to device */
dcr->attached_to_dev = true;
Dmsg1(500, "JobId=%u attach_dcr_to_dev\n", (uint32_t)jcr->JobId);
POOL_MEM VolumeName;
/* If system job, do not update catalog */
- if (jcr->get_JobType() == JT_SYSTEM) {
+ if (jcr->getJobType() == JT_SYSTEM) {
return true;
}
char ed1[50];
/* If system job, do not update catalog */
- if (jcr->get_JobType() == JT_SYSTEM) {
+ if (jcr->getJobType() == JT_SYSTEM) {
return true;
}
}
if (!write_block_to_dev(dcr)) {
- if (job_canceled(jcr) || jcr->get_JobType() == JT_SYSTEM) {
+ if (job_canceled(jcr) || jcr->getJobType() == JT_SYSTEM) {
stat = false;
} else {
stat = fixup_device_block_write_error(dcr);
}
if (verbose) {
Pmsg3(000, _("Updated Job termination record for JobId=%u Level=%s TermStat=%c\n"),
- jr->JobId, job_level_to_str(mjcr->get_JobLevel()), jr->JobStatus);
+ jr->JobId, job_level_to_str(mjcr->getJobLevel()), jr->JobStatus);
}
if (verbose > 1) {
const char *term_msg;
mjcr->JobId,
mjcr->Job,
mjcr->fileset_name,
- job_level_to_str(mjcr->get_JobLevel()),
+ job_level_to_str(mjcr->getJobLevel()),
mjcr->client_name,
sdt,
edt,
/* Added in VerNum 10 */
ser_string(jcr->Job); /* Unique name of this Job */
ser_string(jcr->fileset_name);
- ser_uint32(jcr->get_JobType());
- ser_uint32(jcr->get_JobLevel());
+ ser_uint32(jcr->getJobType());
+ ser_uint32(jcr->getJobLevel());
/* Added in VerNum 11 */
ser_string(jcr->fileset_md5);
char ec1[50];
DEVICE *dev;
- switch(jcr->get_JobType()) {
+ switch(jcr->getJobType()) {
case JT_MIGRATE:
Type = "Migration";
break;
case EOM_LABEL:
return true; /* don't write vol labels */
}
-// if (jcr->get_JobType() == JT_BACKUP) {
+// if (jcr->getJobType() == JT_BACKUP) {
/*
* For normal migration jobs, FileIndex values are sequential because
* we are dealing with one job. However, for Vbackup (consolidation),
foreach_jcr(jcr) {
if (jcr->JobStatus == JS_WaitFD) {
len = Mmsg(msg, _("%s Job %s waiting for Client connection.\n"),
- job_type_to_str(jcr->get_JobType()), jcr->Job);
+ job_type_to_str(jcr->getJobType()), jcr->Job);
sendit(msg, len, sp);
}
dcr = jcr->dcr;
if (rdcr && rdcr->device) {
len = Mmsg(msg, _("Reading: %s %s job %s JobId=%d Volume=\"%s\"\n"
" pool=\"%s\" device=%s\n"),
- job_level_to_str(jcr->get_JobLevel()),
- job_type_to_str(jcr->get_JobType()),
+ job_level_to_str(jcr->getJobLevel()),
+ job_type_to_str(jcr->getJobType()),
JobName,
jcr->JobId,
rdcr->VolumeName,
if (dcr && dcr->device) {
len = Mmsg(msg, _("Writing: %s %s job %s JobId=%d Volume=\"%s\"\n"
" pool=\"%s\" device=%s\n"),
- job_level_to_str(jcr->get_JobLevel()),
- job_type_to_str(jcr->get_JobType()),
+ job_level_to_str(jcr->getJobLevel()),
+ job_type_to_str(jcr->getJobType()),
JobName,
jcr->JobId,
dcr->VolumeName,