int stat = 0;
char ed1[50], ed2[50], ed3[50];
- if (jcr->JobLevel == L_VERIFY_DISK_TO_CATALOG) {
- Mmsg(mdb->cmd,
+ if (jcr->get_JobLevel() == L_VERIFY_DISK_TO_CATALOG) {
+ Mmsg(mdb->cmd,
"SELECT FileId, LStat, MD5 FROM File,Job WHERE "
"File.JobId=Job.JobId AND File.PathId=%s AND "
"File.FilenameId=%s AND Job.Type='B' AND Job.JobSTATUS='T' AND "
if (jcr->job->PruneJobs || jcr->client->AutoPrune) {
Jmsg(jcr, M_INFO, 0, _("Begin pruning Jobs.\n"));
- prune_jobs(ua, client, jcr->JobType);
+ prune_jobs(ua, client, jcr->get_JobType());
pruned = true;
} else {
pruned = false;
bool do_backup_init(JCR *jcr)
{
- if (jcr->JobLevel == L_VIRTUAL_FULL) {
+ if (jcr->get_JobLevel() == L_VIRTUAL_FULL) {
return do_vbackup_init(jcr);
}
free_rstorage(jcr); /* we don't read so release */
{
POOL_MEM buf;
- if (!jcr->accurate || job_canceled(jcr) || jcr->JobLevel==L_FULL) {
+ if (!jcr->accurate || job_canceled(jcr) || jcr->get_JobLevel()==L_FULL) {
return true;
}
POOLMEM *jobids = get_pool_memory(PM_FNAME);
STORE *store;
char ed1[100];
- if (jcr->JobLevel == L_VIRTUAL_FULL) {
+ if (jcr->get_JobLevel() == L_VIRTUAL_FULL) {
return do_vbackup(jcr);
}
if (is_bnet_error(fd)) {
Jmsg(jcr, M_FATAL, 0, _("Network error with FD during %s: ERR=%s\n"),
- job_type_to_str(jcr->JobType), fd->bstrerror());
+ job_type_to_str(jcr->get_JobType()), fd->bstrerror());
}
fd->signal(BNET_TERMINATE); /* tell Client we are terminating */
}
double kbps, compression;
utime_t RunTime;
- if (jcr->JobLevel == L_VIRTUAL_FULL) {
+ if (jcr->get_JobLevel() == L_VIRTUAL_FULL) {
vbackup_cleanup(jcr, TermCode);
}
HOST_OS, DISTNAME, DISTVER,
jcr->jr.JobId,
jcr->jr.Job,
- level_to_str(jcr->JobLevel), jcr->since,
+ level_to_str(jcr->get_JobLevel()), jcr->since,
jcr->client->name(), cr.Uname,
jcr->fileset->name(), jcr->FSCreateTime,
jcr->pool->name(), jcr->pool_source,
fd = bpipe ? bpipe->wfd : NULL;
} else {
/* ***FIXME*** handle BASE */
- fd = fopen(fname, jcr->JobLevel==L_FULL?"w+b":"a+b");
+ fd = fopen(fname, jcr->get_JobLevel()==L_FULL?"w+b":"a+b");
}
if (fd) {
VolCount = db_get_job_volume_parameters(jcr, jcr->db, jcr->JobId,
/* Start output with when and who wrote it */
bstrftimes(edt, sizeof(edt), time(NULL));
fprintf(fd, "# %s - %s - %s%s\n", edt, jcr->jr.Job,
- level_to_str(jcr->JobLevel), jcr->since);
+ level_to_str(jcr->get_JobLevel()), jcr->since);
for (int i=0; i < VolCount; i++) {
/* Write the record */
fprintf(fd, "Volume=\"%s\"\n", VolParams[i].VolumeName);
* Hook all active jobs so that they release this table
*/
foreach_jcr(jcr) {
- if (jcr->JobType != JT_SYSTEM) {
+ if (jcr->get_JobType() != JT_SYSTEM) {
reload_table[table].job_count++;
job_end_push(jcr, reload_job_end_cb, (void *)((long int)table));
njobs++;
str = my_name;
break;
case 3: /* level */
- str = job_level_to_str(jcr->JobLevel);
+ str = job_level_to_str(jcr->get_JobLevel());
break;
case 4: /* type */
- str = job_type_to_str(jcr->JobType);
+ str = job_type_to_str(jcr->get_JobType());
break;
case 5: /* JobId */
bsnprintf(buf, sizeof(buf), "%d", jcr->JobId);
* Lookup the last FULL backup job to get the time/date for a
* differential or incremental save.
*/
- switch (jcr->JobLevel) {
+ switch (jcr->get_JobLevel()) {
case L_DIFFERENTIAL:
case L_INCREMENTAL:
POOLMEM *stime = get_pool_memory(PM_MESSAGE);
jcr->jr.JobId = 0; /* flag to return since time */
have_full = db_find_job_start_time(jcr, jcr->db, &jcr->jr, &jcr->stime);
/* If there was a successful job, make sure it is recent enough */
- if (jcr->JobLevel == L_INCREMENTAL && have_full && jcr->job->MaxDiffInterval > 0) {
+ if (jcr->get_JobLevel() == L_INCREMENTAL && have_full && jcr->job->MaxDiffInterval > 0) {
/* Lookup last diff job */
if (db_find_last_job_start_time(jcr, jcr->db, &jcr->jr, &stime, L_DIFFERENTIAL)) {
diff_time = str_to_utime(stime);
Jmsg(jcr, M_INFO, 0, "%s", db_strerror(jcr->db));
Jmsg(jcr, M_INFO, 0, _("No prior or suitable Full backup found in catalog. Doing FULL backup.\n"));
bsnprintf(since, since_len, _(" (upgraded from %s)"),
- level_to_str(jcr->JobLevel));
- jcr->JobLevel = jcr->jr.JobLevel = L_FULL;
+ level_to_str(jcr->get_JobLevel()));
+ jcr->set_JobLevel(jcr->jr.JobLevel = L_FULL);
} else if (do_diff) {
/* No recent diff job found, so upgrade this one to Differential */
Jmsg(jcr, M_INFO, 0, _("No prior or suitable Differential backup found in catalog. Doing Differential backup.\n"));
bsnprintf(since, since_len, _(" (upgraded from %s)"),
- level_to_str(jcr->JobLevel));
- jcr->JobLevel = jcr->jr.JobLevel = L_DIFFERENTIAL;
+ level_to_str(jcr->get_JobLevel()));
+ jcr->set_JobLevel(jcr->jr.JobLevel = L_DIFFERENTIAL);
} else {
if (jcr->job->rerun_failed_levels) {
if (db_find_failed_job_since(jcr, jcr->db, &jcr->jr, jcr->stime, JobLevel)) {
Jmsg(jcr, M_INFO, 0, _("Prior failed job found in catalog. Upgrading to %s.\n"),
level_to_str(JobLevel));
bsnprintf(since, since_len, _(" (upgraded from %s)"),
- level_to_str(jcr->JobLevel));
- jcr->JobLevel = jcr->jr.JobLevel = JobLevel;
+ level_to_str(jcr->get_JobLevel()));
+ jcr->set_JobLevel(jcr->jr.JobLevel = JobLevel);
jcr->jr.JobId = jcr->JobId;
break;
}
jcr->jr.JobId = jcr->JobId;
break;
}
- Dmsg2(100, "Level=%c last start time=%s\n", jcr->JobLevel, jcr->stime);
+ Dmsg2(100, "Level=%c last start time=%s\n", jcr->get_JobLevel(), jcr->stime);
}
static void send_since_time(JCR *jcr)
/*
* Send Level command to File daemon
*/
- switch (jcr->JobLevel) {
+ switch (jcr->get_JobLevel()) {
case L_BASE:
fd->fsend(levelcmd, not_accurate, "base", " ", 0);
break;
case L_SINCE:
default:
Jmsg2(jcr, M_FATAL, 0, _("Unimplemented backup level %d %c\n"),
- jcr->JobLevel, jcr->JobLevel);
+ jcr->get_JobLevel(), jcr->get_JobLevel());
return 0;
}
Dmsg1(120, ">filed: %s", fd->msg);
Dmsg0(120, "bdird: sending runscripts to fd\n");
foreach_alist(cmd, jcr->job->RunScripts) {
- if (cmd->can_run_at_level(jcr->JobLevel) && cmd->target) {
+ if (cmd->can_run_at_level(jcr->get_JobLevel()) && cmd->target) {
ehost = edit_job_codes(jcr, ehost, cmd->target, "");
Dmsg2(200, "bdird: runscript %s -> %s\n", cmd->target, ehost);
pm_strcpy(jcr->pool_source, _("unknown source"));
}
- switch (jcr->JobType) {
- case JT_VERIFY:
- case JT_RESTORE:
- case JT_COPY:
- case JT_MIGRATE:
- jcr->JobReads = true;
- break;
- case JT_BACKUP:
- if (jcr->JobLevel == L_VIRTUAL_FULL) {
- jcr->JobReads = true;
- }
- break;
- default:
- break;
- }
- if (jcr->JobReads) {
+ if (jcr->JobReads()) {
if (!jcr->rpool_source) {
jcr->rpool_source = get_pool_memory(PM_MESSAGE);
pm_strcpy(jcr->rpool_source, _("unknown source"));
* this allows us to setup a proper job start record for restarting
* in case of later errors.
*/
- switch (jcr->JobType) {
+ switch (jcr->get_JobType()) {
case JT_BACKUP:
if (!do_backup_init(jcr)) {
backup_cleanup(jcr, JS_ErrorTerminated);
}
break;
default:
- Pmsg1(0, _("Unimplemented job type: %d\n"), jcr->JobType);
+ Pmsg1(0, _("Unimplemented job type: %d\n"), jcr->get_JobType());
set_jcr_job_status(jcr, JS_ErrorTerminated);
break;
}
}
generate_job_event(jcr, "JobRun");
- switch (jcr->JobType) {
+ switch (jcr->get_JobType()) {
case JT_BACKUP:
if (do_backup(jcr)) {
do_autoprune(jcr);
}
break;
default:
- Pmsg1(0, _("Unimplemented job type: %d\n"), jcr->JobType);
+ Pmsg1(0, _("Unimplemented job type: %d\n"), jcr->get_JobType());
break;
}
}
watchdog_time, jcr->start_time, job->MaxRunTime, job->FullMaxRunTime,
job->IncMaxRunTime, job->DiffMaxRunTime);
- if (jcr->JobLevel == L_FULL && job->FullMaxRunTime != 0 &&
+ if (jcr->get_JobLevel() == L_FULL && job->FullMaxRunTime != 0 &&
(watchdog_time - jcr->start_time) >= job->FullMaxRunTime) {
cancel = true;
- } else if (jcr->JobLevel == L_DIFFERENTIAL && job->DiffMaxRunTime != 0 &&
+ } else if (jcr->get_JobLevel() == L_DIFFERENTIAL && job->DiffMaxRunTime != 0 &&
(watchdog_time - jcr->start_time) >= job->DiffMaxRunTime) {
cancel = true;
- } else if (jcr->JobLevel == L_INCREMENTAL && job->IncMaxRunTime != 0 &&
+ } else if (jcr->get_JobLevel() == L_INCREMENTAL && job->IncMaxRunTime != 0 &&
(watchdog_time - jcr->start_time) >= job->IncMaxRunTime) {
cancel = true;
} else if ((watchdog_time - jcr->start_time) >= job->MaxRunTime) {
/*
* Apply any level related Pool selections
*/
- switch (jcr->JobLevel) {
+ switch (jcr->get_JobLevel()) {
case L_FULL:
if (jcr->full_pool) {
jcr->pool = jcr->full_pool;
jcr->jr.SchedTime = jcr->sched_time;
jcr->jr.StartTime = jcr->start_time;
jcr->jr.EndTime = 0; /* perhaps rescheduled, clear it */
- jcr->jr.JobType = jcr->JobType;
- jcr->jr.JobLevel = jcr->JobLevel;
+ jcr->jr.JobType = jcr->get_JobType();
+ jcr->jr.JobLevel = jcr->get_JobLevel();
jcr->jr.JobStatus = jcr->JobStatus;
jcr->jr.JobId = jcr->JobId;
bstrncpy(jcr->jr.Name, jcr->job->name(), sizeof(jcr->jr.Name));
void set_jcr_defaults(JCR *jcr, JOB *job)
{
jcr->job = job;
- jcr->JobType = job->JobType;
+ jcr->set_JobType(job->JobType);
jcr->JobStatus = JS_Created;
- switch (jcr->JobType) {
+ switch (jcr->get_JobType()) {
case JT_ADMIN:
- jcr->JobLevel = L_NONE;
+ jcr->set_JobLevel(L_NONE);
break;
default:
- jcr->JobLevel = job->JobLevel;
+ jcr->set_JobLevel(job->JobLevel);
break;
}
/* This can be overridden by Console program */
jcr->verify_job = job->verify_job;
/* If no default level given, set one */
- if (jcr->JobLevel == 0) {
- switch (jcr->JobType) {
+ if (jcr->get_JobLevel() == 0) {
+ switch (jcr->get_JobType()) {
case JT_VERIFY:
- jcr->JobLevel = L_VERIFY_CATALOG;
+ jcr->set_JobLevel(L_VERIFY_CATALOG);
break;
case JT_BACKUP:
- jcr->JobLevel = L_INCREMENTAL;
+ jcr->set_JobLevel(L_INCREMENTAL);
break;
case JT_RESTORE:
case JT_ADMIN:
- jcr->JobLevel = L_NONE;
+ jcr->set_JobLevel(L_NONE);
break;
default:
- jcr->JobLevel = L_FULL;
+ jcr->set_JobLevel(L_FULL);
break;
}
}
*/
void copy_rwstorage(JCR *jcr, alist *storage, const char *where)
{
- if (jcr->JobReads) {
+ if (jcr->JobReads()) {
copy_rstorage(jcr, storage, where);
}
copy_wstorage(jcr, storage, where);
Jmsg(jcr, M_FATAL, 0, _("No storage specified.\n"));
return;
}
- if (jcr->JobReads) {
+ if (jcr->JobReads()) {
set_rstorage(jcr, store);
}
set_wstorage(jcr, store);
if (jcr->job->RescheduleOnError &&
jcr->JobStatus != JS_Terminated &&
jcr->JobStatus != JS_Canceled &&
- jcr->JobType == JT_BACKUP &&
+ jcr->get_JobType() == JT_BACKUP &&
(jcr->job->RescheduleTimes == 0 ||
jcr->reschedule_count < jcr->job->RescheduleTimes)) {
char dt[50], dt2[50];
set_jcr_defaults(njcr, jcr->job);
njcr->reschedule_count = jcr->reschedule_count;
njcr->sched_time = jcr->sched_time;
- njcr->JobLevel = jcr->JobLevel;
+ njcr->set_JobLevel(jcr->get_JobLevel());
njcr->pool = jcr->pool;
njcr->run_pool_override = jcr->run_pool_override;
njcr->full_pool = jcr->full_pool;
/* Print Job Start message */
Jmsg(jcr, M_INFO, 0, _("Start %s JobId %s, Job=%s\n"),
- jcr->JobType == JT_MIGRATE ? "Migration" : "Copy",
+ jcr->get_JobType() == JT_MIGRATE ? "Migration" : "Copy",
edit_uint64(jcr->JobId, ed1), jcr->Job);
}
migration_cleanup(jcr, jcr->JobStatus);
- if (jcr->JobType == JT_MIGRATE && mig_jcr) {
+ if (jcr->get_JobType() == JT_MIGRATE && mig_jcr) {
char jobid[50];
UAContext *ua = new_ua_context(jcr);
edit_uint64(jcr->previous_jr.JobId, jobid);
db_sql_query(mig_jcr->db, query.c_str(), NULL, NULL);
/* Now mark the previous job as migrated if it terminated normally */
- if (jcr->JobType == JT_MIGRATE && jcr->JobStatus == JS_Terminated) {
+ if (jcr->get_JobType() == JT_MIGRATE && jcr->JobStatus == JS_Terminated) {
Mmsg(query, "UPDATE Job SET Type='%c' WHERE JobId=%s",
(char)JT_MIGRATED_JOB, edit_uint64(jcr->previous_jr.JobId, ec1));
db_sql_query(mig_jcr->db, query.c_str(), NULL, NULL);
break;
}
} else {
- if (jcr->JobType == JT_MIGRATE && jcr->previous_jr.JobId != 0) {
+ if (jcr->get_JobType() == JT_MIGRATE && jcr->previous_jr.JobId != 0) {
/* Mark previous job as migrated */
Mmsg(query, "UPDATE Job SET Type='%c' WHERE JobId=%s",
(char)JT_MIGRATED_JOB, edit_uint64(jcr->previous_jr.JobId, ec1));
mig_jcr ? edit_uint64(mig_jcr->jr.JobId, ec7) : "0",
edit_uint64(jcr->jr.JobId, ec8),
jcr->jr.Job,
- level_to_str(jcr->JobLevel), jcr->since,
+ level_to_str(jcr->get_JobLevel()), jcr->since,
jcr->client->name(),
jcr->fileset->name(), jcr->FSCreateTime,
jcr->rpool->name(), jcr->rpool_source,
}
sd->fsend(jobcmd, edit_int64(jcr->JobId, ed1), jcr->Job,
job_name.c_str(), client_name.c_str(),
- jcr->JobType, jcr->JobLevel,
+ jcr->get_JobType(), jcr->get_JobLevel(),
fileset_name.c_str(), !jcr->pool->catalog_files,
jcr->job->SpoolAttributes, jcr->fileset->MD5, jcr->spool_data,
jcr->write_part_after_job, jcr->job->PreferMountedVolumes,
/* Do read side of storage daemon */
if (ok && rstore) {
/* For the moment, only migrate and copy have rpool */
- if (jcr->JobType == JT_MIGRATE || jcr->JobType == JT_COPY) {
+ if (jcr->get_JobType() == JT_MIGRATE || jcr->get_JobType() == JT_COPY) {
pm_strcpy(pool_type, jcr->rpool->pool_type);
pm_strcpy(pool_name, jcr->rpool->name());
} else {
case 0: /* Job */
return Py_BuildValue((char *)getvars[i].fmt, jcr->job->hdr.name);
case 1: /* level */
- return Py_BuildValue((char *)getvars[i].fmt, job_level_to_str(jcr->JobLevel));
+ return Py_BuildValue((char *)getvars[i].fmt, job_level_to_str(jcr->get_JobLevel()));
case 2: /* type */
- return Py_BuildValue((char *)getvars[i].fmt, job_type_to_str(jcr->JobType));
+ return Py_BuildValue((char *)getvars[i].fmt, job_type_to_str(jcr->get_JobType()));
case 3: /* JobId */
return Py_BuildValue((char *)getvars[i].fmt, jcr->JobId);
case 4: /* Client */
}
for (i=0; joblevels[i].level_name; i++) {
if (strcmp(strval, joblevels[i].level_name) == 0) {
- if (joblevels[i].job_type == jcr->JobType) {
- jcr->JobLevel = joblevels[i].level;
- jcr->jr.JobLevel = jcr->JobLevel;
+ if (joblevels[i].job_type == jcr->get_JobType()) {
+ jcr->set_JobLevel(joblevels[i].level);
+ jcr->jr.JobLevel = jcr->get_JobLevel();
return 0;
}
}
ASSERT(job);
set_jcr_defaults(jcr, job);
if (run->level) {
- jcr->JobLevel = run->level; /* override run level */
+ jcr->set_JobLevel(run->level); /* override run level */
}
if (run->pool) {
jcr->pool = run->pool; /* override pool */
char since[MAXSTRING];
JCR *jcr = ua->jcr;
- jcr->JobLevel = L_FULL;
+ jcr->set_JobLevel(L_FULL);
for (int i=1; i<ua->argc; i++) {
if (strcasecmp(ua->argk[i], NT_("client")) == 0 ||
strcasecmp(ua->argk[i], NT_("fd")) == 0) {
}
jcr->job = job;
- jcr->JobType = JT_BACKUP;
+ jcr->set_JobType(JT_BACKUP);
init_jcr_job_record(jcr);
if (!get_or_create_client_record(jcr)) {
add_prompt(ua, _("Storage")); /* 1 */
add_prompt(ua, _("Job")); /* 2 */
add_prompt(ua, _("FileSet")); /* 3 */
- if (jcr->JobType == JT_RESTORE) {
+ if (jcr->get_JobType() == JT_RESTORE) {
add_prompt(ua, _("Restore Client")); /* 4 */
} else {
add_prompt(ua, _("Client")); /* 4 */
}
add_prompt(ua, _("When")); /* 5 */
add_prompt(ua, _("Priority")); /* 6 */
- if (jcr->JobType == JT_BACKUP ||
- jcr->JobType == JT_COPY ||
- jcr->JobType == JT_MIGRATE ||
- jcr->JobType == JT_VERIFY) {
+ if (jcr->get_JobType() == JT_BACKUP ||
+ jcr->get_JobType() == JT_COPY ||
+ jcr->get_JobType() == JT_MIGRATE ||
+ jcr->get_JobType() == JT_VERIFY) {
add_prompt(ua, _("Pool")); /* 7 */
- if (jcr->JobType == JT_VERIFY) {
+ if (jcr->get_JobType() == JT_VERIFY) {
add_prompt(ua, _("Verify Job")); /* 8 */
}
- } else if (jcr->JobType == JT_RESTORE) {
+ } else if (jcr->get_JobType() == JT_RESTORE) {
add_prompt(ua, _("Bootstrap")); /* 7 */
add_prompt(ua, _("Where")); /* 8 */
add_prompt(ua, _("File Relocation"));/* 9 */
add_prompt(ua, _("Replace")); /* 10 */
add_prompt(ua, _("JobId")); /* 11 */
}
- if (jcr->JobType == JT_BACKUP || jcr->JobType == JT_RESTORE) {
+ if (jcr->get_JobType() == JT_BACKUP || jcr->get_JobType() == JT_RESTORE) {
add_prompt(ua, _("Plugin Options")); /* 12 */
}
switch (do_prompt(ua, "", _("Select parameter to modify"), NULL, 0)) {
goto try_again;
case 7:
/* Pool or Bootstrap depending on JobType */
- if (jcr->JobType == JT_BACKUP ||
- jcr->JobType == JT_COPY ||
- jcr->JobType == JT_MIGRATE ||
- jcr->JobType == JT_VERIFY) { /* Pool */
+ if (jcr->get_JobType() == JT_BACKUP ||
+ jcr->get_JobType() == JT_COPY ||
+ jcr->get_JobType() == JT_MIGRATE ||
+ jcr->get_JobType() == JT_VERIFY) { /* Pool */
rc.pool = select_pool_resource(ua);
if (rc.pool) {
jcr->pool = rc.pool;
goto try_again;
case 8:
/* Verify Job */
- if (jcr->JobType == JT_VERIFY) {
+ if (jcr->get_JobType() == JT_VERIFY) {
rc.verify_job = select_job_resource(ua);
if (rc.verify_job) {
jcr->verify_job = rc.verify_job;
/* If pool changed, update migration write storage */
- if (jcr->JobType == JT_MIGRATE || jcr->JobType == JT_COPY) {
+ if (jcr->get_JobType() == JT_MIGRATE || jcr->get_JobType() == JT_COPY ||
+ (jcr->get_JobType() == JT_BACKUP && jcr->get_JobLevel() == L_VIRTUAL_FULL)) {
if (!set_migration_wstorage(jcr, rc.pool)) {
return false;
}
static void select_job_level(UAContext *ua, JCR *jcr)
{
- if (jcr->JobType == JT_BACKUP) {
+ if (jcr->get_JobType() == JT_BACKUP) {
start_prompt(ua, _("Levels:\n"));
// add_prompt(ua, _("Base"));
add_prompt(ua, _("Full"));
// jcr->JobLevel = L_BASE;
// break;
case 0:
- jcr->JobLevel = L_FULL;
+ jcr->set_JobLevel(L_FULL);
break;
case 1:
- jcr->JobLevel = L_INCREMENTAL;
+ jcr->set_JobLevel(L_INCREMENTAL);
break;
case 2:
- jcr->JobLevel = L_DIFFERENTIAL;
+ jcr->set_JobLevel(L_DIFFERENTIAL);
break;
case 3:
- jcr->JobLevel = L_SINCE;
+ jcr->set_JobLevel(L_SINCE);
break;
case 4:
- jcr->JobLevel = L_VIRTUAL_FULL;
+ jcr->set_JobLevel(L_VIRTUAL_FULL);
break;
default:
break;
}
- } else if (jcr->JobType == JT_VERIFY) {
+ } else if (jcr->get_JobType() == JT_VERIFY) {
start_prompt(ua, _("Levels:\n"));
add_prompt(ua, _("Initialize Catalog"));
add_prompt(ua, _("Verify Catalog"));
add_prompt(ua, _("Verify Volume Data (not yet implemented)"));
switch (do_prompt(ua, "", _("Select level"), NULL, 0)) {
case 0:
- jcr->JobLevel = L_VERIFY_INIT;
+ jcr->set_JobLevel(L_VERIFY_INIT);
break;
case 1:
- jcr->JobLevel = L_VERIFY_CATALOG;
+ jcr->set_JobLevel(L_VERIFY_CATALOG);
break;
case 2:
- jcr->JobLevel = L_VERIFY_VOLUME_TO_CATALOG;
+ jcr->set_JobLevel(L_VERIFY_VOLUME_TO_CATALOG);
break;
case 3:
- jcr->JobLevel = L_VERIFY_DISK_TO_CATALOG;
+ jcr->set_JobLevel(L_VERIFY_DISK_TO_CATALOG);
break;
case 4:
- jcr->JobLevel = L_VERIFY_DATA;
+ jcr->set_JobLevel(L_VERIFY_DATA);
break;
default:
break;
static bool display_job_parameters(UAContext *ua, JCR *jcr, JOB *job, const char *verify_list,
char *jid, const char *replace, char *client_name)
{
- Dmsg1(800, "JobType=%c\n", jcr->JobType);
- switch (jcr->JobType) {
+ Dmsg1(800, "JobType=%c\n", jcr->get_JobType());
+ switch (jcr->get_JobType()) {
char ec1[30];
char dt[MAX_TIME_LENGTH];
case JT_ADMIN:
jcr->wstore?jcr->wstore->name():"*None*",
bstrutime(dt, sizeof(dt), jcr->sched_time),
jcr->JobPriority);
- jcr->JobLevel = L_FULL;
+ jcr->set_JobLevel(L_FULL);
break;
case JT_BACKUP:
case JT_VERIFY:
- if (jcr->JobType == JT_BACKUP) {
+ if (jcr->get_JobType() == JT_BACKUP) {
if (ua->api) ua->signal(BNET_RUN_CMD);
ua->send_msg(_("Run %s job\n"
"JobName: %s\n"
"%s%s%s"),
_("Backup"),
job->name(),
- level_to_str(jcr->JobLevel),
+ level_to_str(jcr->get_JobLevel()),
jcr->client->name(),
jcr->fileset->name(),
NPRT(jcr->pool->name()), jcr->pool_source,
"Priority: %d\n"),
_("Verify"),
job->name(),
- level_to_str(jcr->JobLevel),
+ level_to_str(jcr->get_JobLevel()),
jcr->client->name(),
jcr->fileset->name(),
NPRT(jcr->pool->name()), jcr->pool_source,
jcr->RestoreJobId = ua->int64_val;
}
}
- jcr->JobLevel = L_FULL; /* default level */
+ jcr->set_JobLevel(L_FULL); /* default level */
Dmsg1(800, "JobId to restore=%d\n", jcr->RestoreJobId);
if (jcr->RestoreJobId == 0) {
if (ua->api) ua->signal(BNET_RUN_CMD);
case JT_COPY:
case JT_MIGRATE:
char *prt_type;
- if (jcr->JobType == JT_COPY) {
+ if (jcr->get_JobType() == JT_COPY) {
prt_type = _("Run Copy job\n");
} else {
prt_type = _("Run Migration job\n");
}
- jcr->JobLevel = L_FULL; /* default level */
+ jcr->set_JobLevel(L_FULL); /* default level */
if (ua->api) ua->signal(BNET_RUN_CMD);
ua->send_msg("%s"
"JobName: %s\n"
jcr->JobPriority);
break;
default:
- ua->error_msg(_("Unknown Job Type=%d\n"), jcr->JobType);
+ ua->error_msg(_("Unknown Job Type=%d\n"), jcr->get_JobType());
return false;
}
return true;
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2001-2007 Free Software Foundation Europe e.V.
+ Copyright (C) 2001-2008 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
bool found = false;
for (int i=0; joblevels[i].level_name; i++) {
if (strcasecmp(level_name, joblevels[i].level_name) == 0) {
- jcr->JobLevel = joblevels[i].level;
+ jcr->set_JobLevel(joblevels[i].level);
found = true;
break;
}
jcr->sd_auth_key = bstrdup("dummy"); /* dummy Storage daemon key */
create_unique_job_name(jcr, base_name);
jcr->sched_time = jcr->start_time;
- jcr->JobType = job_type;
- jcr->JobLevel = L_NONE;
+ jcr->set_JobType(job_type);
+ jcr->set_JobLevel(L_NONE);
set_jcr_job_status(jcr, JS_Running);
jcr->JobId = 0;
return jcr;
} else {
store = get_storage_resource(ua, false/*no default*/);
if (store) {
- if (find_arg(ua, NT_("slots")) > 0) {
- status_slots(ua, store);
- } else {
- do_storage_status(ua, store, NULL);
- }
+ if (find_arg(ua, NT_("slots")) > 0) {
+ status_slots(ua, store);
+ } else {
+ do_storage_status(ua, store, NULL);
+ }
}
return 1;
}
MEDIA_DBR mr;
int orig_jobtype;
- orig_jobtype = jcr->JobType;
+ orig_jobtype = jcr->get_JobType();
memset(&mr, 0, sizeof(mr));
if (sp->job->JobType == JT_BACKUP) {
jcr->db = NULL;
db_close_database(jcr, jcr->db);
}
jcr->db = ua->db; /* restore ua db to jcr */
- jcr->JobType = orig_jobtype;
+ jcr->set_JobType(orig_jobtype);
}
/*
/* this is a console or other control job. We only show console
* jobs in the status output.
*/
- if (jcr->JobType == JT_CONSOLE && !ua->api) {
+ if (jcr->get_JobType() == JT_CONSOLE && !ua->api) {
bstrftime_nc(dt, sizeof(dt), jcr->start_time);
ua->send_msg(_("Console connected at %s\n"), dt);
}
msg = _("Dir inserting Attributes");
break;
}
- switch (jcr->JobType) {
+ switch (jcr->get_JobType()) {
case JT_ADMIN:
case JT_RESTORE:
bstrncpy(level, " ", sizeof(level));
break;
default:
- bstrncpy(level, level_to_str(jcr->JobLevel), sizeof(level));
+ bstrncpy(level, level_to_str(jcr->get_JobLevel()), sizeof(level));
level[7] = 0;
break;
}
memset(&mr, 0, sizeof(mr));
memset(&cr, 0, sizeof(cr));
- jcr->JobLevel = L_FULL; /* we want this to appear as a Full backup */
+ jcr->set_JobLevel(L_FULL); /* we want this to appear as a Full backup */
jcr->jr.JobLevel = L_FULL; /* we want this to appear as a Full backup */
jcr->JobFiles = jcr->SDJobFiles;
jcr->JobBytes = jcr->SDJobBytes;
HOST_OS, DISTNAME, DISTVER,
jcr->jr.JobId,
jcr->jr.Job,
- level_to_str(jcr->JobLevel), jcr->since,
+ level_to_str(jcr->get_JobLevel()), jcr->since,
jcr->client->name(), cr.Uname,
jcr->fileset->name(), jcr->FSCreateTime,
jcr->pool->name(), jcr->pool_source,
if (!allow_duplicate_job(jcr)) {
return false;
}
- switch (jcr->JobLevel) {
+ switch (jcr->get_JobLevel()) {
case L_VERIFY_INIT:
case L_VERIFY_CATALOG:
case L_VERIFY_DISK_TO_CATALOG:
case L_VERIFY_DATA:
break;
default:
- Jmsg2(jcr, M_FATAL, 0, _("Unimplemented Verify level %d(%c)\n"), jcr->JobLevel,
- jcr->JobLevel);
+ Jmsg2(jcr, M_FATAL, 0, _("Unimplemented Verify level %d(%c)\n"), jcr->get_JobLevel(),
+ jcr->get_JobLevel());
return false;
}
return true;
* For VERIFY_VOLUME_TO_CATALOG, we want the JobId of the
* last backup Job.
*/
- if (jcr->JobLevel == L_VERIFY_CATALOG ||
- jcr->JobLevel == L_VERIFY_VOLUME_TO_CATALOG ||
- jcr->JobLevel == L_VERIFY_DISK_TO_CATALOG) {
+ if (jcr->get_JobLevel() == L_VERIFY_CATALOG ||
+ jcr->get_JobLevel() == L_VERIFY_VOLUME_TO_CATALOG ||
+ jcr->get_JobLevel() == L_VERIFY_DISK_TO_CATALOG) {
memcpy(&jr, &jcr->jr, sizeof(jr));
if (jcr->verify_job &&
- (jcr->JobLevel == L_VERIFY_VOLUME_TO_CATALOG ||
- jcr->JobLevel == L_VERIFY_DISK_TO_CATALOG)) {
+ (jcr->get_JobLevel() == L_VERIFY_VOLUME_TO_CATALOG ||
+ jcr->get_JobLevel() == L_VERIFY_DISK_TO_CATALOG)) {
Name = jcr->verify_job->name();
} else {
Name = NULL;
}
Dmsg1(100, "find last jobid for: %s\n", NPRT(Name));
if (!db_find_last_jobid(jcr, jcr->db, Name, &jr)) {
- if (jcr->JobLevel == L_VERIFY_CATALOG) {
+ if (jcr->get_JobLevel() == L_VERIFY_CATALOG) {
Jmsg(jcr, M_FATAL, 0, _(
"Unable to find JobId of previous InitCatalog Job.\n"
"Please run a Verify with Level=InitCatalog before\n"
* Now get the job record for the previous backup that interests
* us. We use the verify_jobid that we found above.
*/
- if (jcr->JobLevel == L_VERIFY_CATALOG ||
- jcr->JobLevel == L_VERIFY_VOLUME_TO_CATALOG ||
- jcr->JobLevel == L_VERIFY_DISK_TO_CATALOG) {
+ if (jcr->get_JobLevel() == L_VERIFY_CATALOG ||
+ jcr->get_JobLevel() == L_VERIFY_VOLUME_TO_CATALOG ||
+ jcr->get_JobLevel() == L_VERIFY_DISK_TO_CATALOG) {
jcr->previous_jr.JobId = verify_jobid;
if (!db_get_job_record(jcr, jcr->db, &jcr->previous_jr)) {
Jmsg(jcr, M_FATAL, 0, _("Could not get job record for previous Job. ERR=%s"),
* create a dummy authorization key (passed to
* File daemon but not used).
*/
- if (jcr->JobLevel == L_VERIFY_VOLUME_TO_CATALOG) {
+ if (jcr->get_JobLevel() == L_VERIFY_VOLUME_TO_CATALOG) {
if (!create_restore_bootstrap_file(jcr)) {
return false;
}
jcr->sd_auth_key = bstrdup("dummy"); /* dummy Storage daemon key */
}
- if (jcr->JobLevel == L_VERIFY_DISK_TO_CATALOG && jcr->verify_job) {
+ if (jcr->get_JobLevel() == L_VERIFY_DISK_TO_CATALOG && jcr->verify_job) {
jcr->fileset = jcr->verify_job->fileset;
}
- Dmsg2(100, "ClientId=%u JobLevel=%c\n", jcr->previous_jr.ClientId, jcr->JobLevel);
+ Dmsg2(100, "ClientId=%u JobLevel=%c\n", jcr->previous_jr.ClientId, jcr->get_JobLevel());
if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) {
Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db));
/* Print Job Start message */
Jmsg(jcr, M_INFO, 0, _("Start Verify JobId=%s Level=%s Job=%s\n"),
- edit_uint64(jcr->JobId, ed1), level_to_str(jcr->JobLevel), jcr->Job);
+ edit_uint64(jcr->JobId, ed1), level_to_str(jcr->get_JobLevel()), jcr->Job);
- if (jcr->JobLevel == L_VERIFY_VOLUME_TO_CATALOG) {
+ if (jcr->get_JobLevel() == L_VERIFY_VOLUME_TO_CATALOG) {
/*
* Start conversation with Storage daemon
*/
* Send Level command to File daemon, as well
* as the Storage address if appropriate.
*/
- switch (jcr->JobLevel) {
+ switch (jcr->get_JobLevel()) {
case L_VERIFY_INIT:
level = "init";
break;
level="disk_to_catalog";
break;
default:
- Jmsg2(jcr, M_FATAL, 0, _("Unimplemented Verify level %d(%c)\n"), jcr->JobLevel,
- jcr->JobLevel);
+ Jmsg2(jcr, M_FATAL, 0, _("Unimplemented Verify level %d(%c)\n"), jcr->get_JobLevel(),
+ jcr->get_JobLevel());
goto bail_out;
}
* catalog depending on the run type.
*/
/* Compare to catalog */
- switch (jcr->JobLevel) {
+ switch (jcr->get_JobLevel()) {
case L_VERIFY_CATALOG:
Dmsg0(10, "Verify level=catalog\n");
jcr->sd_msg_thread_done = true; /* no SD msg thread, so it is done */
break;
default:
- Jmsg1(jcr, M_FATAL, 0, _("Unimplemented verify level %d\n"), jcr->JobLevel);
+ Jmsg1(jcr, M_FATAL, 0, _("Unimplemented verify level %d\n"), jcr->get_JobLevel());
goto bail_out;
}
// Dmsg1(100, "Enter verify_cleanup() TermCod=%d\n", TermCode);
- Dmsg3(900, "JobLevel=%c Expected=%u JobFiles=%u\n", jcr->JobLevel,
+ Dmsg3(900, "JobLevel=%c Expected=%u JobFiles=%u\n", jcr->get_JobLevel(),
jcr->ExpectedFiles, jcr->JobFiles);
- if (jcr->JobLevel == L_VERIFY_VOLUME_TO_CATALOG &&
+ if (jcr->get_JobLevel() == L_VERIFY_VOLUME_TO_CATALOG &&
jcr->ExpectedFiles != jcr->JobFiles) {
TermCode = JS_ErrorTerminated;
}
/* If no files were expected, there can be no error */
- if (jcr->JobLevel == L_VERIFY_VOLUME_TO_CATALOG &&
+ if (jcr->get_JobLevel() == L_VERIFY_VOLUME_TO_CATALOG &&
jcr->ExpectedFiles == 0) {
TermCode = JS_Terminated;
}
}
jobstatus_to_ascii(jcr->FDJobStatus, fd_term_msg, sizeof(fd_term_msg));
- if (jcr->JobLevel == L_VERIFY_VOLUME_TO_CATALOG) {
+ if (jcr->get_JobLevel() == L_VERIFY_VOLUME_TO_CATALOG) {
jobstatus_to_ascii(jcr->SDJobStatus, sd_term_msg, sizeof(sd_term_msg));
Jmsg(jcr, msg_type, 0, _("Bacula %s %s (%s): %s\n"
" Build OS: %s %s %s\n"
jcr->jr.JobId,
jcr->jr.Job,
jcr->fileset->hdr.name,
- level_to_str(jcr->JobLevel),
+ level_to_str(jcr->get_JobLevel()),
jcr->client->hdr.name,
jcr->previous_jr.JobId,
Name,
jcr->jr.JobId,
jcr->jr.Job,
jcr->fileset->hdr.name,
- level_to_str(jcr->JobLevel),
+ level_to_str(jcr->get_JobLevel()),
jcr->client->name(),
jcr->previous_jr.JobId,
Name,
}
if (!jcr->fn_printed) {
Qmsg(jcr, M_INFO, 0, _("\nThe following files are in the Catalog but not on %s:\n"),
- jcr->JobLevel == L_VERIFY_VOLUME_TO_CATALOG ? "the Volume(s)" : "disk");
+ jcr->get_JobLevel() == L_VERIFY_VOLUME_TO_CATALOG ? "the Volume(s)" : "disk");
jcr->fn_printed = true;
}
Qmsg(jcr, M_INFO, 0, " %s%s\n", row[0]?row[0]:"", row[1]?row[1]:"");
#ifndef USE_TCADB
hlink link;
#endif
- char *fname; /* not stored with tchdb mode */
+ char *fname; /* not stored with tchdb mode */
time_t ctime;
time_t mtime;
bool seen;
} CurFile;
#ifdef USE_TCADB
-static void realfree(void *p); /* used by tokyo code */
+static void realfree(void *p); /* used by tokyo code */
/*
* Update hash element seen=1
elt->seen = 1;
if (!tcadbput(jcr->file_list,
- elt->fname, strlen(elt->fname)+1,
- elt, sizeof(CurFile)))
+ elt->fname, strlen(elt->fname)+1,
+ elt, sizeof(CurFile)))
{ /* TODO: disabling accurate mode ? */
Jmsg(jcr, M_ERROR, 1, _("Can't update accurate hash disk\n"));
ret = false;
CurFile *elt;
elt = (CurFile*)tcadbget(jcr->file_list,
- fname, strlen(fname)+1, &size);
+ fname, strlen(fname)+1, &size);
if (elt)
{
/* TODO: don't malloc/free results */
//
// tchdbsetcache(jcr->file_list, 300000);
// tchdbtune(jcr->file_list,
-// nbfile, /* nb bucket 0.5n to 4n */
-// 6, /* size of element 2^x */
-// 16,
-// 0); /* options like compression */
+// nbfile, /* nb bucket 0.5n to 4n */
+// 6, /* size of element 2^x */
+// 16,
+// 0); /* options like compression */
//
jcr->hash_name = get_pool_memory(PM_MESSAGE);
POOLMEM *temp = get_pool_memory(PM_MESSAGE);
make_unique_filename(&jcr->hash_name, jcr->JobId, "accurate");
pm_strcat(jcr->hash_name, ".tcb");
Mmsg(temp, "%s#bnum=%i#mode=e#opts=l",
- jcr->hash_name, nbfile*4);
+ jcr->hash_name, nbfile*4);
Dmsg1(dbglvl, "Doing accurate hash on disk %s\n", jcr->hash_name);
} else {
Dmsg0(dbglvl, "Doing accurate hash on memory\n");
FF_PKT *ff_pkt;
int stream = STREAM_UNIX_ATTRIBUTES;
- if (!jcr->accurate || jcr->JobLevel == L_FULL) {
+ if (!jcr->accurate || jcr->get_JobLevel() == L_FULL) {
goto bail_out;
}
tcadbiterinit(jcr->file_list);
while((key = tcadbiternext2(jcr->file_list)) != NULL) {
elt = (CurFile *) tcadbget(jcr->file_list,
- key, strlen(key)+1, &size);
+ key, strlen(key)+1, &size);
if (elt)
{
- if (!elt->seen) { /* already seen */
- ff_pkt->fname = key;
- ff_pkt->statp.st_mtime = elt->mtime;
- ff_pkt->statp.st_ctime = elt->ctime;
- encode_and_send_attributes(jcr, ff_pkt, stream);
- }
- realfree(elt);
+ if (!elt->seen) { /* not seen in this backup */
+ ff_pkt->fname = key;
+ ff_pkt->statp.st_mtime = elt->mtime;
+ ff_pkt->statp.st_ctime = elt->ctime;
+ encode_and_send_attributes(jcr, ff_pkt, stream);
+ }
+ realfree(elt);
}
- realfree(key); /* tokyo cabinet have to use real free() */
+ realfree(key); /* tokyo cabinet have to use real free() */
}
term_find_files(ff_pkt);
/* TODO: clean htable when this function is not reached ? */
if (jcr->file_list) {
if(!tcadbclose(jcr->file_list)){
- Jmsg(jcr, M_ERROR, 1, _("Can't close accurate hash disk\n"));
+ Jmsg(jcr, M_ERROR, 1, _("Can't close accurate hash disk\n"));
}
/* delete the object */
tcadbdel(jcr->file_list);
if (!bstrcmp(jcr->hash_name, "*")) {
- unlink(jcr->hash_name);
+ unlink(jcr->hash_name);
}
free_pool_memory(jcr->hash_name);
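For readers unfamiliar with the Tokyo Cabinet abstract API used above, the following stand-alone sketch walks the same put / get / iterate / close cycle. Only the tcadb* calls are the library's real API; the file name, bucket count and the Rec struct are illustrative stand-ins for Bacula's CurFile record.

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <tcadb.h>

struct Rec { int seen; };                 /* hypothetical stand-in for CurFile */

int main()
{
   TCADB *adb = tcadbnew();
   /* ".tcb" selects an on-disk B-tree; "#bnum=" tunes the bucket count, as in the patch */
   if (!tcadbopen(adb, "accurate-example.tcb#bnum=4096")) {
      fprintf(stderr, "open failed\n");
      return 1;
   }
   const char *fname = "/etc/hosts";
   Rec r;
   r.seen = 0;
   tcadbput(adb, fname, strlen(fname)+1, &r, sizeof(r));        /* add one file */

   int size;
   Rec *elt = (Rec *)tcadbget(adb, fname, strlen(fname)+1, &size);
   if (elt) {                                                   /* mark it seen */
      elt->seen = 1;
      tcadbput(adb, fname, strlen(fname)+1, elt, sizeof(Rec));
      free(elt);                        /* tcadbget returns heap memory */
   }

   tcadbiterinit(adb);                  /* walk all keys, like the unseen-file loop */
   char *key;
   while ((key = tcadbiternext2(adb)) != NULL) {
      Rec *e = (Rec *)tcadbget(adb, key, strlen(key)+1, &size);
      if (e) {
         printf("%s seen=%d\n", key, e->seen);
         free(e);
      }
      free(key);                        /* iterator keys must be freed too */
   }
   if (!tcadbclose(adb)) {
      fprintf(stderr, "close failed\n");
   }
   tcadbdel(adb);                       /* delete the object, as in the cleanup above */
   return 0;
}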
static bool accurate_mark_file_as_seen(JCR *jcr, CurFile *elt)
{
CurFile *temp = (CurFile *)jcr->file_list->lookup(elt->fname);
- temp->seen = 1; /* records are in memory */
+ temp->seen = 1; /* records are in memory */
return true;
}
FF_PKT *ff_pkt;
int stream = STREAM_UNIX_ATTRIBUTES;
- if (!jcr->accurate || jcr->JobLevel == L_FULL) {
+ if (!jcr->accurate || jcr->get_JobLevel() == L_FULL) {
goto bail_out;
}
#ifdef USE_TCADB
if (!tcadbput(jcr->file_list,
- fname, strlen(fname)+1,
- &elt, sizeof(CurFile)))
+ fname, strlen(fname)+1,
+ &elt, sizeof(CurFile)))
{
Jmsg(jcr, M_ERROR, 1, _("Can't update accurate hash disk ERR=%s\n"));
ret = false;
char *fname;
CurFile elt;
- if (!jcr->accurate || jcr->JobLevel == L_FULL) {
+ if (!jcr->accurate || jcr->get_JobLevel() == L_FULL) {
return true;
}
int len;
int32_t nb;
- if (!jcr->accurate || job_canceled(jcr) || jcr->JobLevel==L_FULL) {
+ if (!jcr->accurate || job_canceled(jcr) || jcr->get_JobLevel()==L_FULL) {
return true;
}
while (dir->recv() >= 0) {
len = strlen(dir->msg) + 1;
if (len < dir->msglen) {
- accurate_add_file(jcr, dir->msg, dir->msg + len);
+ accurate_add_file(jcr, dir->msg, dir->msg + len);
}
}
}
/* Base backup requested? */
if (strcmp(level, "base") == 0) {
- jcr->JobLevel = L_BASE;
+ jcr->set_JobLevel(L_BASE);
/* Full backup requested? */
} else if (strcmp(level, "full") == 0) {
- jcr->JobLevel = L_FULL;
+ jcr->set_JobLevel(L_FULL);
} else if (strstr(level, "differential")) {
- jcr->JobLevel = L_DIFFERENTIAL;
+ jcr->set_JobLevel(L_DIFFERENTIAL);
free_memory(level);
return 1;
} else if (strstr(level, "incremental")) {
- jcr->JobLevel = L_INCREMENTAL;
+ jcr->set_JobLevel(L_INCREMENTAL);
free_memory(level);
return 1;
/*
buf = get_memory(dir->msglen+1);
utime_t since_time, adj;
btime_t his_time, bt_start, rt=0, bt_adj=0;
- if (jcr->JobLevel == L_NONE) {
- jcr->JobLevel = L_SINCE; /* if no other job level set, do it now */
+ if (jcr->get_JobLevel() == L_NONE) {
+ jcr->set_JobLevel(L_SINCE); /* if no other job level set, do it now */
}
if (sscanf(dir->msg, "level = since_utime %s mtime_only=%d",
buf, &mtime_only) != 2) {
if (buf) {
free_memory(buf);
}
- generate_plugin_event(jcr, bEventLevel, (void *)jcr->JobLevel);
+ generate_plugin_event(jcr, bEventLevel, (void *)jcr->get_JobLevel());
return dir->fsend(OKlevel);
bail_out:
#endif
set_jcr_job_status(jcr, JS_Blocked);
- jcr->JobType = JT_BACKUP;
+ jcr->set_JobType(JT_BACKUP);
Dmsg1(100, "begin backup ff=%p\n", jcr->ff);
if (sd == NULL) {
BSOCK *sd = jcr->store_bsock;
char level[100];
- jcr->JobType = JT_VERIFY;
+ jcr->set_JobType(JT_VERIFY);
if (sscanf(dir->msg, verifycmd, level) != 1) {
dir->fsend(_("2994 Bad verify command: %s\n"), dir->msg);
return 0;
}
if (strcasecmp(level, "init") == 0) {
- jcr->JobLevel = L_VERIFY_INIT;
+ jcr->set_JobLevel(L_VERIFY_INIT);
} else if (strcasecmp(level, "catalog") == 0){
- jcr->JobLevel = L_VERIFY_CATALOG;
+ jcr->set_JobLevel(L_VERIFY_CATALOG);
} else if (strcasecmp(level, "volume") == 0){
- jcr->JobLevel = L_VERIFY_VOLUME_TO_CATALOG;
+ jcr->set_JobLevel(L_VERIFY_VOLUME_TO_CATALOG);
} else if (strcasecmp(level, "data") == 0){
- jcr->JobLevel = L_VERIFY_DATA;
+ jcr->set_JobLevel(L_VERIFY_DATA);
} else if (strcasecmp(level, "disk_to_catalog") == 0) {
- jcr->JobLevel = L_VERIFY_DISK_TO_CATALOG;
+ jcr->set_JobLevel(L_VERIFY_DISK_TO_CATALOG);
} else {
dir->fsend(_("2994 Bad verify level: %s\n"), dir->msg);
return 0;
dir->fsend(OKverify);
generate_daemon_event(jcr, "JobStart");
- generate_plugin_event(jcr, bEventLevel, (void *)jcr->JobLevel);
+ generate_plugin_event(jcr, bEventLevel, (void *)jcr->get_JobLevel());
generate_plugin_event(jcr, bEventStartVerifyJob);
Dmsg1(110, "bfiled>dird: %s", dir->msg);
- switch (jcr->JobLevel) {
+ switch (jcr->get_JobLevel()) {
case L_VERIFY_INIT:
case L_VERIFY_CATALOG:
do_verify(jcr);
dir->fsend(OKrestore);
Dmsg1(110, "bfiled>dird: %s", dir->msg);
- jcr->JobType = JT_RESTORE;
+ jcr->set_JobType(JT_RESTORE);
set_jcr_job_status(jcr, JS_Blocked);
case 0: /* FD's name */
return Py_BuildValue((char *)getvars[i].fmt, my_name);
case 1: /* level */
- return Py_BuildValue((char *)getvars[i].fmt, job_level_to_str(jcr->JobLevel));
+ return Py_BuildValue((char *)getvars[i].fmt, job_level_to_str(jcr->get_JobLevel()));
case 2: /* type */
- return Py_BuildValue((char *)getvars[i].fmt, job_type_to_str(jcr->JobType));
+ return Py_BuildValue((char *)getvars[i].fmt, job_type_to_str(jcr->get_JobType()));
case 3: /* JobId */
return Py_BuildValue((char *)getvars[i].fmt, jcr->JobId);
case 4: /* Client */
njcr->JobId, njcr->Job);
sendit(msg.c_str(), len, sp);
len = Mmsg(msg, _(" %s%s Job started: %s\n"),
- vss, job_type_to_str(njcr->JobType), dt);
+ vss, job_type_to_str(njcr->get_JobType()), dt);
}
sendit(msg.c_str(), len, sp);
if (njcr->JobId == 0) {
private:
pthread_mutex_t mutex; /* jcr mutex */
volatile int32_t _use_count; /* use count */
+ int32_t m_JobType; /* backup, restore, verify ... */
+ int32_t m_JobLevel; /* Job level */
public:
void lock() {P(mutex); };
void unlock() {V(mutex); };
void init_mutex(void) {pthread_mutex_init(&mutex, NULL); };
void destroy_mutex(void) {pthread_mutex_destroy(&mutex); };
bool is_job_canceled() {return job_canceled(this); };
+ int32_t get_JobType() { return m_JobType; };
+ int32_t get_JobLevel() { return m_JobLevel; };
+
+ void set_JobLevel(int32_t JobLevel); /* in lib/jcr.c */
+ void set_JobType(int32_t JobType); /* in lib/jcr.c */
+ bool JobReads(); /* in lib/jcr.c */
/* Global part of JCR common to all daemons */
dlink link; /* JCR chain link */
uint64_t ReadBytes; /* Bytes read -- before compression */
uint32_t Errors; /* Number of non-fatal errors */
volatile int32_t JobStatus; /* ready, running, blocked, terminated */
- int32_t JobType; /* backup, restore, verify ... */
- int32_t JobLevel; /* Job level */
int32_t JobPriority; /* Job priority */
- bool JobReads; /* Set if job reads Volumes */
time_t sched_time; /* job schedule time, i.e. when it should start */
time_t start_time; /* when job actually started */
time_t run_time; /* used for computing speed */
V(last_jobs_mutex);
}
+/* Set Job type in JCR */
+void JCR::set_JobType(int32_t JobType)
+{
+ m_JobType = JobType;
+}
+
+/* Set Job level in JCR */
+void JCR::set_JobLevel(int32_t JobLevel)
+{
+ m_JobLevel = JobLevel;
+}
+
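+/* Return true if this Job reads a Volume; derived from the Job type and level */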
+bool JCR::JobReads()
+{
+ switch (m_JobType) {
+ case JT_VERIFY:
+ case JT_RESTORE:
+ case JT_COPY:
+ case JT_MIGRATE:
+ return true;
+ case JT_BACKUP:
+ if (m_JobLevel == L_VIRTUAL_FULL) {
+ return true;
+ }
+ break;
+ default:
+ break;
+ }
+ return false;
+}
+
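To make the new calling convention concrete, here is a minimal stand-alone sketch of the pattern (MockJCR and the enum values are illustrative stand-ins, not Bacula's jcr.h): type and level live in private members, callers go through the set_/get_ accessors, and the read-side requirement is computed on demand by JobReads() rather than cached in a bool that can go stale when the level is changed later (e.g., upgraded to Virtual Full).

#include <cstdio>
#include <cstdint>

/* Illustrative stand-ins for Bacula's JT_* and L_* defines */
enum { JT_BACKUP, JT_RESTORE, JT_VERIFY, JT_COPY, JT_MIGRATE };
enum { L_FULL, L_INCREMENTAL, L_DIFFERENTIAL, L_VIRTUAL_FULL };

class MockJCR {
private:
   int32_t m_JobType;                   /* backup, restore, verify ... */
   int32_t m_JobLevel;                  /* Job level */
public:
   MockJCR() : m_JobType(JT_BACKUP), m_JobLevel(L_FULL) {}
   void set_JobType(int32_t t)  { m_JobType = t; }
   void set_JobLevel(int32_t l) { m_JobLevel = l; }
   int32_t get_JobType() const  { return m_JobType; }
   int32_t get_JobLevel() const { return m_JobLevel; }
   bool JobReads() const {              /* derived each time, never stored */
      switch (m_JobType) {
      case JT_VERIFY:
      case JT_RESTORE:
      case JT_COPY:
      case JT_MIGRATE:
         return true;
      case JT_BACKUP:
         return m_JobLevel == L_VIRTUAL_FULL;
      default:
         return false;
      }
   }
};

int main()
{
   MockJCR jcr;
   jcr.set_JobType(JT_BACKUP);
   jcr.set_JobLevel(L_INCREMENTAL);
   printf("incremental backup reads a Volume: %d\n", (int)jcr.JobReads());   /* 0 */
   jcr.set_JobLevel(L_VIRTUAL_FULL);
   printf("virtual full backup reads a Volume: %d\n", (int)jcr.JobReads());  /* 1 */
   return 0;
}

Because the answer is recomputed from the current type and level, call sites such as copy_rwstorage() above remain correct even when console or run-time overrides change the level after set_jcr_defaults(), which is presumably why the stored bool JobReads field is dropped from the JCR.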
/*
* Push a subroutine address into the job end callback stack
*/
/* Setup some dummy values */
bstrncpy(jcr->Job, "*System*", sizeof(jcr->Job));
jcr->JobId = 0;
- jcr->JobType = JT_SYSTEM; /* internal job until defined */
- jcr->JobLevel = L_NONE;
+ jcr->set_JobType(JT_SYSTEM); /* internal job until defined */
+ jcr->set_JobLevel(L_NONE);
set_jcr_job_status(jcr, JS_Created); /* ready to run */
set_jcr_in_tsd(jcr);
sigtimer.sa_flags = 0;
Dmsg1(dbglvl, "End job=%d\n", jcr->JobId);
/* Keep some statistics */
- switch (jcr->JobType) {
+ switch (jcr->get_JobType()) {
case JT_BACKUP:
case JT_VERIFY:
case JT_RESTORE:
je = (struct s_last_job *)malloc(sizeof(struct s_last_job));
memset(je, 0, sizeof(struct s_last_job)); /* zero in case unset fields */
je->Errors = jcr->Errors;
- je->JobType = jcr->JobType;
+ je->JobType = jcr->get_JobType();
je->JobId = jcr->JobId;
je->VolSessionId = jcr->VolSessionId;
je->VolSessionTime = jcr->VolSessionTime;
je->JobFiles = jcr->JobFiles;
je->JobBytes = jcr->JobBytes;
je->JobStatus = jcr->JobStatus;
- je->JobLevel = jcr->JobLevel;
+ je->JobLevel = jcr->get_JobLevel();
je->start_time = jcr->start_time;
je->end_time = time(NULL);
if (status) {
for (pos = 0 ; vs[pos] ; pos += 2) {
if ( !strcmp(vs[pos],status) ) {
- return vs[pos+1];
+ return vs[pos+1];
}
}
}
break;
case 'l':
if (jcr) {
- str = job_level_to_str(jcr->JobLevel);
+ str = job_level_to_str(jcr->get_JobLevel());
} else {
str = _("*none*");
}
break;
case 't':
if (jcr) {
- str = job_type_to_str(jcr->JobType);
+ str = job_type_to_str(jcr->get_JobType());
} else {
str = _("*none*");
}
JCR *jcr = dcr->jcr;
if (jcr) Dmsg1(500, "JobId=%u enter attach_dcr_to_dev\n", (uint32_t)jcr->JobId);
- if (!dcr->attached_to_dev && dev->initiated && jcr && jcr->JobType != JT_SYSTEM) {
+ if (!dcr->attached_to_dev && dev->initiated && jcr && jcr->get_JobType() != JT_SYSTEM) {
dev->attached_dcrs->append(dcr); /* attach dcr to device */
dcr->attached_to_dev = true;
Dmsg1(500, "JobId=%u attach_dcr_to_dev\n", (uint32_t)jcr->JobId);
POOL_MEM VolumeName;
/* If system job, do not update catalog */
- if (jcr->JobType == JT_SYSTEM) {
+ if (jcr->get_JobType() == JT_SYSTEM) {
return true;
}
char ed1[50];
/* If system job, do not update catalog */
- if (jcr->JobType == JT_SYSTEM) {
+ if (jcr->get_JobType() == JT_SYSTEM) {
return true;
}
}
if (!write_block_to_dev(dcr)) {
- if (job_canceled(jcr) || jcr->JobType == JT_SYSTEM) {
+ if (job_canceled(jcr) || jcr->get_JobType() == JT_SYSTEM) {
stat = false;
} else {
stat = fixup_device_block_write_error(dcr);
jr.PoolId = pr.PoolId;
mjcr->start_time = jr.StartTime;
- mjcr->JobLevel = jr.JobLevel;
+ mjcr->set_JobLevel(jr.JobLevel);
mjcr->client_name = get_pool_memory(PM_FNAME);
pm_strcpy(mjcr->client_name, label.ClientName);
}
if (verbose) {
Pmsg3(000, _("Updated Job termination record for JobId=%u Level=%s TermStat=%c\n"),
- jr->JobId, job_level_to_str(mjcr->JobLevel), jr->JobStatus);
+ jr->JobId, job_level_to_str(mjcr->get_JobLevel()), jr->JobStatus);
}
if (verbose > 1) {
const char *term_msg;
mjcr->JobId,
mjcr->Job,
mjcr->fileset_name,
- job_level_to_str(mjcr->JobLevel),
+ job_level_to_str(mjcr->get_JobLevel()),
mjcr->client_name,
sdt,
edt,
* the JobId and the ClientId.
*/
jobjcr = new_jcr(sizeof(JCR), bscan_free_jcr);
- jobjcr->JobType = jr->JobType;
- jobjcr->JobLevel = jr->JobLevel;
+ jobjcr->set_JobType(jr->JobType);
+ jobjcr->set_JobLevel(jr->JobLevel);
jobjcr->JobStatus = jr->JobStatus;
bstrncpy(jobjcr->Job, jr->Job, sizeof(jobjcr->Job));
jobjcr->JobId = JobId; /* this is JobId on tape */
jcr->NumReadVolumes = 0;
jcr->NumWriteVolumes = 0;
jcr->JobId = 0;
- jcr->JobType = JT_CONSOLE;
- jcr->JobLevel = L_FULL;
+ jcr->set_JobType(JT_CONSOLE);
+ jcr->set_JobLevel(L_FULL);
jcr->JobStatus = JS_Terminated;
jcr->where = bstrdup("");
jcr->job_name = get_pool_memory(PM_FNAME);
Dmsg1(120, "Append data: %s", fd->msg);
if (jcr->session_opened) {
Dmsg1(110, "<bfiled: %s", fd->msg);
- jcr->JobType = JT_BACKUP;
+ jcr->set_JobType(JT_BACKUP);
if (do_append_data(jcr)) {
return true;
} else {
}
jcr->session_opened = true;
- jcr->JobType = JT_RESTORE;
+ jcr->set_JobType(JT_RESTORE);
/* Send "Ticket" to File Daemon */
fd->fsend(OK_open, jcr->VolSessionId);
unbash_spaces(fileset_name);
jcr->fileset_name = get_pool_memory(PM_NAME);
pm_strcpy(jcr->fileset_name, fileset_name);
- jcr->JobType = JobType;
- jcr->JobLevel = level;
+ jcr->set_JobType(JobType);
+ jcr->set_JobLevel(level);
jcr->no_attributes = no_attributes;
jcr->spool_attributes = spool_attributes;
jcr->spool_data = spool_data;
/* Added in VerNum 10 */
ser_string(jcr->Job); /* Unique name of this Job */
ser_string(jcr->fileset_name);
- ser_uint32(jcr->JobType);
- ser_uint32(jcr->JobLevel);
+ ser_uint32(jcr->get_JobType());
+ ser_uint32(jcr->get_JobLevel());
/* Added in VerNum 11 */
ser_string(jcr->fileset_md5);
char ec1[50];
DEVICE *dev;
- switch(jcr->JobType) {
+ switch(jcr->get_JobType()) {
case JT_MIGRATE:
Type = "Migration";
break;
case 1: /* SD's name */
return Py_BuildValue((char *)getvars[i].fmt, my_name);
case 2: /* level */
- return Py_BuildValue((char *)getvars[i].fmt, job_level_to_str(jcr->JobLevel));
+ return Py_BuildValue((char *)getvars[i].fmt, job_level_to_str(jcr->get_JobLevel()));
case 3: /* type */
- return Py_BuildValue((char *)getvars[i].fmt, job_type_to_str(jcr->JobType));
+ return Py_BuildValue((char *)getvars[i].fmt, job_type_to_str(jcr->get_JobType()));
case 4: /* JobId */
return Py_BuildValue((char *)getvars[i].fmt, jcr->JobId);
case 5: /* Client */
foreach_jcr(jcr) {
if (jcr->JobStatus == JS_WaitFD) {
len = Mmsg(msg, _("%s Job %s waiting for Client connection.\n"),
- job_type_to_str(jcr->JobType), jcr->Job);
+ job_type_to_str(jcr->get_JobType()), jcr->Job);
sendit(msg, len, sp);
}
dcr = jcr->dcr;
if (rdcr && rdcr->device) {
len = Mmsg(msg, _("Reading: %s %s job %s JobId=%d Volume=\"%s\"\n"
" pool=\"%s\" device=%s\n"),
- job_level_to_str(jcr->JobLevel),
- job_type_to_str(jcr->JobType),
+ job_level_to_str(jcr->get_JobLevel()),
+ job_type_to_str(jcr->get_JobType()),
JobName,
jcr->JobId,
rdcr->VolumeName,
if (dcr && dcr->device) {
len = Mmsg(msg, _("Writing: %s %s job %s JobId=%d Volume=\"%s\"\n"
" pool=\"%s\" device=%s\n"),
- job_level_to_str(jcr->JobLevel),
- job_type_to_str(jcr->JobType),
+ job_level_to_str(jcr->get_JobLevel()),
+ job_type_to_str(jcr->get_JobType()),
JobName,
jcr->JobId,
dcr->VolumeName,
pthread_detach(pthread_self());
jcr = new_jcr(sizeof(JCR), stored_free_jcr);
- jcr->JobType = JT_SYSTEM;
+ jcr->set_JobType(JT_SYSTEM);
/* Initialize FD start condition variable */
int errstat = pthread_cond_init(&jcr->job_start_wait, NULL);
if (errstat != 0) {
break;
case 'f':
- if (nb < 10 ) {
- files[nb++] = optarg;
- }
+ if (nb < 10 ) {
+ files[nb++] = optarg;
+ }
break;
case '?':
bjcr->NumReadVolumes = 0;
bjcr->NumWriteVolumes = 0;
bjcr->JobId = getpid();
- bjcr->JobType = JT_CONSOLE;
- bjcr->JobLevel = L_FULL;
+ bjcr->set_JobType(JT_CONSOLE);
+ bjcr->set_JobLevel(L_FULL);
bjcr->JobStatus = JS_Running;
bjcr->where = bstrdup(files[i]);
bjcr->job_name = get_pool_memory(PM_FNAME);
pm_strcpy(bjcr->fileset_md5, "Dummy.fileset.md5");
if ((db=db_init_database(NULL, db_name, db_user, db_password,
- db_host, 0, NULL, 0)) == NULL) {
- Emsg0(M_ERROR_TERM, 0, _("Could not init Bacula database\n"));
+ db_host, 0, NULL, 0)) == NULL) {
+ Emsg0(M_ERROR_TERM, 0, _("Could not init Bacula database\n"));
}
if (!db_open_database(NULL, db)) {
- Emsg0(M_ERROR_TERM, 0, db_strerror(db));
+ Emsg0(M_ERROR_TERM, 0, db_strerror(db));
}
Dmsg0(200, "Database opened\n");
if (verbose) {
- Pmsg2(000, _("Using Database: %s, User: %s\n"), db_name, db_user);
+ Pmsg2(000, _("Using Database: %s, User: %s\n"), db_name, db_user);
}
bjcr->db = db;
for(p = b = data; *p; p++) {
if (*p == ';') {
- *p = '\0';
- switch (index) {
- case 0:
- ar->FileIndex = str_to_int64(b);
- break;
- case 1:
- ar->fname = b;
- break;
- case 2:
- ar->attr = b;
- break;
- case 3:
- ar->Digest = b;
- break;
- }
- index++;
- b = ++p;
+ *p = '\0';
+ switch (index) {
+ case 0:
+ ar->FileIndex = str_to_int64(b);
+ break;
+ case 1:
+ ar->fname = b;
+ break;
+ case 2:
+ ar->attr = b;
+ break;
+ case 3:
+ ar->Digest = b;
+ break;
+ }
+ index++;
+ b = ++p;
}
}
}
strip_trailing_newline(data);
lineno++;
if (verbose && ((lineno % 5000) == 1)) {
- printf("\r%i", lineno);
+ printf("\r%i", lineno);
}
fill_attr(&ar, data);
if (!db_create_file_attributes_record(bjcr, bjcr->db, &ar)) {
- Emsg0(M_ERROR_TERM, 0, _("Error while inserting file\n"));
+ Emsg0(M_ERROR_TERM, 0, _("Error while inserting file\n"));
}
}
fclose(fd);
printf("\rbegin = %s, end = %s\n", edit_int64(begin, ed1),edit_int64(end, ed2));
printf("Insert time = %sms\n", edit_int64((end - begin) / 10000, ed1));
printf("Create %u files at %.2f/s\n", lineno,
- (lineno / ((float)((end - begin) / 1000000))));
+ (lineno / ((float)((end - begin) / 1000000))));
nb--;
V(mutex);
pthread_exit(NULL);
#undef VERSION
#define VERSION "2.5.2"
-#define BDATE "15 July 2008"
-#define LSMDATE "15Jul08"
+#define BDATE "17 July 2008"
+#define LSMDATE "17Jul08"
#define PROG_COPYRIGHT "Copyright (C) %d-2008 Free Software Foundation Europe e.V.\n"
#define BYEAR "2008" /* year for copyright messages in progs */
General:
17Jul08
+kes Move setting JobLevel and JobType into a method, which should
+ allow completing Virtual Backups.
kes Fix verify jobs to work again.
16Jul08
kes Virtual Backup tweaks -- it is close to working.