*/
/*
- Copyright (C) 2000-2003 Kern Sibbald and John Walker
+ Copyright (C) 2000-2004 Kern Sibbald and John Walker
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
#include "bacula.h"
#include "dird.h"
-#include "ua.h"
extern char my_name[];
extern time_t daemon_start_time;
-extern struct s_last_job last_job;
+extern int num_jobs_run;
-static void print_jobs_scheduled(UAContext *ua);
+static void list_scheduled_jobs(UAContext *ua);
+static void list_running_jobs(UAContext *ua);
+static void list_terminated_jobs(UAContext *ua);
static void do_storage_status(UAContext *ua, STORE *store);
static void do_client_status(UAContext *ua, CLIENT *client);
-static void do_director_status(UAContext *ua, char *cmd);
-static void do_all_status(UAContext *ua, char *cmd);
+static void do_director_status(UAContext *ua);
+static void do_all_status(UAContext *ua);
/*
* status command
*/
-int statuscmd(UAContext *ua, char *cmd)
+int status_cmd(UAContext *ua, const char *cmd)
{
STORE *store;
CLIENT *client;
for (i=1; i<ua->argc; i++) {
if (strcasecmp(ua->argk[i], _("all")) == 0) {
- do_all_status(ua, cmd);
+ do_all_status(ua);
return 1;
} else if (strcasecmp(ua->argk[i], _("dir")) == 0 ||
strcasecmp(ua->argk[i], _("director")) == 0) {
- do_director_status(ua, cmd);
+ do_director_status(ua);
return 1;
} else if (strcasecmp(ua->argk[i], _("client")) == 0) {
client = get_client_resource(ua);
}
return 1;
} else {
- store = get_storage_resource(ua, cmd);
+ store = get_storage_resource(ua, 0);
if (store) {
do_storage_status(ua, store);
}
}
/* If no args, ask for status type */
if (ua->argc == 1) {
+ char prmt[MAX_NAME_LENGTH];
+
start_prompt(ua, _("Status available for:\n"));
add_prompt(ua, _("Director"));
add_prompt(ua, _("Storage"));
add_prompt(ua, _("Client"));
add_prompt(ua, _("All"));
Dmsg0(20, "do_prompt: select daemon\n");
- if ((item=do_prompt(ua, _("Select daemon type for status"), cmd, MAX_NAME_LENGTH)) < 0) {
+ if ((item=do_prompt(ua, "", _("Select daemon type for status"), prmt, sizeof(prmt))) < 0) {
return 1;
}
Dmsg1(20, "item=%d\n", item);
switch (item) {
case 0: /* Director */
- do_director_status(ua, cmd);
+ do_director_status(ua);
break;
case 1:
store = select_storage_resource(ua);
}
break;
case 3:
- do_all_status(ua, cmd);
+ do_all_status(ua);
break;
default:
break;
return 1;
}
-static void do_all_status(UAContext *ua, char *cmd)
+static void do_all_status(UAContext *ua)
{
STORE *store, **unique_store;
CLIENT *client, **unique_client;
- int i, j, found;
+ int i, j;
+ bool found;
- do_director_status(ua, cmd);
+ do_director_status(ua);
/* Count Storage items */
LockRes();
- store = NULL;
- for (i=0; (store = (STORE *)GetNextRes(R_STORAGE, (RES *)store)); i++)
- { }
+ i = 0;
+ foreach_res(store, R_STORAGE) {
+ i++;
+ }
unique_store = (STORE **) malloc(i * sizeof(STORE));
/* Find Unique Storage address/port */
- store = (STORE *)GetNextRes(R_STORAGE, NULL);
i = 0;
- unique_store[i++] = store;
- while ((store = (STORE *)GetNextRes(R_STORAGE, (RES *)store))) {
- found = 0;
+ foreach_res(store, R_STORAGE) {
+ found = false;
+ if (!acl_access_ok(ua, Storage_ACL, store->hdr.name)) {
+ continue;
+ }
for (j=0; j<i; j++) {
if (strcmp(unique_store[j]->address, store->address) == 0 &&
unique_store[j]->SDport == store->SDport) {
- found = 1;
+ found = true;
break;
}
}
/* Count Client items */
LockRes();
- client = NULL;
- for (i=0; (client = (CLIENT *)GetNextRes(R_CLIENT, (RES *)client)); i++)
- { }
- unique_client = (CLIENT **) malloc(i * sizeof(CLIENT));
+ i = 0;
+ foreach_res(client, R_CLIENT) {
+ i++;
+ }
+ unique_client = (CLIENT **)malloc(i * sizeof(CLIENT));
/* Find Unique Client address/port */
- client = (CLIENT *)GetNextRes(R_CLIENT, NULL);
i = 0;
- unique_client[i++] = client;
- while ((client = (CLIENT *)GetNextRes(R_CLIENT, (RES *)client))) {
- found = 0;
+ foreach_res(client, R_CLIENT) {
+ found = false;
+ if (!acl_access_ok(ua, Client_ACL, client->hdr.name)) {
+ continue;
+ }
for (j=0; j<i; j++) {
if (strcmp(unique_client[j]->address, client->address) == 0 &&
unique_client[j]->FDport == client->FDport) {
- found = 1;
+ found = true;
break;
}
}
}
-static void do_director_status(UAContext *ua, char *cmd)
+/*
+ * Print the Director's own status: version banner, job-run count,
+ * heap statistics (debug only), then the scheduled, running and
+ * terminated job listings.
+ */
+static void do_director_status(UAContext *ua)
{
- JCR *jcr;
- int njobs = 0;
- char *msg;
- char dt[MAX_TIME_LENGTH], b1[30], b2[30];
- int pool_mem = FALSE;
-
- Dmsg0(200, "Doing status\n");
- bsendmsg(ua, "%s Version: " VERSION " (" DATE ")\n", my_name);
- bstrftime(dt, sizeof(dt), daemon_start_time);
- bsendmsg(ua, _("Daemon started %s, %d Job%s run.\n"), dt, last_job.NumJobs,
- last_job.NumJobs == 1 ? "" : "s");
- if (last_job.NumJobs > 0) {
- char termstat[30];
-
- bstrftime(dt, sizeof(dt), last_job.end_time);
- bsendmsg(ua, _("Last Job %s finished at %s\n"), last_job.Job, dt);
- jobstatus_to_ascii(last_job.JobStatus, termstat, sizeof(termstat));
-
- bsendmsg(ua, _(" Files=%s Bytes=%s Termination Status=%s\n"),
- edit_uint64_with_commas(last_job.JobFiles, b1),
- edit_uint64_with_commas(last_job.JobBytes, b2),
- termstat);
- }
- lock_jcr_chain();
- for (jcr=NULL; (jcr=get_next_jcr(jcr)); njobs++) {
- if (jcr->JobId == 0) { /* this is us */
- bstrftime(dt, sizeof(dt), jcr->start_time);
- bsendmsg(ua, _("Console connected at %s\n"), dt);
- free_locked_jcr(jcr);
- njobs--;
- continue;
- }
- switch (jcr->JobStatus) {
- case JS_Created:
- msg = _("is waiting execution");
- break;
- case JS_Running:
- msg = _("is running");
- break;
- case JS_Blocked:
- msg = _("is blocked");
- break;
- case JS_Terminated:
- msg = _("has terminated");
- break;
- case JS_ErrorTerminated:
- msg = _("has erred");
- break;
- case JS_Cancelled:
- msg = _("has been canceled");
- break;
- case JS_WaitFD:
- msg = (char *) get_pool_memory(PM_FNAME);
- Mmsg(&msg, _("is waiting on Client %s"), jcr->client->hdr.name);
- pool_mem = TRUE;
- break;
- case JS_WaitSD:
- msg = (char *) get_pool_memory(PM_FNAME);
- Mmsg(&msg, _("is waiting on Storage %s"), jcr->store->hdr.name);
- pool_mem = TRUE;
- break;
- default:
- msg = (char *) get_pool_memory(PM_FNAME);
- Mmsg(&msg, _("is in unknown state %c"), jcr->JobStatus);
- pool_mem = TRUE;
- break;
- }
- switch (jcr->SDJobStatus) {
- case JS_WaitMount:
- if (pool_mem) {
- free_pool_memory(msg);
- pool_mem = FALSE;
- }
- msg = _("is waiting for a mount request");
- break;
- case JS_WaitMedia:
- if (pool_mem) {
- free_pool_memory(msg);
- pool_mem = FALSE;
- }
- msg = _("is waiting for an appendable Volume");
- break;
- case JS_WaitFD:
- if (!pool_mem) {
- msg = (char *) get_pool_memory(PM_FNAME);
- pool_mem = TRUE;
- }
- Mmsg(&msg, _("is waiting for Client %s to connect to Storage %s"),
- jcr->client->hdr.name, jcr->store->hdr.name);
- break;
-
- }
- bsendmsg(ua, _("JobId %d Job %s %s.\n"), jcr->JobId, jcr->Job, msg);
- if (pool_mem) {
- free_pool_memory(msg);
- pool_mem = FALSE;
- }
- free_locked_jcr(jcr);
+ char dt[MAX_TIME_LENGTH];
+
+ bsendmsg(ua, "%s Version: " VERSION " (" BDATE ") %s %s %s\n", my_name,
+ HOST_OS, DISTNAME, DISTVER);
+ bstrftime_nc(dt, sizeof(dt), daemon_start_time);
+ bsendmsg(ua, _("Daemon started %s, %d Job%s run since started.\n"),
+ dt, num_jobs_run, num_jobs_run == 1 ? "" : "s");
+ /* Allocator statistics (sm_* counters -- presumably smartalloc; confirm), debug only */
+ if (debug_level > 0) {
+ char b1[35], b2[35], b3[35], b4[35];
+ bsendmsg(ua, _(" Heap: bytes=%s max_bytes=%s bufs=%s max_bufs=%s\n"),
+ edit_uint64_with_commas(sm_bytes, b1),
+ edit_uint64_with_commas(sm_max_bytes, b2),
+ edit_uint64_with_commas(sm_buffers, b3),
+ edit_uint64_with_commas(sm_max_buffers, b4));
}
- unlock_jcr_chain();
-
- if (njobs == 0) {
- bsendmsg(ua, _("No jobs are running.\n"));
- }
- print_jobs_scheduled(ua);
+ /*
+ * List scheduled Jobs
+ */
+ list_scheduled_jobs(ua);
+
+ /*
+ * List running jobs
+ */
+ list_running_jobs(ua);
+
+ /*
+ * List terminated jobs
+ */
+ list_terminated_jobs(ua);
bsendmsg(ua, "====\n");
}
if (!connect_to_storage_daemon(ua->jcr, 1, 15, 0)) {
bsendmsg(ua, _("\nFailed to connect to Storage daemon %s.\n====\n"),
store->hdr.name);
+ if (ua->jcr->store_bsock) {
+ bnet_close(ua->jcr->store_bsock);
+ ua->jcr->store_bsock = NULL;
+ }
return;
}
Dmsg0(20, _("Connected to storage daemon\n"));
/* Connect to File daemon */
ua->jcr->client = client;
+ /* Release any old dummy key */
+ if (ua->jcr->sd_auth_key) {
+ free(ua->jcr->sd_auth_key);
+ }
+ /* Create a new dummy SD auth key */
+ ua->jcr->sd_auth_key = bstrdup("dummy");
+
/* Try to connect for 15 seconds */
bsendmsg(ua, _("Connecting to Client %s at %s:%d\n"),
client->hdr.name, client->address, client->FDport);
if (!connect_to_file_daemon(ua->jcr, 1, 15, 0)) {
bsendmsg(ua, _("Failed to connect to Client %s.\n====\n"),
client->hdr.name);
+ if (ua->jcr->file_bsock) {
+ bnet_close(ua->jcr->file_bsock);
+ ua->jcr->file_bsock = NULL;
+ }
return;
}
Dmsg0(20, _("Connected to file daemon\n"));
+/* Print the column header for the "Scheduled Jobs" listing */
static void prt_runhdr(UAContext *ua)
{
- bsendmsg(ua, _("Level Type Scheduled Name\n"));
- bsendmsg(ua, _("=================================================================\n"));
+ bsendmsg(ua, _("\nScheduled Jobs:\n"));
+ bsendmsg(ua, _("Level Type Pri Scheduled Name Volume\n"));
+ bsendmsg(ua, _("===================================================================================\n"));
}
-static void prt_runtime(UAContext *ua, JOB *job, int level, time_t runtime)
+/* Scheduling packet */
+struct sched_pkt {
+ dlink link; /* keep this as first item!!! */
+ JOB *job; /* Job resource to be run */
+ int level; /* effective level: Run override if set, else Job default */
+ int priority; /* effective priority: Run override if set, else Job default */
+ time_t runtime; /* scheduled start time */
+ POOL *pool; /* Pool from the Run directive, if any */
+};
+
+/*
+ * Print one "Scheduled Jobs" line for the given sched_pkt.
+ * For Backup jobs the next Volume to append to is looked up via
+ * find_next_volume_for_append(); this may open a catalog connection,
+ * which is closed again before returning.
+ */
+static void prt_runtime(UAContext *ua, sched_pkt *sp)
{
char dt[MAX_TIME_LENGTH];
+ const char *level_ptr;
+ bool ok = false;
+ bool close_db = false;
+ JCR *jcr = ua->jcr;
+ MEDIA_DBR mr;
+ memset(&mr, 0, sizeof(mr));
+ /* Only Backup jobs need a Volume; others leave mr.VolumeName empty */
+ if (sp->job->JobType == JT_BACKUP) {
+ jcr->db = NULL;
+ ok = complete_jcr_for_job(jcr, sp->job, sp->pool);
+ if (jcr->db) {
+ close_db = true; /* new db opened, remember to close it */
+ }
+ if (ok) {
+ ok = find_next_volume_for_append(jcr, &mr, 0);
+ }
+ if (!ok) {
+ bstrncpy(mr.VolumeName, "*unknown*", sizeof(mr.VolumeName));
+ }
+ }
+ bstrftime_nc(dt, sizeof(dt), sp->runtime);
+ /* Admin and Restore jobs print no level field */
+ switch (sp->job->JobType) {
+ case JT_ADMIN:
+ case JT_RESTORE:
+ level_ptr = " ";
+ break;
+ default:
+ level_ptr = level_to_str(sp->level);
+ break;
+ }
+ bsendmsg(ua, _("%-14s %-8s %3d %-18s %-18s %s\n"),
+ level_ptr, job_type_to_str(sp->job->JobType), sp->priority, dt,
+ sp->job->hdr.name, mr.VolumeName);
+ if (close_db) {
+ db_close_database(jcr, jcr->db);
+ }
+ jcr->db = ua->db; /* restore ua db to jcr */
- bstrftime(dt, sizeof(dt), runtime);
- bsendmsg(ua, _("%-14s %-8s %-18s %s\n"),
- level_to_str(level), job_type_to_str(job->JobType), dt, job->hdr.name);
+}
+
+/*
+ * Sort items by runtime, priority
+ *
+ * dlist comparator (used with binary_insert at the call site):
+ * orders sched_pkt entries by ascending runtime, breaking ties
+ * by ascending priority value. Returns -1, 0 or 1.
+ */
+static int my_compare(void *item1, void *item2)
+{
+ sched_pkt *p1 = (sched_pkt *)item1;
+ sched_pkt *p2 = (sched_pkt *)item2;
+ if (p1->runtime < p2->runtime) {
+ return -1;
+ } else if (p1->runtime > p2->runtime) {
+ return 1;
+ }
+ if (p1->priority < p2->priority) {
+ return -1;
+ } else if (p1->priority > p2->priority) {
+ return 1;
+ }
+ return 0;
+}
/*
- * Find all jobs to be run this hour
- * and the next hour.
+ * Find all jobs to be run in roughly the
+ * next 24 hours.
+ *
+ * Collected jobs are sorted by runtime then priority (my_compare)
+ * before being printed; jobs the user's ACL forbids are skipped.
*/
-static void print_jobs_scheduled(UAContext *ua)
+static void list_scheduled_jobs(UAContext *ua)
{
- time_t now, runtime, tomorrow;
+ time_t runtime;
RUN *run;
JOB *job;
- SCHED *sched;
- struct tm tm;
- int mday, wday, month, wpos, tmday, twday, tmonth, twpos, i, hour;
- int tod, tom;
- int found;
- int hdr_printed = FALSE;
- int level;
+ int level, num_jobs = 0;
+ int priority;
+ bool hdr_printed = false;
+ dlist sched;
+ sched_pkt *sp;
- Dmsg0(200, "enter find_runs()\n");
-
- now = time(NULL);
- localtime_r(&now, &tm);
- mday = tm.tm_mday - 1;
- wday = tm.tm_wday;
- month = tm.tm_mon;
- wpos = (tm.tm_mday - 1) / 7;
-
- tomorrow = now + 60 * 60 * 24;
- localtime_r(&tomorrow, &tm);
- tmday = tm.tm_mday - 1;
- twday = tm.tm_wday;
- tmonth = tm.tm_mon;
- twpos = (tm.tm_mday - 1) / 7;
+ Dmsg0(200, "enter list_sched_jobs()\n");
/* Loop through all jobs */
LockRes();
- for (job=NULL; (job=(JOB *)GetNextRes(R_JOB, (RES *)job)); ) {
- level = job->level;
- sched = job->schedule;
- if (sched == NULL) { /* scheduled? */
- continue; /* no, skip this job */
+ foreach_res(job, R_JOB) {
+ if (!acl_access_ok(ua, Job_ACL, job->hdr.name)) {
+ continue;
}
- for (run=sched->run; run; run=run->next) {
+ for (run=NULL; (run = find_next_run(run, job, runtime)); ) {
+ level = job->level;
if (run->level) {
level = run->level;
}
- /*
- * Find runs in next 24 hours
+ priority = job->Priority;
+ if (run->Priority) {
+ priority = run->Priority;
+ }
+ if (!hdr_printed) {
+ prt_runhdr(ua);
+ hdr_printed = true;
+ }
+ /* Queue one packet per scheduled run, kept sorted by my_compare */
+ sp = (sched_pkt *)malloc(sizeof(sched_pkt));
+ sp->job = job;
+ sp->level = level;
+ sp->priority = priority;
+ sp->runtime = runtime;
+ sp->pool = run->pool;
+ sched.binary_insert(sp, my_compare);
+ num_jobs++;
+ }
+ } /* end for loop over resources */
+ UnlockRes();
+ foreach_dlist(sp, &sched) {
+ prt_runtime(ua, sp);
+ }
+ /* NOTE(review): the malloc()ed sched_pkt entries are not freed in this
+ * function -- confirm the dlist destructor releases them */
+ if (num_jobs == 0) {
+ bsendmsg(ua, _("No Scheduled Jobs.\n"));
+ }
+ bsendmsg(ua, "====\n");
+ Dmsg0(200, "Leave list_sched_jobs_runs()\n");
+}
+
+/*
+ * Print one line per currently running Job (JCR), with a status
+ * message derived first from the Director's JobStatus and then
+ * refined by the Storage daemon's SDJobStatus. Console/control
+ * JCRs (JobId == 0) are reported separately and not counted.
+ */
+static void list_running_jobs(UAContext *ua)
+{
+ JCR *jcr;
+ int njobs = 0;
+ const char *msg;
+ char *emsg; /* edited message */
+ char dt[MAX_TIME_LENGTH];
+ char level[10];
+ bool pool_mem = false;
+
+ Dmsg0(200, "enter list_run_jobs()\n");
+ bsendmsg(ua, _("\nRunning Jobs:\n"));
+ lock_jcr_chain();
+ /* First pass: count real jobs and report console connections */
+ foreach_jcr(jcr) {
+ njobs++;
+ if (jcr->JobId == 0) { /* this is us */
+ /* this is a console or other control job. We only show console
+ * jobs in the status output.
*/
- tod = (bit_is_set(mday, run->mday) || bit_is_set(wday, run->wday)) &&
- bit_is_set(month, run->month) && bit_is_set(wpos, run->wpos);
-
- tom = (bit_is_set(tmday, run->mday) || bit_is_set(twday, run->wday)) &&
- bit_is_set(tmonth, run->month) && bit_is_set(wpos, run->wpos);
-
- Dmsg2(200, "tod=%d tom=%d\n", tod, tom);
- found = FALSE;
- if (tod) {
- /* find time (time_t) job is to be run */
- localtime_r(&now, &tm);
- hour = 0;
- for (i=tm.tm_hour; i < 24; i++) {
- if (bit_is_set(i, run->hour)) {
- tm.tm_hour = i;
- tm.tm_min = run->minute;
- tm.tm_sec = 0;
- runtime = mktime(&tm);
- if (runtime > now) {
- if (!hdr_printed) {
- hdr_printed = TRUE;
- prt_runhdr(ua);
- }
- prt_runtime(ua, job, level, runtime);
- found = TRUE;
- break;
- }
- }
- }
+ if (jcr->JobType == JT_CONSOLE) {
+ bstrftime_nc(dt, sizeof(dt), jcr->start_time);
+ bsendmsg(ua, _("Console connected at %s\n"), dt);
}
+ njobs--;
+ }
+ free_locked_jcr(jcr);
+ }
+ if (njobs == 0) {
+ unlock_jcr_chain();
+ /* Note the following message is used in regress -- don't change */
+ bsendmsg(ua, _("No Jobs running.\n====\n"));
+ Dmsg0(200, "leave list_run_jobs()\n");
+ return;
+ }
+ njobs = 0;
+ bsendmsg(ua, _(" JobId Level Name Status\n"));
+ bsendmsg(ua, _("======================================================================\n"));
+ /* Second pass: print one line per job the user's ACL allows */
+ foreach_jcr(jcr) {
+ if (jcr->JobId == 0 || !acl_access_ok(ua, Job_ACL, jcr->job->hdr.name)) {
+ free_locked_jcr(jcr);
+ continue;
+ }
+ njobs++;
+ switch (jcr->JobStatus) {
+ case JS_Created:
+ msg = _("is waiting execution");
+ break;
+ case JS_Running:
+ msg = _("is running");
+ break;
+ case JS_Blocked:
+ msg = _("is blocked");
+ break;
+ case JS_Terminated:
+ msg = _("has terminated");
+ break;
+ case JS_ErrorTerminated:
+ msg = _("has erred");
+ break;
+ case JS_Error:
+ msg = _("has errors");
+ break;
+ case JS_FatalError:
+ msg = _("has a fatal error");
+ break;
+ case JS_Differences:
+ msg = _("has verify differences");
+ break;
+ case JS_Canceled:
+ msg = _("has been canceled");
+ break;
+ case JS_WaitFD:
+ emsg = (char *) get_pool_memory(PM_FNAME);
+ Mmsg(&emsg, _("is waiting on Client %s"), jcr->client->hdr.name);
+ pool_mem = true;
+ msg = emsg;
+ break;
+ case JS_WaitSD:
+ emsg = (char *) get_pool_memory(PM_FNAME);
+ Mmsg(&emsg, _("is waiting on Storage %s"), jcr->store->hdr.name);
+ pool_mem = true;
+ msg = emsg;
+ break;
+ case JS_WaitStoreRes:
+ msg = _("is waiting on max Storage jobs");
+ break;
+ case JS_WaitClientRes:
+ msg = _("is waiting on max Client jobs");
+ break;
+ case JS_WaitJobRes:
+ msg = _("is waiting on max Job jobs");
+ break;
+ case JS_WaitMaxJobs:
+ msg = _("is waiting on max total jobs");
+ break;
+ case JS_WaitStartTime:
+ msg = _("is waiting for its start time");
+ break;
+ case JS_WaitPriority:
+ msg = _("is waiting for higher priority jobs to finish");
+ break;
-// Dmsg2(200, "runtime=%d now=%d\n", runtime, now);
- if (!found && tom) {
- localtime_r(&tomorrow, &tm);
- hour = 0;
- for (i=0; i < 24; i++) {
- if (bit_is_set(i, run->hour)) {
- hour = i;
- break;
- }
- }
- tm.tm_hour = hour;
- tm.tm_min = run->minute;
- tm.tm_sec = 0;
- runtime = mktime(&tm);
- Dmsg2(200, "truntime=%d now=%d\n", runtime, now);
- if (runtime < tomorrow) {
- if (!hdr_printed) {
- hdr_printed = TRUE;
- prt_runhdr(ua);
- }
- prt_runtime(ua, job, level, runtime);
- }
+ default:
+ emsg = (char *) get_pool_memory(PM_FNAME);
+ Mmsg(&emsg, _("is in unknown state %c"), jcr->JobStatus);
+ pool_mem = true;
+ msg = emsg;
+ break;
+ }
+ /*
+ * Now report Storage daemon status code
+ */
+ switch (jcr->SDJobStatus) {
+ case JS_WaitMount:
+ if (pool_mem) {
+ free_pool_memory(emsg);
+ pool_mem = false;
+ }
+ msg = _("is waiting for a mount request");
+ break;
+ case JS_WaitMedia:
+ if (pool_mem) {
+ free_pool_memory(emsg);
+ pool_mem = false;
}
- }
+ msg = _("is waiting for an appendable Volume");
+ break;
+ case JS_WaitFD:
+ if (!pool_mem) {
+ emsg = (char *) get_pool_memory(PM_FNAME);
+ pool_mem = true;
+ }
+ Mmsg(&emsg, _("is waiting for Client %s to connect to Storage %s"),
+ jcr->client->hdr.name, jcr->store->hdr.name);
+ msg = emsg;
+ break;
+ }
+ /* Admin and Restore jobs print no level field */
+ switch (jcr->JobType) {
+ case JT_ADMIN:
+ case JT_RESTORE:
+ bstrncpy(level, " ", sizeof(level));
+ break;
+ default:
+ bstrncpy(level, level_to_str(jcr->JobLevel), sizeof(level));
+ level[7] = 0;
+ break;
+ }
+
+ bsendmsg(ua, _("%6d %-6s %-20s %s\n"),
+ jcr->JobId,
+ level,
+ jcr->Job,
+ msg);
+
+ if (pool_mem) {
+ free_pool_memory(emsg);
+ pool_mem = false;
+ }
+ free_locked_jcr(jcr);
}
- UnlockRes();
- Dmsg0(200, "Leave find_runs()\n");
+ unlock_jcr_chain();
+ bsendmsg(ua, "====\n");
+ Dmsg0(200, "leave list_run_jobs()\n");
+}
+
+/*
+ * Print one summary line for each entry in the global last_jobs
+ * list: JobId, level, files, bytes, termination status, end time
+ * and the Job name with its trailing timestamp components stripped.
+ */
+static void list_terminated_jobs(UAContext *ua)
+{
+ char dt[MAX_TIME_LENGTH], b1[30], b2[30];
+ char level[10];
+
+ /* NOTE(review): empty() is tested before lock_last_jobs_list() is
+ * taken -- confirm the list cannot change concurrently here */
+ if (last_jobs->empty()) {
+ bsendmsg(ua, _("No Terminated Jobs.\n"));
+ return;
+ }
+ lock_last_jobs_list();
+ struct s_last_job *je;
+ bsendmsg(ua, _("\nTerminated Jobs:\n"));
+ bsendmsg(ua, _(" JobId Level Files Bytes Status Finished Name \n"));
+ bsendmsg(ua, _("========================================================================\n"));
+ foreach_dlist(je, last_jobs) {
+ char JobName[MAX_NAME_LENGTH];
+ const char *termstat;
+
+ bstrftime_nc(dt, sizeof(dt), je->end_time);
+ /* Admin and Restore jobs print no level field */
+ switch (je->JobType) {
+ case JT_ADMIN:
+ case JT_RESTORE:
+ bstrncpy(level, " ", sizeof(level));
+ break;
+ default:
+ bstrncpy(level, level_to_str(je->JobLevel), sizeof(level));
+ level[4] = 0;
+ break;
+ }
+ /* Map JobStatus codes onto short, fixed-width status words */
+ switch (je->JobStatus) {
+ case JS_Created:
+ termstat = "Created";
+ break;
+ case JS_FatalError:
+ case JS_ErrorTerminated:
+ termstat = "Error";
+ break;
+ case JS_Differences:
+ termstat = "Diffs";
+ break;
+ case JS_Canceled:
+ termstat = "Cancel";
+ break;
+ case JS_Terminated:
+ termstat = "OK";
+ break;
+ default:
+ termstat = "Other";
+ break;
+ }
+ bstrncpy(JobName, je->Job, sizeof(JobName));
+ /* There are three periods after the Job name */
+ char *p;
+ for (int i=0; i<3; i++) {
+ if ((p=strrchr(JobName, '.')) != NULL) {
+ *p = 0;
+ }
+ }
+ bsendmsg(ua, _("%6d %-6s %8s %14s %-7s %-8s %s\n"),
+ je->JobId,
+ level,
+ edit_uint64_with_commas(je->JobFiles, b1),
+ edit_uint64_with_commas(je->JobBytes, b2),
+ termstat,
+ dt, JobName);
+ }
+ bsendmsg(ua, "\n");
+ unlock_last_jobs_list();
}