X-Git-Url: https://git.sur5r.net/?a=blobdiff_plain;f=bacula%2Fsrc%2Fdird%2Fua_status.c;h=92b3eba8537ad546c51d2f3b44b07a409ce47b3e;hb=402ce8e2a74f492b8e1419be83cfe46aefaa9acf;hp=918302ad80c278fe605856236c28502d4a469227;hpb=c369a18cfaa0cb4fe7c93398f8c55a356f857719;p=bacula%2Fbacula diff --git a/bacula/src/dird/ua_status.c b/bacula/src/dird/ua_status.c index 918302ad80..92b3eba853 100644 --- a/bacula/src/dird/ua_status.c +++ b/bacula/src/dird/ua_status.c @@ -1,3 +1,30 @@ +/* + Bacula® - The Network Backup Solution + + Copyright (C) 2001-2008 Free Software Foundation Europe e.V. + + The main author of Bacula is Kern Sibbald, with contributions from + many others, a complete list can be found in the file AUTHORS. + This program is Free Software; you can redistribute it and/or + modify it under the terms of version two of the GNU General Public + License as published by the Free Software Foundation and included + in the file LICENSE. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + + Bacula® is a registered trademark of John Walker. + The licensor of Bacula is the Free Software Foundation Europe + (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich, + Switzerland, email:ftf@fsfeurope.org. +*/ /* * * Bacula Director -- User Agent Status Command @@ -7,143 +34,206 @@ * Version $Id$ */ -/* - Copyright (C) 2000, 2001, 2002 Kern Sibbald and John Walker - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2 of - the License, or (at your option) any later version. +#include "bacula.h" +#include "dird.h" - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. +extern void *start_heap; + +static void list_scheduled_jobs(UAContext *ua); +static void list_running_jobs(UAContext *ua); +static void list_terminated_jobs(UAContext *ua); +static void do_storage_status(UAContext *ua, STORE *store, char *cmd); +static void do_client_status(UAContext *ua, CLIENT *client, char *cmd); +static void do_director_status(UAContext *ua); +static void do_all_status(UAContext *ua); - You should have received a copy of the GNU General Public - License along with this program; if not, write to the Free - Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, - MA 02111-1307, USA. 
+static char OKqstatus[] = "1000 OK .status\n"; +static char DotStatusJob[] = "JobId=%s JobStatus=%c JobErrors=%d\n"; +/* + * .status command */ -#include "bacula.h" -#include "dird.h" -#include "ua.h" +bool dot_status_cmd(UAContext *ua, const char *cmd) +{ + STORE *store; + CLIENT *client; + JCR* njcr = NULL; + s_last_job* job; + char ed1[50]; + + Dmsg2(20, "status=\"%s\" argc=%d\n", cmd, ua->argc); + + if (ua->argc < 3) { + ua->send_msg("1900 Bad .status command, missing arguments.\n"); + return false; + } + + if (strcasecmp(ua->argk[1], "dir") == 0) { + if (strcasecmp(ua->argk[2], "current") == 0) { + ua->send_msg(OKqstatus, ua->argk[2]); + foreach_jcr(njcr) { + if (njcr->JobId != 0 && acl_access_ok(ua, Job_ACL, njcr->job->name())) { + ua->send_msg(DotStatusJob, edit_int64(njcr->JobId, ed1), + njcr->JobStatus, njcr->JobErrors); + } + } + endeach_jcr(njcr); + } else if (strcasecmp(ua->argk[2], "last") == 0) { + ua->send_msg(OKqstatus, ua->argk[2]); + if ((last_jobs) && (last_jobs->size() > 0)) { + job = (s_last_job*)last_jobs->last(); + if (acl_access_ok(ua, Job_ACL, job->Job)) { + ua->send_msg(DotStatusJob, edit_int64(job->JobId, ed1), + job->JobStatus, job->Errors); + } + } + } else if (strcasecmp(ua->argk[2], "header") == 0) { + list_dir_status_header(ua); + } else if (strcasecmp(ua->argk[2], "scheduled") == 0) { + list_scheduled_jobs(ua); + } else if (strcasecmp(ua->argk[2], "running") == 0) { + list_running_jobs(ua); + } else if (strcasecmp(ua->argk[2], "terminated") == 0) { + list_terminated_jobs(ua); + } else { + ua->send_msg("1900 Bad .status command, wrong argument.\n"); + return false; + } + } else if (strcasecmp(ua->argk[1], "client") == 0) { + client = get_client_resource(ua); + if (client) { + Dmsg2(200, "Client=%s arg=%s\n", client->name(), NPRT(ua->argk[2])); + do_client_status(ua, client, ua->argk[2]); + } + } else if (strcasecmp(ua->argk[1], "storage") == 0) { + store = get_storage_resource(ua, false /*no default*/); + if (store) { + do_storage_status(ua, store, ua->argk[2]); + } + } else { + ua->send_msg("1900 Bad .status command, wrong argument.\n"); + return false; + } -extern char my_name[]; -extern time_t daemon_start_time; -extern struct s_last_job last_job; + return true; +} -static void print_jobs_scheduled(UAContext *ua); -static void do_storage_status(UAContext *ua, STORE *store); -static void do_client_status(UAContext *ua, CLIENT *client); -static void do_director_status(UAContext *ua, char *cmd); -static void do_all_status(UAContext *ua, char *cmd); +/* This is the *old* command handler, so we must return + * 1 or it closes the connection + */ +int qstatus_cmd(UAContext *ua, const char *cmd) +{ + dot_status_cmd(ua, cmd); + return 1; +} /* * status command */ -int statuscmd(UAContext *ua, char *cmd) +int status_cmd(UAContext *ua, const char *cmd) { STORE *store; CLIENT *client; int item, i; - if (!open_db(ua)) { - return 1; - } Dmsg1(20, "status:%s:\n", cmd); for (i=1; iargc; i++) { - if (strcasecmp(ua->argk[i], _("all")) == 0) { - do_all_status(ua, cmd); - return 1; - } else if (strcasecmp(ua->argk[i], _("dir")) == 0 || - strcasecmp(ua->argk[i], _("director")) == 0) { - do_director_status(ua, cmd); - return 1; - } else if (strcasecmp(ua->argk[i], _("client")) == 0) { - client = get_client_resource(ua); - if (client) { - do_client_status(ua, client); - } - return 1; + if (strcasecmp(ua->argk[i], NT_("all")) == 0) { + do_all_status(ua); + return 1; + } else if (strcasecmp(ua->argk[i], NT_("dir")) == 0 || + strcasecmp(ua->argk[i], NT_("director")) == 0) { + 
do_director_status(ua); + return 1; + } else if (strcasecmp(ua->argk[i], NT_("client")) == 0) { + client = get_client_resource(ua); + if (client) { + do_client_status(ua, client, NULL); + } + return 1; } else { - store = get_storage_resource(ua, cmd); - if (store) { - do_storage_status(ua, store); - } - return 1; + store = get_storage_resource(ua, false/*no default*/); + if (store) { + do_storage_status(ua, store, NULL); + } + return 1; } } /* If no args, ask for status type */ - if (ua->argc == 1) { + if (ua->argc == 1) { + char prmt[MAX_NAME_LENGTH]; + start_prompt(ua, _("Status available for:\n")); - add_prompt(ua, _("Director")); - add_prompt(ua, _("Storage")); - add_prompt(ua, _("Client")); - add_prompt(ua, _("All")); + add_prompt(ua, NT_("Director")); + add_prompt(ua, NT_("Storage")); + add_prompt(ua, NT_("Client")); + add_prompt(ua, NT_("All")); Dmsg0(20, "do_prompt: select daemon\n"); - if ((item=do_prompt(ua, _("Select daemon type for status"), cmd)) < 0) { - return 1; + if ((item=do_prompt(ua, "", _("Select daemon type for status"), prmt, sizeof(prmt))) < 0) { + return 1; } Dmsg1(20, "item=%d\n", item); - switch (item) { - case 0: /* Director */ - do_director_status(ua, cmd); - break; + switch (item) { + case 0: /* Director */ + do_director_status(ua); + break; case 1: - store = select_storage_resource(ua); - if (store) { - do_storage_status(ua, store); - } - break; + store = select_storage_resource(ua); + if (store) { + do_storage_status(ua, store, NULL); + } + break; case 2: - client = select_client_resource(ua); - if (client) { - do_client_status(ua, client); - } - break; + client = select_client_resource(ua); + if (client) { + do_client_status(ua, client, NULL); + } + break; case 3: - do_all_status(ua, cmd); - break; + do_all_status(ua); + break; default: - break; + break; } } return 1; } -static void do_all_status(UAContext *ua, char *cmd) +static void do_all_status(UAContext *ua) { STORE *store, **unique_store; CLIENT *client, **unique_client; - int i, j, found; + int i, j; + bool found; - do_director_status(ua, cmd); + do_director_status(ua); /* Count Storage items */ LockRes(); - store = NULL; - for (i=0; (store = (STORE *)GetNextRes(R_STORAGE, (RES *)store)); i++) - { } + i = 0; + foreach_res(store, R_STORAGE) { + i++; + } unique_store = (STORE **) malloc(i * sizeof(STORE)); - /* Find Unique Storage address/port */ - store = (STORE *)GetNextRes(R_STORAGE, NULL); + /* Find Unique Storage address/port */ i = 0; - unique_store[i++] = store; - while ((store = (STORE *)GetNextRes(R_STORAGE, (RES *)store))) { - found = 0; + foreach_res(store, R_STORAGE) { + found = false; + if (!acl_access_ok(ua, Storage_ACL, store->name())) { + continue; + } for (j=0; jaddress, store->address) == 0 && - unique_store[j]->SDport == store->SDport) { - found = 1; - break; - } + if (strcmp(unique_store[j]->address, store->address) == 0 && + unique_store[j]->SDport == store->SDport) { + found = true; + break; + } } if (!found) { - unique_store[i++] = store; + unique_store[i++] = store; Dmsg2(40, "Stuffing: %s:%d\n", store->address, store->SDport); } } @@ -151,31 +241,33 @@ static void do_all_status(UAContext *ua, char *cmd) /* Call each unique Storage daemon */ for (j=0; jname())) { + continue; + } for (j=0; jaddress, client->address) == 0 && - unique_client[j]->FDport == client->FDport) { - found = 1; - break; - } + if (strcmp(unique_client[j]->address, client->address) == 0 && + unique_client[j]->FDport == client->FDport) { + found = true; + break; + } } if (!found) { - unique_client[i++] = 
client; + unique_client[i++] = client; Dmsg2(40, "Stuffing: %s:%d\n", client->address, client->FDport); } } @@ -183,272 +275,587 @@ static void do_all_status(UAContext *ua, char *cmd) /* Call each unique File daemon */ for (j=0; j 0) { - char termstat[30]; - - bstrftime(dt, sizeof(dt), last_job.end_time); - bsendmsg(ua, _("Last Job %s finished at %s\n"), last_job.Job, dt); - jobstatus_to_ascii(last_job.JobStatus, termstat, sizeof(termstat)); - - bsendmsg(ua, _(" Files=%s Bytes=%s Termination Status=%s\n"), - edit_uint64_with_commas(last_job.JobFiles, b1), - edit_uint64_with_commas(last_job.JobBytes, b2), - termstat); - } - lock_jcr_chain(); - for (jcr=NULL; (jcr=get_next_jcr(jcr)); njobs++) { - if (jcr->JobId == 0) { /* this is us */ - bstrftime(dt, sizeof(dt), jcr->start_time); - bsendmsg(ua, _("Console connected at %s\n"), dt); - free_locked_jcr(jcr); - njobs--; - continue; - } - switch (jcr->JobStatus) { - case JS_Created: - msg = _("is waiting execution"); - break; - case JS_Running: - msg = _("is running"); - break; - case JS_Blocked: - msg = _("is blocked"); - break; - case JS_Terminated: - msg = _("has terminated"); - break; - case JS_ErrorTerminated: - msg = _("has erred"); - break; - case JS_Cancelled: - msg = _("has been canceled"); - break; - case JS_WaitFD: - msg = (char *) get_pool_memory(PM_FNAME); - Mmsg(&msg, _("is waiting on Client %s"), jcr->client->hdr.name); - pool_mem = TRUE; - break; - case JS_WaitSD: - msg = (char *) get_pool_memory(PM_FNAME); - Mmsg(&msg, _("is waiting on Storage %s"), jcr->store->hdr.name); - pool_mem = TRUE; - break; - default: - msg = (char *) get_pool_memory(PM_FNAME); - Mmsg(&msg, _("is in unknown state %c"), jcr->JobStatus); - pool_mem = TRUE; - break; - } - switch (jcr->SDJobStatus) { - case JS_WaitMount: - if (pool_mem) { - free_pool_memory(msg); - pool_mem = FALSE; - } - msg = _("is waiting for a mount request"); - break; - case JS_WaitMedia: - if (pool_mem) { - free_pool_memory(msg); - pool_mem = FALSE; - } - msg = _("is waiting for an appendable Volume"); - break; - case JS_WaitFD: - if (!pool_mem) { - msg = (char *) get_pool_memory(PM_FNAME); - pool_mem = TRUE; - } - Mmsg(&msg, _("is waiting for Client %s to connect to Storage %s"), - jcr->client->hdr.name, jcr->store->hdr.name); - break; - - } - bsendmsg(ua, _("JobId %d Job %s %s.\n"), jcr->JobId, jcr->Job, msg); - if (pool_mem) { - free_pool_memory(msg); - pool_mem = FALSE; - } - free_locked_jcr(jcr); + char dt[MAX_TIME_LENGTH]; + char b1[35], b2[35], b3[35], b4[35], b5[35]; + + ua->send_msg(_("%s Version: %s (%s) %s %s %s\n"), my_name, VERSION, BDATE, + HOST_OS, DISTNAME, DISTVER); + bstrftime_nc(dt, sizeof(dt), daemon_start_time); + if (num_jobs_run == 1) { + ua->send_msg(_("Daemon started %s, 1 Job run since started.\n"), dt); } - unlock_jcr_chain(); - - if (njobs == 0) { - bsendmsg(ua, _("No jobs are running.\n")); + else { + ua->send_msg(_("Daemon started %s, %d Jobs run since started.\n"), + dt, num_jobs_run); } - print_jobs_scheduled(ua); - bsendmsg(ua, "====\n"); + ua->send_msg(_(" Heap: heap=%s smbytes=%s max_bytes=%s bufs=%s max_bufs=%s\n"), + edit_uint64_with_commas((char *)sbrk(0)-(char *)start_heap, b1), + edit_uint64_with_commas(sm_bytes, b2), + edit_uint64_with_commas(sm_max_bytes, b3), + edit_uint64_with_commas(sm_buffers, b4), + edit_uint64_with_commas(sm_max_buffers, b5)); } -static void do_storage_status(UAContext *ua, STORE *store) +static void do_director_status(UAContext *ua) +{ + list_dir_status_header(ua); + + /* + * List scheduled Jobs + */ + 
list_scheduled_jobs(ua); + + /* + * List running jobs + */ + list_running_jobs(ua); + + /* + * List terminated jobs + */ + list_terminated_jobs(ua); + ua->send_msg("====\n"); +} + +static void do_storage_status(UAContext *ua, STORE *store, char *cmd) { BSOCK *sd; + USTORE lstore; - ua->jcr->store = store; + lstore.store = store; + pm_strcpy(lstore.store_source, _("unknown source")); + set_wstorage(ua->jcr, &lstore); /* Try connecting for up to 15 seconds */ - bsendmsg(ua, _("Connecting to Storage daemon %s at %s:%d\n"), - store->hdr.name, store->address, store->SDport); + if (!ua->api) ua->send_msg(_("Connecting to Storage daemon %s at %s:%d\n"), + store->name(), store->address, store->SDport); if (!connect_to_storage_daemon(ua->jcr, 1, 15, 0)) { - bsendmsg(ua, _("\nFailed to connect to Storage daemon %s.\n====\n"), - store->hdr.name); + ua->send_msg(_("\nFailed to connect to Storage daemon %s.\n====\n"), + store->name()); + if (ua->jcr->store_bsock) { + bnet_close(ua->jcr->store_bsock); + ua->jcr->store_bsock = NULL; + } return; } Dmsg0(20, _("Connected to storage daemon\n")); sd = ua->jcr->store_bsock; - bnet_fsend(sd, "status"); - while (bnet_recv(sd) >= 0) { - bsendmsg(ua, "%s", sd->msg); + if (cmd) { + sd->fsend(".status %s", cmd); + } else { + sd->fsend("status"); } - bnet_sig(sd, BNET_TERMINATE); - bnet_close(sd); + while (sd->recv() >= 0) { + ua->send_msg("%s", sd->msg); + } + sd->signal( BNET_TERMINATE); + sd->close(); ua->jcr->store_bsock = NULL; - return; + return; } - -static void do_client_status(UAContext *ua, CLIENT *client) + +static void do_client_status(UAContext *ua, CLIENT *client, char *cmd) { BSOCK *fd; /* Connect to File daemon */ ua->jcr->client = client; + /* Release any old dummy key */ + if (ua->jcr->sd_auth_key) { + free(ua->jcr->sd_auth_key); + } + /* Create a new dummy SD auth key */ + ua->jcr->sd_auth_key = bstrdup("dummy"); + /* Try to connect for 15 seconds */ - bsendmsg(ua, _("Connecting to Client %s at %s:%d\n"), - client->hdr.name, client->address, client->FDport); + if (!ua->api) ua->send_msg(_("Connecting to Client %s at %s:%d\n"), + client->name(), client->address, client->FDport); if (!connect_to_file_daemon(ua->jcr, 1, 15, 0)) { - bsendmsg(ua, _("Failed to connect to Client %s.\n====\n"), - client->hdr.name); + ua->send_msg(_("Failed to connect to Client %s.\n====\n"), + client->name()); + if (ua->jcr->file_bsock) { + bnet_close(ua->jcr->file_bsock); + ua->jcr->file_bsock = NULL; + } return; } Dmsg0(20, _("Connected to file daemon\n")); fd = ua->jcr->file_bsock; - bnet_fsend(fd, "status"); - while (bnet_recv(fd) >= 0) { - bsendmsg(ua, "%s", fd->msg); + if (cmd) { + fd->fsend(".status %s", cmd); + } else { + fd->fsend("status"); + } + while (fd->recv() >= 0) { + ua->send_msg("%s", fd->msg); } - bnet_sig(fd, BNET_TERMINATE); - bnet_close(fd); + fd->signal(BNET_TERMINATE); + fd->close(); ua->jcr->file_bsock = NULL; - return; + return; } -static void prt_runtime(UAContext *ua, JOB *job, time_t runtime) +static void prt_runhdr(UAContext *ua) { - char dt[MAX_TIME_LENGTH]; + if (!ua->api) { + ua->send_msg(_("\nScheduled Jobs:\n")); + ua->send_msg(_("Level Type Pri Scheduled Name Volume\n")); + ua->send_msg(_("===================================================================================\n")); + } +} - bstrftime(dt, sizeof(dt), runtime); - bsendmsg(ua, _("%s job \"%s\" scheduled for %s\n"), - job_type_to_str(job->JobType), job->hdr.name, dt); +/* Scheduling packet */ +struct sched_pkt { + dlink link; /* keep this as first item!!! 
*/ + JOB *job; + int level; + int priority; + time_t runtime; + POOL *pool; + STORE *store; +}; + +static void prt_runtime(UAContext *ua, sched_pkt *sp) +{ + char dt[MAX_TIME_LENGTH]; + const char *level_ptr; + bool ok = false; + bool close_db = false; + JCR *jcr = ua->jcr; + MEDIA_DBR mr; + + memset(&mr, 0, sizeof(mr)); + if (sp->job->JobType == JT_BACKUP) { + jcr->db = NULL; + ok = complete_jcr_for_job(jcr, sp->job, sp->pool); + Dmsg1(250, "Using pool=%s\n", jcr->pool->name()); + if (jcr->db) { + close_db = true; /* new db opened, remember to close it */ + } + if (ok) { + mr.PoolId = jcr->jr.PoolId; + mr.StorageId = sp->store->StorageId; + jcr->wstore = sp->store; + Dmsg0(250, "call find_next_volume_for_append\n"); + ok = find_next_volume_for_append(jcr, &mr, 1, fnv_no_create_vol, fnv_no_prune); + } + if (!ok) { + bstrncpy(mr.VolumeName, "*unknown*", sizeof(mr.VolumeName)); + } + } + bstrftime_nc(dt, sizeof(dt), sp->runtime); + switch (sp->job->JobType) { + case JT_ADMIN: + case JT_RESTORE: + level_ptr = " "; + break; + default: + level_ptr = level_to_str(sp->level); + break; + } + if (ua->api) { + ua->send_msg(_("%-14s\t%-8s\t%3d\t%-18s\t%-18s\t%s\n"), + level_ptr, job_type_to_str(sp->job->JobType), sp->priority, dt, + sp->job->name(), mr.VolumeName); + } else { + ua->send_msg(_("%-14s %-8s %3d %-18s %-18s %s\n"), + level_ptr, job_type_to_str(sp->job->JobType), sp->priority, dt, + sp->job->name(), mr.VolumeName); + } + if (close_db) { + db_close_database(jcr, jcr->db); + } + jcr->db = ua->db; /* restore ua db to jcr */ } -/* - * Find all jobs to be run this hour - * and the next hour. +/* + * Sort items by runtime, priority */ -static void print_jobs_scheduled(UAContext *ua) +static int my_compare(void *item1, void *item2) { - time_t now, runtime, tomorrow; + sched_pkt *p1 = (sched_pkt *)item1; + sched_pkt *p2 = (sched_pkt *)item2; + if (p1->runtime < p2->runtime) { + return -1; + } else if (p1->runtime > p2->runtime) { + return 1; + } + if (p1->priority < p2->priority) { + return -1; + } else if (p1->priority > p2->priority) { + return 1; + } + return 0; +} + +/* + * Find all jobs to be run in roughly the + * next 24 hours. + */ +static void list_scheduled_jobs(UAContext *ua) +{ + time_t runtime; RUN *run; JOB *job; - SCHED *sched; - struct tm tm; - int mday, wday, month, tmday, twday, tmonth, i, hour; - int tod, tom; - int found; - - Dmsg0(200, "enter find_runs()\n"); - - now = time(NULL); - localtime_r(&now, &tm); - mday = tm.tm_mday - 1; - wday = tm.tm_wday; - month = tm.tm_mon; - - tomorrow = now + 60 * 60 * 24; - localtime_r(&tomorrow, &tm); - tmday = tm.tm_mday - 1; - twday = tm.tm_wday; - tmonth = tm.tm_mon; + int level, num_jobs = 0; + int priority; + bool hdr_printed = false; + dlist sched; + sched_pkt *sp; + int days, i; + + Dmsg0(200, "enter list_sched_jobs()\n"); + + days = 1; + i = find_arg_with_value(ua, NT_("days")); + if (i >= 0) { + days = atoi(ua->argv[i]); + if ((days < 0) || (days > 500) && !ua->api) { + ua->send_msg(_("Ignoring invalid value for days. Max is 500.\n")); + days = 1; + } + } /* Loop through all jobs */ LockRes(); - for (job=NULL; (job=(JOB *)GetNextRes(R_JOB, (RES *)job)); ) { - sched = job->schedule; - if (sched == NULL) { /* scheduled? 
*/ - continue; /* no, skip this job */ + foreach_res(job, R_JOB) { + if (!acl_access_ok(ua, Job_ACL, job->name()) || !job->enabled) { + continue; } - for (run=sched->run; run; run=run->next) { - /* - * Find runs in next 24 hours - */ - tod = (bit_is_set(mday, run->mday) || bit_is_set(wday, run->wday)) && - bit_is_set(month, run->month); - - tom = (bit_is_set(tmday, run->mday) || bit_is_set(twday, run->wday)) && - bit_is_set(tmonth, run->month); - - Dmsg2(200, "tod=%d tom=%d\n", tod, tom); - found = FALSE; - if (tod) { - /* find time (time_t) job is to be run */ - localtime_r(&now, &tm); - hour = 0; - for (i=tm.tm_hour; i < 24; i++) { - if (bit_is_set(i, run->hour)) { - tm.tm_hour = i; - tm.tm_min = run->minute; - tm.tm_sec = 0; - runtime = mktime(&tm); - if (runtime > now) { - prt_runtime(ua, job, runtime); - found = TRUE; - break; - } - } - } - } - -// Dmsg2(200, "runtime=%d now=%d\n", runtime, now); - if (!found && tom) { - localtime_r(&tomorrow, &tm); - hour = 0; - for (i=0; i < 24; i++) { - if (bit_is_set(i, run->hour)) { - hour = i; - break; - } - } - tm.tm_hour = hour; - tm.tm_min = run->minute; - tm.tm_sec = 0; - runtime = mktime(&tm); - Dmsg2(200, "truntime=%d now=%d\n", runtime, now); - if (runtime < tomorrow) { - prt_runtime(ua, job, runtime); - } - } - } - } + for (run=NULL; (run = find_next_run(run, job, runtime, days)); ) { + USTORE store; + level = job->JobLevel; + if (run->level) { + level = run->level; + } + priority = job->Priority; + if (run->Priority) { + priority = run->Priority; + } + if (!hdr_printed) { + prt_runhdr(ua); + hdr_printed = true; + } + sp = (sched_pkt *)malloc(sizeof(sched_pkt)); + sp->job = job; + sp->level = level; + sp->priority = priority; + sp->runtime = runtime; + sp->pool = run->pool; + get_job_storage(&store, job, run); + sp->store = store.store; + Dmsg3(250, "job=%s store=%s MediaType=%s\n", job->name(), sp->store->name(), sp->store->media_type); + sched.binary_insert_multiple(sp, my_compare); + num_jobs++; + } + } /* end for loop over resources */ UnlockRes(); - Dmsg0(200, "Leave find_runs()\n"); + foreach_dlist(sp, &sched) { + prt_runtime(ua, sp); + } + if (num_jobs == 0 && !ua->api) { + ua->send_msg(_("No Scheduled Jobs.\n")); + } + if (!ua->api) ua->send_msg("====\n"); + Dmsg0(200, "Leave list_sched_jobs_runs()\n"); +} + +static void list_running_jobs(UAContext *ua) +{ + JCR *jcr; + int njobs = 0; + const char *msg; + char *emsg; /* edited message */ + char dt[MAX_TIME_LENGTH]; + char level[10]; + bool pool_mem = false; + + Dmsg0(200, "enter list_run_jobs()\n"); + if (!ua->api) ua->send_msg(_("\nRunning Jobs:\n")); + foreach_jcr(jcr) { + if (jcr->JobId == 0) { /* this is us */ + /* this is a console or other control job. We only show console + * jobs in the status output. 
+ */ + if (jcr->JobType == JT_CONSOLE && !ua->api) { + bstrftime_nc(dt, sizeof(dt), jcr->start_time); + ua->send_msg(_("Console connected at %s\n"), dt); + } + continue; + } + njobs++; + } + endeach_jcr(jcr); + + if (njobs == 0) { + /* Note the following message is used in regress -- don't change */ + if (!ua->api) ua->send_msg(_("No Jobs running.\n====\n")); + Dmsg0(200, "leave list_run_jobs()\n"); + return; + } + njobs = 0; + if (!ua->api) { + ua->send_msg(_(" JobId Level Name Status\n")); + ua->send_msg(_("======================================================================\n")); + } + foreach_jcr(jcr) { + if (jcr->JobId == 0 || !acl_access_ok(ua, Job_ACL, jcr->job->name())) { + continue; + } + njobs++; + switch (jcr->JobStatus) { + case JS_Created: + msg = _("is waiting execution"); + break; + case JS_Running: + msg = _("is running"); + break; + case JS_Blocked: + msg = _("is blocked"); + break; + case JS_Terminated: + msg = _("has terminated"); + break; + case JS_ErrorTerminated: + msg = _("has erred"); + break; + case JS_Error: + msg = _("has errors"); + break; + case JS_FatalError: + msg = _("has a fatal error"); + break; + case JS_Differences: + msg = _("has verify differences"); + break; + case JS_Canceled: + msg = _("has been canceled"); + break; + case JS_WaitFD: + emsg = (char *) get_pool_memory(PM_FNAME); + if (!jcr->client) { + Mmsg(emsg, _("is waiting on Client")); + } else { + Mmsg(emsg, _("is waiting on Client %s"), jcr->client->name()); + } + pool_mem = true; + msg = emsg; + break; + case JS_WaitSD: + emsg = (char *) get_pool_memory(PM_FNAME); + if (jcr->wstore) { + Mmsg(emsg, _("is waiting on Storage %s"), jcr->wstore->name()); + } else if (jcr->rstore) { + Mmsg(emsg, _("is waiting on Storage %s"), jcr->rstore->name()); + } else { + Mmsg(emsg, _("is waiting on Storage")); + } + pool_mem = true; + msg = emsg; + break; + case JS_WaitStoreRes: + msg = _("is waiting on max Storage jobs"); + break; + case JS_WaitClientRes: + msg = _("is waiting on max Client jobs"); + break; + case JS_WaitJobRes: + msg = _("is waiting on max Job jobs"); + break; + case JS_WaitMaxJobs: + msg = _("is waiting on max total jobs"); + break; + case JS_WaitStartTime: + msg = _("is waiting for its start time"); + break; + case JS_WaitPriority: + msg = _("is waiting for higher priority jobs to finish"); + break; + case JS_DataCommitting: + msg = _("SD committing Data"); + break; + case JS_DataDespooling: + msg = _("SD despooling Data"); + break; + case JS_AttrDespooling: + msg = _("SD despooling Attributes"); + break; + case JS_AttrInserting: + msg = _("Dir inserting Attributes"); + break; + + default: + emsg = (char *)get_pool_memory(PM_FNAME); + Mmsg(emsg, _("is in unknown state %c"), jcr->JobStatus); + pool_mem = true; + msg = emsg; + break; + } + /* + * Now report Storage daemon status code + */ + switch (jcr->SDJobStatus) { + case JS_WaitMount: + if (pool_mem) { + free_pool_memory(emsg); + pool_mem = false; + } + msg = _("is waiting for a mount request"); + break; + case JS_WaitMedia: + if (pool_mem) { + free_pool_memory(emsg); + pool_mem = false; + } + msg = _("is waiting for an appendable Volume"); + break; + case JS_WaitFD: + if (!pool_mem) { + emsg = (char *)get_pool_memory(PM_FNAME); + pool_mem = true; + } + if (!jcr->client || !jcr->wstore) { + Mmsg(emsg, _("is waiting for Client to connect to Storage daemon")); + } else { + Mmsg(emsg, _("is waiting for Client %s to connect to Storage %s"), + jcr->client->name(), jcr->wstore->name()); + } + msg = emsg; + break; + case JS_DataCommitting: + 
msg = _("SD committing Data"); + break; + case JS_DataDespooling: + msg = _("SD despooling Data"); + break; + case JS_AttrDespooling: + msg = _("SD despooling Attributes"); + break; + case JS_AttrInserting: + msg = _("Dir inserting Attributes"); + break; + } + switch (jcr->JobType) { + case JT_ADMIN: + case JT_RESTORE: + bstrncpy(level, " ", sizeof(level)); + break; + default: + bstrncpy(level, level_to_str(jcr->JobLevel), sizeof(level)); + level[7] = 0; + break; + } + + if (ua->api) { + ua->send_msg(_("%6d\t%-6s\t%-20s\t%s\n"), + jcr->JobId, level, jcr->Job, msg); + } else { + ua->send_msg(_("%6d %-6s %-20s %s\n"), + jcr->JobId, level, jcr->Job, msg); + } + + if (pool_mem) { + free_pool_memory(emsg); + pool_mem = false; + } + } + endeach_jcr(jcr); + if (!ua->api) ua->send_msg("====\n"); + Dmsg0(200, "leave list_run_jobs()\n"); +} + +static void list_terminated_jobs(UAContext *ua) +{ + char dt[MAX_TIME_LENGTH], b1[30], b2[30]; + char level[10]; + + if (last_jobs->empty()) { + if (!ua->api) ua->send_msg(_("No Terminated Jobs.\n")); + return; + } + lock_last_jobs_list(); + struct s_last_job *je; + if (!ua->api) { + ua->send_msg(_("\nTerminated Jobs:\n")); + ua->send_msg(_(" JobId Level Files Bytes Status Finished Name \n")); + ua->send_msg(_("====================================================================\n")); + } + foreach_dlist(je, last_jobs) { + char JobName[MAX_NAME_LENGTH]; + const char *termstat; + + bstrncpy(JobName, je->Job, sizeof(JobName)); + /* There are three periods after the Job name */ + char *p; + for (int i=0; i<3; i++) { + if ((p=strrchr(JobName, '.')) != NULL) { + *p = 0; + } + } + + if (!acl_access_ok(ua, Job_ACL, JobName)) { + continue; + } + + bstrftime_nc(dt, sizeof(dt), je->end_time); + switch (je->JobType) { + case JT_ADMIN: + case JT_RESTORE: + bstrncpy(level, " ", sizeof(level)); + break; + default: + bstrncpy(level, level_to_str(je->JobLevel), sizeof(level)); + level[4] = 0; + break; + } + switch (je->JobStatus) { + case JS_Created: + termstat = _("Created"); + break; + case JS_FatalError: + case JS_ErrorTerminated: + termstat = _("Error"); + break; + case JS_Differences: + termstat = _("Diffs"); + break; + case JS_Canceled: + termstat = _("Cancel"); + break; + case JS_Terminated: + termstat = _("OK"); + break; + default: + termstat = _("Other"); + break; + } + if (ua->api) { + ua->send_msg(_("%6d\t%-6s\t%8s\t%10s\t%-7s\t%-8s\t%s\n"), + je->JobId, + level, + edit_uint64_with_commas(je->JobFiles, b1), + edit_uint64_with_suffix(je->JobBytes, b2), + termstat, + dt, JobName); + } else { + ua->send_msg(_("%6d %-6s %8s %10s %-7s %-8s %s\n"), + je->JobId, + level, + edit_uint64_with_commas(je->JobFiles, b1), + edit_uint64_with_suffix(je->JobBytes, b2), + termstat, + dt, JobName); + } + } + if (!ua->api) ua->send_msg(_("\n")); + unlock_last_jobs_list(); }