X-Git-Url: https://git.sur5r.net/?a=blobdiff_plain;f=bacula%2Fsrc%2Fdird%2Fua_status.c;h=d7ef5b6416ac8f9d7224d37da667517e34d35856;hb=c6e0b63823f177d08c5e7cf8afc677da569bd772;hp=d2eddedcb9783c188c5001ad7a153f4f3e311052;hpb=65e81a4da0d0175163ef1b8f67a6dd3d7b4fd06b;p=bacula%2Fbacula

diff --git a/bacula/src/dird/ua_status.c b/bacula/src/dird/ua_status.c
index d2eddedcb9..d7ef5b6416 100644
--- a/bacula/src/dird/ua_status.c
+++ b/bacula/src/dird/ua_status.c
@@ -8,7 +8,7 @@
  */
 /*
-   Copyright (C) 2000-2003 Kern Sibbald and John Walker
+   Copyright (C) 2000-2004 Kern Sibbald and John Walker
 
    This program is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License as
@@ -32,18 +32,67 @@
 extern char my_name[];
 extern time_t daemon_start_time;
-extern struct s_last_job last_job;
+extern int num_jobs_run;
 
-static void print_jobs_scheduled(UAContext *ua);
+static void list_scheduled_jobs(UAContext *ua);
+static void list_running_jobs(UAContext *ua);
+static void list_terminated_jobs(UAContext *ua);
 static void do_storage_status(UAContext *ua, STORE *store);
 static void do_client_status(UAContext *ua, CLIENT *client);
-static void do_director_status(UAContext *ua, char *cmd);
-static void do_all_status(UAContext *ua, char *cmd);
+static void do_director_status(UAContext *ua);
+static void do_all_status(UAContext *ua);
+
+static char OKqstatus[] = "1000 OK .status\n";
+static char DotStatusJob[] = "JobId=%d JobStatus=%c JobErrors=%d\n";
+
+/*
+ * .status command
+ */
+int qstatus_cmd(UAContext *ua, const char *cmd)
+{
+   JCR* njcr;
+   s_last_job* job;
+
+   if (!open_db(ua)) {
+      return 1;
+   }
+   Dmsg1(20, "status:%s:\n", cmd);
+
+   if ((ua->argc != 3) || (strcasecmp(ua->argk[1], "dir"))) {
+      bsendmsg(ua, "1900 Bad .status command, missing arguments.\n");
+      return 1;
+   }
+
+   if (strcasecmp(ua->argk[2], "current") == 0) {
+      bsendmsg(ua, OKqstatus, ua->argk[2]);
+      lock_jcr_chain();
+      foreach_jcr(njcr) {
+         if (njcr->JobId != 0) {
+            bsendmsg(ua, DotStatusJob, njcr->JobId, njcr->JobStatus, njcr->JobErrors);
+         }
+         free_locked_jcr(njcr);
+      }
+      unlock_jcr_chain();
+   }
+   else if (strcasecmp(ua->argk[2], "last") == 0) {
+      bsendmsg(ua, OKqstatus, ua->argk[2]);
+      if ((last_jobs) && (last_jobs->size() > 0)) {
+         job = (s_last_job*)last_jobs->last();
+         bsendmsg(ua, DotStatusJob, job->JobId, job->JobStatus, job->Errors);
+      }
+   }
+   else {
+      bsendmsg(ua, "1900 Bad .status command, wrong argument.\n");
+      return 1;
+   }
+
+   return 1;
+}
 
 /*
  * status command
  */
-int status_cmd(UAContext *ua, char *cmd)
+int status_cmd(UAContext *ua, const char *cmd)
 {
    STORE *store;
    CLIENT *client;
@@ -56,11 +105,11 @@ int status_cmd(UAContext *ua, char *cmd)
 
    for (i=1; i<ua->argc; i++) {
       if (strcasecmp(ua->argk[i], _("all")) == 0) {
-         do_all_status(ua, cmd);
+         do_all_status(ua);
          return 1;
       } else if (strcasecmp(ua->argk[i], _("dir")) == 0 ||
                  strcasecmp(ua->argk[i], _("director")) == 0) {
-         do_director_status(ua, cmd);
+         do_director_status(ua);
          return 1;
       } else if (strcasecmp(ua->argk[i], _("client")) == 0) {
          client = get_client_resource(ua);
@@ -78,19 +127,21 @@
    }
    /* If no args, ask for status type */
    if (ua->argc == 1) {
+      char prmt[MAX_NAME_LENGTH];
+
       start_prompt(ua, _("Status available for:\n"));
       add_prompt(ua, _("Director"));
       add_prompt(ua, _("Storage"));
       add_prompt(ua, _("Client"));
       add_prompt(ua, _("All"));
       Dmsg0(20, "do_prompt: select daemon\n");
-      if ((item=do_prompt(ua, "", _("Select daemon type for status"), cmd, MAX_NAME_LENGTH)) < 0) {
+      if ((item=do_prompt(ua, "", _("Select daemon type for status"), prmt, sizeof(prmt))) < 0) {
         return 1;
      }
      Dmsg1(20, "item=%d\n", item);
      switch (item) {
      case 0:                         /* Director */
-         do_director_status(ua, cmd);
+         do_director_status(ua);
         break;
      case 1:
         store = select_storage_resource(ua);
@@ -105,7 +156,7 @@
         }
         break;
      case 3:
-         do_all_status(ua, cmd);
+         do_all_status(ua);
         break;
      default:
         break;
@@ -114,30 +165,33 @@
    return 1;
 }
 
-static void do_all_status(UAContext *ua, char *cmd)
+static void do_all_status(UAContext *ua)
 {
    STORE *store, **unique_store;
    CLIENT *client, **unique_client;
-   int i, j, found;
+   int i, j;
+   bool found;
 
-   do_director_status(ua, cmd);
+   do_director_status(ua);
 
    /* Count Storage items */
    LockRes();
-   store = NULL;
-   for (i=0; (store = (STORE *)GetNextRes(R_STORAGE, (RES *)store)); i++)
-      { }
+   i = 0;
+   foreach_res(store, R_STORAGE) {
+      i++;
+   }
    unique_store = (STORE **) malloc(i * sizeof(STORE));
    /* Find Unique Storage address/port */
-   store = (STORE *)GetNextRes(R_STORAGE, NULL);
    i = 0;
-   unique_store[i++] = store;
-   while ((store = (STORE *)GetNextRes(R_STORAGE, (RES *)store))) {
-      found = 0;
+   foreach_res(store, R_STORAGE) {
+      found = false;
+      if (!acl_access_ok(ua, Storage_ACL, store->hdr.name)) {
+         continue;
+      }
      for (j=0; j<i; j++) {
         if (strcmp(unique_store[j]->address, store->address) == 0 &&
             unique_store[j]->SDport == store->SDport) {
-            found = 1;
+            found = true;
            break;
         }
      }
@@ -156,20 +210,22 @@
 
    /* Count Client items */
    LockRes();
-   client = NULL;
-   for (i=0; (client = (CLIENT *)GetNextRes(R_CLIENT, (RES *)client)); i++)
-      { }
+   i = 0;
+   foreach_res(client, R_CLIENT) {
+      i++;
+   }
    unique_client = (CLIENT **)malloc(i * sizeof(CLIENT));
    /* Find Unique Client address/port */
-   client = (CLIENT *)GetNextRes(R_CLIENT, NULL);
    i = 0;
-   unique_client[i++] = client;
-   while ((client = (CLIENT *)GetNextRes(R_CLIENT, (RES *)client))) {
-      found = 0;
+   foreach_res(client, R_CLIENT) {
+      found = false;
+      if (!acl_access_ok(ua, Client_ACL, client->hdr.name)) {
+         continue;
+      }
      for (j=0; j<i; j++) {
         if (strcmp(unique_client[j]->address, client->address) == 0 &&
             unique_client[j]->FDport == client->FDport) {
-            found = 1;
+            found = true;
            break;
         }
      }
@@ -188,143 +244,37 @@
 }
 
-static void do_director_status(UAContext *ua, char *cmd)
+static void do_director_status(UAContext *ua)
 {
-   JCR *jcr;
-   int njobs = 0;
-   char *msg;
-   char dt[MAX_TIME_LENGTH], b1[30], b2[30];
-   int pool_mem = FALSE;
+   char dt[MAX_TIME_LENGTH];
 
    bsendmsg(ua, "%s Version: " VERSION " (" BDATE ") %s %s %s\n", my_name,
            HOST_OS, DISTNAME, DISTVER);
-   bstrftime(dt, sizeof(dt), daemon_start_time);
-   bsendmsg(ua, _("Daemon started %s, %d Job%s run.\n"), dt, last_job.NumJobs,
-      last_job.NumJobs == 1 ? 
"" : "s"); - if (last_job.NumJobs > 0) { - char termstat[30]; - - bstrftime(dt, sizeof(dt), last_job.end_time); - bsendmsg(ua, _("Last Job %s finished at %s\n"), last_job.Job, dt); - jobstatus_to_ascii(last_job.JobStatus, termstat, sizeof(termstat)); - - bsendmsg(ua, _(" Files=%s Bytes=%s Termination Status=%s\n"), - edit_uint64_with_commas(last_job.JobFiles, b1), - edit_uint64_with_commas(last_job.JobBytes, b2), - termstat); - } - lock_jcr_chain(); - for (jcr=NULL; (jcr=get_next_jcr(jcr)); njobs++) { - if (jcr->JobId == 0) { /* this is us */ - bstrftime(dt, sizeof(dt), jcr->start_time); - bsendmsg(ua, _("Console connected at %s\n"), dt); - free_locked_jcr(jcr); - njobs--; - continue; - } - switch (jcr->JobStatus) { - case JS_Created: - msg = _("is waiting execution"); - break; - case JS_Running: - msg = _("is running"); - break; - case JS_Blocked: - msg = _("is blocked"); - break; - case JS_Terminated: - msg = _("has terminated"); - break; - case JS_ErrorTerminated: - msg = _("has erred"); - break; - case JS_Error: - msg = _("has errors"); - break; - case JS_FatalError: - msg = _("has a fatal error"); - break; - case JS_Differences: - msg = _("has verify differences"); - break; - case JS_Canceled: - msg = _("has been canceled"); - break; - case JS_WaitFD: - msg = (char *) get_pool_memory(PM_FNAME); - Mmsg(&msg, _("is waiting on Client %s"), jcr->client->hdr.name); - pool_mem = TRUE; - break; - case JS_WaitSD: - msg = (char *) get_pool_memory(PM_FNAME); - Mmsg(&msg, _("is waiting on Storage %s"), jcr->store->hdr.name); - pool_mem = TRUE; - break; - case JS_WaitStoreRes: - msg = _("is waiting on max Storage jobs"); - break; - case JS_WaitClientRes: - msg = _("is waiting on max Client jobs"); - break; - case JS_WaitJobRes: - msg = _("is waiting on max Job jobs"); - break; - case JS_WaitMaxJobs: - msg = _("is waiting on max total jobs"); - break; - case JS_WaitStartTime: - msg = _("is waiting for its start time"); - break; - case JS_WaitPriority: - msg = _("is waiting for higher priority jobs to finish"); - break; - - default: - msg = (char *) get_pool_memory(PM_FNAME); - Mmsg(&msg, _("is in unknown state %c"), jcr->JobStatus); - pool_mem = TRUE; - break; - } - /* - * Now report Storage daemon status code - */ - switch (jcr->SDJobStatus) { - case JS_WaitMount: - if (pool_mem) { - free_pool_memory(msg); - pool_mem = FALSE; - } - msg = _("is waiting for a mount request"); - break; - case JS_WaitMedia: - if (pool_mem) { - free_pool_memory(msg); - pool_mem = FALSE; - } - msg = _("is waiting for an appendable Volume"); - break; - case JS_WaitFD: - if (!pool_mem) { - msg = (char *) get_pool_memory(PM_FNAME); - pool_mem = TRUE; - } - Mmsg(&msg, _("is waiting for Client %s to connect to Storage %s"), - jcr->client->hdr.name, jcr->store->hdr.name); - break; - } - bsendmsg(ua, _("JobId %d Job %s %s.\n"), jcr->JobId, jcr->Job, msg); - if (pool_mem) { - free_pool_memory(msg); - pool_mem = FALSE; - } - free_locked_jcr(jcr); - } - unlock_jcr_chain(); - - if (njobs == 0) { - bsendmsg(ua, _("No jobs are running.\n")); + bstrftime_nc(dt, sizeof(dt), daemon_start_time); + bsendmsg(ua, _("Daemon started %s, %d Job%s run since started.\n"), + dt, num_jobs_run, num_jobs_run == 1 ? 
"" : "s"); + if (debug_level > 0) { + char b1[35], b2[35], b3[35], b4[35]; + bsendmsg(ua, _(" Heap: bytes=%s max_bytes=%s bufs=%s max_bufs=%s\n"), + edit_uint64_with_commas(sm_bytes, b1), + edit_uint64_with_commas(sm_max_bytes, b2), + edit_uint64_with_commas(sm_buffers, b3), + edit_uint64_with_commas(sm_max_buffers, b4)); } - print_jobs_scheduled(ua); + /* + * List scheduled Jobs + */ + list_scheduled_jobs(ua); + + /* + * List running jobs + */ + list_running_jobs(ua); + + /* + * List terminated jobs + */ + list_terminated_jobs(ua); bsendmsg(ua, "====\n"); } @@ -399,21 +349,32 @@ static void do_client_status(UAContext *ua, CLIENT *client) static void prt_runhdr(UAContext *ua) { bsendmsg(ua, _("\nScheduled Jobs:\n")); - bsendmsg(ua, _("Level Type Scheduled Name Volume\n")); - bsendmsg(ua, _("===============================================================================\n")); + bsendmsg(ua, _("Level Type Pri Scheduled Name Volume\n")); + bsendmsg(ua, _("===================================================================================\n")); } -static void prt_runtime(UAContext *ua, JOB *job, int level, time_t runtime, POOL *pool) +/* Scheduling packet */ +struct sched_pkt { + dlink link; /* keep this as first item!!! */ + JOB *job; + int level; + int priority; + time_t runtime; + POOL *pool; +}; + +static void prt_runtime(UAContext *ua, sched_pkt *sp) { char dt[MAX_TIME_LENGTH]; + const char *level_ptr; bool ok = false; bool close_db = false; JCR *jcr = ua->jcr; MEDIA_DBR mr; memset(&mr, 0, sizeof(mr)); - if (job->JobType == JT_BACKUP) { + if (sp->job->JobType == JT_BACKUP) { jcr->db = NULL; - ok = complete_jcr_for_job(jcr, job, pool); + ok = complete_jcr_for_job(jcr, sp->job, sp->pool); if (jcr->db) { close_db = true; /* new db opened, remember to close it */ } @@ -424,10 +385,19 @@ static void prt_runtime(UAContext *ua, JOB *job, int level, time_t runtime, POOL bstrncpy(mr.VolumeName, "*unknown*", sizeof(mr.VolumeName)); } } - bstrftime(dt, sizeof(dt), runtime); - bsendmsg(ua, _("%-14s %-8s %-18s %-18s %s\n"), - level_to_str(level), job_type_to_str(job->JobType), dt, job->hdr.name, - mr.VolumeName); + bstrftime_nc(dt, sizeof(dt), sp->runtime); + switch (sp->job->JobType) { + case JT_ADMIN: + case JT_RESTORE: + level_ptr = " "; + break; + default: + level_ptr = level_to_str(sp->level); + break; + } + bsendmsg(ua, _("%-14s %-8s %3d %-18s %-18s %s\n"), + level_ptr, job_type_to_str(sp->job->JobType), sp->priority, dt, + sp->job->hdr.name, mr.VolumeName); if (close_db) { db_close_database(jcr, jcr->db); } @@ -435,38 +405,314 @@ static void prt_runtime(UAContext *ua, JOB *job, int level, time_t runtime, POOL } +/* + * Sort items by runtime, priority + */ +static int my_compare(void *item1, void *item2) +{ + sched_pkt *p1 = (sched_pkt *)item1; + sched_pkt *p2 = (sched_pkt *)item2; + if (p1->runtime < p2->runtime) { + return -1; + } else if (p1->runtime > p2->runtime) { + return 1; + } + if (p1->priority < p2->priority) { + return -1; + } else if (p1->priority > p2->priority) { + return 1; + } + return 0; +} + /* - * Find all jobs to be run this hour - * and the next hour. + * Find all jobs to be run in roughly the + * next 24 hours. 
*/ -static void print_jobs_scheduled(UAContext *ua) +static void list_scheduled_jobs(UAContext *ua) { time_t runtime; RUN *run; JOB *job; + int level, num_jobs = 0; + int priority; bool hdr_printed = false; - int level; + dlist sched; + sched_pkt *sp; - Dmsg0(200, "enter find_runs()\n"); + Dmsg0(200, "enter list_sched_jobs()\n"); /* Loop through all jobs */ LockRes(); - for (job=NULL; (job=(JOB *)GetNextRes(R_JOB, (RES *)job)); ) { - level = job->level; - run = find_next_run(job, runtime); - if (!run) { + foreach_res(job, R_JOB) { + if (!acl_access_ok(ua, Job_ACL, job->hdr.name)) { continue; } - if (run->level) { - level = run->level; - } - if (!hdr_printed) { - hdr_printed = true; - prt_runhdr(ua); + for (run=NULL; (run = find_next_run(run, job, runtime)); ) { + level = job->JobLevel; + if (run->level) { + level = run->level; + } + priority = job->Priority; + if (run->Priority) { + priority = run->Priority; + } + if (!hdr_printed) { + prt_runhdr(ua); + hdr_printed = true; + } + sp = (sched_pkt *)malloc(sizeof(sched_pkt)); + sp->job = job; + sp->level = level; + sp->priority = priority; + sp->runtime = runtime; + sp->pool = run->pool; + sched.binary_insert(sp, my_compare); + num_jobs++; } - prt_runtime(ua, job, level, runtime, run->pool); - } /* end for loop over resources */ UnlockRes(); - Dmsg0(200, "Leave find_runs()\n"); + foreach_dlist(sp, &sched) { + prt_runtime(ua, sp); + } + if (num_jobs == 0) { + bsendmsg(ua, _("No Scheduled Jobs.\n")); + } + bsendmsg(ua, "====\n"); + Dmsg0(200, "Leave list_sched_jobs_runs()\n"); +} + +static void list_running_jobs(UAContext *ua) +{ + JCR *jcr; + int njobs = 0; + const char *msg; + char *emsg; /* edited message */ + char dt[MAX_TIME_LENGTH]; + char level[10]; + bool pool_mem = false; + + Dmsg0(200, "enter list_run_jobs()\n"); + bsendmsg(ua, _("\nRunning Jobs:\n")); + lock_jcr_chain(); + foreach_jcr(jcr) { + njobs++; + if (jcr->JobId == 0) { /* this is us */ + /* this is a console or other control job. We only show console + * jobs in the status output. 
+ */ + if (jcr->JobType == JT_CONSOLE) { + bstrftime_nc(dt, sizeof(dt), jcr->start_time); + bsendmsg(ua, _("Console connected at %s\n"), dt); + } + njobs--; + } + free_locked_jcr(jcr); + } + if (njobs == 0) { + unlock_jcr_chain(); + /* Note the following message is used in regress -- don't change */ + bsendmsg(ua, _("No Jobs running.\n====\n")); + Dmsg0(200, "leave list_run_jobs()\n"); + return; + } + njobs = 0; + bsendmsg(ua, _(" JobId Level Name Status\n")); + bsendmsg(ua, _("======================================================================\n")); + foreach_jcr(jcr) { + if (jcr->JobId == 0 || !acl_access_ok(ua, Job_ACL, jcr->job->hdr.name)) { + free_locked_jcr(jcr); + continue; + } + njobs++; + switch (jcr->JobStatus) { + case JS_Created: + msg = _("is waiting execution"); + break; + case JS_Running: + msg = _("is running"); + break; + case JS_Blocked: + msg = _("is blocked"); + break; + case JS_Terminated: + msg = _("has terminated"); + break; + case JS_ErrorTerminated: + msg = _("has erred"); + break; + case JS_Error: + msg = _("has errors"); + break; + case JS_FatalError: + msg = _("has a fatal error"); + break; + case JS_Differences: + msg = _("has verify differences"); + break; + case JS_Canceled: + msg = _("has been canceled"); + break; + case JS_WaitFD: + emsg = (char *) get_pool_memory(PM_FNAME); + Mmsg(emsg, _("is waiting on Client %s"), jcr->client->hdr.name); + pool_mem = true; + msg = emsg; + break; + case JS_WaitSD: + emsg = (char *) get_pool_memory(PM_FNAME); + Mmsg(emsg, _("is waiting on Storage %s"), jcr->store->hdr.name); + pool_mem = true; + msg = emsg; + break; + case JS_WaitStoreRes: + msg = _("is waiting on max Storage jobs"); + break; + case JS_WaitClientRes: + msg = _("is waiting on max Client jobs"); + break; + case JS_WaitJobRes: + msg = _("is waiting on max Job jobs"); + break; + case JS_WaitMaxJobs: + msg = _("is waiting on max total jobs"); + break; + case JS_WaitStartTime: + msg = _("is waiting for its start time"); + break; + case JS_WaitPriority: + msg = _("is waiting for higher priority jobs to finish"); + break; + + default: + emsg = (char *) get_pool_memory(PM_FNAME); + Mmsg(emsg, _("is in unknown state %c"), jcr->JobStatus); + pool_mem = true; + msg = emsg; + break; + } + /* + * Now report Storage daemon status code + */ + switch (jcr->SDJobStatus) { + case JS_WaitMount: + if (pool_mem) { + free_pool_memory(emsg); + pool_mem = false; + } + msg = _("is waiting for a mount request"); + break; + case JS_WaitMedia: + if (pool_mem) { + free_pool_memory(emsg); + pool_mem = false; + } + msg = _("is waiting for an appendable Volume"); + break; + case JS_WaitFD: + if (!pool_mem) { + emsg = (char *) get_pool_memory(PM_FNAME); + pool_mem = true; + } + Mmsg(emsg, _("is waiting for Client %s to connect to Storage %s"), + jcr->client->hdr.name, jcr->store->hdr.name); + msg = emsg; + break; + } + switch (jcr->JobType) { + case JT_ADMIN: + case JT_RESTORE: + bstrncpy(level, " ", sizeof(level)); + break; + default: + bstrncpy(level, level_to_str(jcr->JobLevel), sizeof(level)); + level[7] = 0; + break; + } + + bsendmsg(ua, _("%6d %-6s %-20s %s\n"), + jcr->JobId, + level, + jcr->Job, + msg); + + if (pool_mem) { + free_pool_memory(emsg); + pool_mem = false; + } + free_locked_jcr(jcr); + } + unlock_jcr_chain(); + bsendmsg(ua, "====\n"); + Dmsg0(200, "leave list_run_jobs()\n"); +} + +static void list_terminated_jobs(UAContext *ua) +{ + char dt[MAX_TIME_LENGTH], b1[30], b2[30]; + char level[10]; + + if (last_jobs->empty()) { + bsendmsg(ua, _("No Terminated Jobs.\n")); + 
return; + } + lock_last_jobs_list(); + struct s_last_job *je; + bsendmsg(ua, _("\nTerminated Jobs:\n")); + bsendmsg(ua, _(" JobId Level Files Bytes Status Finished Name \n")); + bsendmsg(ua, _("========================================================================\n")); + foreach_dlist(je, last_jobs) { + char JobName[MAX_NAME_LENGTH]; + const char *termstat; + + bstrftime_nc(dt, sizeof(dt), je->end_time); + switch (je->JobType) { + case JT_ADMIN: + case JT_RESTORE: + bstrncpy(level, " ", sizeof(level)); + break; + default: + bstrncpy(level, level_to_str(je->JobLevel), sizeof(level)); + level[4] = 0; + break; + } + switch (je->JobStatus) { + case JS_Created: + termstat = "Created"; + break; + case JS_FatalError: + case JS_ErrorTerminated: + termstat = "Error"; + break; + case JS_Differences: + termstat = "Diffs"; + break; + case JS_Canceled: + termstat = "Cancel"; + break; + case JS_Terminated: + termstat = "OK"; + break; + default: + termstat = "Other"; + break; + } + bstrncpy(JobName, je->Job, sizeof(JobName)); + /* There are three periods after the Job name */ + char *p; + for (int i=0; i<3; i++) { + if ((p=strrchr(JobName, '.')) != NULL) { + *p = 0; + } + } + bsendmsg(ua, _("%6d %-6s %8s %14s %-7s %-8s %s\n"), + je->JobId, + level, + edit_uint64_with_commas(je->JobFiles, b1), + edit_uint64_with_commas(je->JobBytes, b2), + termstat, + dt, JobName); + } + bsendmsg(ua, "\n"); + unlock_last_jobs_list(); }
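
Note on the new dot command: qstatus_cmd() above gives a GUI or console a small
machine-readable protocol. The reply is one "1000 OK .status" line followed by
zero or more "JobId=%d JobStatus=%c JobErrors=%d" lines (per the OKqstatus and
DotStatusJob format strings in the patch). Below is a minimal client-side
parsing sketch; it is standalone illustration code, not part of this commit,
and the function name and the use of a plain stdio stream are assumptions --
a real console would read these lines from the Director's socket.

#include <stdio.h>
#include <string.h>

/* Hypothetical sketch: consume the reply to ".status dir current". */
static int parse_dot_status(FILE *in)
{
   char line[256];
   int jobid, errors;
   char status;

   /* First line must be the acknowledgement; anything else (for
    * example a "1900 Bad .status command ..." line) is an error. */
   if (!fgets(line, sizeof(line), in) ||
       strncmp(line, "1000 OK .status", 15) != 0) {
      return -1;
   }
   /* Then one "JobId=... JobStatus=... JobErrors=..." line per job. */
   while (fgets(line, sizeof(line), in)) {
      if (sscanf(line, "JobId=%d JobStatus=%c JobErrors=%d",
                 &jobid, &status, &errors) == 3) {
         printf("job %d: status=%c errors=%d\n", jobid, status, errors);
      }
   }
   return 0;
}

int main(void)
{
   return parse_dot_status(stdin) == 0 ? 0 : 1;
}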
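Note on the scheduler listing: list_scheduled_jobs() no longer prints each job
as it walks the resources. It collects a sched_pkt per scheduled run and keeps
the dlist ordered via binary_insert() with my_compare(), so the output is
sorted by runtime first and priority second. The same two-key ordering is
sketched below standalone with qsort(); the sched_ent struct and the sample
data are hypothetical stand-ins for sched_pkt, used only to show the
comparator contract.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct sched_ent {                 /* stand-in for sched_pkt */
   time_t runtime;
   int priority;
   const char *name;
};

/* Same ordering as my_compare(): earlier runtime wins, then lower priority. */
static int cmp_sched(const void *a, const void *b)
{
   const struct sched_ent *p1 = (const struct sched_ent *)a;
   const struct sched_ent *p2 = (const struct sched_ent *)b;
   if (p1->runtime != p2->runtime) {
      return p1->runtime < p2->runtime ? -1 : 1;
   }
   if (p1->priority != p2->priority) {
      return p1->priority < p2->priority ? -1 : 1;
   }
   return 0;
}

int main(void)
{
   struct sched_ent jobs[] = {
      { 2000, 10, "Catalog" },
      { 1000, 10, "Client2" },
      { 1000,  5, "Client1" },      /* same runtime as Client2, higher priority */
   };
   qsort(jobs, 3, sizeof(jobs[0]), cmp_sched);
   for (int i = 0; i < 3; i++) {    /* prints Client1, Client2, Catalog */
      printf("%s runtime=%ld pri=%d\n", jobs[i].name,
             (long)jobs[i].runtime, jobs[i].priority);
   }
   return 0;
}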