2 Bacula® - The Network Backup Solution
4 Copyright (C) 2001-2008 Free Software Foundation Europe e.V.
6 The main author of Bacula is Kern Sibbald, with contributions from
7 many others, a complete list can be found in the file AUTHORS.
8 This program is Free Software; you can redistribute it and/or
9 modify it under the terms of version three of the GNU Affero General Public
10 License as published by the Free Software Foundation and included
13 This program is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
18 You should have received a copy of the GNU Affero General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
23 Bacula® is a registered trademark of Kern Sibbald.
24 The licensor of Bacula is the Free Software Foundation Europe
25 (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
26 Switzerland, email:ftf@fsfeurope.org.
30 * Bacula Director -- User Agent Status Command
32 * Kern Sibbald, August MMI
/* Heap base recorded at daemon startup; used below to report heap growth */
41 extern void *start_heap;

/* Forward declarations for the static helpers implemented in this module */
43 static void list_scheduled_jobs(UAContext *ua);
44 static void list_running_jobs(UAContext *ua);
45 static void list_terminated_jobs(UAContext *ua);
46 static void do_storage_status(UAContext *ua, STORE *store, char *cmd);
47 static void do_client_status(UAContext *ua, CLIENT *client, char *cmd);
48 static void do_director_status(UAContext *ua);
49 static void do_all_status(UAContext *ua);
/* Exported (non-static): autochanger slot/content status, used by other dird code */
50 void status_slots(UAContext *ua, STORE *store);
51 void status_content(UAContext *ua, STORE *store);

/* Reply strings for the machine-readable ".status" protocol:
 * OKqstatus acknowledges the command; DotStatusJob formats one job line
 * (JobId as string, JobStatus as a single status character, error count). */
53 static char OKqstatus[] = "1000 OK .status\n";
54 static char DotStatusJob[] = "JobId=%s JobStatus=%c JobErrors=%d\n";
/*
 * Handle the machine-readable ".status" command from a console/API client.
 *
 *  ua->argk[1] selects the target daemon: "dir", "client", or "storage".
 *  ua->argk[2] selects the report for "dir": current | last | header |
 *              scheduled | running | terminated; for client/storage it is
 *              forwarded to the remote daemon's own .status handler.
 *
 * Sends a "1900 Bad .status" message on malformed input.
 */
60 bool dot_status_cmd(UAContext *ua, const char *cmd)
68 Dmsg2(20, "status=\"%s\" argc=%d\n", cmd, ua->argc);
/* Too few arguments: need daemon keyword plus a sub-keyword */
71 ua->send_msg("1900 Bad .status command, missing arguments.\n");
75 if (strcasecmp(ua->argk[1], "dir") == 0) {
76 if (strcasecmp(ua->argk[2], "current") == 0) {
77 ua->send_msg(OKqstatus, ua->argk[2]);
/* Report each truly-running job (JobId 0 = console/control JCR),
 * filtered through the console's Job ACL */
79 if (njcr->JobId != 0 && acl_access_ok(ua, Job_ACL, njcr->job->name())) {
80 ua->send_msg(DotStatusJob, edit_int64(njcr->JobId, ed1),
81 njcr->JobStatus, njcr->JobErrors);
85 } else if (strcasecmp(ua->argk[2], "last") == 0) {
86 ua->send_msg(OKqstatus, ua->argk[2]);
/* Most recently terminated job, if any, again subject to the Job ACL */
87 if ((last_jobs) && (last_jobs->size() > 0)) {
88 job = (s_last_job*)last_jobs->last();
89 if (acl_access_ok(ua, Job_ACL, job->Job)) {
90 ua->send_msg(DotStatusJob, edit_int64(job->JobId, ed1),
91 job->JobStatus, job->Errors);
94 } else if (strcasecmp(ua->argk[2], "header") == 0) {
95 list_dir_status_header(ua);
96 } else if (strcasecmp(ua->argk[2], "scheduled") == 0) {
97 list_scheduled_jobs(ua);
98 } else if (strcasecmp(ua->argk[2], "running") == 0) {
99 list_running_jobs(ua);
100 } else if (strcasecmp(ua->argk[2], "terminated") == 0) {
101 list_terminated_jobs(ua);
/* Unrecognized "dir" sub-keyword */
103 ua->send_msg("1900 Bad .status command, wrong argument.\n");
106 } else if (strcasecmp(ua->argk[1], "client") == 0) {
107 client = get_client_resource(ua);
109 Dmsg2(200, "Client=%s arg=%s\n", client->name(), NPRT(ua->argk[2]));
/* Forward the sub-keyword to the File daemon's .status handler */
110 do_client_status(ua, client, ua->argk[2]);
112 } else if (strcasecmp(ua->argk[1], "storage") == 0) {
113 store = get_storage_resource(ua, false /*no default*/);
/* Forward the sub-keyword to the Storage daemon's .status handler */
115 do_storage_status(ua, store, ua->argk[2]);
/* Unrecognized daemon keyword */
118 ua->send_msg("1900 Bad .status command, wrong argument.\n");
/* This is the *old* command handler, so we must return
 * 1 or it closes the connection
 *
 * Thin compatibility wrapper: delegates to dot_status_cmd() and keeps
 * the legacy "always return 1" contract so the console stays connected.
 */
128 int qstatus_cmd(UAContext *ua, const char *cmd)
130 dot_status_cmd(ua, cmd);
/*
 * Handle the interactive "status" console command.
 *
 * With arguments: "all", "dir"/"director", "client", or "storage"
 * (optionally with "slots" for a changer listing) select the report.
 * With no arguments: prompt the user to pick a daemon type.
 */
137 int status_cmd(UAContext *ua, const char *cmd)
143 Dmsg1(20, "status:%s:\n", cmd);
/* Scan arguments for a recognized status keyword */
145 for (i=1; i<ua->argc; i++) {
146 if (strcasecmp(ua->argk[i], NT_("all")) == 0) {
149 } else if (strcasecmp(ua->argk[i], NT_("dir")) == 0 ||
150 strcasecmp(ua->argk[i], NT_("director")) == 0) {
151 do_director_status(ua);
153 } else if (strcasecmp(ua->argk[i], NT_("client")) == 0) {
154 client = get_client_resource(ua);
156 do_client_status(ua, client, NULL);
/* Storage: a "slots" argument requests the autochanger slot listing
 * instead of the generic storage status */
160 store = get_storage_resource(ua, false/*no default*/);
162 if (find_arg(ua, NT_("slots")) > 0) {
163 status_slots(ua, store);
165 do_storage_status(ua, store, NULL);
/* If no args, ask for status type */
173 char prmt[MAX_NAME_LENGTH];
175 start_prompt(ua, _("Status available for:\n"));
176 add_prompt(ua, NT_("Director"));
177 add_prompt(ua, NT_("Storage"));
178 add_prompt(ua, NT_("Client"));
179 add_prompt(ua, NT_("All"));
180 Dmsg0(20, "do_prompt: select daemon\n");
/* do_prompt() returns the zero-based index of the chosen item, <0 on abort */
181 if ((item=do_prompt(ua, "", _("Select daemon type for status"), prmt, sizeof(prmt))) < 0) {
184 Dmsg1(20, "item=%d\n", item);
/* Dispatch on the prompt selection (indices match add_prompt order above) */
186 case 0: /* Director */
187 do_director_status(ua);
190 store = select_storage_resource(ua);
192 do_storage_status(ua, store, NULL);
196 client = select_client_resource(ua);
198 do_client_status(ua, client, NULL);
/*
 * "status all": show Director status, then query each *unique*
 * Storage daemon and each *unique* File daemon exactly once.
 * Uniqueness is by address+port, so multiple resources pointing at the
 * same daemon produce a single remote query.
 */
211 static void do_all_status(UAContext *ua)
213 STORE *store, **unique_store;
214 CLIENT *client, **unique_client;
218 do_director_status(ua);

/* Count Storage items */
223 foreach_res(store, R_STORAGE) {
/* NOTE: allocates i pointer slots; "sizeof(STORE)" over-allocates vs the
 * needed sizeof(STORE*), which is wasteful but harmless */
226 unique_store = (STORE **) malloc(i * sizeof(STORE));
/* Find Unique Storage address/port */
229 foreach_res(store, R_STORAGE) {
/* Skip storages the console may not see */
231 if (!acl_access_ok(ua, Storage_ACL, store->name())) {
/* De-duplicate by comparing address and SD port against those kept so far */
234 for (j=0; j<i; j++) {
235 if (strcmp(unique_store[j]->address, store->address) == 0 &&
236 unique_store[j]->SDport == store->SDport) {
242 unique_store[i++] = store;
243 Dmsg2(40, "Stuffing: %s:%d\n", store->address, store->SDport);

/* Call each unique Storage daemon */
249 for (j=0; j<i; j++) {
250 do_storage_status(ua, unique_store[j], NULL);

/* Count Client items */
257 foreach_res(client, R_CLIENT) {
/* Same over-allocation note as for the storage array above */
260 unique_client = (CLIENT **)malloc(i * sizeof(CLIENT));
/* Find Unique Client address/port */
263 foreach_res(client, R_CLIENT) {
265 if (!acl_access_ok(ua, Client_ACL, client->name())) {
/* De-duplicate by address and FD port */
268 for (j=0; j<i; j++) {
269 if (strcmp(unique_client[j]->address, client->address) == 0 &&
270 unique_client[j]->FDport == client->FDport) {
276 unique_client[i++] = client;
277 Dmsg2(40, "Stuffing: %s:%d\n", client->address, client->FDport);

/* Call each unique File daemon */
283 for (j=0; j<i; j++) {
284 do_client_status(ua, unique_client[j], NULL);
/*
 * Emit the Director status header: version banner, start time,
 * job counters, smartalloc heap statistics, and (at debug level > 0)
 * the list of loaded plugins.
 */
290 void list_dir_status_header(UAContext *ua)
292 char dt[MAX_TIME_LENGTH];
293 char b1[35], b2[35], b3[35], b4[35], b5[35];

295 ua->send_msg(_("%s Version: %s (%s) %s %s %s\n"), my_name, VERSION, BDATE,
296 HOST_OS, DISTNAME, DISTVER);
297 bstrftime_nc(dt, sizeof(dt), daemon_start_time);
298 ua->send_msg(_("Daemon started %s. Jobs: run=%d, running=%d\n"), dt,
299 num_jobs_run, job_count());
/* Heap growth = current program break minus the break recorded at startup;
 * sm_* counters come from the smartalloc allocator */
300 ua->send_msg(_(" Heap: heap=%s smbytes=%s max_bytes=%s bufs=%s max_bufs=%s\n"),
301 edit_uint64_with_commas((char *)sbrk(0)-(char *)start_heap, b1),
302 edit_uint64_with_commas(sm_bytes, b2),
303 edit_uint64_with_commas(sm_max_bytes, b3),
304 edit_uint64_with_commas(sm_buffers, b4),
305 edit_uint64_with_commas(sm_max_buffers, b5));

/* TODO: use this function once for all daemons */
/* Plugin list only shown when debugging is enabled and plugins are loaded */
308 if (debug_level > 0 && plugin_list->size() > 0) {
311 POOL_MEM msg(PM_FNAME);
312 pm_strcpy(msg, " Plugin: ");
313 foreach_alist(plugin, plugin_list) {
314 len = pm_strcat(msg, plugin->file);
/* Continuation line indent for the next plugin name */
316 pm_strcat(msg, "\n ");
321 ua->send_msg("%s\n", msg.c_str());
/*
 * Full Director status: header, then scheduled, running, and
 * terminated job sections, closed with a "====" separator.
 */
325 static void do_director_status(UAContext *ua)
327 list_dir_status_header(ua);

/* List scheduled Jobs */
332 list_scheduled_jobs(ua);

/* List running jobs */
337 list_running_jobs(ua);

/* List terminated jobs */
342 list_terminated_jobs(ua);
343 ua->send_msg("====\n");
/*
 * Connect to a Storage daemon, forward a ".status <cmd>" request, and
 * relay every reply line back to the console. The SD socket is closed
 * (and the jcr pointer cleared) on both the failure and success paths.
 */
346 static void do_storage_status(UAContext *ua, STORE *store, char *cmd)
/* Set the write-storage in the UA's JCR so the connect helper knows the target */
351 lstore.store = store;
352 pm_strcpy(lstore.store_source, _("unknown source"));
353 set_wstorage(ua->jcr, &lstore);
/* Try connecting for up to 15 seconds */
355 if (!ua->api) ua->send_msg(_("Connecting to Storage daemon %s at %s:%d\n"),
356 store->name(), store->address, store->SDport);
357 if (!connect_to_storage_daemon(ua->jcr, 1, 15, 0)) {
358 ua->send_msg(_("\nFailed to connect to Storage daemon %s.\n====\n"),
/* Close any half-open socket before bailing out */
360 if (ua->jcr->store_bsock) {
361 bnet_close(ua->jcr->store_bsock);
362 ua->jcr->store_bsock = NULL;
366 Dmsg0(20, _("Connected to storage daemon\n"));
367 sd = ua->jcr->store_bsock;
/* Send the status request; cmd may carry a sub-keyword from .status */
369 sd->fsend(".status %s", cmd);
/* Relay SD output until it signals end-of-data / closes */
373 while (sd->recv() >= 0) {
374 ua->send_msg("%s", sd->msg);
376 sd->signal( BNET_TERMINATE);
378 ua->jcr->store_bsock = NULL;
/*
 * Connect to a File daemon (Client), forward ".status <cmd>", and relay
 * its output to the console. Mirrors do_storage_status() above.
 */
382 static void do_client_status(UAContext *ua, CLIENT *client, char *cmd)
/* Connect to File daemon */
388 ua->jcr->client = client;
/* Release any old dummy key */
390 if (ua->jcr->sd_auth_key) {
391 free(ua->jcr->sd_auth_key);
/* Create a new dummy SD auth key */
394 ua->jcr->sd_auth_key = bstrdup("dummy");

/* Try to connect for 15 seconds */
397 if (!ua->api) ua->send_msg(_("Connecting to Client %s at %s:%d\n"),
398 client->name(), client->address, client->FDport);
399 if (!connect_to_file_daemon(ua->jcr, 1, 15, 0)) {
400 ua->send_msg(_("Failed to connect to Client %s.\n====\n"),
/* Close any half-open socket before bailing out */
402 if (ua->jcr->file_bsock) {
403 bnet_close(ua->jcr->file_bsock);
404 ua->jcr->file_bsock = NULL;
408 Dmsg0(20, _("Connected to file daemon\n"));
409 fd = ua->jcr->file_bsock;
411 fd->fsend(".status %s", cmd);
/* Relay FD output until it signals end-of-data / closes */
415 while (fd->recv() >= 0) {
416 ua->send_msg("%s", fd->msg);
418 fd->signal(BNET_TERMINATE);
420 ua->jcr->file_bsock = NULL;
/* Print the column header for the "Scheduled Jobs" table */
425 static void prt_runhdr(UAContext *ua)
428 ua->send_msg(_("\nScheduled Jobs:\n"));
429 ua->send_msg(_("Level Type Pri Scheduled Name Volume\n"));
430 ua->send_msg(_("===================================================================================\n"));
/* Scheduling packet: one entry per upcoming (job, runtime) pair,
 * chained into a dlist and sorted by my_compare() below. */
436 dlink link; /* keep this as first item!!! */
/*
 * Print one scheduled-job line: level, type, priority, run time, job
 * name, and — for backup jobs — the Volume that would be appended to.
 * For backups this may open a catalog connection (closed again at the
 * end) to look up the next appendable Volume.
 */
445 static void prt_runtime(UAContext *ua, sched_pkt *sp)
447 char dt[MAX_TIME_LENGTH];
448 const char *level_ptr;
450 bool close_db = false;

/* Remember the control JCR's job type so it can be restored at the end */
455 orig_jobtype = jcr->getJobType();
456 memset(&mr, 0, sizeof(mr));
/* Volume lookup only makes sense for backup jobs */
457 if (sp->job->JobType == JT_BACKUP) {
459 ok = complete_jcr_for_job(jcr, sp->job, sp->pool);
460 Dmsg1(250, "Using pool=%s\n", jcr->pool->name());
462 close_db = true; /* new db opened, remember to close it */
465 mr.PoolId = jcr->jr.PoolId;
466 mr.StorageId = sp->store->StorageId;
467 jcr->wstore = sp->store;
468 Dmsg0(250, "call find_next_volume_for_append\n");
/* no need to set ScratchPoolId, since we use fnv_no_create_vol */
470 ok = find_next_volume_for_append(jcr, &mr, 1, fnv_no_create_vol, fnv_no_prune);
/* No candidate Volume found: show a placeholder */
473 bstrncpy(mr.VolumeName, "*unknown*", sizeof(mr.VolumeName));
476 bstrftime_nc(dt, sizeof(dt), sp->runtime);
477 switch (sp->job->JobType) {
483 level_ptr = level_to_str(sp->level);
/* API mode uses tab-separated columns; console mode fixed-width columns */
487 ua->send_msg(_("%-14s\t%-8s\t%3d\t%-18s\t%-18s\t%s\n"),
488 level_ptr, job_type_to_str(sp->job->JobType), sp->priority, dt,
489 sp->job->name(), mr.VolumeName);
491 ua->send_msg(_("%-14s %-8s %3d %-18s %-18s %s\n"),
492 level_ptr, job_type_to_str(sp->job->JobType), sp->priority, dt,
493 sp->job->name(), mr.VolumeName);
/* Close the catalog opened above, restore the UA's db and job type */
496 db_close_database(jcr, jcr->db);
498 jcr->db = ua->db; /* restore ua db to jcr */
499 jcr->set_JobType(orig_jobtype);
/*
 * Sort items by runtime, priority
 *
 * dlist comparator for sched_pkt: earlier runtime first, then lower
 * priority value first for equal runtimes.
 */
505 static int my_compare(void *item1, void *item2)
507 sched_pkt *p1 = (sched_pkt *)item1;
508 sched_pkt *p2 = (sched_pkt *)item2;
509 if (p1->runtime < p2->runtime) {
511 } else if (p1->runtime > p2->runtime) {
/* Runtimes equal: fall back to priority ordering */
514 if (p1->priority < p2->priority) {
516 } else if (p1->priority > p2->priority) {
/*
 * Find all jobs to be run in roughly the
 * next "days" days (default window; overridable with a days=N argument,
 * clamped to 0..500), collect them as sched_pkt entries sorted by
 * runtime/priority, and print one line per entry via prt_runtime().
 */
526 static void list_scheduled_jobs(UAContext *ua)
531 int level, num_jobs = 0;
533 bool hdr_printed = false;
538 Dmsg0(200, "enter list_sched_jobs()\n");

/* Optional days=N argument limits/extends the look-ahead window */
541 i = find_arg_with_value(ua, NT_("days"));
543 days = atoi(ua->argv[i]);
544 if (((days < 0) || (days > 500)) && !ua->api) {
545 ua->send_msg(_("Ignoring invalid value for days. Max is 500.\n"));

/* Loop through all jobs */
552 foreach_res(job, R_JOB) {
/* Skip jobs hidden by ACL or disabled in configuration */
553 if (!acl_access_ok(ua, Job_ACL, job->name()) || !job->enabled) {
/* Walk every scheduled run of this job inside the window */
556 for (run=NULL; (run = find_next_run(run, job, runtime, days)); ) {
558 level = job->JobLevel;
/* Job-level priority is the default; a run override wins */
562 priority = job->Priority;
564 priority = run->Priority;
/* Build a sched_pkt and insert it sorted (runtime, then priority) */
570 sp = (sched_pkt *)malloc(sizeof(sched_pkt));
573 sp->priority = priority;
574 sp->runtime = runtime;
575 sp->pool = run->pool;
576 get_job_storage(&store, job, run);
577 sp->store = store.store;
578 Dmsg3(250, "job=%s store=%s MediaType=%s\n", job->name(), sp->store->name(), sp->store->media_type);
579 sched.binary_insert_multiple(sp, my_compare);
582 } /* end for loop over resources */

/* Print the sorted schedule */
584 foreach_dlist(sp, &sched) {
587 if (num_jobs == 0 && !ua->api) {
588 ua->send_msg(_("No Scheduled Jobs.\n"));
590 if (!ua->api) ua->send_msg("====\n");
591 Dmsg0(200, "Leave list_sched_jobs_runs()\n");
/*
 * Print the "Running Jobs" section: one line per active JCR visible to
 * this console, with a human-readable status derived first from the
 * Director's JobStatus and then refined by the Storage daemon's
 * SDJobStatus. Status strings that embed names (client/storage) are
 * built in pool memory (emsg) and freed before the next iteration.
 */
594 static void list_running_jobs(UAContext *ua)
599 char *emsg; /* edited message */
600 char dt[MAX_TIME_LENGTH];
602 bool pool_mem = false;
604 Dmsg0(200, "enter list_run_jobs()\n");
605 if (!ua->api) ua->send_msg(_("\nRunning Jobs:\n"));
/* First pass: count real jobs; JobId 0 entries are control/console JCRs */
607 if (jcr->JobId == 0) { /* this is us */
/* this is a console or other control job. We only show console
 * jobs in the status output.
 */
611 if (jcr->getJobType() == JT_CONSOLE && !ua->api) {
612 bstrftime_nc(dt, sizeof(dt), jcr->start_time);
613 ua->send_msg(_("Console connected at %s\n"), dt);

/* Note the following message is used in regress -- don't change */
623 if (!ua->api) ua->send_msg(_("No Jobs running.\n====\n"));
624 Dmsg0(200, "leave list_run_jobs()\n");

/* Table header for the per-job lines below */
629 ua->send_msg(_(" JobId Level Name Status\n"));
630 ua->send_msg(_("======================================================================\n"));
/* Second pass: skip control JCRs and jobs the console's ACL hides */
633 if (jcr->JobId == 0 || !acl_access_ok(ua, Job_ACL, jcr->job->name())) {
/* Map Director job status to a display string; the JS_Wait* cases build
 * a dynamic message (pool_mem) that embeds resource names */
637 switch (jcr->JobStatus) {
639 msg = _("is waiting execution");
642 msg = _("is running");
645 msg = _("is blocked");
648 msg = _("has terminated");
651 msg = _("has terminated with warnings");
653 case JS_ErrorTerminated:
654 msg = _("has erred");
657 msg = _("has errors");
660 msg = _("has a fatal error");
663 msg = _("has verify differences");
666 msg = _("has been canceled");
/* Waiting on FD: include the client name when known */
669 emsg = (char *) get_pool_memory(PM_FNAME);
671 Mmsg(emsg, _("is waiting on Client"));
673 Mmsg(emsg, _("is waiting on Client %s"), jcr->client->name());
/* Waiting on SD: prefer the write storage name, else the read storage */
679 emsg = (char *) get_pool_memory(PM_FNAME);
681 Mmsg(emsg, _("is waiting on Storage \"%s\""), jcr->wstore->name());
682 } else if (jcr->rstore) {
683 Mmsg(emsg, _("is waiting on Storage \"%s\""), jcr->rstore->name());
685 Mmsg(emsg, _("is waiting on Storage"));
690 case JS_WaitStoreRes:
691 msg = _("is waiting on max Storage jobs");
693 case JS_WaitClientRes:
694 msg = _("is waiting on max Client jobs");
697 msg = _("is waiting on max Job jobs");
700 msg = _("is waiting on max total jobs");
702 case JS_WaitStartTime:
703 msg = _("is waiting for its start time");
705 case JS_WaitPriority:
706 msg = _("is waiting for higher priority jobs to finish");
708 case JS_DataCommitting:
709 msg = _("SD committing Data");
711 case JS_DataDespooling:
712 msg = _("SD despooling Data");
714 case JS_AttrDespooling:
715 msg = _("SD despooling Attributes");
717 case JS_AttrInserting:
718 msg = _("Dir inserting Attributes");
/* Unknown status code: report the raw status character */
722 emsg = (char *)get_pool_memory(PM_FNAME);
723 Mmsg(emsg, _("is in unknown state %c"), jcr->JobStatus);
/*
 * Now report Storage daemon status code
 * (overrides the Director-side message for mount/media waits etc.;
 * any pool_mem message built above is released first)
 */
731 switch (jcr->SDJobStatus) {
734 free_pool_memory(emsg);
737 msg = _("is waiting for a mount request");
741 free_pool_memory(emsg);
744 msg = _("is waiting for an appendable Volume");
748 emsg = (char *)get_pool_memory(PM_FNAME);
/* Client-to-SD handshake pending: name both ends when known */
751 if (!jcr->client || !jcr->wstore) {
752 Mmsg(emsg, _("is waiting for Client to connect to Storage daemon"));
754 Mmsg(emsg, _("is waiting for Client %s to connect to Storage %s"),
755 jcr->client->name(), jcr->wstore->name());
759 case JS_DataCommitting:
760 msg = _("SD committing Data");
762 case JS_DataDespooling:
763 msg = _("SD despooling Data");
765 case JS_AttrDespooling:
766 msg = _("SD despooling Attributes");
768 case JS_AttrInserting:
769 msg = _("Dir inserting Attributes");
/* Level column only applies to job types that have one */
772 switch (jcr->getJobType()) {
775 bstrncpy(level, " ", sizeof(level));
778 bstrncpy(level, level_to_str(jcr->getJobLevel()), sizeof(level));
/* API mode: tab-separated, comment with spaces escaped for the wire */
784 bash_spaces(jcr->comment);
785 ua->send_msg(_("%6d\t%-6s\t%-20s\t%s\t%s\n"),
786 jcr->JobId, level, jcr->Job, msg, jcr->comment);
787 unbash_spaces(jcr->comment);
/* Console mode: fixed-width columns, comment on its own line */
789 ua->send_msg(_("%6d %-6s %-20s %s\n"),
790 jcr->JobId, level, jcr->Job, msg);
/* Display comments if any */
793 ua->send_msg(_(" %-30s\n"), jcr->comment);
/* Release the dynamically built status message, if one was used */
798 free_pool_memory(emsg);
803 if (!ua->api) ua->send_msg("====\n");
804 Dmsg0(200, "leave list_run_jobs()\n");
/*
 * Print the "Terminated Jobs" section from the global last_jobs list:
 * JobId, level, file/byte totals, final status, end time, and the job
 * name with its timestamp suffix stripped. The list is locked for the
 * duration of the walk.
 */
807 static void list_terminated_jobs(UAContext *ua)
809 char dt[MAX_TIME_LENGTH], b1[30], b2[30];
/* Nothing recorded yet */
812 if (last_jobs->empty()) {
813 if (!ua->api) ua->send_msg(_("No Terminated Jobs.\n"));
816 lock_last_jobs_list();
817 struct s_last_job *je;
819 ua->send_msg(_("\nTerminated Jobs:\n"));
820 ua->send_msg(_(" JobId Level Files Bytes Status Finished Name \n"));
821 ua->send_msg(_("====================================================================\n"));
823 foreach_dlist(je, last_jobs) {
824 char JobName[MAX_NAME_LENGTH];
825 const char *termstat;
827 bstrncpy(JobName, je->Job, sizeof(JobName));
/* There are three periods after the Job name */
/* ...strip them (the date/time suffix) by truncating at each '.' from the right */
830 for (int i=0; i<3; i++) {
831 if ((p=strrchr(JobName, '.')) != NULL) {
/* ACL check uses the stripped base job name */
836 if (!acl_access_ok(ua, Job_ACL, JobName)) {
840 bstrftime_nc(dt, sizeof(dt), je->end_time);
/* Level column only applies to job types that have one */
841 switch (je->JobType) {
844 bstrncpy(level, " ", sizeof(level));
847 bstrncpy(level, level_to_str(je->JobLevel), sizeof(level));
/* Map the final JobStatus character to a short display word */
851 switch (je->JobStatus) {
853 termstat = _("Created");
856 case JS_ErrorTerminated:
857 termstat = _("Error");
860 termstat = _("Diffs");
863 termstat = _("Cancel");
869 termstat = _("OK -- with warnings");
872 termstat = _("Other");
/* API mode: tab-separated; console mode: fixed-width columns */
876 ua->send_msg(_("%6d\t%-6s\t%8s\t%10s\t%-7s\t%-8s\t%s\n"),
879 edit_uint64_with_commas(je->JobFiles, b1),
880 edit_uint64_with_suffix(je->JobBytes, b2),
884 ua->send_msg(_("%6d %-6s %8s %10s %-7s %-8s %s\n"),
887 edit_uint64_with_commas(je->JobFiles, b1),
888 edit_uint64_with_suffix(je->JobBytes, b2),
893 if (!ua->api) ua->send_msg(_("\n"));
894 unlock_last_jobs_list();