2 Bacula® - The Network Backup Solution
4 Copyright (C) 2001-2008 Free Software Foundation Europe e.V.
6 The main author of Bacula is Kern Sibbald, with contributions from
7 many others, a complete list can be found in the file AUTHORS.
8 This program is Free Software; you can redistribute it and/or
9 modify it under the terms of version two of the GNU General Public
10 License as published by the Free Software Foundation and included
13 This program is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
23 Bacula® is a registered trademark of Kern Sibbald.
24 The licensor of Bacula is the Free Software Foundation Europe
25 (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
26 Switzerland, email:ftf@fsfeurope.org.
30 * Bacula Director -- User Agent Status Command
32 * Kern Sibbald, August MMI
/* Heap base pointer set at daemon startup; used below to report heap growth
 * via sbrk(0) - start_heap in list_dir_status_header(). */
41 extern void *start_heap;

/* Forward declarations for the static status helpers defined later in this file. */
43 static void list_scheduled_jobs(UAContext *ua);
44 static void list_running_jobs(UAContext *ua);
45 static void list_terminated_jobs(UAContext *ua);
46 static void do_storage_status(UAContext *ua, STORE *store, char *cmd);
47 static void do_client_status(UAContext *ua, CLIENT *client, char *cmd);
48 static void do_director_status(UAContext *ua);
49 static void do_all_status(UAContext *ua);
/* Non-static: also used outside this file (no "static" qualifier). */
50 void status_slots(UAContext *ua, STORE *store);

/* Canned reply strings for the ".status" API command:
 *   OKqstatus     - success acknowledgement line
 *   DotStatusJob  - per-job line: JobId, one-char JobStatus, error count */
52 static char OKqstatus[] = "1000 OK .status\n";
53 static char DotStatusJob[] = "JobId=%s JobStatus=%c JobErrors=%d\n";
/*
 * Handle the machine-readable ".status" command.
 * Dispatches on argk[1] ("dir", "client" or "storage") and, for "dir",
 * on argk[2] ("current", "last", "header", "scheduled", "running",
 * "terminated").  Per-job output is suppressed unless the console's
 * Job ACL allows access to the job.
 * NOTE(review): this extract elides many lines of the original body
 * (loop headers, closing braces, returns); comments describe only the
 * fragments visible here.
 */
59 bool dot_status_cmd(UAContext *ua, const char *cmd)
67    Dmsg2(20, "status=\"%s\" argc=%d\n", cmd, ua->argc);
/* Too few arguments: report the protocol error to the caller. */
70       ua->send_msg("1900 Bad .status command, missing arguments.\n");
74    if (strcasecmp(ua->argk[1], "dir") == 0) {
/* ".status dir current": acknowledge, then list currently running jobs. */
75       if (strcasecmp(ua->argk[2], "current") == 0) {
76          ua->send_msg(OKqstatus, ua->argk[2]);
/* Skip the console's own control JCR (JobId 0) and ACL-denied jobs. */
78             if (njcr->JobId != 0 && acl_access_ok(ua, Job_ACL, njcr->job->name())) {
79                ua->send_msg(DotStatusJob, edit_int64(njcr->JobId, ed1),
80                   njcr->JobStatus, njcr->JobErrors);
/* ".status dir last": report the most recently terminated job, if any. */
84       } else if (strcasecmp(ua->argk[2], "last") == 0) {
85          ua->send_msg(OKqstatus, ua->argk[2]);
86          if ((last_jobs) && (last_jobs->size() > 0)) {
87             job = (s_last_job*)last_jobs->last();
88             if (acl_access_ok(ua, Job_ACL, job->Job)) {
89                ua->send_msg(DotStatusJob, edit_int64(job->JobId, ed1),
90                   job->JobStatus, job->Errors);
/* Remaining "dir" subcommands map straight to the helper functions below. */
93       } else if (strcasecmp(ua->argk[2], "header") == 0) {
94          list_dir_status_header(ua);
95       } else if (strcasecmp(ua->argk[2], "scheduled") == 0) {
96          list_scheduled_jobs(ua);
97       } else if (strcasecmp(ua->argk[2], "running") == 0) {
98          list_running_jobs(ua);
99       } else if (strcasecmp(ua->argk[2], "terminated") == 0) {
100          list_terminated_jobs(ua);
102          ua->send_msg("1900 Bad .status command, wrong argument.\n");
/* ".status client ...": resolve the client resource, then query it. */
105    } else if (strcasecmp(ua->argk[1], "client") == 0) {
106       client = get_client_resource(ua);
108          Dmsg2(200, "Client=%s arg=%s\n", client->name(), NPRT(ua->argk[2]));
109          do_client_status(ua, client, ua->argk[2]);
/* ".status storage ...": resolve the storage resource (no default), query it. */
111    } else if (strcasecmp(ua->argk[1], "storage") == 0) {
112       store = get_storage_resource(ua, false /*no default*/);
114       do_storage_status(ua, store, ua->argk[2]);
117       ua->send_msg("1900 Bad .status command, wrong argument.\n");
124 /* This is the *old* command handler, so we must return
125  * 1 or it closes the connection
/* Legacy entry point: delegate to dot_status_cmd(); its bool result is
 * deliberately ignored (the return-1 line is elided in this extract). */
127 int qstatus_cmd(UAContext *ua, const char *cmd)
129    dot_status_cmd(ua, cmd);
/*
 * Handle the interactive "status" console command.
 * First scans the argument list for "all", "dir"/"director", "client"
 * or "storage"; if no recognized keyword is given, prompts the user to
 * choose a daemon type.
 * NOTE(review): several lines (returns, closing braces, the "all" and
 * "storage" keyword tests) are elided in this extract.
 */
136 int status_cmd(UAContext *ua, const char *cmd)
142    Dmsg1(20, "status:%s:\n", cmd);
/* Scan arguments (starting at 1, past the command word) for a keyword. */
144    for (i=1; i<ua->argc; i++) {
145       if (strcasecmp(ua->argk[i], NT_("all")) == 0) {
148       } else if (strcasecmp(ua->argk[i], NT_("dir")) == 0 ||
149                  strcasecmp(ua->argk[i], NT_("director")) == 0) {
150          do_director_status(ua);
152       } else if (strcasecmp(ua->argk[i], NT_("client")) == 0) {
153          client = get_client_resource(ua);
155             do_client_status(ua, client, NULL);
/* Storage branch (the keyword test line is elided in this extract). */
159          store = get_storage_resource(ua, false/*no default*/);
/* "status storage=... slots" shows autochanger slot contents instead. */
161             if (find_arg(ua, NT_("slots")) > 0) {
162                status_slots(ua, store);
164                do_storage_status(ua, store, NULL);
170    /* If no args, ask for status type */
172       char prmt[MAX_NAME_LENGTH];
174       start_prompt(ua, _("Status available for:\n"));
175       add_prompt(ua, NT_("Director"));
176       add_prompt(ua, NT_("Storage"));
177       add_prompt(ua, NT_("Client"));
178       add_prompt(ua, NT_("All"));
179       Dmsg0(20, "do_prompt: select daemon\n");
/* do_prompt() returns the selected item index, or < 0 on cancel/error. */
180       if ((item=do_prompt(ua, "", _("Select daemon type for status"), prmt, sizeof(prmt))) < 0) {
183       Dmsg1(20, "item=%d\n", item);
/* Dispatch on the prompt choice (indices match add_prompt order above). */
185       case 0:                         /* Director */
186          do_director_status(ua);
189          store = select_storage_resource(ua);
191             do_storage_status(ua, store, NULL);
195          client = select_client_resource(ua);
197             do_client_status(ua, client, NULL);
/*
 * "status all": show Director status, then the status of every unique
 * Storage daemon and every unique File daemon the console may access.
 * Uniqueness is by address + port so daemons defined under several
 * resource names are only contacted once.
 * NOTE(review): loop counters i/j are declared in elided lines; the
 * free() of the two malloc'd arrays is also not visible in this extract.
 */
210 static void do_all_status(UAContext *ua)
212    STORE *store, **unique_store;
213    CLIENT *client, **unique_client;
217    do_director_status(ua);
219    /* Count Storage items */
222    foreach_res(store, R_STORAGE) {
/* Worst-case array: one slot per defined Storage resource. */
225    unique_store = (STORE **) malloc(i * sizeof(STORE));
226    /* Find Unique Storage address/port */
228    foreach_res(store, R_STORAGE) {
/* Skip storages this console's ACL does not permit. */
230       if (!acl_access_ok(ua, Storage_ACL, store->name())) {
/* De-duplicate: same address and SD port means same daemon. */
233       for (j=0; j<i; j++) {
234          if (strcmp(unique_store[j]->address, store->address) == 0 &&
235              unique_store[j]->SDport == store->SDport) {
241          unique_store[i++] = store;
242          Dmsg2(40, "Stuffing: %s:%d\n", store->address, store->SDport);
247    /* Call each unique Storage daemon */
248    for (j=0; j<i; j++) {
249       do_storage_status(ua, unique_store[j], NULL);
253    /* Count Client items */
256    foreach_res(client, R_CLIENT) {
259    unique_client = (CLIENT **)malloc(i * sizeof(CLIENT));
260    /* Find Unique Client address/port */
262    foreach_res(client, R_CLIENT) {
264       if (!acl_access_ok(ua, Client_ACL, client->name())) {
/* De-duplicate clients by address + FD port, mirroring the storage loop. */
267       for (j=0; j<i; j++) {
268          if (strcmp(unique_client[j]->address, client->address) == 0 &&
269              unique_client[j]->FDport == client->FDport) {
275          unique_client[i++] = client;
276          Dmsg2(40, "Stuffing: %s:%d\n", client->address, client->FDport);
281    /* Call each unique File daemon */
282    for (j=0; j<i; j++) {
283       do_client_status(ua, unique_client[j], NULL);
/*
 * Emit the Director status header: version/platform line, start time,
 * number of jobs run, heap/memory statistics and (at debug level > 0)
 * the list of loaded plugins.
 */
289 void list_dir_status_header(UAContext *ua)
291    char dt[MAX_TIME_LENGTH];
/* Scratch buffers for edit_uint64_with_commas() number formatting. */
292    char b1[35], b2[35], b3[35], b4[35], b5[35];
294    ua->send_msg(_("%s Version: %s (%s) %s %s %s\n"), my_name, VERSION, BDATE,
295                 HOST_OS, DISTNAME, DISTVER);
296    bstrftime_nc(dt, sizeof(dt), daemon_start_time);
/* Singular vs plural wording for the job count. */
297    if (num_jobs_run == 1) {
298       ua->send_msg(_("Daemon started %s, 1 Job run since started.\n"), dt);
301       ua->send_msg(_("Daemon started %s, %d Jobs run since started.\n"),
/* Heap usage = current program break minus the recorded heap start;
 * sm_* counters come from Bacula's smartalloc memory tracker. */
304    ua->send_msg(_(" Heap: heap=%s smbytes=%s max_bytes=%s bufs=%s max_bufs=%s\n"),
305                 edit_uint64_with_commas((char *)sbrk(0)-(char *)start_heap, b1),
306                 edit_uint64_with_commas(sm_bytes, b2),
307                 edit_uint64_with_commas(sm_max_bytes, b3),
308                 edit_uint64_with_commas(sm_buffers, b4),
309                 edit_uint64_with_commas(sm_max_buffers, b5));
311    /* TODO: use this function once for all daemons */
/* Plugin list is only shown when debugging and at least one is loaded. */
312    if (debug_level > 0 && plugin_list->size() > 0) {
315       POOL_MEM msg(PM_FNAME);
316       pm_strcpy(msg, " Plugin: ");
317       foreach_alist(plugin, plugin_list) {
318          len = pm_strcat(msg, plugin->file);
320             pm_strcat(msg, "\n   ");
325       ua->send_msg("%s\n", msg.c_str());
/*
 * Full Director status: header, then scheduled, running and terminated
 * job lists, closed with a "====" separator line.
 */
329 static void do_director_status(UAContext *ua)
331    list_dir_status_header(ua);
334     * List scheduled Jobs
336    list_scheduled_jobs(ua);
341    list_running_jobs(ua);
344     * List terminated jobs
346    list_terminated_jobs(ua);
347    ua->send_msg("====\n");
/*
 * Connect to a Storage daemon, forward ".status <cmd>" to it and relay
 * every reply line back to the console, then terminate the session.
 * cmd may be NULL (presumably sent as-is by fsend's %s; the elided
 * lines may substitute a default -- TODO confirm against full source).
 */
350 static void do_storage_status(UAContext *ua, STORE *store, char *cmd)
/* Set the JCR's write-storage so the connect routine knows the target. */
355    lstore.store = store;
356    pm_strcpy(lstore.store_source, _("unknown source"));
357    set_wstorage(ua->jcr, &lstore);
358    /* Try connecting for up to 15 seconds */
/* In API mode the human-readable "Connecting..." line is suppressed. */
359    if (!ua->api) ua->send_msg(_("Connecting to Storage daemon %s at %s:%d\n"),
360       store->name(), store->address, store->SDport);
361    if (!connect_to_storage_daemon(ua->jcr, 1, 15, 0)) {
362       ua->send_msg(_("\nFailed to connect to Storage daemon %s.\n====\n"),
/* Clean up a half-open socket before bailing out. */
364       if (ua->jcr->store_bsock) {
365          bnet_close(ua->jcr->store_bsock);
366          ua->jcr->store_bsock = NULL;
370    Dmsg0(20, _("Connected to storage daemon\n"));
371    sd = ua->jcr->store_bsock;
373    sd->fsend(".status %s", cmd);
/* Relay SD output verbatim until the daemon signals end-of-data. */
377    while (sd->recv() >= 0) {
378       ua->send_msg("%s", sd->msg);
379    sd->signal( BNET_TERMINATE);
382    ua->jcr->store_bsock = NULL;
/*
 * Connect to a File daemon (client), forward ".status <cmd>" and relay
 * the replies to the console, then terminate the session.  Mirrors
 * do_storage_status() above but for the FD connection.
 */
386 static void do_client_status(UAContext *ua, CLIENT *client, char *cmd)
390    /* Connect to File daemon */
392    ua->jcr->client = client;
393    /* Release any old dummy key */
394    if (ua->jcr->sd_auth_key) {
395       free(ua->jcr->sd_auth_key);
397    /* Create a new dummy SD auth key */
/* FD authentication requires an SD auth key even for a status query. */
398    ua->jcr->sd_auth_key = bstrdup("dummy");
400    /* Try to connect for 15 seconds */
401    if (!ua->api) ua->send_msg(_("Connecting to Client %s at %s:%d\n"),
402       client->name(), client->address, client->FDport);
403    if (!connect_to_file_daemon(ua->jcr, 1, 15, 0)) {
404       ua->send_msg(_("Failed to connect to Client %s.\n====\n"),
/* Close any half-open FD socket before returning. */
406       if (ua->jcr->file_bsock) {
407          bnet_close(ua->jcr->file_bsock);
408          ua->jcr->file_bsock = NULL;
412    Dmsg0(20, _("Connected to file daemon\n"));
413    fd = ua->jcr->file_bsock;
415    fd->fsend(".status %s", cmd);
/* Relay FD output verbatim until end-of-data. */
419    while (fd->recv() >= 0) {
420       ua->send_msg("%s", fd->msg);
422    fd->signal(BNET_TERMINATE);
424    ua->jcr->file_bsock = NULL;
/* Print the column header for the "Scheduled Jobs" table (non-API mode). */
429 static void prt_runhdr(UAContext *ua)
432       ua->send_msg(_("\nScheduled Jobs:\n"));
433       ua->send_msg(_("Level          Type     Pri  Scheduled          Name               Volume\n"));
434       ua->send_msg(_("===================================================================================\n"));
438 /* Scheduling packet */
/* First member must remain the dlink so sched_pkt can live in a dlist;
 * remaining members (job, level, priority, runtime, pool, store) are
 * declared in lines elided from this extract. */
440    dlink link;                        /* keep this as first item!!! */
/*
 * Print one row of the "Scheduled Jobs" table for the given sched_pkt.
 * For backup jobs it opens the catalog (if needed) to predict which
 * Volume the job would append to; the JCR's type and db are restored
 * before returning.
 * NOTE(review): several lines (jcr setup, switch cases, close braces)
 * are elided in this extract.
 */
449 static void prt_runtime(UAContext *ua, sched_pkt *sp)
451    char dt[MAX_TIME_LENGTH];
452    const char *level_ptr;
454    bool close_db = false;
/* Remember the JCR's job type so it can be restored at the end. */
459    orig_jobtype = jcr->get_JobType();
460    memset(&mr, 0, sizeof(mr));
461    if (sp->job->JobType == JT_BACKUP) {
463       ok = complete_jcr_for_job(jcr, sp->job, sp->pool);
464       Dmsg1(250, "Using pool=%s\n", jcr->pool->name());
466          close_db = true;             /* new db opened, remember to close it */
469          mr.PoolId = jcr->jr.PoolId;
470          mr.StorageId = sp->store->StorageId;
471          jcr->wstore = sp->store;
472          Dmsg0(250, "call find_next_volume_for_append\n");
473          /* no need to set ScratchPoolId, since we use fnv_no_create_vol */
474          ok = find_next_volume_for_append(jcr, &mr, 1, fnv_no_create_vol, fnv_no_prune);
/* No predictable volume: show a placeholder in the Volume column. */
477          bstrncpy(mr.VolumeName, "*unknown*", sizeof(mr.VolumeName));
480    bstrftime_nc(dt, sizeof(dt), sp->runtime);
/* Choose the Level column text by job type (cases elided in extract). */
481    switch (sp->job->JobType) {
487       level_ptr = level_to_str(sp->level);
/* API mode uses tab-separated fields; console mode uses fixed columns. */
491       ua->send_msg(_("%-14s\t%-8s\t%3d\t%-18s\t%-18s\t%s\n"),
492                    level_ptr, job_type_to_str(sp->job->JobType), sp->priority, dt,
493                    sp->job->name(), mr.VolumeName);
495       ua->send_msg(_("%-14s %-8s %3d  %-18s %-18s %s\n"),
496                    level_ptr, job_type_to_str(sp->job->JobType), sp->priority, dt,
497                    sp->job->name(), mr.VolumeName);
/* Close the catalog we opened above and restore the UA's db and type. */
500       db_close_database(jcr, jcr->db);
502    jcr->db = ua->db;                  /* restore ua db to jcr */
503    jcr->set_JobType(orig_jobtype);
507  * Sort items by runtime, priority
/* dlist comparison callback: orders sched_pkts by runtime first, then
 * priority (return statements are elided in this extract; presumably
 * the usual -1/1/0 convention -- TODO confirm against full source). */
509 static int my_compare(void *item1, void *item2)
511    sched_pkt *p1 = (sched_pkt *)item1;
512    sched_pkt *p2 = (sched_pkt *)item2;
513    if (p1->runtime < p2->runtime) {
515    } else if (p1->runtime > p2->runtime) {
/* Equal runtimes: fall back to priority comparison. */
518    if (p1->priority < p2->priority) {
520    } else if (p1->priority > p2->priority) {
527  * Find all jobs to be run in roughly the
/*
 * List jobs scheduled to run within the next "days" days (default set
 * in an elided line; user-supplied via "days=NN", capped at 500).
 * Builds a dlist of sched_pkt entries sorted by runtime/priority via
 * my_compare(), then prints each with prt_runtime().
 */
530 static void list_scheduled_jobs(UAContext *ua)
535    int level, num_jobs = 0;
537    bool hdr_printed = false;
542    Dmsg0(200, "enter list_sched_jobs()\n");
/* Optional "days=NN" argument overrides the lookahead window. */
545    i = find_arg_with_value(ua, NT_("days"));
547       days = atoi(ua->argv[i]);
548       if (((days < 0) || (days > 500)) && !ua->api) {
549          ua->send_msg(_("Ignoring invalid value for days. Max is 500.\n"));
554    /* Loop through all jobs */
556    foreach_res(job, R_JOB) {
/* Skip disabled jobs and jobs outside this console's ACL. */
557       if (!acl_access_ok(ua, Job_ACL, job->name()) || !job->enabled) {
/* Walk every scheduled run of this job inside the window. */
560       for (run=NULL; (run = find_next_run(run, job, runtime, days)); ) {
562          level = job->JobLevel;
/* Run-level priority override: Run directive beats the Job default. */
566             priority = job->Priority;
568             priority = run->Priority;
/* Build a sched_pkt and insert it in sorted order. */
574          sp = (sched_pkt *)malloc(sizeof(sched_pkt));
577          sp->priority = priority;
578          sp->runtime = runtime;
579          sp->pool = run->pool;
580          get_job_storage(&store, job, run);
581          sp->store = store.store;
582          Dmsg3(250, "job=%s store=%s MediaType=%s\n", job->name(), sp->store->name(), sp->store->media_type);
583          sched.binary_insert_multiple(sp, my_compare);
586    } /* end for loop over resources */
/* Print the sorted schedule. */
588    foreach_dlist(sp, &sched) {
591    if (num_jobs == 0 && !ua->api) {
592       ua->send_msg(_("No Scheduled Jobs.\n"));
594    if (!ua->api) ua->send_msg("====\n");
595    Dmsg0(200, "Leave list_sched_jobs_runs()\n");
/*
 * List all currently running jobs with a human-readable status phrase.
 * Console control jobs (JobId 0) are shown only as "Console connected".
 * Status text comes first from the Director's JobStatus, then may be
 * refined by the Storage daemon's SDJobStatus.  Messages that need
 * formatting are built in pool memory (emsg) and freed at the end.
 * NOTE(review): many case labels, break statements and closing braces
 * are elided in this extract.
 */
598 static void list_running_jobs(UAContext *ua)
603    char *emsg;                        /* edited message */
604    char dt[MAX_TIME_LENGTH];
/* Tracks whether msg points at pool memory (emsg) that must be freed. */
606    bool pool_mem = false;
608    Dmsg0(200, "enter list_run_jobs()\n");
609    if (!ua->api) ua->send_msg(_("\nRunning Jobs:\n"));
/* First pass: report connected consoles (JobId 0 control JCRs). */
611       if (jcr->JobId == 0) {          /* this is us */
612          /* this is a console or other control job. We only show console
613           * jobs in the status output.
615          if (jcr->get_JobType() == JT_CONSOLE && !ua->api) {
616             bstrftime_nc(dt, sizeof(dt), jcr->start_time);
617             ua->send_msg(_("Console connected at %s\n"), dt);
626       /* Note the following message is used in regress -- don't change */
627       if (!ua->api) ua->send_msg(_("No Jobs running.\n====\n"));
628       Dmsg0(200, "leave list_run_jobs()\n");
633       ua->send_msg(_(" JobId Level   Name                       Status\n"));
634       ua->send_msg(_("======================================================================\n"));
/* Second pass: real jobs only, filtered by the console's Job ACL. */
637       if (jcr->JobId == 0 || !acl_access_ok(ua, Job_ACL, jcr->job->name())) {
/* Map the Director-side JobStatus code to a status phrase. */
641       switch (jcr->JobStatus) {
643          msg = _("is waiting execution");
646          msg = _("is running");
649          msg = _("is blocked");
652          msg = _("has terminated");
654       case JS_ErrorTerminated:
655          msg = _("has erred");
658          msg = _("has errors");
661          msg = _("has a fatal error");
664          msg = _("has verify differences");
667          msg = _("has been canceled");
/* Waiting-on-client: message includes the client name when known. */
670          emsg = (char *) get_pool_memory(PM_FNAME);
672             Mmsg(emsg, _("is waiting on Client"));
674             Mmsg(emsg, _("is waiting on Client %s"), jcr->client->name());
/* Waiting-on-storage: prefer the write store, then the read store. */
680          emsg = (char *) get_pool_memory(PM_FNAME);
682             Mmsg(emsg, _("is waiting on Storage %s"), jcr->wstore->name());
683          } else if (jcr->rstore) {
684             Mmsg(emsg, _("is waiting on Storage %s"), jcr->rstore->name());
686             Mmsg(emsg, _("is waiting on Storage"));
691       case JS_WaitStoreRes:
692          msg = _("is waiting on max Storage jobs");
694       case JS_WaitClientRes:
695          msg = _("is waiting on max Client jobs");
698          msg = _("is waiting on max Job jobs");
701          msg = _("is waiting on max total jobs");
703       case JS_WaitStartTime:
704          msg = _("is waiting for its start time");
706       case JS_WaitPriority:
707          msg = _("is waiting for higher priority jobs to finish");
709       case JS_DataCommitting:
710          msg = _("SD committing Data");
712       case JS_DataDespooling:
713          msg = _("SD despooling Data");
715       case JS_AttrDespooling:
716          msg = _("SD despooling Attributes");
718       case JS_AttrInserting:
719          msg = _("Dir inserting Attributes");
/* Unknown status byte: show it literally for diagnosis. */
723          emsg = (char *)get_pool_memory(PM_FNAME);
724          Mmsg(emsg, _("is in unknown state %c"), jcr->JobStatus);
730        * Now report Storage daemon status code
732       switch (jcr->SDJobStatus) {
/* SD-side states override the phrase chosen above; any pool-memory
 * message allocated earlier is released first. */
735             free_pool_memory(emsg);
738          msg = _("is waiting for a mount request");
742             free_pool_memory(emsg);
745          msg = _("is waiting for an appendable Volume");
749          emsg = (char *)get_pool_memory(PM_FNAME);
752          if (!jcr->client || !jcr->wstore) {
753             Mmsg(emsg, _("is waiting for Client to connect to Storage daemon"));
755             Mmsg(emsg, _("is waiting for Client %s to connect to Storage %s"),
756                  jcr->client->name(), jcr->wstore->name());
760       case JS_DataCommitting:
761          msg = _("SD committing Data");
763       case JS_DataDespooling:
764          msg = _("SD despooling Data");
766       case JS_AttrDespooling:
767          msg = _("SD despooling Attributes");
769       case JS_AttrInserting:
770          msg = _("Dir inserting Attributes");
/* Level column: blank for types with no meaningful level. */
773       switch (jcr->get_JobType()) {
776          bstrncpy(level, "      ", sizeof(level));
779          bstrncpy(level, level_to_str(jcr->get_JobLevel()), sizeof(level));
/* API mode: tab-separated; console mode: fixed-width columns. */
785          ua->send_msg(_("%6d\t%-6s\t%-20s\t%s\n"),
786                       jcr->JobId, level, jcr->Job, msg);
788          ua->send_msg(_("%6d %-6s  %-20s %s\n"),
789                       jcr->JobId, level, jcr->Job, msg);
/* Release the formatted message if it came from pool memory. */
793          free_pool_memory(emsg);
798    if (!ua->api) ua->send_msg("====\n");
799    Dmsg0(200, "leave list_run_jobs()\n");
/*
 * List recently terminated jobs from the global last_jobs list,
 * filtered by the console's Job ACL.  The list is locked for the
 * duration of the traversal.
 * NOTE(review): break statements, some case labels and closing braces
 * are elided in this extract.
 */
802 static void list_terminated_jobs(UAContext *ua)
804    char dt[MAX_TIME_LENGTH], b1[30], b2[30];
/* Nothing to report: bail out early (before taking the lock). */
807    if (last_jobs->empty()) {
808       if (!ua->api) ua->send_msg(_("No Terminated Jobs.\n"));
811    lock_last_jobs_list();
812    struct s_last_job *je;
814       ua->send_msg(_("\nTerminated Jobs:\n"));
815       ua->send_msg(_(" JobId  Level    Files      Bytes   Status   Finished        Name \n"));
816       ua->send_msg(_("====================================================================\n"));
818    foreach_dlist(je, last_jobs) {
819       char JobName[MAX_NAME_LENGTH];
820       const char *termstat;
822       bstrncpy(JobName, je->Job, sizeof(JobName));
823       /* There are three periods after the Job name */
/* Strip the trailing ".YYYY-MM-DD_HH.MM.SS" style suffix dot-by-dot. */
825       for (int i=0; i<3; i++) {
826          if ((p=strrchr(JobName, '.')) != NULL) {
/* ACL check uses the base job name, after the suffix is stripped. */
831       if (!acl_access_ok(ua, Job_ACL, JobName)) {
835       bstrftime_nc(dt, sizeof(dt), je->end_time);
/* Level column: blank for job types with no level. */
836       switch (je->JobType) {
839          bstrncpy(level, "    ", sizeof(level));
842          bstrncpy(level, level_to_str(je->JobLevel), sizeof(level));
/* Map the terminal JobStatus to a short display word. */
846       switch (je->JobStatus) {
848          termstat = _("Created");
851       case JS_ErrorTerminated:
852          termstat = _("Error");
855          termstat = _("Diffs");
858          termstat = _("Cancel");
864          termstat = _("Other");
/* API mode: tab-separated; console mode: fixed-width columns. */
868          ua->send_msg(_("%6d\t%-6s\t%8s\t%10s\t%-7s\t%-8s\t%s\n"),
871             edit_uint64_with_commas(je->JobFiles, b1),
872             edit_uint64_with_suffix(je->JobBytes, b2),
876          ua->send_msg(_("%6d  %-6s %8s %10s  %-7s  %-8s %s\n"),
879             edit_uint64_with_commas(je->JobFiles, b1),
880             edit_uint64_with_suffix(je->JobBytes, b2),
885    if (!ua->api) ua->send_msg(_("\n"));
886    unlock_last_jobs_list();