2 Bacula® - The Network Backup Solution
4 Copyright (C) 2001-2008 Free Software Foundation Europe e.V.
6 The main author of Bacula is Kern Sibbald, with contributions from
7 many others, a complete list can be found in the file AUTHORS.
8 This program is Free Software; you can redistribute it and/or
9 modify it under the terms of version two of the GNU General Public
10 License as published by the Free Software Foundation and included
13 This program is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
23 Bacula® is a registered trademark of Kern Sibbald.
24 The licensor of Bacula is the Free Software Foundation Europe
25 (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
26 Switzerland, email:ftf@fsfeurope.org.
30 * Bacula Director -- User Agent Status Command
32 * Kern Sibbald, August MMI
/*
 * File-scope declarations for the Director's status commands.
 * NOTE(review): this listing is an extraction with the original line
 * numbers fused into the text and some lines elided — compare against
 * the pristine source before relying on exact structure.
 */
/* Heap start address recorded at daemon startup; used below to report
 * heap growth via sbrk(0) - start_heap. */
41 extern void *start_heap;
/* Forward declarations for the static helpers defined later in this file. */
43 static void list_scheduled_jobs(UAContext *ua);
44 static void list_running_jobs(UAContext *ua);
45 static void list_terminated_jobs(UAContext *ua);
46 static void do_storage_status(UAContext *ua, STORE *store, char *cmd);
47 static void do_client_status(UAContext *ua, CLIENT *client, char *cmd);
48 static void do_director_status(UAContext *ua);
49 static void do_all_status(UAContext *ua);
/* Not static — presumably called from other Director modules; TODO confirm. */
50 void status_slots(UAContext *ua, STORE *store);
/* Reply strings for the machine-readable ".status" API:
 * OKqstatus acknowledges the command; DotStatusJob formats one job line. */
52 static char OKqstatus[] = "1000 OK .status\n";
53 static char DotStatusJob[] = "JobId=%s JobStatus=%c JobErrors=%d\n";
/*
 * Handle the ".status" dot-command from a console/API client.
 * Dispatches on argk[1] (dir | client | storage) and, for "dir",
 * on argk[2] (current | last | header | scheduled | running | terminated).
 * Returns bool (true on success, presumably — the return statements are
 * elided in this extraction; TODO confirm).
 * NOTE(review): local declarations, foreach loops over JCRs, braces and
 * returns are missing from this listing — do not treat it as compilable.
 */
59 bool dot_status_cmd(UAContext *ua, const char *cmd)
67 Dmsg2(20, "status=\"%s\" argc=%d\n", cmd, ua->argc);
/* Guard: the command needs at least ".status <keyword> <sub-keyword>". */
70 ua->send_msg("1900 Bad .status command, missing arguments.\n");
74 if (strcasecmp(ua->argk[1], "dir") == 0) {
75 if (strcasecmp(ua->argk[2], "current") == 0) {
76 ua->send_msg(OKqstatus, ua->argk[2]);
/* For each running JCR: skip JobId 0 (console/control jobs) and jobs the
 * console's Job ACL does not permit.  The surrounding loop (presumably
 * foreach_jcr/endeach_jcr) is elided here — TODO confirm. */
78 if (njcr->JobId != 0 && acl_access_ok(ua, Job_ACL, njcr->job->name())) {
79 ua->send_msg(DotStatusJob, edit_int64(njcr->JobId, ed1),
80 njcr->JobStatus, njcr->JobErrors);
/* "last": report only the most recent entry of the terminated-jobs list. */
84 } else if (strcasecmp(ua->argk[2], "last") == 0) {
85 ua->send_msg(OKqstatus, ua->argk[2]);
86 if ((last_jobs) && (last_jobs->size() > 0)) {
87 job = (s_last_job*)last_jobs->last();
88 if (acl_access_ok(ua, Job_ACL, job->Job)) {
89 ua->send_msg(DotStatusJob, edit_int64(job->JobId, ed1),
90 job->JobStatus, job->Errors);
/* Remaining "dir" sub-keywords delegate to the list_* helpers below. */
93 } else if (strcasecmp(ua->argk[2], "header") == 0) {
94 list_dir_status_header(ua);
95 } else if (strcasecmp(ua->argk[2], "scheduled") == 0) {
96 list_scheduled_jobs(ua);
97 } else if (strcasecmp(ua->argk[2], "running") == 0) {
98 list_running_jobs(ua);
99 } else if (strcasecmp(ua->argk[2], "terminated") == 0) {
100 list_terminated_jobs(ua);
/* Unknown "dir" sub-keyword. */
102 ua->send_msg("1900 Bad .status command, wrong argument.\n");
/* ".status client ...": resolve the Client resource then forward argk[2]
 * verbatim to the File daemon. */
105 } else if (strcasecmp(ua->argk[1], "client") == 0) {
106 client = get_client_resource(ua);
108 Dmsg2(200, "Client=%s arg=%s\n", client->name(), NPRT(ua->argk[2]));
109 do_client_status(ua, client, ua->argk[2]);
/* ".status storage ...": same idea for the Storage daemon; no default
 * storage is assumed (second argument false). */
111 } else if (strcasecmp(ua->argk[1], "storage") == 0) {
112 store = get_storage_resource(ua, false /*no default*/);
114 do_storage_status(ua, store, ua->argk[2]);
/* Unknown top-level keyword. */
117 ua->send_msg("1900 Bad .status command, wrong argument.\n");
/*
 * Legacy entry point for the old "qstatus" console command.
 * Thin wrapper around dot_status_cmd(); per the original comment it must
 * return 1 (non-zero) or the console connection is closed — the return
 * statement itself is elided in this extraction; TODO confirm.
 */
124 /* This is the *old* command handler, so we must return
125 * 1 or it closes the connection
127 int qstatus_cmd(UAContext *ua, const char *cmd)
129 dot_status_cmd(ua, cmd);
/*
 * Handle the interactive "status" console command.
 * Scans the arguments for a target (all | dir/director | client | storage);
 * if none is given, prompts the user to choose a daemon type.
 * NOTE(review): local declarations, braces, returns and some case labels
 * are elided in this extraction.
 */
136 int status_cmd(UAContext *ua, const char *cmd)
142 Dmsg1(20, "status:%s:\n", cmd);
/* Walk the arguments looking for a recognized target keyword. */
144 for (i=1; i<ua->argc; i++) {
145 if (strcasecmp(ua->argk[i], NT_("all")) == 0) {
/* "all" presumably calls do_all_status() — the call is elided here. */
148 } else if (strcasecmp(ua->argk[i], NT_("dir")) == 0 ||
149 strcasecmp(ua->argk[i], NT_("director")) == 0) {
150 do_director_status(ua);
152 } else if (strcasecmp(ua->argk[i], NT_("client")) == 0) {
153 client = get_client_resource(ua);
155 do_client_status(ua, client, NULL);
/* Storage branch (its strcasecmp line is elided in this listing). */
159 store = get_storage_resource(ua, false/*no default*/);
/* "status storage=... slots" lists autochanger slots instead. */
161 if (find_arg(ua, NT_("slots")) > 0) {
162 status_slots(ua, store);
164 do_storage_status(ua, store, NULL);
170 /* If no args, ask for status type */
172 char prmt[MAX_NAME_LENGTH];
/* Build the interactive prompt list; selection index maps to the
 * switch cases below (0=Director, 1=Storage, 2=Client, 3=All). */
174 start_prompt(ua, _("Status available for:\n"));
175 add_prompt(ua, NT_("Director"));
176 add_prompt(ua, NT_("Storage"));
177 add_prompt(ua, NT_("Client"));
178 add_prompt(ua, NT_("All"));
179 Dmsg0(20, "do_prompt: select daemon\n");
/* do_prompt() < 0 means the user aborted the selection. */
180 if ((item=do_prompt(ua, "", _("Select daemon type for status"), prmt, sizeof(prmt))) < 0) {
183 Dmsg1(20, "item=%d\n", item);
185 case 0: /* Director */
186 do_director_status(ua);
/* case 1: Storage — label elided in this extraction. */
189 store = select_storage_resource(ua);
191 do_storage_status(ua, store, NULL);
/* case 2: Client — label elided in this extraction. */
195 client = select_client_resource(ua);
197 do_client_status(ua, client, NULL);
/*
 * Print status for the Director plus every *unique* Storage and Client
 * daemon.  Resources are deduplicated by (address, port) so a daemon
 * reachable through several resource definitions is only contacted once.
 * NOTE(review): the counting loops' bodies, lock/unlock of the resource
 * list, free() of the temp arrays and several braces are elided here.
 */
210 static void do_all_status(UAContext *ua)
212 STORE *store, **unique_store;
213 CLIENT *client, **unique_client;
217 do_director_status(ua);
219 /* Count Storage items */
222 foreach_res(store, R_STORAGE) {
/* Allocate worst-case array: one slot per Storage resource counted above.
 * NOTE(review): sizeof(STORE) here, but the array holds STORE* — the
 * upstream fix uses sizeof(STORE *); allocation is larger than needed,
 * not smaller, so it is wasteful rather than unsafe.  TODO confirm. */
225 unique_store = (STORE **) malloc(i * sizeof(STORE));
226 /* Find Unique Storage address/port */
228 foreach_res(store, R_STORAGE) {
/* Skip storages this console may not see per its Storage ACL. */
230 if (!acl_access_ok(ua, Storage_ACL, store->name())) {
/* Linear scan of already-collected entries for a duplicate address:port. */
233 for (j=0; j<i; j++) {
234 if (strcmp(unique_store[j]->address, store->address) == 0 &&
235 unique_store[j]->SDport == store->SDport) {
241 unique_store[i++] = store;
242 Dmsg2(40, "Stuffing: %s:%d\n", store->address, store->SDport);
247 /* Call each unique Storage daemon */
248 for (j=0; j<i; j++) {
249 do_storage_status(ua, unique_store[j], NULL);
253 /* Count Client items */
256 foreach_res(client, R_CLIENT) {
/* Same over-sized allocation pattern as for storages; see note above. */
259 unique_client = (CLIENT **)malloc(i * sizeof(CLIENT));
260 /* Find Unique Client address/port */
262 foreach_res(client, R_CLIENT) {
264 if (!acl_access_ok(ua, Client_ACL, client->name())) {
267 for (j=0; j<i; j++) {
268 if (strcmp(unique_client[j]->address, client->address) == 0 &&
269 unique_client[j]->FDport == client->FDport) {
275 unique_client[i++] = client;
276 Dmsg2(40, "Stuffing: %s:%d\n", client->address, client->FDport);
281 /* Call each unique File daemon */
282 for (j=0; j<i; j++) {
283 do_client_status(ua, unique_client[j], NULL);
/*
 * Emit the Director status header: version banner, daemon start time,
 * number of jobs run, smartalloc heap statistics and (at debug level > 0)
 * the list of loaded plugins.
 */
289 void list_dir_status_header(UAContext *ua)
291 char dt[MAX_TIME_LENGTH];
/* Scratch buffers for edit_uint64_with_commas() formatting below. */
292 char b1[35], b2[35], b3[35], b4[35], b5[35];
294 ua->send_msg(_("%s Version: %s (%s) %s %s %s\n"), my_name, VERSION, BDATE,
295 HOST_OS, DISTNAME, DISTVER);
296 bstrftime_nc(dt, sizeof(dt), daemon_start_time);
/* Singular/plural split for the jobs-run message ("1 Job" vs "%d Jobs"). */
297 if (num_jobs_run == 1) {
298 ua->send_msg(_("Daemon started %s, 1 Job run since started.\n"), dt);
301 ua->send_msg(_("Daemon started %s, %d Jobs run since started.\n"),
/* Heap usage: current break minus recorded start-of-heap, plus the
 * smartalloc counters (sm_bytes etc.). */
304 ua->send_msg(_(" Heap: heap=%s smbytes=%s max_bytes=%s bufs=%s max_bufs=%s\n"),
305 edit_uint64_with_commas((char *)sbrk(0)-(char *)start_heap, b1),
306 edit_uint64_with_commas(sm_bytes, b2),
307 edit_uint64_with_commas(sm_max_bytes, b3),
308 edit_uint64_with_commas(sm_buffers, b4),
309 edit_uint64_with_commas(sm_max_buffers, b5));
311 /* TODO: use this function once for all daemons */
/* Plugin listing is debug-only output. */
312 if (debug_level > 0 && plugin_list->size() > 0) {
315 POOL_MEM msg(PM_FNAME);
316 pm_strcpy(msg, " Plugin: ");
317 foreach_alist(plugin, plugin_list) {
318 len = pm_strcat(msg, plugin->file);
/* Presumably a line-wrap when the accumulated length gets long — the
 * guarding condition is elided in this extraction; TODO confirm. */
320 pm_strcat(msg, "\n ");
325 ua->send_msg("%s\n", msg.c_str());
/*
 * Full Director status report: header, then scheduled, running and
 * terminated job lists, closed with the "====" section terminator.
 */
329 static void do_director_status(UAContext *ua)
331 list_dir_status_header(ua);
334 * List scheduled Jobs
336 list_scheduled_jobs(ua);
/* List running jobs (its introductory comment line is elided here). */
341 list_running_jobs(ua);
344 * List terminated jobs
346 list_terminated_jobs(ua);
347 ua->send_msg("====\n");
/*
 * Connect to the given Storage daemon, forward ".status <cmd>" and relay
 * every reply line back to the console.  The SD socket is closed and the
 * JCR's store_bsock cleared before returning (close elided in listing).
 * NOTE(review): braces/returns and the bnet_close at the end are elided.
 */
350 static void do_storage_status(UAContext *ua, STORE *store, char *cmd)
/* Make this storage the JCR's write storage so the connect helper can
 * use it; the source label is unknown at this call site. */
355 lstore.store = store;
356 pm_strcpy(lstore.store_source, _("unknown source"));
357 set_wstorage(ua->jcr, &lstore);
358 /* Try connecting for up to 15 seconds */
/* Progress message suppressed in API mode to keep output parseable. */
359 if (!ua->api) ua->send_msg(_("Connecting to Storage daemon %s at %s:%d\n"),
360 store->name(), store->address, store->SDport);
361 if (!connect_to_storage_daemon(ua->jcr, 1, 15, 0)) {
362 ua->send_msg(_("\nFailed to connect to Storage daemon %s.\n====\n"),
/* On failure, close any partially opened socket and clear the JCR ref. */
364 if (ua->jcr->store_bsock) {
365 bnet_close(ua->jcr->store_bsock);
366 ua->jcr->store_bsock = NULL;
370 Dmsg0(20, _("Connected to storage daemon\n"));
371 sd = ua->jcr->store_bsock;
/* Send the dot-status request; cmd may carry a sub-keyword from caller. */
373 sd->fsend(".status %s", cmd);
/* Relay SD output until the SD signals end-of-data / closes. */
377 while (sd->recv() >= 0) {
378 ua->send_msg("%s", sd->msg);
380 sd->signal( BNET_TERMINATE);
382 ua->jcr->store_bsock = NULL;
/*
 * Connect to the given File daemon (Client), forward ".status <cmd>" and
 * relay every reply line back to the console.  Mirrors do_storage_status.
 * NOTE(review): braces/returns and the final bnet_close are elided in
 * this extraction.
 */
386 static void do_client_status(UAContext *ua, CLIENT *client, char *cmd)
390 /* Connect to File daemon */
392 ua->jcr->client = client;
393 /* Release any old dummy key */
394 if (ua->jcr->sd_auth_key) {
395 free(ua->jcr->sd_auth_key);
397 /* Create a new dummy SD auth key */
/* The FD protocol expects an SD auth key even for status; "dummy"
 * satisfies the handshake without granting storage access. */
398 ua->jcr->sd_auth_key = bstrdup("dummy");
400 /* Try to connect for 15 seconds */
401 if (!ua->api) ua->send_msg(_("Connecting to Client %s at %s:%d\n"),
402 client->name(), client->address, client->FDport);
403 if (!connect_to_file_daemon(ua->jcr, 1, 15, 0)) {
404 ua->send_msg(_("Failed to connect to Client %s.\n====\n"),
/* On failure, close any partially opened socket and clear the JCR ref. */
406 if (ua->jcr->file_bsock) {
407 bnet_close(ua->jcr->file_bsock);
408 ua->jcr->file_bsock = NULL;
412 Dmsg0(20, _("Connected to file daemon\n"));
413 fd = ua->jcr->file_bsock;
415 fd->fsend(".status %s", cmd);
/* Relay FD output until end-of-data. */
419 while (fd->recv() >= 0) {
420 ua->send_msg("%s", fd->msg);
422 fd->signal(BNET_TERMINATE);
424 ua->jcr->file_bsock = NULL;
/*
 * Print the column header for the "Scheduled Jobs" table (skipped in API
 * mode — the guarding condition is elided in this extraction).
 */
429 static void prt_runhdr(UAContext *ua)
432 ua->send_msg(_("\nScheduled Jobs:\n"));
433 ua->send_msg(_("Level Type Pri Scheduled Name Volume\n"));
434 ua->send_msg(_("===================================================================================\n"));
/*
 * Scheduling packet: one entry per upcoming scheduled run, kept in a
 * dlist sorted by my_compare() (runtime, then priority).
 * NOTE(review): the remaining members (job, level, priority, runtime,
 * pool, store — inferred from their use in prt_runtime and
 * list_scheduled_jobs below) are elided in this extraction.
 */
438 /* Scheduling packet */
440 dlink link; /* keep this as first item!!! */
/*
 * Print one scheduled-job line from a sched_pkt: level, type, priority,
 * scheduled time, job name and the Volume expected to be used next.
 * For backup jobs it opens the catalog (via complete_jcr_for_job) to ask
 * find_next_volume_for_append() which Volume would be appended to.
 * Restores the UA's db handle and the JCR's job type before returning.
 * NOTE(review): braces, some case labels and early-exit paths are elided.
 */
449 static void prt_runtime(UAContext *ua, sched_pkt *sp)
451 char dt[MAX_TIME_LENGTH];
452 const char *level_ptr;
454 bool close_db = false;
/* Remember the JCR's job type so it can be restored at the end. */
459 orig_jobtype = jcr->get_JobType();
460 memset(&mr, 0, sizeof(mr));
/* Only backups need a Volume lookup; other types print "*unknown*". */
461 if (sp->job->JobType == JT_BACKUP) {
463 ok = complete_jcr_for_job(jcr, sp->job, sp->pool);
464 Dmsg1(250, "Using pool=%s\n", jcr->pool->name());
466 close_db = true; /* new db opened, remember to close it */
/* Query the next appendable Volume for this pool/storage without
 * creating or pruning volumes. */
469 mr.PoolId = jcr->jr.PoolId;
470 mr.StorageId = sp->store->StorageId;
471 jcr->wstore = sp->store;
472 Dmsg0(250, "call find_next_volume_for_append\n");
473 ok = find_next_volume_for_append(jcr, &mr, 1, fnv_no_create_vol, fnv_no_prune);
/* Fallback shown when no appendable Volume could be determined. */
476 bstrncpy(mr.VolumeName, "*unknown*", sizeof(mr.VolumeName));
479 bstrftime_nc(dt, sizeof(dt), sp->runtime);
/* Level is only meaningful for some job types; the other case arms are
 * elided in this extraction. */
480 switch (sp->job->JobType) {
486 level_ptr = level_to_str(sp->level);
/* API mode uses tab-separated fields; console mode fixed-width columns. */
490 ua->send_msg(_("%-14s\t%-8s\t%3d\t%-18s\t%-18s\t%s\n"),
491 level_ptr, job_type_to_str(sp->job->JobType), sp->priority, dt,
492 sp->job->name(), mr.VolumeName);
494 ua->send_msg(_("%-14s %-8s %3d %-18s %-18s %s\n"),
495 level_ptr, job_type_to_str(sp->job->JobType), sp->priority, dt,
496 sp->job->name(), mr.VolumeName);
/* Close the db we opened above (guarded by close_db — guard elided). */
499 db_close_database(jcr, jcr->db);
501 jcr->db = ua->db; /* restore ua db to jcr */
502 jcr->set_JobType(orig_jobtype);
/*
 * dlist comparison callback for sched_pkt entries: orders primarily by
 * runtime (earliest first), then by priority.  The return statements
 * (presumably -1/1/0 in the usual comparator convention) are elided in
 * this extraction — TODO confirm against the full source.
 */
506 * Sort items by runtime, priority
508 static int my_compare(void *item1, void *item2)
510 sched_pkt *p1 = (sched_pkt *)item1;
511 sched_pkt *p2 = (sched_pkt *)item2;
512 if (p1->runtime < p2->runtime) {
514 } else if (p1->runtime > p2->runtime) {
/* Runtimes equal: fall through to priority comparison. */
517 if (p1->priority < p2->priority) {
519 } else if (p1->priority > p2->priority) {
/*
 * List jobs scheduled to run within the next "days" days (default
 * presumably 1; the initialization is elided).  Walks every enabled,
 * ACL-permitted Job resource, expands its schedule via find_next_run(),
 * inserts a sched_pkt per run into a dlist sorted by my_compare(), then
 * prints them with prt_runtime().
 * NOTE(review): declarations, braces, dlist setup and the free of the
 * sched_pkt entries are elided in this extraction.
 */
526 * Find all jobs to be run in roughly the
529 static void list_scheduled_jobs(UAContext *ua)
534 int level, num_jobs = 0;
536 bool hdr_printed = false;
541 Dmsg0(200, "enter list_sched_jobs()\n");
/* Optional "days=N" argument bounds the look-ahead window; values
 * outside 0..500 are rejected (warning suppressed in API mode). */
544 i = find_arg_with_value(ua, NT_("days"));
546 days = atoi(ua->argv[i]);
547 if (((days < 0) || (days > 500)) && !ua->api) {
548 ua->send_msg(_("Ignoring invalid value for days. Max is 500.\n"));
553 /* Loop through all jobs */
555 foreach_res(job, R_JOB) {
/* Honor the Job ACL and skip disabled jobs. */
556 if (!acl_access_ok(ua, Job_ACL, job->name()) || !job->enabled) {
/* Iterate every upcoming run of this job within the window. */
559 for (run=NULL; (run = find_next_run(run, job, runtime, days)); ) {
561 level = job->JobLevel;
/* Run-specific overrides (level/priority) take precedence when set;
 * their guarding conditions are elided in this extraction. */
565 priority = job->Priority;
567 priority = run->Priority;
573 sp = (sched_pkt *)malloc(sizeof(sched_pkt));
576 sp->priority = priority;
577 sp->runtime = runtime;
578 sp->pool = run->pool;
579 get_job_storage(&store, job, run);
580 sp->store = store.store;
581 Dmsg3(250, "job=%s store=%s MediaType=%s\n", job->name(), sp->store->name(), sp->store->media_type);
/* Keep the list sorted as we insert (runtime, then priority). */
582 sched.binary_insert_multiple(sp, my_compare);
585 } /* end for loop over resources */
/* Print each collected entry; header/prt_runtime calls are elided. */
587 foreach_dlist(sp, &sched) {
590 if (num_jobs == 0 && !ua->api) {
591 ua->send_msg(_("No Scheduled Jobs.\n"));
593 if (!ua->api) ua->send_msg("====\n");
594 Dmsg0(200, "Leave list_sched_jobs_runs()\n")
/*
 * List all currently running jobs.  Iterates the JCR list (the
 * foreach_jcr loop lines are elided in this extraction), maps each job's
 * JobStatus — and, where relevant, its SDJobStatus — to a human-readable
 * message, then prints one line per job.  Dynamically built messages use
 * pool memory (emsg/pool_mem) and are freed before the next iteration.
 * NOTE(review): many break statements, case labels and braces are elided.
 */
597 static void list_running_jobs(UAContext *ua)
602 char *emsg; /* edited message */
603 char dt[MAX_TIME_LENGTH];
/* pool_mem tracks whether msg points at pool memory that must be freed. */
605 bool pool_mem = false;
607 Dmsg0(200, "enter list_run_jobs()\n")
608 if (!ua->api) ua->send_msg(_("\nRunning Jobs:\n"));
/* First pass: JobId 0 entries are console/control connections, shown
 * separately from real jobs. */
610 if (jcr->JobId == 0) { /* this is us */
611 /* this is a console or other control job. We only show console
612 * jobs in the status output.
614 if (jcr->get_JobType() == JT_CONSOLE && !ua->api) {
615 bstrftime_nc(dt, sizeof(dt), jcr->start_time);
616 ua->send_msg(_("Console connected at %s\n"), dt);
625 /* Note the following message is used in regress -- don't change */
626 if (!ua->api) ua->send_msg(_("No Jobs running.\n====\n"));
627 Dmsg0(200, "leave list_run_jobs()\n")
/* Table header for the real running jobs. */
632 ua->send_msg(_(" JobId Level Name Status\n"));
633 ua->send_msg(_("======================================================================\n"));
/* Second pass: skip control JCRs and jobs hidden by the Job ACL. */
636 if (jcr->JobId == 0 || !acl_access_ok(ua, Job_ACL, jcr->job->name())) {
/* Map the Director-side JobStatus to a status message. */
640 switch (jcr->JobStatus) {
642 msg = _("is waiting execution");
645 msg = _("is running");
648 msg = _("is blocked");
651 msg = _("has terminated");
653 case JS_ErrorTerminated:
654 msg = _("has erred");
657 msg = _("has errors");
660 msg = _("has a fatal error");
663 msg = _("has verify differences");
666 msg = _("has been canceled");
/* Waiting-on-Client: message built in pool memory since it embeds the
 * client name (pool_mem presumably set true here — line elided). */
669 emsg = (char *) get_pool_memory(PM_FNAME);
671 Mmsg(emsg, _("is waiting on Client"));
673 Mmsg(emsg, _("is waiting on Client %s"), jcr->client->name());
/* Waiting-on-Storage: prefer the write storage name, else read storage. */
679 emsg = (char *) get_pool_memory(PM_FNAME);
681 Mmsg(emsg, _("is waiting on Storage %s"), jcr->wstore->name());
682 } else if (jcr->rstore) {
683 Mmsg(emsg, _("is waiting on Storage %s"), jcr->rstore->name());
685 Mmsg(emsg, _("is waiting on Storage"));
690 case JS_WaitStoreRes:
691 msg = _("is waiting on max Storage jobs");
693 case JS_WaitClientRes:
694 msg = _("is waiting on max Client jobs");
697 msg = _("is waiting on max Job jobs");
700 msg = _("is waiting on max total jobs");
702 case JS_WaitStartTime:
703 msg = _("is waiting for its start time");
705 case JS_WaitPriority:
706 msg = _("is waiting for higher priority jobs to finish");
708 case JS_DataCommitting:
709 msg = _("SD committing Data");
711 case JS_DataDespooling:
712 msg = _("SD despooling Data");
714 case JS_AttrDespooling:
715 msg = _("SD despooling Attributes");
717 case JS_AttrInserting:
718 msg = _("Dir inserting Attributes");
/* default: unknown status code — report the raw status character. */
722 emsg = (char *)get_pool_memory(PM_FNAME);
723 Mmsg(emsg, _("is in unknown state %c"), jcr->JobStatus);
729 * Now report Storage daemon status code
/* The SD status, when more specific, overrides the message above;
 * any previously allocated pool message is freed first. */
731 switch (jcr->SDJobStatus) {
734 free_pool_memory(emsg);
737 msg = _("is waiting for a mount request");
741 free_pool_memory(emsg);
744 msg = _("is waiting for an appendable Volume");
748 emsg = (char *)get_pool_memory(PM_FNAME);
/* Waiting for FD<->SD data channel: name both ends when known. */
751 if (!jcr->client || !jcr->wstore) {
752 Mmsg(emsg, _("is waiting for Client to connect to Storage daemon"));
754 Mmsg(emsg, _("is waiting for Client %s to connect to Storage %s"),
755 jcr->client->name(), jcr->wstore->name());
759 case JS_DataCommitting:
760 msg = _("SD committing Data");
762 case JS_DataDespooling:
763 msg = _("SD despooling Data");
765 case JS_AttrDespooling:
766 msg = _("SD despooling Attributes");
768 case JS_AttrInserting:
769 msg = _("Dir inserting Attributes");
/* Level column is blank for types with no meaningful level. */
772 switch (jcr->get_JobType()) {
775 bstrncpy(level, " ", sizeof(level));
778 bstrncpy(level, level_to_str(jcr->get_JobLevel()), sizeof(level));
/* Tab-separated in API mode, fixed-width columns on the console. */
784 ua->send_msg(_("%6d\t%-6s\t%-20s\t%s\n"),
785 jcr->JobId, level, jcr->Job, msg);
787 ua->send_msg(_("%6d %-6s %-20s %s\n"),
788 jcr->JobId, level, jcr->Job, msg);
/* Free the pool-memory message if one was allocated this iteration. */
792 free_pool_memory(emsg);
797 if (!ua->api) ua->send_msg("====\n");
798 Dmsg0(200, "leave list_run_jobs()\n")
/*
 * List recently terminated jobs from the global last_jobs list.
 * Holds the last-jobs lock while iterating; strips the trailing
 * ".YYYY-MM-DD..." timestamp components from the stored Job name and
 * honors the Job ACL before printing each entry.
 * NOTE(review): declarations, braces, several case labels/breaks and
 * some printed fields are elided in this extraction; the function's
 * closing brace lies beyond the visible range.
 */
801 static void list_terminated_jobs(UAContext *ua)
803 char dt[MAX_TIME_LENGTH], b1[30], b2[30];
/* Nothing to print when no jobs have terminated yet. */
806 if (last_jobs->empty()) {
807 if (!ua->api) ua->send_msg(_("No Terminated Jobs.\n"));
/* Lock the shared list for the duration of the iteration. */
810 lock_last_jobs_list();
811 struct s_last_job *je;
813 ua->send_msg(_("\nTerminated Jobs:\n"));
814 ua->send_msg(_(" JobId Level Files Bytes Status Finished Name \n"));
815 ua->send_msg(_("====================================================================\n"));
817 foreach_dlist(je, last_jobs) {
818 char JobName[MAX_NAME_LENGTH];
819 const char *termstat;
821 bstrncpy(JobName, je->Job, sizeof(JobName));
822 /* There are three periods after the Job name */
/* Strip the three dot-separated timestamp components appended to the
 * stored Job string, leaving the bare job name. */
824 for (int i=0; i<3; i++) {
825 if ((p=strrchr(JobName, '.')) != NULL) {
/* Skip entries the console's Job ACL does not permit. */
830 if (!acl_access_ok(ua, Job_ACL, JobName)) {
834 bstrftime_nc(dt, sizeof(dt), je->end_time);
/* Blank level column for job types without a meaningful level. */
835 switch (je->JobType) {
838 bstrncpy(level, " ", sizeof(level));
841 bstrncpy(level, level_to_str(je->JobLevel), sizeof(level));
/* Map the final JobStatus to a short termination label. */
845 switch (je->JobStatus) {
847 termstat = _("Created");
850 case JS_ErrorTerminated:
851 termstat = _("Error");
854 termstat = _("Diffs");
857 termstat = _("Cancel");
/* default: any other status prints "Other". */
863 termstat = _("Other");
/* Tab-separated in API mode, fixed-width columns on the console. */
867 ua->send_msg(_("%6d\t%-6s\t%8s\t%10s\t%-7s\t%-8s\t%s\n"),
870 edit_uint64_with_commas(je->JobFiles, b1),
871 edit_uint64_with_suffix(je->JobBytes, b2),
875 ua->send_msg(_("%6d %-6s %8s %10s %-7s %-8s %s\n"),
878 edit_uint64_with_commas(je->JobFiles, b1),
879 edit_uint64_with_suffix(je->JobBytes, b2),
884 if (!ua->api) ua->send_msg(_("\n"));
885 unlock_last_jobs_list();