2 Bacula(R) - The Network Backup Solution
4 Copyright (C) 2000-2017 Kern Sibbald
6 The original author of Bacula is Kern Sibbald, with contributions
7 from many others, a complete list can be found in the file AUTHORS.
9 You may use this file and others of this release according to the
10 license defined in the LICENSE file, which includes the Affero General
11 Public License, v3.0 ("AGPLv3") and some additional permissions and
12 terms pursuant to its AGPLv3 Section 7.
14 This notice must be preserved when any source code is
15 conveyed and/or propagated.
17 Bacula(R) is a registered trademark of Kern Sibbald.
20 * Bacula Director -- User Agent Status Command
22 * Kern Sibbald, August MMI
/* Globals defined elsewhere in the Director: heap base for memory stats,
 * and the time of the last configuration reload. */
29 extern void *start_heap;
30 extern utime_t last_reload_time;
/* Forward declarations for the static helpers implemented below. */
32 static void list_scheduled_jobs(UAContext *ua);
33 static void llist_scheduled_jobs(UAContext *ua);
34 static void list_running_jobs(UAContext *ua);
35 static void list_terminated_jobs(UAContext *ua);
36 static void do_storage_status(UAContext *ua, STORE *store, char *cmd);
37 static void do_client_status(UAContext *ua, CLIENT *client, char *cmd);
38 static void do_director_status(UAContext *ua);
39 static void do_all_status(UAContext *ua);
/* Implemented in another translation unit (non-static). */
40 void status_slots(UAContext *ua, STORE *store);
41 void status_content(UAContext *ua, STORE *store);
/* Protocol strings for the machine-readable .status replies. */
43 static char OKqstatus[] = "1000 OK .status\n";
44 static char DotStatusJob[] = "JobId=%s JobStatus=%c JobErrors=%d\n";
/* Handle the ".status" dot command from a console.
 * Dispatches on argk[1] (dir|client|storage) and argk[2] (current|last|
 * header|scheduled|running|terminated), emitting "1000 OK" or a "1900"
 * error line in the dot-command protocol.
 * NOTE(review): this listing is truncated (intermediate source lines are
 * missing), so the full control flow cannot be verified from here. */
50 bool dot_status_cmd(UAContext *ua, const char *cmd)
58 Dmsg2(20, "status=\"%s\" argc=%d\n", cmd, ua->argc);
61 ua->send_msg("1900 Bad .status command, missing arguments.\n");
65 if (strcasecmp(ua->argk[1], "dir") == 0) {
66 if (strcasecmp(ua->argk[2], "current") == 0) {
67 ua->send_msg(OKqstatus, ua->argk[2]);
/* Report each running (non-internal) job the console's Job ACL allows. */
69 if (!njcr->is_internal_job() && acl_access_ok(ua, Job_ACL, njcr->job->name())) {
70 ua->send_msg(DotStatusJob, edit_int64(njcr->JobId, ed1),
71 njcr->JobStatus, njcr->JobErrors);
/* "last": report only the most recently terminated job, ACL permitting. */
75 } else if (strcasecmp(ua->argk[2], "last") == 0) {
76 ua->send_msg(OKqstatus, ua->argk[2]);
77 if ((last_jobs) && (last_jobs->size() > 0)) {
78 job = (s_last_job*)last_jobs->last();
79 if (acl_access_ok(ua, Job_ACL, job->Job)) {
80 ua->send_msg(DotStatusJob, edit_int64(job->JobId, ed1),
81 job->JobStatus, job->Errors);
84 } else if (strcasecmp(ua->argk[2], "header") == 0) {
85 list_dir_status_header(ua);
86 } else if (strcasecmp(ua->argk[2], "scheduled") == 0) {
87 list_scheduled_jobs(ua);
88 } else if (strcasecmp(ua->argk[2], "running") == 0) {
89 list_running_jobs(ua);
90 } else if (strcasecmp(ua->argk[2], "terminated") == 0) {
91 list_terminated_jobs(ua);
93 ua->send_msg("1900 Bad .status command, wrong argument.\n");
/* ".status client=xxx ..." — forward the sub-command to the File daemon. */
96 } else if (strcasecmp(ua->argk[1], "client") == 0) {
97 client = get_client_resource(ua, JT_BACKUP_RESTORE);
99 Dmsg2(200, "Client=%s arg=%s\n", client->name(), NPRT(ua->argk[2]));
100 do_client_status(ua, client, ua->argk[2]);
/* ".status storage=xxx ..." — forward the sub-command to the Storage daemon. */
102 } else if (strcasecmp(ua->argk[1], "storage") == 0) {
103 store = get_storage_resource(ua, false /*no default*/, true/*unique*/);
105 ua->send_msg("1900 Bad .status command, wrong argument.\n");
108 do_storage_status(ua, store, ua->argk[2]);
110 ua->send_msg("1900 Bad .status command, wrong argument.\n");
117 /* Test the network between FD and SD */
/* Sets up a dummy job that connects a Client (FD) and a Storage daemon
 * (SD), then has the FD stream "bytes=" data (default 50 MiB) to the SD
 * to measure throughput; results from the FD are relayed to the console.
 * NOTE(review): truncated listing — error-handling paths (the lines
 * between the visible ones) are not fully visible here. */
118 static int do_network_status(UAContext *ua)
120 CLIENT *client = NULL;
123 char *store_address, ed1[50];
/* Default test size: 50 MiB, overridable with the "bytes=" argument. */
125 uint64_t nb = 50 * 1024 * 1024;
127 int i = find_arg_with_value(ua, "bytes");
129 if (!size_to_uint64(ua->argv[i], strlen(ua->argv[i]), &nb)) {
134 client = get_client_resource(ua, JT_BACKUP_RESTORE);
139 store.store = get_storage_resource(ua, false, true);
144 jcr->client = client;
145 set_wstorage(jcr, &store);
148 ua->send_msg(_("Connecting to Storage %s at %s:%d\n"),
149 store.store->name(), store.store->address, store.store->SDport);
152 if (!connect_to_storage_daemon(jcr, 10, SDConnectTimeout, 1)) {
153 ua->error_msg(_("Failed to connect to Storage.\n"));
157 if (!start_storage_daemon_job(jcr, NULL, NULL)) {
162 * Note startup sequence of SD/FD is different depending on
163 * whether the SD listens (normal) or the SD calls the FD.
165 if (!client->sd_calls_client) {
166 if (!run_storage_and_start_message_thread(jcr, jcr->store_bsock)) {
169 } /* Else it's done in init_storage_job() */
172 ua->send_msg(_("Connecting to Client %s at %s:%d\n"),
173 client->name(), client->address(), client->FDport);
176 if (!connect_to_file_daemon(jcr, 1, 15, 0)) {
177 ua->error_msg(_("Failed to connect to Client.\n"));
181 if (jcr->sd_calls_client) {
183 * SD must call "client" i.e. FD
/* SDCallsClient requires FD protocol version >= 10. */
185 if (jcr->FDVersion < 10) {
186 Jmsg(jcr, M_FATAL, 0, _("The File daemon does not support SDCallsClient.\n"));
189 if (!send_client_addr_to_sd(jcr)) {
192 if (!run_storage_and_start_message_thread(jcr, jcr->store_bsock)) {
196 store_address = store.store->address; /* dummy */
197 store_port = 0; /* flag that SD calls FD */
201 * send Storage daemon address to the File daemon,
202 * then wait for File daemon to make connection
203 * with Storage daemon.
/* Fall back to SDport when no separate SDDport (data port) is configured. */
205 if (store.store->SDDport == 0) {
206 store.store->SDDport = store.store->SDport;
209 store_address = get_storage_address(jcr->client, store.store);
210 store_port = store.store->SDDport;
213 if (!send_store_addr_to_fd(jcr, store.store, store_address, store_port)) {
218 ua->info_msg(_("Running network test between Client=%s and Storage=%s with %sB ...\n"),
219 client->name(), store.store->name(), edit_uint64_with_suffix(nb, ed1));
222 if (!jcr->file_bsock->fsend("testnetwork bytes=%lld\n", nb)) {
/* Relay the FD's progress/result messages until it closes the channel. */
226 while (jcr->file_bsock->recv() > 0) {
227 ua->info_msg(jcr->file_bsock->msg);
/* Tear down both daemon connections. */
231 jcr->file_bsock->signal(BNET_TERMINATE);
232 jcr->store_bsock->signal(BNET_TERMINATE);
233 wait_for_storage_daemon_termination(jcr);
235 free_bsock(jcr->file_bsock);
236 free_bsock(jcr->store_bsock);
243 /* This is the *old* command handler, so we must return
244 * 1 or it closes the connection
/* Legacy wrapper: delegates to dot_status_cmd() but always reports
 * success so the console connection stays open. */
246 int qstatus_cmd(UAContext *ua, const char *cmd)
248 dot_status_cmd(ua, cmd);
/* Handle the interactive "status" console command.
 * Scans the keyword arguments (network/schedule/all/dir/client/storage...);
 * if none is given, prompts the user to pick a daemon type.
 * NOTE(review): truncated listing — some branches and the switch arms'
 * break statements are not visible here. */
255 int status_cmd(UAContext *ua, const char *cmd)
261 Dmsg1(20, "status:%s:\n", cmd);
263 for (i=1; i<ua->argc; i++) {
264 if (strcasecmp(ua->argk[i], NT_("network")) == 0) {
265 do_network_status(ua);
267 } else if (strcasecmp(ua->argk[i], NT_("schedule")) == 0 ||
268 strcasecmp(ua->argk[i], NT_("scheduled")) == 0) {
269 llist_scheduled_jobs(ua);
271 } else if (strcasecmp(ua->argk[i], NT_("all")) == 0) {
274 } else if (strcasecmp(ua->argk[i], NT_("dir")) == 0 ||
275 strcasecmp(ua->argk[i], NT_("director")) == 0) {
276 do_director_status(ua);
278 } else if (strcasecmp(ua->argk[i], NT_("client")) == 0) {
279 client = get_client_resource(ua, JT_BACKUP_RESTORE);
281 do_client_status(ua, client, NULL);
285 store = get_storage_resource(ua, false/*no default*/, true/*unique*/);
/* "slots" sub-keyword switches to autochanger slot listing. */
287 if (find_arg(ua, NT_("slots")) > 0) {
288 status_slots(ua, store);
290 do_storage_status(ua, store, NULL);
296 /* If no args, ask for status type */
298 char prmt[MAX_NAME_LENGTH];
300 start_prompt(ua, _("Status available for:\n"));
301 add_prompt(ua, NT_("Director"));
302 add_prompt(ua, NT_("Storage"));
303 add_prompt(ua, NT_("Client"));
304 add_prompt(ua, NT_("Scheduled"));
305 add_prompt(ua, NT_("Network"));
306 add_prompt(ua, NT_("All"));
307 Dmsg0(20, "do_prompt: select daemon\n");
308 if ((item=do_prompt(ua, "", _("Select daemon type for status"), prmt, sizeof(prmt))) < 0) {
311 Dmsg1(20, "item=%d\n", item);
/* Prompt indices correspond to the add_prompt() order above. */
313 case 0: /* Director */
314 do_director_status(ua);
317 store = select_storage_resource(ua, true/*unique*/);
319 do_storage_status(ua, store, NULL);
323 client = select_client_resource(ua, JT_BACKUP_RESTORE);
325 do_client_status(ua, client, NULL);
329 llist_scheduled_jobs(ua);
332 do_network_status(ua);
/* "status all": show Director status, then query each distinct Storage
 * daemon and each distinct File daemon exactly once. Uniqueness is by
 * address+port so resources sharing a daemon are not polled twice.
 * NOTE(review): truncated listing — loop/brace structure partly missing. */
344 static void do_all_status(UAContext *ua)
346 STORE *store, **unique_store;
347 CLIENT *client, **unique_client;
351 do_director_status(ua);
353 /* Count Storage items */
356 foreach_res(store, R_STORAGE) {
359 unique_store = (STORE **) malloc(i * sizeof(STORE));
360 /* Find Unique Storage address/port */
362 foreach_res(store, R_STORAGE) {
/* Skip storages this console is not authorized to see. */
364 if (!acl_access_ok(ua, Storage_ACL, store->name())) {
367 for (j=0; j<i; j++) {
368 if (strcmp(unique_store[j]->address, store->address) == 0 &&
369 unique_store[j]->SDport == store->SDport) {
375 unique_store[i++] = store;
376 Dmsg2(40, "Stuffing: %s:%d\n", store->address, store->SDport);
381 /* Call each unique Storage daemon */
382 for (j=0; j<i; j++) {
383 do_storage_status(ua, unique_store[j], NULL);
387 /* Count Client items */
390 foreach_res(client, R_CLIENT) {
393 unique_client = (CLIENT **)malloc(i * sizeof(CLIENT));
394 /* Find Unique Client address/port */
396 foreach_res(client, R_CLIENT) {
/* Skip clients this console is not authorized to see. */
398 if (!acl_access_client_ok(ua, client->name(), JT_BACKUP_RESTORE)) {
401 for (j=0; j<i; j++) {
402 if (strcmp(unique_client[j]->address(), client->address()) == 0 &&
403 unique_client[j]->FDport == client->FDport) {
409 unique_client[i++] = client;
410 Dmsg2(40, "Stuffing: %s:%d\n", client->address(), client->FDport);
415 /* Call each unique File daemon */
416 for (j=0; j<i; j++) {
417 do_client_status(ua, unique_client[j], NULL);
/* Emit the Director status header as a structured API "header" group
 * (name, version, uptime, resource counts, plugin list) for api>1
 * consoles via OutputWriter. */
423 static void api_list_dir_status_header(UAContext *ua)
425 OutputWriter wt(ua->api_opts);
426 wt.start_group("header");
428 OT_STRING, "name", my_name,
429 OT_STRING, "version", VERSION " (" BDATE ")",
430 OT_STRING, "uname", HOST_OS " " DISTNAME " " DISTVER,
431 OT_UTIME, "started", daemon_start_time,
432 OT_UTIME, "reloaded", last_reload_time,
433 OT_INT, "jobs_run", num_jobs_run,
434 OT_INT, "jobs_running",job_count(),
/* Resource counts come from the global res_head table, indexed from r_first. */
435 OT_INT, "nclients", ((rblist *)res_head[R_CLIENT-r_first]->res_list)->size(),
436 OT_INT, "nstores", ((rblist *)res_head[R_STORAGE-r_first]->res_list)->size(),
437 OT_INT, "npools", ((rblist *)res_head[R_POOL-r_first]->res_list)->size(),
438 OT_INT, "ncats", ((rblist *)res_head[R_CATALOG-r_first]->res_list)->size(),
439 OT_INT, "nfset", ((rblist *)res_head[R_FILESET-r_first]->res_list)->size(),
440 OT_INT, "nscheds", ((rblist *)res_head[R_SCHEDULE-r_first]->res_list)->size(),
441 OT_PLUGINS,"plugins", b_plugin_list,
444 ua->send_msg("%s", wt.end_group());
/* Print the human-readable Director status header: version, start/reload
 * times, job counters, heap statistics, resource counts, and loaded
 * plugins. Delegates to api_list_dir_status_header() for API consoles.
 * NOTE(review): truncated listing — the api-mode branch/return between
 * the visible lines is not fully shown. */
447 void list_dir_status_header(UAContext *ua)
449 char dt[MAX_TIME_LENGTH], dt1[MAX_TIME_LENGTH];
450 char b1[35], b2[35], b3[35], b4[35], b5[35];
453 api_list_dir_status_header(ua);
457 ua->send_msg(_("%s %sVersion: %s (%s) %s %s %s\n"), my_name,
458 "", VERSION, BDATE, HOST_OS, DISTNAME, DISTVER);
459 bstrftime_nc(dt, sizeof(dt), daemon_start_time);
460 bstrftimes(dt1, sizeof(dt1), last_reload_time);
461 ua->send_msg(_("Daemon started %s, conf reloaded %s\n"), dt, dt1);
462 ua->send_msg(_(" Jobs: run=%d, running=%d mode=%d,%d\n"),
463 num_jobs_run, job_count(), (int)DEVELOPER_MODE, 0);
/* Heap usage: sbrk(0) - start_heap approximates total heap growth;
 * sm_* counters come from the smartall memory allocator. */
464 ua->send_msg(_(" Heap: heap=%s smbytes=%s max_bytes=%s bufs=%s max_bufs=%s\n"),
465 edit_uint64_with_commas((char *)sbrk(0)-(char *)start_heap, b1),
466 edit_uint64_with_commas(sm_bytes, b2),
467 edit_uint64_with_commas(sm_max_bytes, b3),
468 edit_uint64_with_commas(sm_buffers, b4),
469 edit_uint64_with_commas(sm_max_buffers, b5));
470 ua->send_msg(_(" Res: njobs=%d nclients=%d nstores=%d npools=%d ncats=%d"
471 " nfsets=%d nscheds=%d\n"),
472 ((rblist *)res_head[R_JOB-r_first]->res_list)->size(),
473 ((rblist *)res_head[R_CLIENT-r_first]->res_list)->size(),
474 ((rblist *)res_head[R_STORAGE-r_first]->res_list)->size(),
475 ((rblist *)res_head[R_POOL-r_first]->res_list)->size(),
476 ((rblist *)res_head[R_CATALOG-r_first]->res_list)->size(),
477 ((rblist *)res_head[R_FILESET-r_first]->res_list)->size(),
478 ((rblist *)res_head[R_SCHEDULE-r_first]->res_list)->size());
481 /* TODO: use this function once for all daemons */
482 if (b_plugin_list && b_plugin_list->size() > 0) {
485 POOL_MEM msg(PM_FNAME);
486 pm_strcpy(msg, " Plugin: ");
487 foreach_alist(plugin, b_plugin_list) {
488 len = pm_strcat(msg, plugin->file);
490 pm_strcat(msg, "\n ");
495 ua->send_msg("%s\n", msg.c_str());
/* Full Director status: header, then scheduled, running, and terminated
 * job lists, closed with the "====" section terminator. */
499 static void do_director_status(UAContext *ua)
501 list_dir_status_header(ua);
504 * List scheduled Jobs
506 list_scheduled_jobs(ua);
511 list_running_jobs(ua);
514 * List terminated jobs
516 list_terminated_jobs(ua);
517 ua->send_msg("====\n");
/* Query a Storage daemon's status. Verifies Storage ACL, refuses access
 * when the console has restricted Client or Job ACLs (SD output spans
 * multiple jobs), connects to the SD, sends a ".status" request, and
 * relays the response to the console.
 * NOTE(review): truncated listing — returns/braces between visible
 * lines are not shown. */
520 static void do_storage_status(UAContext *ua, STORE *store, char *cmd)
526 if (!acl_access_ok(ua, Storage_ACL, store->name())) {
527 ua->error_msg(_("No authorization for Storage \"%s\"\n"), store->name());
531 * The Storage daemon is problematic because it shows information
532 * related to multiple Job, so if there is a Client or Job
533 * ACL restriction, we forbid all access to the Storage.
535 if (have_restricted_acl(ua, Client_ACL) ||
536 have_restricted_acl(ua, Job_ACL)) {
537 ua->error_msg(_("Restricted Client or Job does not permit access to Storage daemons\n"));
540 lstore.store = store;
541 pm_strcpy(lstore.store_source, _("unknown source"));
542 set_wstorage(ua->jcr, &lstore);
543 /* Try connecting for up to 15 seconds */
544 if (!ua->api) ua->send_msg(_("Connecting to Storage daemon %s at %s:%d\n"),
545 store->name(), store->address, store->SDport);
546 if (!connect_to_storage_daemon(ua->jcr, 1, 15, 0)) {
547 ua->send_msg(_("\nFailed to connect to Storage daemon %s.\n====\n"),
549 free_bsock(ua->jcr->store_bsock);
552 Dmsg0(20, "Connected to storage daemon\n");
553 sd = ua->jcr->store_bsock;
557 * For .status storage=xxx shstore list
558 * send .status shstore list xxx-device
/* "shstore" (shared storage) needs the device name appended. */
560 if (strcasecmp(cmd, "shstore") == 0) {
562 ua->send_msg(_("Must have three arguments\n"));
565 pm_strcpy(devname, store->dev_name());
/* bash_spaces encodes spaces so the name survives tokenization. */
566 bash_spaces(devname.c_str());
567 sd->fsend(".status %s %s %s api=%d api_opts=%s",
568 cmd, ua->argk[3], devname.c_str(),
569 ua->api, ua->api_opts);
571 int i = find_arg_with_value(ua, "device");
573 Mmsg(devname, "device=%s", ua->argv[i]);
574 bash_spaces(devname.c_str());
576 sd->fsend(".status %s api=%d api_opts=%s %s",
577 cmd, ua->api, ua->api_opts, devname.c_str());
/* Relay SD output until the daemon signals end-of-data. */
582 while (sd->recv() >= 0) {
583 ua->send_msg("%s", sd->msg);
585 sd->signal(BNET_TERMINATE);
586 free_bsock(ua->jcr->store_bsock);
/* Query a File daemon's status. Verifies the Client ACL, installs a
 * dummy SD auth key (no real SD session), connects to the FD, sends a
 * ".status" request, and relays the response to the console.
 * NOTE(review): truncated listing — returns between visible lines are
 * not shown. */
590 static void do_client_status(UAContext *ua, CLIENT *client, char *cmd)
594 if (!acl_access_client_ok(ua, client->name(), JT_BACKUP_RESTORE)) {
595 ua->error_msg(_("No authorization for Client \"%s\"\n"), client->name());
598 /* Connect to File daemon */
599 ua->jcr->client = client;
600 /* Release any old dummy key */
601 if (ua->jcr->sd_auth_key) {
602 free(ua->jcr->sd_auth_key);
604 /* Create a new dummy SD auth key */
605 ua->jcr->sd_auth_key = bstrdup("dummy");
607 /* Try to connect for 15 seconds */
608 if (!ua->api) ua->send_msg(_("Connecting to Client %s at %s:%d\n"),
609 client->name(), client->address(), client->FDport);
610 if (!connect_to_file_daemon(ua->jcr, 1, 15, 0)) {
611 ua->send_msg(_("Failed to connect to Client %s.\n====\n"),
613 free_bsock(ua->jcr->file_bsock);
616 Dmsg0(20, _("Connected to file daemon\n"));
617 fd = ua->jcr->file_bsock;
619 fd->fsend(".status %s api=%d api_opts=%s", cmd, ua->api, ua->api_opts);
/* Relay FD output until the daemon signals end-of-data. */
623 while (fd->recv() >= 0) {
624 ua->send_msg("%s", fd->msg);
626 fd->signal(BNET_TERMINATE);
627 free_bsock(ua->jcr->file_bsock);
/* Print the column header for the short scheduled-jobs listing
 * (Volume column). */
632 static void prt_runhdr(UAContext *ua)
635 ua->send_msg(_("\nScheduled Jobs:\n"));
636 ua->send_msg(_("Level Type Pri Scheduled Job Name Volume\n"));
637 ua->send_msg(_("===================================================================================\n"));
/* Print the column header for the long ("llist") scheduled-jobs listing
 * (Schedule column instead of Volume). */
641 static void prt_lrunhdr(UAContext *ua)
644 ua->send_msg(_("\nScheduled Jobs:\n"));
645 ua->send_msg(_("Level Type Pri Scheduled Job Name Schedule\n"));
646 ua->send_msg(_("=====================================================================================\n"));
651 /* Scheduling packet */
/* dlink must stay the first member so dlist can treat the struct as a
 * list node. Remaining fields are not visible in this truncated listing. */
653 dlink link; /* keep this as first item!!! */
/* Print one scheduled-job entry. For backup jobs it completes the JCR
 * (possibly opening the catalog DB) and looks up the next appendable
 * Volume so the listing can show which Volume will be used; restores
 * show level "Restore". Emits either plain text or API output.
 * NOTE(review): truncated listing — several statements and switch arms
 * between the visible lines are missing. */
662 static void prt_runtime(UAContext *ua, sched_pkt *sp, OutputWriter *ow)
664 char dt[MAX_TIME_LENGTH];
665 const char *level_ptr;
667 bool close_db = false;
/* Preserve the JCR's job type; it is temporarily altered below. */
672 orig_jobtype = jcr->getJobType();
673 if (sp->job->JobType == JT_BACKUP) {
675 ok = complete_jcr_for_job(jcr, sp->job, sp->pool);
676 Dmsg1(250, "Using pool=%s\n", jcr->pool->name());
678 close_db = true; /* new db opened, remember to close it */
681 mr.PoolId = jcr->jr.PoolId;
682 jcr->wstore = sp->store;
683 set_storageid_in_mr(jcr->wstore, &mr);
684 Dmsg0(250, "call find_next_volume_for_append\n");
685 /* no need to set ScratchPoolId, since we use fnv_no_create_vol */
686 ok = find_next_volume_for_append(jcr, &mr, 1, fnv_no_create_vol, fnv_no_prune);
689 bstrncpy(mr.VolumeName, "*unknown*", sizeof(mr.VolumeName));
692 bstrftime_nc(dt, sizeof(dt), sp->runtime);
693 switch (sp->job->JobType) {
698 level_ptr = "Restore";
701 level_ptr = level_to_str(sp->level);
/* Tab-separated variant (visible lines suggest api==1 output). */
705 ua->send_msg(_("%-14s\t%-8s\t%3d\t%-18s\t%-18s\t%s\n"),
706 level_ptr, job_type_to_str(sp->job->JobType), sp->priority, dt,
707 sp->job->name(), mr.VolumeName);
709 } else if (ua->api > 1) {
711 ow->get_output(OT_CLEAR,
713 OT_STRING, "name", sp->job->name(),
714 OT_JOBLEVEL, "level", sp->level,
715 OT_JOBTYPE, "type", sp->job->JobType,
716 OT_INT, "priority",sp->priority,
717 OT_UTIME, "schedtime", sp->runtime,
718 OT_STRING, "volume", mr.VolumeName,
719 OT_STRING, "pool", jcr->pool?jcr->pool->name():"",
720 OT_STRING, "storage", jcr->wstore?jcr->wstore->name():"",
/* Plain-text variant for interactive consoles. */
726 ua->send_msg(_("%-14s %-8s %3d %-18s %-18s %s\n"),
727 level_ptr, job_type_to_str(sp->job->JobType), sp->priority, dt,
728 sp->job->name(), mr.VolumeName);
/* Close any DB we opened and restore the JCR's original state. */
731 db_close_database(jcr, jcr->db);
733 jcr->db = ua->db; /* restore ua db to jcr */
734 jcr->setJobType(orig_jobtype);
738 * Detailed listing of all scheduler jobs
/* Long-form scheduled-jobs listing: walks every Job resource with an
 * enabled schedule and, for each run record, simulates the scheduler's
 * calendar bit tests day by day (up to "days", default bounded at 3000)
 * to find future run times. Supports days=, limit=, time=, schedule=,
 * and job= filters.
 * NOTE(review): truncated listing — default values and several closing
 * braces are not visible here. */
740 static void llist_scheduled_jobs(UAContext *ua)
745 int level, num_jobs = 0;
747 bool hdr_printed = false;
748 char sched_name[MAX_NAME_LENGTH];
749 char job_name[MAX_NAME_LENGTH];
752 time_t now = time(NULL);
754 const char *level_ptr;
756 Dmsg0(200, "enter list_sched_jobs()\n");
758 i = find_arg_with_value(ua, NT_("days"));
760 days = atoi(ua->argv[i]);
761 if (((days < 0) || (days > 3000)) && !ua->api) {
762 ua->send_msg(_("Ignoring invalid value for days. Max is 3000.\n"));
769 i = find_arg_with_value(ua, NT_("limit"));
771 limit = atoi(ua->argv[i]);
772 if (((limit < 0) || (limit > 2000)) && !ua->api) {
773 ua->send_msg(_("Ignoring invalid value for limit. Max is 2000.\n"));
/* "time=" overrides the simulation's starting point (default: now). */
780 i = find_arg_with_value(ua, NT_("time"));
782 now = str_to_utime(ua->argv[i]);
784 ua->send_msg(_("Ignoring invalid time.\n"));
789 i = find_arg_with_value(ua, NT_("schedule"));
791 bstrncpy(sched_name, ua->argv[i], sizeof(sched_name));
796 i = find_arg_with_value(ua, NT_("job"));
798 bstrncpy(job_name, ua->argv[i], sizeof(job_name));
803 /* Loop through all jobs */
805 foreach_res(job, R_JOB) {
806 sched = job->schedule;
/* Skip jobs without a schedule, or with job/schedule/client disabled. */
807 if (!sched || !job->is_enabled() || (sched && !sched->is_enabled()) ||
808 (job->client && !job->client->is_enabled())) {
809 continue; /* no, skip this job */
811 if (job_name[0] && strcmp(job_name, job->name()) != 0) {
814 for (run=sched->run; run; run=run->next) {
816 for (i=0; i<days; i++) {
818 int mday, wday, month, wom, woy, ldom;
819 char dt[MAX_TIME_LENGTH];
822 /* compute values for next time */
823 (void)localtime_r(&next, &tm);
/* Scheduler bitmaps are 0-based; tm_mday is 1-based. */
824 mday = tm.tm_mday - 1;
828 woy = tm_woy(next); /* get week of year */
829 ldom = tm_ldom(month, tm.tm_year + 1900);
833 Dmsg6(000, "m=%d md=%d wd=%d wom=%d woy=%d ldom=%d\n",
834 month, mday, wday, wom, woy, ldom);
835 Dmsg6(000, "bitset bsm=%d bsmd=%d bswd=%d bswom=%d bswoy=%d bsldom=%d\n",
836 bit_is_set(month, run->month),
837 bit_is_set(mday, run->mday),
838 bit_is_set(wday, run->wday),
839 bit_is_set(wom, run->wom),
840 bit_is_set(woy, run->woy),
841 bit_is_set(31, run->mday));
/* A run matches either the normal calendar bits, or the special
 * "bit 31" marker meaning last-day-of-month. */
844 ok = (bit_is_set(mday, run->mday) &&
845 bit_is_set(wday, run->wday) &&
846 bit_is_set(month, run->month) &&
847 bit_is_set(wom, run->wom) &&
848 bit_is_set(woy, run->woy)) ||
849 (bit_is_set(month, run->month) &&
850 bit_is_set(31, run->mday) && mday == ldom);
852 next += 24 * 60 * 60; /* Add one day */
856 level = job->JobLevel;
860 switch (job->JobType) {
865 level_ptr = "Restore";
868 level_ptr = level_to_str(level);
/* Run-record priority overrides the job default when set. */
871 priority = job->Priority;
873 priority = run->Priority;
/* Emit one line per hour bit set in the run record. */
880 for (int j=0; j < 24; j++) {
881 if (bit_is_set(j, run->hour)) {
883 tm.tm_min = run->minute;
885 runtime = mktime(&tm);
886 bstrftime_dn(dt, sizeof(dt), runtime);
888 ua->send_msg(_("%-14s\t%-8s\t%3d\t%-18s\t%-18s\t%s\n"),
889 level_ptr, job_type_to_str(job->JobType), priority, dt,
890 job->name(), sched->name());
892 ua->send_msg(_("%-14s %-8s %3d %-18s %-18s %s\n"),
893 level_ptr, job_type_to_str(job->JobType), priority, dt,
894 job->name(), sched->name());
898 next += 24 * 60 * 60; /* Add one day */
900 if (num_jobs >= limit) {
904 } /* end loop over run pkts */
905 } /* end for loop over resources */
908 if (num_jobs == 0 && !ua->api) {
909 ua->send_msg(_("No Scheduled Jobs.\n"));
911 if (!ua->api) ua->send_msg("====\n");
912 Dmsg0(200, "Leave ;list_sched_jobs_runs()\n");
917 * Sort items by runtime, priority
/* dlist comparison callback: orders sched_pkt entries by runtime first,
 * then by priority. NOTE(review): the return statements fall in the
 * truncated (missing) lines of this listing. */
919 static int my_compare(void *item1, void *item2)
921 sched_pkt *p1 = (sched_pkt *)item1;
922 sched_pkt *p2 = (sched_pkt *)item2;
923 if (p1->runtime < p2->runtime) {
925 } else if (p1->runtime > p2->runtime) {
928 if (p1->priority < p2->priority) {
930 } else if (p1->priority > p2->priority) {
937 * Find all jobs to be run in roughly the
/* Short scheduled-jobs listing: uses find_next_run() to collect the
 * upcoming runs (bounded by "days", max 500) into sched_pkt entries,
 * sorts them by runtime/priority via my_compare, then prints each with
 * prt_runtime(). Supports days= and schedule= filters.
 * NOTE(review): truncated listing — ACL header printing and defaults
 * between the visible lines are not shown. */
940 static void list_scheduled_jobs(UAContext *ua)
942 OutputWriter ow(ua->api_opts);
946 int level, num_jobs = 0;
948 bool hdr_printed = false;
949 char sched_name[MAX_NAME_LENGTH];
954 Dmsg0(200, "enter list_sched_jobs()\n");
957 i = find_arg_with_value(ua, NT_("days"));
959 days = atoi(ua->argv[i]);
960 if (((days < 0) || (days > 500)) && !ua->api) {
961 ua->send_msg(_("Ignoring invalid value for days. Max is 500.\n"));
965 i = find_arg_with_value(ua, NT_("schedule"));
967 bstrncpy(sched_name, ua->argv[i], sizeof(sched_name));
972 /* Loop through all jobs */
974 foreach_res(job, R_JOB) {
/* Skip jobs the console cannot access or that are disabled. */
975 if (!acl_access_ok(ua, Job_ACL, job->name()) || !job->is_enabled()) {
978 if (sched_name[0] && job->schedule &&
979 strcasecmp(job->schedule->name(), sched_name) != 0) {
982 for (run=NULL; (run = find_next_run(run, job, runtime, days)); ) {
984 level = job->JobLevel;
988 priority = job->Priority;
990 priority = run->Priority;
/* Build a sched_pkt for this run and insert it in sorted order. */
996 sp = (sched_pkt *)malloc(sizeof(sched_pkt));
999 sp->priority = priority;
1000 sp->runtime = runtime;
1001 sp->pool = run->pool;
1002 get_job_storage(&store, job, run);
1003 sp->store = store.store;
1004 Dmsg3(250, "job=%s store=%s MediaType=%s\n", job->name(), sp->store->name(), sp->store->media_type);
1005 sched.binary_insert_multiple(sp, my_compare);
1008 } /* end for loop over resources */
1010 foreach_dlist(sp, &sched) {
1011 prt_runtime(ua, sp, &ow);
1013 if (num_jobs == 0 && !ua->api) {
1014 ua->send_msg(_("No Scheduled Jobs.\n"));
1016 if (!ua->api) ua->send_msg("====\n");
1017 Dmsg0(200, "Leave list_sched_jobs_runs()\n")
/* List all currently running jobs (optionally filtered with jobid=).
 * For each visible JCR: maps JobStatus (and, when more specific, the
 * Storage daemon's SDJobStatus) to a human-readable message, then emits
 * one line per job in tabbed (api==1), structured (api>1), or plain
 * console format. Console JCRs (JobId==0) are shown as connections.
 * NOTE(review): truncated listing — the foreach_jcr loops, case labels,
 * and break statements between the visible lines are missing, so the
 * exact control flow cannot be verified from here. */
1020 static void list_running_jobs(UAContext *ua)
1026 const char *msg, *msgdir;
1027 char *emsg; /* edited message */
1028 char dt[MAX_TIME_LENGTH];
/* pool_mem tracks whether msg points at pool memory needing free. */
1030 bool pool_mem = false;
1031 OutputWriter ow(ua->api_opts);
1034 if ((i = find_arg_with_value(ua, "jobid")) >= 0) {
1035 jid = str_to_int64(ua->argv[i]);
1038 Dmsg0(200, "enter list_run_jobs()\n");
1041 ua->send_msg(_("\nRunning Jobs:\n"));
1043 if (jcr->JobId == 0) { /* this is us */
1044 /* this is a console or other control job. We only show console
1045 * jobs in the status output.
1047 if (jcr->getJobType() == JT_CONSOLE) {
1048 bstrftime_nc(dt, sizeof(dt), jcr->start_time);
1049 ua->send_msg(_("Console connected %sat %s\n"),
1050 (ua->UA_sock && ua->UA_sock->tls)?_("using TLS "):"",
1059 njobs = 0; /* count the number of job really displayed */
/* Skip internal JCRs and jobs outside the console's Job ACL. */
1061 if (jcr->JobId == 0 || !jcr->job || !acl_access_ok(ua, Job_ACL, jcr->job->name())) {
1064 /* JobId keyword found in command line */
1065 if (jid > 0 && jcr->JobId != jid) {
1070 /* display the header for the first job */
1072 ua->send_msg(_(" JobId Type Level Files Bytes Name Status\n"));
1073 ua->send_msg(_("======================================================================\n"));
1075 } else if (ua->api > 1) {
1076 ua->send_msg(ow.start_group("running", false));
1079 status = jcr->JobStatus;
/* Map Director-side JobStatus to a description. */
1082 msg = _("is waiting execution");
1085 msg = _("is running");
1088 msg = _("is blocked");
1091 msg = _("has terminated");
1094 msg = _("has terminated with warnings");
1097 msg = _("has terminated in incomplete state");
1099 case JS_ErrorTerminated:
1100 msg = _("has erred");
1103 msg = _("has errors");
1106 msg = _("has a fatal error");
1108 case JS_Differences:
1109 msg = _("has verify differences");
1112 msg = _("has been canceled");
/* Statuses needing interpolated names use pool memory (emsg). */
1115 emsg = (char *) get_pool_memory(PM_FNAME);
1117 Mmsg(emsg, _("is waiting on Client"));
1119 Mmsg(emsg, _("is waiting on Client %s"), jcr->client->name());
1125 emsg = (char *) get_pool_memory(PM_FNAME);
1127 Mmsg(emsg, _("is waiting on Storage \"%s\""), jcr->wstore->name());
1128 } else if (jcr->rstore) {
1129 Mmsg(emsg, _("is waiting on Storage \"%s\""), jcr->rstore->name());
1131 Mmsg(emsg, _("is waiting on Storage"));
1136 case JS_WaitStoreRes:
1137 msg = _("is waiting on max Storage jobs");
1139 case JS_WaitClientRes:
1140 msg = _("is waiting on max Client jobs");
1143 msg = _("is waiting on max Job jobs");
1145 case JS_WaitMaxJobs:
1146 msg = _("is waiting on max total jobs");
1148 case JS_WaitStartTime:
1149 emsg = (char *) get_pool_memory(PM_FNAME);
1150 Mmsg(emsg, _("is waiting for its start time (%s)"),
1151 bstrftime_ny(dt, sizeof(dt), jcr->sched_time));
1155 case JS_WaitPriority:
1156 msg = _("is waiting for higher priority jobs to finish");
1159 msg = _("is waiting for a Shared Storage device");
1161 case JS_DataCommitting:
1162 msg = _("SD committing Data");
1164 case JS_DataDespooling:
1165 msg = _("SD despooling Data");
1167 case JS_AttrDespooling:
1168 msg = _("SD despooling Attributes");
1170 case JS_AttrInserting:
1171 msg = _("Dir inserting Attributes");
1175 emsg = (char *)get_pool_memory(PM_FNAME);
1176 Mmsg(emsg, _("is in unknown state %c"), jcr->JobStatus);
/* Remember the Director-derived message so we can detect whether the
 * SD status below replaces it. */
1181 msgdir = msg; /* Keep it to know if we update the status variable */
1183 * Now report Storage daemon status code
1185 switch (jcr->SDJobStatus) {
1188 free_pool_memory(emsg);
1191 msg = _("is waiting for a mount request");
1195 free_pool_memory(emsg);
1198 msg = _("is waiting for an appendable Volume");
1201 /* Special case when JobStatus=JS_WaitFD, we don't have a FD link yet
1202 * we need to stay in WaitFD status See bee mantis #1414 */
1203 if (jcr->JobStatus != JS_WaitFD) {
1205 emsg = (char *)get_pool_memory(PM_FNAME);
1208 if (!jcr->client || !jcr->wstore) {
1209 Mmsg(emsg, _("is waiting for Client to connect to Storage daemon"));
1211 Mmsg(emsg, _("is waiting for Client %s to connect to Storage %s"),
1212 jcr->client->name(), jcr->wstore->name());
1217 case JS_DataCommitting:
1218 msg = _("SD committing Data");
1220 case JS_DataDespooling:
1221 msg = _("SD despooling Data");
1223 case JS_AttrDespooling:
1224 msg = _("SD despooling Attributes");
1226 case JS_AttrInserting:
1227 msg = _("Dir inserting Attributes");
/* If the SD status changed the message, report the SD status code. */
1230 if (msg != msgdir) {
1231 status = jcr->SDJobStatus;
1233 switch (jcr->getJobType()) {
1235 bstrncpy(level, "Admin", sizeof(level));
1238 bstrncpy(level, "Restore", sizeof(level));
1241 bstrncpy(level, level_to_str(jcr->getJobLevel()), sizeof(level));
/* bash_spaces/unbash_spaces protect the comment field's spaces in the
 * tab-separated api output. */
1247 bash_spaces(jcr->comment);
1248 ua->send_msg(_("%6d\t%-6s\t%-20s\t%s\t%s\n"),
1249 jcr->JobId, level, jcr->Job, msg, jcr->comment);
1250 unbash_spaces(jcr->comment);
1252 } else if (ua->api > 1) {
1253 ua->send_msg("%s", ow.get_output(OT_CLEAR,
1255 OT_INT32, "jobid", jcr->JobId,
1256 OT_JOBLEVEL,"level", jcr->getJobLevel(),
1257 OT_JOBTYPE, "type", jcr->getJobType(),
1258 OT_JOBSTATUS,"status", status,
1259 OT_STRING, "status_desc",msg,
1260 OT_STRING, "comment", jcr->comment,
1261 OT_SIZE, "jobbytes", jcr->JobBytes,
1262 OT_INT32, "jobfiles", jcr->JobFiles,
1263 OT_STRING, "job", jcr->Job,
1264 OT_STRING, "name", jcr->job->name(),
1265 OT_STRING, "clientname",jcr->client?jcr->client->name():"",
1266 OT_STRING, "fileset", jcr->fileset?jcr->fileset->name():"",
1267 OT_STRING, "storage", jcr->wstore?jcr->wstore->name():"",
1268 OT_STRING, "rstorage", jcr->rstore?jcr->rstore->name():"",
1269 OT_UTIME, "schedtime", jcr->sched_time,
1270 OT_UTIME, "starttime", jcr->start_time,
1271 OT_INT32, "priority", jcr->JobPriority,
1272 OT_INT32, "errors", jcr->JobErrors,
1277 char b1[50], b2[50], b3[50];
1279 bstrncpy(b1, job_type_to_str(jcr->getJobType()), sizeof(b1));
1281 ua->send_msg(_("%6d %-4s %-3s %10s %10s %-17s %s\n"),
1282 jcr->JobId, b1, level,
1283 edit_uint64_with_commas(jcr->JobFiles, b2),
1284 edit_uint64_with_suffix(jcr->JobBytes, b3),
1285 jcr->job->name(), msg);
/* Release pool memory when msg was built with Mmsg above. */
1289 free_pool_memory(emsg);
1296 /* Note the following message is used in regress -- don't change */
1297 ua->send_msg(_("No Jobs running.\n====\n"));
1298 Dmsg0(200, "leave list_run_jobs()\n");
1301 /* display a closing header */
1303 ua->send_msg("====\n");
1304 } else if (ua->api > 1) {
1305 ua->send_msg(ow.end_group(false));
1308 Dmsg0(200, "leave list_run_jobs()\n");
/* List the most recently terminated jobs from the global last_jobs list.
 * The list is walked under lock; each entry's trailing ".YYYY-..." job
 * suffixes are stripped for the ACL check, the status code is mapped to
 * a short word, and output is emitted in tabbed, structured (api>1), or
 * plain console format.
 * NOTE(review): truncated listing — some case labels and break
 * statements between the visible lines are missing. */
1311 static void list_terminated_jobs(UAContext *ua)
1313 char dt[MAX_TIME_LENGTH], b1[30], b2[30];
1315 OutputWriter ow(ua->api_opts);
1317 if (last_jobs->empty()) {
1318 if (!ua->api) ua->send_msg(_("No Terminated Jobs.\n"));
/* last_jobs is shared state — hold the lock while iterating. */
1321 lock_last_jobs_list();
1322 struct s_last_job *je;
1324 ua->send_msg(_("\nTerminated Jobs:\n"));
1325 ua->send_msg(_(" JobId Level Files Bytes Status Finished Name \n"));
1326 ua->send_msg(_("====================================================================\n"));
1327 } else if (ua->api > 1) {
1328 ua->send_msg(ow.start_group("terminated"));
1330 foreach_dlist(je, last_jobs) {
1331 char JobName[MAX_NAME_LENGTH];
1332 const char *termstat;
1334 bstrncpy(JobName, je->Job, sizeof(JobName));
1335 /* There are three periods after the Job name */
/* Strip the three ".xxx" suffixes to recover the bare job name. */
1337 for (int i=0; i<3; i++) {
1338 if ((p=strrchr(JobName, '.')) != NULL) {
1343 if (!acl_access_ok(ua, Job_ACL, JobName)) {
1347 bstrftime_nc(dt, sizeof(dt), je->end_time);
1348 switch (je->JobType) {
1350 bstrncpy(level, "Admin", sizeof(level));
1353 bstrncpy(level, "Restore", sizeof(level));
1356 bstrncpy(level, level_to_str(je->JobLevel), sizeof(level));
1360 switch (je->JobStatus) {
1362 termstat = _("Created");
1365 case JS_ErrorTerminated:
1366 termstat = _("Error");
1368 case JS_Differences:
1369 termstat = _("Diffs");
1372 termstat = _("Cancel");
1378 termstat = _("OK -- with warnings");
1381 termstat = _("Incomplete");
1384 termstat = _("Other");
1388 ua->send_msg(_("%7d\t%-6s\t%8s\t%10s\t%-7s\t%-8s\t%s\n"),
1391 edit_uint64_with_commas(je->JobFiles, b1),
1392 edit_uint64_with_suffix(je->JobBytes, b2),
1395 } else if (ua->api > 1) {
1397 ow.get_output(OT_CLEAR,
1399 OT_INT32, "jobid", je->JobId,
1400 OT_JOBLEVEL,"level", je->JobLevel,
1401 OT_JOBTYPE, "type", je->JobType,
1402 OT_JOBSTATUS,"status", je->JobStatus,
1403 OT_STRING, "status_desc",termstat,
1404 OT_SIZE, "jobbytes", je->JobBytes,
1405 OT_INT32, "jobfiles", je->JobFiles,
1406 OT_STRING, "job", je->Job,
1407 OT_UTIME, "starttime", je->start_time,
1408 OT_UTIME, "endtime", je->end_time,
1409 OT_INT32, "errors", je->Errors,
1414 ua->send_msg(_("%6d %-7s %8s %10s %-7s %-8s %s\n"),
1417 edit_uint64_with_commas(je->JobFiles, b1),
1418 edit_uint64_with_suffix(je->JobBytes, b2),
1424 ua->send_msg(_("\n"));
1425 } else if (ua->api > 1) {
1426 ua->send_msg(ow.end_group(false));
1428 unlock_last_jobs_list();