2 Bacula(R) - The Network Backup Solution
4 Copyright (C) 2000-2017 Kern Sibbald
6 The original author of Bacula is Kern Sibbald, with contributions
7 from many others, a complete list can be found in the file AUTHORS.
9 You may use this file and others of this release according to the
10 license defined in the LICENSE file, which includes the Affero General
11 Public License, v3.0 ("AGPLv3") and some additional permissions and
12 terms pursuant to its AGPLv3 Section 7.
14 This notice must be preserved when any source code is
15 conveyed and/or propagated.
17 Bacula(R) is a registered trademark of Kern Sibbald.
20 * Bacula Director -- User Agent Status Command
22 * Kern Sibbald, August MMI
/* Globals defined elsewhere in the Director: base of the heap (used for the
 * memory figures in the status header) and the time of the last config reload. */
29 extern void *start_heap;
30 extern utime_t last_reload_time;

/* Forward declarations for the static status helpers implemented below. */
32 static void list_scheduled_jobs(UAContext *ua);
33 static void llist_scheduled_jobs(UAContext *ua);
34 static void list_running_jobs(UAContext *ua);
35 static void list_terminated_jobs(UAContext *ua);
36 static void do_storage_status(UAContext *ua, STORE *store, char *cmd);
37 static void do_client_status(UAContext *ua, CLIENT *client, char *cmd);
38 static void do_director_status(UAContext *ua);
39 static void do_all_status(UAContext *ua);
/* Non-static: implemented elsewhere and also callable from other modules. */
40 void status_slots(UAContext *ua, STORE *store);
41 void status_content(UAContext *ua, STORE *store);

/* Reply templates for the machine-readable ".status" command. */
43 static char OKqstatus[] = "1000 OK .status\n";
44 static char DotStatusJob[] = "JobId=%s JobStatus=%c JobErrors=%d\n";
/*
 * Handle the machine-readable ".status" console command.
 * Dispatches on argk[1] (dir | client | storage) and, for "dir", on
 * argk[2] (current | last | header | scheduled | running | terminated).
 * Unrecognized arguments produce a "1900 Bad .status command" error reply.
 * NOTE(review): this extract is elided (line numbers jump); loop/brace
 * context around some statements is not visible here.
 */
50 bool dot_status_cmd(UAContext *ua, const char *cmd)
58 Dmsg2(20, "status=\"%s\" argc=%d\n", cmd, ua->argc);
   /* Too few arguments for any dispatch below. */
61 ua->send_msg("1900 Bad .status command, missing arguments.\n");
65 if (strcasecmp(ua->argk[1], "dir") == 0) {
66 if (strcasecmp(ua->argk[2], "current") == 0) {
67 ua->send_msg(OKqstatus, ua->argk[2]);
   /* Report each running (non-internal) job the console's Job ACL permits. */
69 if (!njcr->is_internal_job() && acl_access_ok(ua, Job_ACL, njcr->job->name())) {
70 ua->send_msg(DotStatusJob, edit_int64(njcr->JobId, ed1),
71 njcr->JobStatus, njcr->JobErrors);
75 } else if (strcasecmp(ua->argk[2], "last") == 0) {
76 ua->send_msg(OKqstatus, ua->argk[2]);
   /* Only the most recent terminated job is reported, ACL permitting. */
77 if ((last_jobs) && (last_jobs->size() > 0)) {
78 job = (s_last_job*)last_jobs->last();
79 if (acl_access_ok(ua, Job_ACL, job->Job)) {
80 ua->send_msg(DotStatusJob, edit_int64(job->JobId, ed1),
81 job->JobStatus, job->Errors);
84 } else if (strcasecmp(ua->argk[2], "header") == 0) {
85 list_dir_status_header(ua);
86 } else if (strcasecmp(ua->argk[2], "scheduled") == 0) {
87 list_scheduled_jobs(ua);
88 } else if (strcasecmp(ua->argk[2], "running") == 0) {
89 list_running_jobs(ua);
90 } else if (strcasecmp(ua->argk[2], "terminated") == 0) {
91 list_terminated_jobs(ua);
93 ua->send_msg("1900 Bad .status command, wrong argument.\n");
96 } else if (strcasecmp(ua->argk[1], "client") == 0) {
97 client = get_client_resource(ua, JT_BACKUP_RESTORE);
99 Dmsg2(200, "Client=%s arg=%s\n", client->name(), NPRT(ua->argk[2]));
   /* Forward the remaining argument (may be NULL) to the client query. */
100 do_client_status(ua, client, ua->argk[2]);
102 } else if (strcasecmp(ua->argk[1], "storage") == 0) {
103 store = get_storage_resource(ua, false /*no default*/, true/*unique*/);
105 ua->send_msg("1900 Bad .status command, wrong argument.\n");
108 do_storage_status(ua, store, ua->argk[2]);
110 ua->send_msg("1900 Bad .status command, wrong argument.\n");
117 /* Test the network between FD and SD */
/*
 * Run a network throughput test between the selected Client (FD) and
 * Storage daemon (SD): connect the Director to both, wire them together
 * (handling both the normal "SD listens" case and the SD-calls-client
 * case), then ask the FD to transfer "bytes" (default 50 MB) and relay
 * the FD's progress messages back to the console.
 * NOTE(review): extract is elided; error-path gotos/returns not all shown.
 */
118 static int do_network_status(UAContext *ua)
120 CLIENT *client = NULL;
123 char *store_address, ed1[50];
    /* Default test size: 50 MB, overridable with the "bytes" argument. */
125 uint64_t nb = 50 * 1024 * 1024;
128 int i = find_arg_with_value(ua, "bytes");
130 if (!size_to_uint64(ua->argv[i], strlen(ua->argv[i]), &nb)) {
135 client = get_client_resource(ua, JT_BACKUP_RESTORE);
140 store.store = get_storage_resource(ua, false, true);
145 jcr->client = client;
146 set_wstorage(jcr, &store);
149 ua->send_msg(_("Connecting to Storage %s at %s:%d\n"),
150 store.store->name(), store.store->address, store.store->SDport);
    /* Try connecting to the SD for up to 10 retries / SDConnectTimeout. */
153 if (!connect_to_storage_daemon(jcr, 10, SDConnectTimeout, 1)) {
154 ua->error_msg(_("Failed to connect to Storage.\n"));
158 if (!start_storage_daemon_job(jcr, NULL, NULL)) {
163 * Note startup sequence of SD/FD is different depending on
164 * whether the SD listens (normal) or the SD calls the FD.
166 if (!client->sd_calls_client) {
167 if (!run_storage_and_start_message_thread(jcr, jcr->store_bsock)) {
170 } /* Else it's done in init_storage_job() */
173 ua->send_msg(_("Connecting to Client %s at %s:%d\n"),
174 client->name(), client->address(buf.addr()), client->FDport);
177 if (!connect_to_file_daemon(jcr, 1, 15, 0)) {
178 ua->error_msg(_("Failed to connect to Client.\n"));
182 if (jcr->sd_calls_client) {
184 * SD must call "client" i.e. FD
    /* SDCallsClient requires FD protocol version >= 10. */
186 if (jcr->FDVersion < 10) {
187 Jmsg(jcr, M_FATAL, 0, _("The File daemon does not support SDCallsClient.\n"));
190 if (!send_client_addr_to_sd(jcr)) {
193 if (!run_storage_and_start_message_thread(jcr, jcr->store_bsock)) {
197 store_address = store.store->address; /* dummy */
198 store_port = 0; /* flag that SD calls FD */
202 * send Storage daemon address to the File daemon,
203 * then wait for File daemon to make connection
204 * with Storage daemon.
    /* Fall back to the regular SD port when no dedicated data port is set. */
206 if (store.store->SDDport == 0) {
207 store.store->SDDport = store.store->SDport;
210 store_address = get_storage_address(jcr->client, store.store);
211 store_port = store.store->SDDport;
214 if (!send_store_addr_to_fd(jcr, store.store, store_address, store_port)) {
219 ua->info_msg(_("Running network test between Client=%s and Storage=%s with %sB ...\n"),
220 client->name(), store.store->name(), edit_uint64_with_suffix(nb, ed1));
223 if (!jcr->file_bsock->fsend("testnetwork bytes=%lld\n", nb)) {
    /* Relay the FD's test output until it closes/errors the connection. */
227 while (jcr->file_bsock->recv() > 0) {
228 ua->info_msg(jcr->file_bsock->msg);
    /* Tear down both daemon sessions and release the sockets. */
232 jcr->file_bsock->signal(BNET_TERMINATE);
233 jcr->store_bsock->signal(BNET_TERMINATE);
234 wait_for_storage_daemon_termination(jcr);
236 free_bsock(jcr->file_bsock);
237 free_bsock(jcr->store_bsock);
244 /* This is the *old* command handler, so we must return
245 * 1 or it closes the connection
/* Legacy wrapper: delegates to dot_status_cmd() but keeps the old int
 * return convention (returning non-zero keeps the console connected). */
247 int qstatus_cmd(UAContext *ua, const char *cmd)
249 dot_status_cmd(ua, cmd);
/*
 * Handle the interactive "status" console command.
 * First scans the argument keywords (network, schedule[d], all, dir[ector],
 * client, storage, slots) and runs the matching report; if no recognized
 * argument is given, prompts the user to pick a daemon type.
 * NOTE(review): extract is elided; some case labels/returns not visible.
 */
256 int status_cmd(UAContext *ua, const char *cmd)
262 Dmsg1(20, "status:%s:\n", cmd);
264 for (i=1; i<ua->argc; i++) {
265 if (strcasecmp(ua->argk[i], NT_("network")) == 0) {
266 do_network_status(ua);
268 } else if (strcasecmp(ua->argk[i], NT_("schedule")) == 0 ||
269 strcasecmp(ua->argk[i], NT_("scheduled")) == 0) {
270 llist_scheduled_jobs(ua);
272 } else if (strcasecmp(ua->argk[i], NT_("all")) == 0) {
275 } else if (strcasecmp(ua->argk[i], NT_("dir")) == 0 ||
276 strcasecmp(ua->argk[i], NT_("director")) == 0) {
277 do_director_status(ua);
279 } else if (strcasecmp(ua->argk[i], NT_("client")) == 0) {
280 client = get_client_resource(ua, JT_BACKUP_RESTORE);
282 do_client_status(ua, client, NULL);
286 store = get_storage_resource(ua, false/*no default*/, true/*unique*/);
    /* "status storage=... slots" lists autochanger slots instead. */
288 if (find_arg(ua, NT_("slots")) > 0) {
289 status_slots(ua, store);
291 do_storage_status(ua, store, NULL);
297 /* If no args, ask for status type */
299 char prmt[MAX_NAME_LENGTH];
301 start_prompt(ua, _("Status available for:\n"));
302 add_prompt(ua, NT_("Director"));
303 add_prompt(ua, NT_("Storage"));
304 add_prompt(ua, NT_("Client"));
305 add_prompt(ua, NT_("Scheduled"));
306 add_prompt(ua, NT_("Network"));
307 add_prompt(ua, NT_("All"));
308 Dmsg0(20, "do_prompt: select daemon\n");
309 if ((item=do_prompt(ua, "", _("Select daemon type for status"), prmt, sizeof(prmt))) < 0) {
312 Dmsg1(20, "item=%d\n", item);
    /* Prompt indices match the add_prompt() order above. */
314 case 0: /* Director */
315 do_director_status(ua);
318 store = select_storage_resource(ua, true/*unique*/);
320 do_storage_status(ua, store, NULL);
324 client = select_client_resource(ua, JT_BACKUP_RESTORE);
326 do_client_status(ua, client, NULL);
330 llist_scheduled_jobs(ua);
333 do_network_status(ua);
/*
 * "status all": report the Director itself, then every unique Storage
 * daemon and every unique File daemon.  Uniqueness is by address:port so
 * a daemon reachable through several resources is only queried once.
 * NOTE(review): extract is elided; malloc results appear unchecked and the
 * matching free() calls are not visible in this view.
 */
345 static void do_all_status(UAContext *ua)
347 STORE *store, **unique_store;
348 CLIENT *client, **unique_client;
353 do_director_status(ua);
355 /* Count Storage items */
358 foreach_res(store, R_STORAGE) {
361 unique_store = (STORE **) malloc(i * sizeof(STORE));
362 /* Find Unique Storage address/port */
364 foreach_res(store, R_STORAGE) {
    /* Skip storages this console's ACL does not allow. */
366 if (!acl_access_ok(ua, Storage_ACL, store->name())) {
369 for (j=0; j<i; j++) {
370 if (strcmp(unique_store[j]->address, store->address) == 0 &&
371 unique_store[j]->SDport == store->SDport) {
377 unique_store[i++] = store;
378 Dmsg2(40, "Stuffing: %s:%d\n", store->address, store->SDport);
383 /* Call each unique Storage daemon */
384 for (j=0; j<i; j++) {
385 do_storage_status(ua, unique_store[j], NULL);
389 /* Count Client items */
392 foreach_res(client, R_CLIENT) {
395 unique_client = (CLIENT **)malloc(i * sizeof(CLIENT));
396 /* Find Unique Client address/port */
398 foreach_res(client, R_CLIENT) {
400 if (!acl_access_client_ok(ua, client->name(), JT_BACKUP_RESTORE)) {
403 for (j=0; j<i; j++) {
404 if (strcmp(unique_client[j]->address(buf1.addr()), client->address(buf2.addr())) == 0 &&
405 unique_client[j]->FDport == client->FDport) {
411 unique_client[i++] = client;
412 Dmsg2(40, "Stuffing: %s:%d\n", client->address(buf1.addr()), client->FDport);
417 /* Call each unique File daemon */
418 for (j=0; j<i; j++) {
419 do_client_status(ua, unique_client[j], NULL);
/*
 * Emit the Director status header in structured (API) form via
 * OutputWriter: name/version/uname, start and reload times, job counts,
 * resource counts, and the loaded plugin list.
 */
425 static void api_list_dir_status_header(UAContext *ua)
427 OutputWriter wt(ua->api_opts);
428 wt.start_group("header");
430 OT_STRING, "name", my_name,
431 OT_STRING, "version", VERSION " (" BDATE ")",
432 OT_STRING, "uname", HOST_OS " " DISTNAME " " DISTVER,
433 OT_UTIME, "started", daemon_start_time,
434 OT_UTIME, "reloaded", last_reload_time,
435 OT_INT, "jobs_run", num_jobs_run,
436 OT_INT, "jobs_running",job_count(),
    /* Resource counts come from the global res_head table (offset by r_first). */
437 OT_INT, "nclients", ((rblist *)res_head[R_CLIENT-r_first]->res_list)->size(),
438 OT_INT, "nstores", ((rblist *)res_head[R_STORAGE-r_first]->res_list)->size(),
439 OT_INT, "npools", ((rblist *)res_head[R_POOL-r_first]->res_list)->size(),
440 OT_INT, "ncats", ((rblist *)res_head[R_CATALOG-r_first]->res_list)->size(),
441 OT_INT, "nfset", ((rblist *)res_head[R_FILESET-r_first]->res_list)->size(),
442 OT_INT, "nscheds", ((rblist *)res_head[R_SCHEDULE-r_first]->res_list)->size(),
443 OT_PLUGINS,"plugins", b_plugin_list,
446 ua->send_msg("%s", wt.end_group());
/*
 * Emit the human-readable Director status header: version banner, start
 * and reload times, job/heap statistics, resource counts, and the plugin
 * list.  In API mode this delegates to api_list_dir_status_header().
 */
449 void list_dir_status_header(UAContext *ua)
451 char dt[MAX_TIME_LENGTH], dt1[MAX_TIME_LENGTH];
452 char b1[35], b2[35], b3[35], b4[35], b5[35];
455 api_list_dir_status_header(ua);
459 ua->send_msg(_("%s %sVersion: %s (%s) %s %s %s\n"), my_name,
460 "", VERSION, BDATE, HOST_OS, DISTNAME, DISTVER);
461 bstrftime_nc(dt, sizeof(dt), daemon_start_time);
462 bstrftimes(dt1, sizeof(dt1), last_reload_time);
463 ua->send_msg(_("Daemon started %s, conf reloaded %s\n"), dt, dt1);
464 ua->send_msg(_(" Jobs: run=%d, running=%d mode=%d,%d\n"),
465 num_jobs_run, job_count(), (int)DEVELOPER_MODE, 0);
    /* Heap usage: current break minus the recorded heap start, plus the
     * smartalloc byte/buffer counters. */
466 ua->send_msg(_(" Heap: heap=%s smbytes=%s max_bytes=%s bufs=%s max_bufs=%s\n"),
467 edit_uint64_with_commas((char *)sbrk(0)-(char *)start_heap, b1),
468 edit_uint64_with_commas(sm_bytes, b2),
469 edit_uint64_with_commas(sm_max_bytes, b3),
470 edit_uint64_with_commas(sm_buffers, b4),
471 edit_uint64_with_commas(sm_max_buffers, b5));
472 ua->send_msg(_(" Res: njobs=%d nclients=%d nstores=%d npools=%d ncats=%d"
473 " nfsets=%d nscheds=%d\n"),
474 ((rblist *)res_head[R_JOB-r_first]->res_list)->size(),
475 ((rblist *)res_head[R_CLIENT-r_first]->res_list)->size(),
476 ((rblist *)res_head[R_STORAGE-r_first]->res_list)->size(),
477 ((rblist *)res_head[R_POOL-r_first]->res_list)->size(),
478 ((rblist *)res_head[R_CATALOG-r_first]->res_list)->size(),
479 ((rblist *)res_head[R_FILESET-r_first]->res_list)->size(),
480 ((rblist *)res_head[R_SCHEDULE-r_first]->res_list)->size());
483 /* TODO: use this function once for all daemons */
484 if (b_plugin_list && b_plugin_list->size() > 0) {
487 POOL_MEM msg(PM_FNAME);
488 pm_strcpy(msg, " Plugin: ");
489 foreach_alist(plugin, b_plugin_list) {
490 len = pm_strcat(msg, plugin->file);
    /* Wrap the plugin list onto a new indented line (wrap condition elided). */
492 pm_strcat(msg, "\n ");
497 ua->send_msg("%s\n", msg.c_str());
/*
 * Full Director status report: header, then scheduled, running and
 * terminated job lists, closed with the "====" section terminator.
 */
501 static void do_director_status(UAContext *ua)
503 list_dir_status_header(ua);
506 * List scheduled Jobs
508 list_scheduled_jobs(ua);
513 list_running_jobs(ua);
516 * List terminated jobs
518 list_terminated_jobs(ua);
519 ua->send_msg("====\n");
/*
 * Query one Storage daemon for its status and relay its output to the
 * console.  Enforces Storage ACL and refuses when the console has a
 * restricted Client or Job ACL (SD output spans many jobs/clients).
 * "cmd" selects a sub-report (e.g. "shstore"); NULL means plain status.
 * NOTE(review): extract is elided; early returns after the error
 * messages are not visible here.
 */
522 static void do_storage_status(UAContext *ua, STORE *store, char *cmd)
528 if (!acl_access_ok(ua, Storage_ACL, store->name())) {
529 ua->error_msg(_("No authorization for Storage \"%s\"\n"), store->name());
533 * The Storage daemon is problematic because it shows information
534 * related to multiple Job, so if there is a Client or Job
535 * ACL restriction, we forbid all access to the Storage.
537 if (have_restricted_acl(ua, Client_ACL) ||
538 have_restricted_acl(ua, Job_ACL)) {
539 ua->error_msg(_("Restricted Client or Job does not permit access to Storage daemons\n"));
542 lstore.store = store;
543 pm_strcpy(lstore.store_source, _("unknown source"));
544 set_wstorage(ua->jcr, &lstore);
545 /* Try connecting for up to 15 seconds */
546 if (!ua->api) ua->send_msg(_("Connecting to Storage daemon %s at %s:%d\n"),
547 store->name(), store->address, store->SDport);
548 if (!connect_to_storage_daemon(ua->jcr, 1, 15, 0)) {
549 ua->send_msg(_("\nFailed to connect to Storage daemon %s.\n====\n"),
551 free_bsock(ua->jcr->store_bsock);
554 Dmsg0(20, "Connected to storage daemon\n");
555 sd = ua->jcr->store_bsock;
559 * For .status storage=xxx shstore list
560 * send .status shstore list xxx-device
562 if (strcasecmp(cmd, "shstore") == 0) {
564 ua->send_msg(_("Must have three arguments\n"));
    /* Device names may contain spaces; bash_spaces() encodes them for the wire. */
567 pm_strcpy(devname, store->dev_name());
568 bash_spaces(devname.c_str());
569 sd->fsend(".status %s %s %s api=%d api_opts=%s",
570 cmd, ua->argk[3], devname.c_str(),
571 ua->api, ua->api_opts);
573 int i = find_arg_with_value(ua, "device");
575 Mmsg(devname, "device=%s", ua->argv[i]);
576 bash_spaces(devname.c_str());
578 sd->fsend(".status %s api=%d api_opts=%s %s",
579 cmd, ua->api, ua->api_opts, devname.c_str());
    /* Relay everything the SD sends until it terminates the reply. */
584 while (sd->recv() >= 0) {
585 ua->send_msg("%s", sd->msg);
587 sd->signal(BNET_TERMINATE);
588 free_bsock(ua->jcr->store_bsock);
/*
 * Query one File daemon (Client) for its status and relay its output.
 * Enforces the Client ACL, installs a dummy SD auth key (the FD protocol
 * requires one even for status), connects with a 15s timeout, forwards
 * the ".status" sub-command, and frees the socket when done.
 * NOTE(review): extract is elided; early returns after errors not shown.
 */
592 static void do_client_status(UAContext *ua, CLIENT *client, char *cmd)
597 if (!acl_access_client_ok(ua, client->name(), JT_BACKUP_RESTORE)) {
598 ua->error_msg(_("No authorization for Client \"%s\"\n"), client->name());
601 /* Connect to File daemon */
602 ua->jcr->client = client;
603 /* Release any old dummy key */
604 if (ua->jcr->sd_auth_key) {
605 free(ua->jcr->sd_auth_key);
607 /* Create a new dummy SD auth key */
608 ua->jcr->sd_auth_key = bstrdup("dummy");
610 /* Try to connect for 15 seconds */
611 if (!ua->api) ua->send_msg(_("Connecting to Client %s at %s:%d\n"),
612 client->name(), client->address(buf.addr()), client->FDport);
613 if (!connect_to_file_daemon(ua->jcr, 1, 15, 0)) {
614 ua->send_msg(_("Failed to connect to Client %s.\n====\n"),
616 free_bsock(ua->jcr->file_bsock);
619 Dmsg0(20, _("Connected to file daemon\n"));
620 fd = ua->jcr->file_bsock;
622 fd->fsend(".status %s api=%d api_opts=%s", cmd, ua->api, ua->api_opts);
    /* Relay everything the FD sends until it terminates the reply. */
626 while (fd->recv() >= 0) {
627 ua->send_msg("%s", fd->msg);
629 fd->signal(BNET_TERMINATE);
630 free_bsock(ua->jcr->file_bsock);
/* Print the column header for the short "Scheduled Jobs" listing
 * (Volume column; used by list_scheduled_jobs). */
635 static void prt_runhdr(UAContext *ua)
638 ua->send_msg(_("\nScheduled Jobs:\n"));
639 ua->send_msg(_("Level Type Pri Scheduled Job Name Volume\n"));
640 ua->send_msg(_("===================================================================================\n"));
/* Print the column header for the long "Scheduled Jobs" listing
 * (Schedule column; used by llist_scheduled_jobs). */
644 static void prt_lrunhdr(UAContext *ua)
647 ua->send_msg(_("\nScheduled Jobs:\n"));
648 ua->send_msg(_("Level Type Pri Scheduled Job Name Schedule\n"));
649 ua->send_msg(_("=====================================================================================\n"));
654 /* Scheduling packet */
/* One scheduled-run entry, kept in a dlist sorted by my_compare().
 * NOTE(review): remaining members (job, level, priority, runtime, pool,
 * store) are elided from this extract. */
656 dlink link; /* keep this as first item!!! */
/*
 * Print one scheduled-job line from a sched_pkt.  For backup jobs it
 * completes the jcr (possibly opening a catalog DB) and looks up the
 * next appendable Volume so the listing can show which Volume will be
 * used; other job types show "*unknown*".  Output format depends on
 * ua->api (tab-separated, structured via OutputWriter, or plain columns).
 * NOTE(review): extract is elided; some branches/closing braces not shown.
 */
665 static void prt_runtime(UAContext *ua, sched_pkt *sp, OutputWriter *ow)
667 char dt[MAX_TIME_LENGTH];
668 const char *level_ptr;
670 bool close_db = false;
    /* Remember the jcr's type so it can be restored before returning. */
675 orig_jobtype = jcr->getJobType();
676 if (sp->job->JobType == JT_BACKUP) {
678 ok = complete_jcr_for_job(jcr, sp->job, sp->pool);
679 Dmsg1(250, "Using pool=%s\n", jcr->pool->name());
681 close_db = true; /* new db opened, remember to close it */
684 mr.PoolId = jcr->jr.PoolId;
685 jcr->wstore = sp->store;
686 set_storageid_in_mr(jcr->wstore, &mr);
687 Dmsg0(250, "call find_next_volume_for_append\n");
688 /* no need to set ScratchPoolId, since we use fnv_no_create_vol */
689 ok = find_next_volume_for_append(jcr, &mr, 1, fnv_no_create_vol, fnv_no_prune);
692 bstrncpy(mr.VolumeName, "*unknown*", sizeof(mr.VolumeName));
695 bstrftime_nc(dt, sizeof(dt), sp->runtime);
696 switch (sp->job->JobType) {
701 level_ptr = "Restore";
704 level_ptr = level_to_str(sp->level);
    /* api==1 output: tab-separated columns. */
708 ua->send_msg(_("%-14s\t%-8s\t%3d\t%-18s\t%-18s\t%s\n"),
709 level_ptr, job_type_to_str(sp->job->JobType), sp->priority, dt,
710 sp->job->name(), mr.VolumeName);
712 } else if (ua->api > 1) {
    /* api>1 output: structured record via OutputWriter. */
714 ow->get_output(OT_CLEAR,
716 OT_STRING, "name", sp->job->name(),
717 OT_JOBLEVEL, "level", sp->level,
718 OT_JOBTYPE, "type", sp->job->JobType,
719 OT_INT, "priority",sp->priority,
720 OT_UTIME, "schedtime", sp->runtime,
721 OT_STRING, "volume", mr.VolumeName,
722 OT_STRING, "pool", jcr->pool?jcr->pool->name():"",
723 OT_STRING, "storage", jcr->wstore?jcr->wstore->name():"",
    /* Plain console output: fixed-width columns. */
729 ua->send_msg(_("%-14s %-8s %3d %-18s %-18s %s\n"),
730 level_ptr, job_type_to_str(sp->job->JobType), sp->priority, dt,
731 sp->job->name(), mr.VolumeName);
734 db_close_database(jcr, jcr->db);
736 jcr->db = ua->db; /* restore ua db to jcr */
737 jcr->setJobType(orig_jobtype);
741 * Detailed listing of all scheduler jobs
/*
 * Long-form scheduled-jobs listing ("status schedule").  Honors optional
 * arguments: days (default window, max 3000), limit (max 2000), time
 * (alternate "now"), schedule and job (name filters).  For every enabled
 * job with an enabled schedule it walks each run record day by day,
 * matching the run's month/mday/wday/wom/woy bitmaps (plus the
 * last-day-of-month case), and prints one line per matching hour.
 * NOTE(review): extract is heavily elided; loop bodies and the lines
 * computing wday/month/wom are not all visible here.
 */
743 static void llist_scheduled_jobs(UAContext *ua)
748 int level, num_jobs = 0;
750 bool hdr_printed = false;
751 char sched_name[MAX_NAME_LENGTH];
752 char job_name[MAX_NAME_LENGTH];
755 time_t now = time(NULL);
757 const char *level_ptr;
759 Dmsg0(200, "enter list_sched_jobs()\n");
761 i = find_arg_with_value(ua, NT_("days"));
763 days = atoi(ua->argv[i]);
764 if (((days < 0) || (days > 3000)) && !ua->api) {
765 ua->send_msg(_("Ignoring invalid value for days. Max is 3000.\n"));
772 i = find_arg_with_value(ua, NT_("limit"));
774 limit = atoi(ua->argv[i]);
775 if (((limit < 0) || (limit > 2000)) && !ua->api) {
776 ua->send_msg(_("Ignoring invalid value for limit. Max is 2000.\n"));
783 i = find_arg_with_value(ua, NT_("time"));
785 now = str_to_utime(ua->argv[i]);
787 ua->send_msg(_("Ignoring invalid time.\n"));
792 i = find_arg_with_value(ua, NT_("schedule"));
794 bstrncpy(sched_name, ua->argv[i], sizeof(sched_name));
799 i = find_arg_with_value(ua, NT_("job"));
801 bstrncpy(job_name, ua->argv[i], sizeof(job_name));
806 /* Loop through all jobs */
808 foreach_res(job, R_JOB) {
809 sched = job->schedule;
    /* Skip jobs with no schedule, or where job/schedule/client is disabled. */
810 if (!sched || !job->is_enabled() || (sched && !sched->is_enabled()) ||
811 (job->client && !job->client->is_enabled())) {
812 continue; /* no, skip this job */
814 if (job_name[0] && strcmp(job_name, job->name()) != 0) {
817 for (run=sched->run; run; run=run->next) {
819 for (i=0; i<days; i++) {
821 int mday, wday, month, wom, woy, ldom;
822 char dt[MAX_TIME_LENGTH];
825 /* compute values for next time */
826 (void)localtime_r(&next, &tm);
    /* Calendar values are converted to 0-based to match the run bitmaps. */
827 mday = tm.tm_mday - 1;
831 woy = tm_woy(next); /* get week of year */
832 ldom = tm_ldom(month, tm.tm_year + 1900);
836 Dmsg6(000, "m=%d md=%d wd=%d wom=%d woy=%d ldom=%d\n",
837 month, mday, wday, wom, woy, ldom);
838 Dmsg6(000, "bitset bsm=%d bsmd=%d bswd=%d bswom=%d bswoy=%d bsldom=%d\n",
839 bit_is_set(month, run->month),
840 bit_is_set(mday, run->mday),
841 bit_is_set(wday, run->wday),
842 bit_is_set(wom, run->wom),
843 bit_is_set(woy, run->woy),
844 bit_is_set(31, run->mday));
    /* Match either all five date bitmaps, or the special "bit 31 set"
     * last-day-of-month rule. */
847 ok = (bit_is_set(mday, run->mday) &&
848 bit_is_set(wday, run->wday) &&
849 bit_is_set(month, run->month) &&
850 bit_is_set(wom, run->wom) &&
851 bit_is_set(woy, run->woy)) ||
852 (bit_is_set(month, run->month) &&
853 bit_is_set(31, run->mday) && mday == ldom);
855 next += 24 * 60 * 60; /* Add one day */
859 level = job->JobLevel;
863 switch (job->JobType) {
868 level_ptr = "Restore";
871 level_ptr = level_to_str(level);
    /* Run record priority (if set) overrides the job's priority. */
874 priority = job->Priority;
876 priority = run->Priority;
    /* One output line per scheduled hour of the matching day. */
883 for (int j=0; j < 24; j++) {
884 if (bit_is_set(j, run->hour)) {
886 tm.tm_min = run->minute;
888 runtime = mktime(&tm);
889 bstrftime_dn(dt, sizeof(dt), runtime);
891 ua->send_msg(_("%-14s\t%-8s\t%3d\t%-18s\t%-18s\t%s\n"),
892 level_ptr, job_type_to_str(job->JobType), priority, dt,
893 job->name(), sched->name());
895 ua->send_msg(_("%-14s %-8s %3d %-18s %-18s %s\n"),
896 level_ptr, job_type_to_str(job->JobType), priority, dt,
897 job->name(), sched->name());
901 next += 24 * 60 * 60; /* Add one day */
903 if (num_jobs >= limit) {
907 } /* end loop over run pkts */
908 } /* end for loop over resources */
911 if (num_jobs == 0 && !ua->api) {
912 ua->send_msg(_("No Scheduled Jobs.\n"));
914 if (!ua->api) ua->send_msg("====\n");
915 Dmsg0(200, "Leave ;list_sched_jobs_runs()\n");
920 * Sort items by runtime, priority
/* dlist comparator for sched_pkt: primary key runtime (ascending),
 * secondary key priority.  NOTE(review): the return statements are
 * elided from this extract. */
922 static int my_compare(void *item1, void *item2)
924 sched_pkt *p1 = (sched_pkt *)item1;
925 sched_pkt *p2 = (sched_pkt *)item2;
926 if (p1->runtime < p2->runtime) {
928 } else if (p1->runtime > p2->runtime) {
931 if (p1->priority < p2->priority) {
933 } else if (p1->priority > p2->priority) {
940 * Find all jobs to be run in roughly the
/*
 * Short scheduled-jobs listing used by "status dir": collects the next
 * run of each accessible, enabled job (via find_next_run) into sched_pkt
 * entries, inserts them sorted by runtime/priority (my_compare), then
 * prints them with prt_runtime().  Supports "days" (max 500) and
 * "schedule" name filter arguments.
 * NOTE(review): extract is elided; sched_pkt field assignments for
 * job/level and the free of the packets are not visible here.
 */
943 static void list_scheduled_jobs(UAContext *ua)
945 OutputWriter ow(ua->api_opts);
949 int level, num_jobs = 0;
951 bool hdr_printed = false;
952 char sched_name[MAX_NAME_LENGTH];
957 Dmsg0(200, "enter list_sched_jobs()\n");
960 i = find_arg_with_value(ua, NT_("days"));
962 days = atoi(ua->argv[i]);
963 if (((days < 0) || (days > 500)) && !ua->api) {
964 ua->send_msg(_("Ignoring invalid value for days. Max is 500.\n"));
968 i = find_arg_with_value(ua, NT_("schedule"));
970 bstrncpy(sched_name, ua->argv[i], sizeof(sched_name));
975 /* Loop through all jobs */
977 foreach_res(job, R_JOB) {
978 if (!acl_access_ok(ua, Job_ACL, job->name()) || !job->is_enabled()) {
981 if (sched_name[0] && job->schedule &&
982 strcasecmp(job->schedule->name(), sched_name) != 0) {
    /* find_next_run() yields each upcoming run time within "days". */
985 for (run=NULL; (run = find_next_run(run, job, runtime, days)); ) {
987 level = job->JobLevel;
    /* Run record priority (if set) overrides the job's priority. */
991 priority = job->Priority;
993 priority = run->Priority;
999 sp = (sched_pkt *)malloc(sizeof(sched_pkt));
1002 sp->priority = priority;
1003 sp->runtime = runtime;
1004 sp->pool = run->pool;
1005 get_job_storage(&store, job, run);
1006 sp->store = store.store;
1007 Dmsg3(250, "job=%s store=%s MediaType=%s\n", job->name(), sp->store->name(), sp->store->media_type);
    /* Keep the list ordered by runtime, then priority. */
1008 sched.binary_insert_multiple(sp, my_compare);
1011 } /* end for loop over resources */
1013 foreach_dlist(sp, &sched) {
1014 prt_runtime(ua, sp, &ow);
1016 if (num_jobs == 0 && !ua->api) {
1017 ua->send_msg(_("No Scheduled Jobs.\n"));
1019 if (!ua->api) ua->send_msg("====\n");
1020 Dmsg0(200, "Leave list_sched_jobs_runs()\n");
/*
 * List all currently running jobs (optionally filtered with "jobid=N").
 * Walks the global jcr list: console connections (JobId 0, JT_CONSOLE)
 * are shown briefly; real jobs are ACL-filtered and printed with a
 * human-readable status derived first from the Director's JobStatus,
 * then overridden by the Storage daemon's SDJobStatus when the SD is in
 * a more specific state.  Output format depends on ua->api (tabbed,
 * structured OutputWriter, or plain columns).
 * NOTE(review): extract is heavily elided; the foreach_jcr loop header,
 * several case labels and break statements are not visible here.
 */
1023 static void list_running_jobs(UAContext *ua)
1029 const char *msg, *msgdir;
1030 char *emsg; /* edited message */
1031 char dt[MAX_TIME_LENGTH];
1033 bool pool_mem = false;
1034 OutputWriter ow(ua->api_opts);
1037 if ((i = find_arg_with_value(ua, "jobid")) >= 0) {
1038 jid = str_to_int64(ua->argv[i]);
1041 Dmsg0(200, "enter list_run_jobs()\n");
1044 ua->send_msg(_("\nRunning Jobs:\n"));
1046 if (jcr->JobId == 0) { /* this is us */
1047 /* this is a console or other control job. We only show console
1048 * jobs in the status output.
1050 if (jcr->getJobType() == JT_CONSOLE) {
1051 bstrftime_nc(dt, sizeof(dt), jcr->start_time);
1052 ua->send_msg(_("Console connected %sat %s\n"),
1053 (ua->UA_sock && ua->UA_sock->tls)?_("using TLS "):"",
1062 njobs = 0; /* count the number of job really displayed */
    /* Skip internal jobs and jobs outside the console's Job ACL. */
1064 if (jcr->JobId == 0 || !jcr->job || !acl_access_ok(ua, Job_ACL, jcr->job->name())) {
1067 /* JobId keyword found in command line */
1068 if (jid > 0 && jcr->JobId != jid) {
1073 /* display the header for the first job */
1075 ua->send_msg(_(" JobId Type Level Files Bytes Name Status\n"));
1076 ua->send_msg(_("======================================================================\n"));
1078 } else if (ua->api > 1) {
1079 ua->send_msg(ow.start_group("running", false));
    /* Map the Director's JobStatus to a human-readable phrase; some
     * cases build a dynamic message in pool memory (emsg/pool_mem). */
1082 status = jcr->JobStatus;
1085 msg = _("is waiting execution");
1088 msg = _("is running");
1091 msg = _("is blocked");
1094 msg = _("has terminated");
1097 msg = _("has terminated with warnings");
1100 msg = _("has terminated in incomplete state");
1102 case JS_ErrorTerminated:
1103 msg = _("has erred");
1106 msg = _("has errors");
1109 msg = _("has a fatal error");
1111 case JS_Differences:
1112 msg = _("has verify differences");
1115 msg = _("has been canceled");
1118 emsg = (char *) get_pool_memory(PM_FNAME);
1120 Mmsg(emsg, _("is waiting on Client"));
1122 Mmsg(emsg, _("is waiting on Client %s"), jcr->client->name());
1128 emsg = (char *) get_pool_memory(PM_FNAME);
1130 Mmsg(emsg, _("is waiting on Storage \"%s\""), jcr->wstore->name());
1131 } else if (jcr->rstore) {
1132 Mmsg(emsg, _("is waiting on Storage \"%s\""), jcr->rstore->name());
1134 Mmsg(emsg, _("is waiting on Storage"));
1139 case JS_WaitStoreRes:
1140 msg = _("is waiting on max Storage jobs");
1142 case JS_WaitClientRes:
1143 msg = _("is waiting on max Client jobs");
1146 msg = _("is waiting on max Job jobs");
1148 case JS_WaitMaxJobs:
1149 msg = _("is waiting on max total jobs");
1151 case JS_WaitStartTime:
1152 emsg = (char *) get_pool_memory(PM_FNAME);
1153 Mmsg(emsg, _("is waiting for its start time (%s)"),
1154 bstrftime_ny(dt, sizeof(dt), jcr->sched_time));
1158 case JS_WaitPriority:
1159 msg = _("is waiting for higher priority jobs to finish");
1162 msg = _("is waiting for a Shared Storage device");
1164 case JS_DataCommitting:
1165 msg = _("SD committing Data");
1167 case JS_DataDespooling:
1168 msg = _("SD despooling Data");
1170 case JS_AttrDespooling:
1171 msg = _("SD despooling Attributes");
1173 case JS_AttrInserting:
1174 msg = _("Dir inserting Attributes");
    /* Unknown status code: show the raw character. */
1178 emsg = (char *)get_pool_memory(PM_FNAME);
1179 Mmsg(emsg, _("is in unknown state %c"), jcr->JobStatus);
1184 msgdir = msg; /* Keep it to know if we update the status variable */
1186 * Now report Storage daemon status code
1188 switch (jcr->SDJobStatus) {
1191 free_pool_memory(emsg);
1194 msg = _("is waiting for a mount request");
1198 free_pool_memory(emsg);
1201 msg = _("is waiting for an appendable Volume");
1204 /* Special case when JobStatus=JS_WaitFD, we don't have a FD link yet
1205 * we need to stay in WaitFD status See bee mantis #1414 */
1206 if (jcr->JobStatus != JS_WaitFD) {
1208 emsg = (char *)get_pool_memory(PM_FNAME);
1211 if (!jcr->client || !jcr->wstore) {
1212 Mmsg(emsg, _("is waiting for Client to connect to Storage daemon"));
1214 Mmsg(emsg, _("is waiting for Client %s to connect to Storage %s"),
1215 jcr->client->name(), jcr->wstore->name());
1220 case JS_DataCommitting:
1221 msg = _("SD committing Data");
1223 case JS_DataDespooling:
1224 msg = _("SD despooling Data");
1226 case JS_AttrDespooling:
1227 msg = _("SD despooling Attributes");
1229 case JS_AttrInserting:
1230 msg = _("Dir inserting Attributes");
    /* If the SD switch replaced the message, report the SD status code. */
1233 if (msg != msgdir) {
1234 status = jcr->SDJobStatus;
1236 switch (jcr->getJobType()) {
1238 bstrncpy(level, "Admin", sizeof(level));
1241 bstrncpy(level, "Restore", sizeof(level));
1244 bstrncpy(level, level_to_str(jcr->getJobLevel()), sizeof(level));
    /* Comment may contain spaces; encode for the tab-separated api=1 form. */
1250 bash_spaces(jcr->comment);
1251 ua->send_msg(_("%6d\t%-6s\t%-20s\t%s\t%s\n"),
1252 jcr->JobId, level, jcr->Job, msg, jcr->comment);
1253 unbash_spaces(jcr->comment);
1255 } else if (ua->api > 1) {
1256 ua->send_msg("%s", ow.get_output(OT_CLEAR,
1258 OT_INT32, "jobid", jcr->JobId,
1259 OT_JOBLEVEL,"level", jcr->getJobLevel(),
1260 OT_JOBTYPE, "type", jcr->getJobType(),
1261 OT_JOBSTATUS,"status", status,
1262 OT_STRING, "status_desc",msg,
1263 OT_STRING, "comment", jcr->comment,
1264 OT_SIZE, "jobbytes", jcr->JobBytes,
1265 OT_INT32, "jobfiles", jcr->JobFiles,
1266 OT_STRING, "job", jcr->Job,
1267 OT_STRING, "name", jcr->job->name(),
1268 OT_STRING, "clientname",jcr->client?jcr->client->name():"",
1269 OT_STRING, "fileset", jcr->fileset?jcr->fileset->name():"",
1270 OT_STRING, "storage", jcr->wstore?jcr->wstore->name():"",
1271 OT_STRING, "rstorage", jcr->rstore?jcr->rstore->name():"",
1272 OT_UTIME, "schedtime", jcr->sched_time,
1273 OT_UTIME, "starttime", jcr->start_time,
1274 OT_INT32, "priority", jcr->JobPriority,
1275 OT_INT32, "errors", jcr->JobErrors,
1280 char b1[50], b2[50], b3[50];
1282 bstrncpy(b1, job_type_to_str(jcr->getJobType()), sizeof(b1));
1284 ua->send_msg(_("%6d %-4s %-3s %10s %10s %-17s %s\n"),
1285 jcr->JobId, b1, level,
1286 edit_uint64_with_commas(jcr->JobFiles, b2),
1287 edit_uint64_with_suffix(jcr->JobBytes, b3),
1288 jcr->job->name(), msg);
    /* Release the dynamic status message if one was allocated. */
1292 free_pool_memory(emsg);
1299 /* Note the following message is used in regress -- don't change */
1300 ua->send_msg(_("No Jobs running.\n====\n"));
1301 Dmsg0(200, "leave list_run_jobs()\n");
1304 /* display a closing header */
1306 ua->send_msg("====\n");
1307 } else if (ua->api > 1) {
1308 ua->send_msg(ow.end_group(false));
1311 Dmsg0(200, "leave list_run_jobs()\n");
/*
 * List recently terminated jobs from the global last_jobs list, newest
 * data coming from the in-memory history (not the catalog).  Job names
 * are truncated (three dot-separated suffixes stripped) before the ACL
 * check, and each entry is printed in tabbed, structured or plain form
 * depending on ua->api.  The list is locked during iteration.
 * NOTE(review): extract is elided; some case labels/breaks not shown.
 */
1314 static void list_terminated_jobs(UAContext *ua)
1316 char dt[MAX_TIME_LENGTH], b1[30], b2[30];
1318 OutputWriter ow(ua->api_opts);
1320 if (last_jobs->empty()) {
1321 if (!ua->api) ua->send_msg(_("No Terminated Jobs.\n"));
1324 lock_last_jobs_list();
1325 struct s_last_job *je;
1327 ua->send_msg(_("\nTerminated Jobs:\n"));
1328 ua->send_msg(_(" JobId Level Files Bytes Status Finished Name \n"));
1329 ua->send_msg(_("====================================================================\n"));
1330 } else if (ua->api > 1) {
1331 ua->send_msg(ow.start_group("terminated"));
1333 foreach_dlist(je, last_jobs) {
1334 char JobName[MAX_NAME_LENGTH];
1335 const char *termstat;
1337 bstrncpy(JobName, je->Job, sizeof(JobName));
1338 /* There are three periods after the Job name */
    /* Strip the timestamp/counter suffixes to recover the plain job name. */
1340 for (int i=0; i<3; i++) {
1341 if ((p=strrchr(JobName, '.')) != NULL) {
1346 if (!acl_access_ok(ua, Job_ACL, JobName)) {
1350 bstrftime_nc(dt, sizeof(dt), je->end_time);
1351 switch (je->JobType) {
1353 bstrncpy(level, "Admin", sizeof(level));
1356 bstrncpy(level, "Restore", sizeof(level));
1359 bstrncpy(level, level_to_str(je->JobLevel), sizeof(level));
    /* Map the final JobStatus to a short column label. */
1363 switch (je->JobStatus) {
1365 termstat = _("Created");
1368 case JS_ErrorTerminated:
1369 termstat = _("Error");
1371 case JS_Differences:
1372 termstat = _("Diffs");
1375 termstat = _("Cancel");
1381 termstat = _("OK -- with warnings");
1384 termstat = _("Incomplete");
1387 termstat = _("Other");
1391 ua->send_msg(_("%7d\t%-6s\t%8s\t%10s\t%-7s\t%-8s\t%s\n"),
1394 edit_uint64_with_commas(je->JobFiles, b1),
1395 edit_uint64_with_suffix(je->JobBytes, b2),
1398 } else if (ua->api > 1) {
1400 ow.get_output(OT_CLEAR,
1402 OT_INT32, "jobid", je->JobId,
1403 OT_JOBLEVEL,"level", je->JobLevel,
1404 OT_JOBTYPE, "type", je->JobType,
1405 OT_JOBSTATUS,"status", je->JobStatus,
1406 OT_STRING, "status_desc",termstat,
1407 OT_SIZE, "jobbytes", je->JobBytes,
1408 OT_INT32, "jobfiles", je->JobFiles,
1409 OT_STRING, "job", je->Job,
1410 OT_UTIME, "starttime", je->start_time,
1411 OT_UTIME, "endtime", je->end_time,
1412 OT_INT32, "errors", je->Errors,
1417 ua->send_msg(_("%6d %-7s %8s %10s %-7s %-8s %s\n"),
1420 edit_uint64_with_commas(je->JobFiles, b1),
1421 edit_uint64_with_suffix(je->JobBytes, b2),
1427 ua->send_msg(_("\n"));
1428 } else if (ua->api > 1) {
1429 ua->send_msg(ow.end_group(false));
1431 unlock_last_jobs_list();