+/*
+ * NOTE(review): this hunk starts mid-function -- the signature of
+ * list_sched_jobs() (and the job/run/runtime locals it uses) is above
+ * this view.  The code below lists all scheduled jobs visible to the
+ * console user, sorted by run time.
+ */
+ STORE* store;
+ int level, num_jobs = 0;
+ int priority;
+ bool hdr_printed = false;
+ dlist sched;                      /* run-time sorted list of sched_pkt */
+ sched_pkt *sp;
+ int days, i;
+
+ Dmsg0(200, "enter list_sched_jobs()\n");
+
+ /* Parse optional "days=nn" argument: how far ahead to list.
+  * Values outside 0..50 are rejected; default is 1 day.
+  * NOTE(review): atoi() silently yields 0 on non-numeric input,
+  * which passes the range check -- consider strtol() with checking. */
+ days = 1;
+ i = find_arg_with_value(ua, NT_("days"));
+ if (i >= 0) {
+ days = atoi(ua->argv[i]);
+ if ((days < 0) || (days > 50)) {
+ bsendmsg(ua, _("Ignoring illegal value for days.\n"));
+ days = 1;
+ }
+ }
+
+ /* Loop through all jobs */
+ LockRes();
+ foreach_res(job, R_JOB) {
+ /* Skip jobs the user has no Job ACL access to, and disabled jobs. */
+ if (!acl_access_ok(ua, Job_ACL, job->name()) || !job->enabled) {
+ continue;
+ }
+ /* Walk every scheduled run of this job inside the window; run-level
+  * overrides (level, priority, storage) take precedence over the
+  * Job resource defaults. */
+ for (run=NULL; (run = find_next_run(run, job, runtime, days)); ) {
+ level = job->JobLevel;
+ if (run->level) {
+ level = run->level;
+ }
+ priority = job->Priority;
+ if (run->Priority) {
+ priority = run->Priority;
+ }
+ if (run->storage) {
+ store = run->storage;
+ } else {
+ store = (STORE *)job->storage->first();
+ }
+ /* Print the column header once, before the first entry. */
+ if (!hdr_printed) {
+ prt_runhdr(ua);
+ hdr_printed = true;
+ }
+ /* Queue a sched_pkt so output can be sorted (my_compare) by run
+  * time rather than resource order.
+  * NOTE(review): sp is malloc()ed but no matching free() is visible
+  * in this hunk -- confirm the entries are released after printing. */
+ sp = (sched_pkt *)malloc(sizeof(sched_pkt));
+ sp->job = job;
+ sp->level = level;
+ sp->priority = priority;
+ sp->runtime = runtime;
+ sp->pool = run->pool;
+ sp->store = store;
+ sched.binary_insert_multiple(sp, my_compare);
+ num_jobs++;
+ }
+ } /* end for loop over resources */
+ UnlockRes();
+ /* Emit the sorted schedule. */
+ foreach_dlist(sp, &sched) {
+ prt_runtime(ua, sp);
+ }
+ if (num_jobs == 0) {
+ bsendmsg(ua, _("No Scheduled Jobs.\n"));
+ }
+ bsendmsg(ua, _("====\n"));
+ /* NOTE(review): leave-trace says "list_sched_jobs_runs" while the
+  * enter-trace above says "list_sched_jobs" -- inconsistent name. */
+ Dmsg0(200, "Leave list_sched_jobs_runs()\n");
+}
+
+/*
+ * Send the list of currently running jobs (and connected consoles)
+ * to the console user.
+ * NOTE(review): this hunk ends mid-switch -- the remainder of the
+ * function (message printing, pool-memory release) is outside this view.
+ */
+static void list_running_jobs(UAContext *ua)
+{
+ JCR *jcr;
+ int njobs = 0;
+ const char *msg;
+ char *emsg; /* edited message */
+ char dt[MAX_TIME_LENGTH];
+ char level[10];
+ bool pool_mem = false; /* true when msg points at pool memory (emsg) */
+
+ Dmsg0(200, "enter list_run_jobs()\n");
+ bsendmsg(ua, _("\nRunning Jobs:\n"));
+ /* First pass: report connected consoles and count real jobs. */
+ foreach_jcr(jcr) {
+ if (jcr->JobId == 0) { /* this is us */
+ /* this is a console or other control job. We only show console
+ * jobs in the status output.
+ */
+ if (jcr->JobType == JT_CONSOLE) {
+ bstrftime_nc(dt, sizeof(dt), jcr->start_time);
+ bsendmsg(ua, _("Console connected at %s\n"), dt);
+ }
+ continue;
+ }
+ njobs++;
+ }
+ endeach_jcr(jcr);
+
+ if (njobs == 0) {
+ /* Note the following message is used in regress -- don't change */
+ bsendmsg(ua, _("No Jobs running.\n====\n"));
+ Dmsg0(200, "leave list_run_jobs()\n");
+ return;
+ }
+ njobs = 0;
+ /* Second pass: print one status line per job the user may see.
+  * NOTE(review): the first pass counts jobs WITHOUT the ACL check
+  * applied below, so a user with no Job ACL access gets the header
+  * and an empty table instead of "No Jobs running" -- confirm this
+  * is intended. */
+ bsendmsg(ua, _(" JobId Level Name Status\n"));
+ bsendmsg(ua, _("======================================================================\n"));
+ foreach_jcr(jcr) {
+ if (jcr->JobId == 0 || !acl_access_ok(ua, Job_ACL, jcr->job->name())) {
+ continue;
+ }
+ njobs++;
+ /* Map the JCR status code to a human-readable phrase.  For the
+  * "waiting on ..." cases the text is built in pool memory (emsg)
+  * and pool_mem is set so the buffer can be freed afterwards. */
+ switch (jcr->JobStatus) {
+ case JS_Created:
+ msg = _("is waiting execution");
+ break;
+ case JS_Running:
+ msg = _("is running");
+ break;
+ case JS_Blocked:
+ msg = _("is blocked");
+ break;
+ case JS_Terminated:
+ msg = _("has terminated");
+ break;
+ case JS_ErrorTerminated:
+ msg = _("has erred");
+ break;
+ case JS_Error:
+ msg = _("has errors");
+ break;
+ case JS_FatalError:
+ msg = _("has a fatal error");
+ break;
+ case JS_Differences:
+ msg = _("has verify differences");
+ break;
+ case JS_Canceled:
+ msg = _("has been canceled");
+ break;
+ case JS_WaitFD:
+ emsg = (char *) get_pool_memory(PM_FNAME);
+ Mmsg(emsg, _("is waiting on Client %s"), jcr->client->name());
+ pool_mem = true;
+ msg = emsg;
+ break;
+ case JS_WaitSD:
+ emsg = (char *) get_pool_memory(PM_FNAME);
+ /* Prefer the write storage name; fall back to read storage. */
+ if (jcr->wstore) {
+ Mmsg(emsg, _("is waiting on Storage %s"), jcr->wstore->name());
+ } else {
+ Mmsg(emsg, _("is waiting on Storage %s"), jcr->rstore->name());
+ }
+ pool_mem = true;
+ msg = emsg;
+ break;
+ case JS_WaitStoreRes:
+ msg = _("is waiting on max Storage jobs");
+ break;
+ case JS_WaitClientRes:
+ msg = _("is waiting on max Client jobs");
+ break;
+ case JS_WaitJobRes:
+ msg = _("is waiting on max Job jobs");
+ break;
+ case JS_WaitMaxJobs:
+ msg = _("is waiting on max total jobs");
+ break;
+ case JS_WaitStartTime:
+ msg = _("is waiting for its start time");
+ break;
+ case JS_WaitPriority:
+ msg = _("is waiting for higher priority jobs to finish");
+ break;