X-Git-Url: https://git.sur5r.net/?a=blobdiff_plain;f=bacula%2Fsrc%2Fdird%2Fjobq.c;h=4ba28a5f7921028c29c730c5330107f20424a681;hb=10cfd798ced2d27f61ead2de6fe9b1bcc8e3468d;hp=2c02aa5e778b174c23ba0b06693d1db9030cfffa;hpb=7cefbfdacb1d394f034d4bee6e7a96af1f06df95;p=bacula%2Fbacula diff --git a/bacula/src/dird/jobq.c b/bacula/src/dird/jobq.c old mode 100755 new mode 100644 index 2c02aa5e77..4ba28a5f79 --- a/bacula/src/dird/jobq.c +++ b/bacula/src/dird/jobq.c @@ -1,3 +1,21 @@ +/* + Bacula(R) - The Network Backup Solution + + Copyright (C) 2000-2017 Kern Sibbald + + The original author of Bacula is Kern Sibbald, with contributions + from many others, a complete list can be found in the file AUTHORS. + + You may use this file and others of this release according to the + license defined in the LICENSE file, which includes the Affero General + Public License, v3.0 ("AGPLv3") and some additional permissions and + terms pursuant to its AGPLv3 Section 7. + + This notice must be preserved when any source code is + conveyed and/or propagated. + + Bacula(R) is a registered trademark of Kern Sibbald. +*/ /* * Bacula job queue routines. * @@ -10,40 +28,12 @@ * * Kern Sibbald, July MMIII * - * Version $Id$ * * This code was adapted from the Bacula workq, which was * adapted from "Programming with POSIX Threads", by * David R. Butenhof * */ -/* - Bacula® - The Network Backup Solution - - Copyright (C) 2003-2006 Free Software Foundation Europe e.V. - - The main author of Bacula is Kern Sibbald, with contributions from - many others, a complete list can be found in the file AUTHORS. - This program is Free Software; you can redistribute it and/or - modify it under the terms of version two of the GNU General Public - License as published by the Free Software Foundation plus additions - that are listed in the file LICENSE. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. - - Bacula® is a registered trademark of John Walker. - The licensor of Bacula is the Free Software Foundation Europe - (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich, - Switzerland, email:ftf@fsfeurope.org. 
-*/ #include "bacula.h" #include "dird.h" @@ -56,8 +46,8 @@ extern "C" void *sched_wait(void *arg); static int start_server(jobq_t *jq); static bool acquire_resources(JCR *jcr); - - +static bool reschedule_job(JCR *jcr, jobq_t *jq, jobq_item_t *je); +static void dec_write_store(JCR *jcr); /* * Initialize a job queue @@ -72,7 +62,7 @@ int jobq_init(jobq_t *jq, int threads, void *(*engine)(void *arg)) if ((stat = pthread_attr_init(&jq->attr)) != 0) { berrno be; - Jmsg1(NULL, M_ERROR, 0, _("pthread_attr_init: ERR=%s\n"), be.strerror(stat)); + Jmsg1(NULL, M_ERROR, 0, _("pthread_attr_init: ERR=%s\n"), be.bstrerror(stat)); return stat; } if ((stat = pthread_attr_setdetachstate(&jq->attr, PTHREAD_CREATE_DETACHED)) != 0) { @@ -81,13 +71,13 @@ int jobq_init(jobq_t *jq, int threads, void *(*engine)(void *arg)) } if ((stat = pthread_mutex_init(&jq->mutex, NULL)) != 0) { berrno be; - Jmsg1(NULL, M_ERROR, 0, _("pthread_mutex_init: ERR=%s\n"), be.strerror(stat)); + Jmsg1(NULL, M_ERROR, 0, _("pthread_mutex_init: ERR=%s\n"), be.bstrerror(stat)); pthread_attr_destroy(&jq->attr); return stat; } if ((stat = pthread_cond_init(&jq->work, NULL)) != 0) { berrno be; - Jmsg1(NULL, M_ERROR, 0, _("pthread_cond_init: ERR=%s\n"), be.strerror(stat)); + Jmsg1(NULL, M_ERROR, 0, _("pthread_cond_init: ERR=%s\n"), be.bstrerror(stat)); pthread_mutex_destroy(&jq->mutex); pthread_attr_destroy(&jq->attr); return stat; @@ -118,40 +108,32 @@ int jobq_destroy(jobq_t *jq) if (jq->valid != JOBQ_VALID) { return EINVAL; } - if ((stat = pthread_mutex_lock(&jq->mutex)) != 0) { - berrno be; - Jmsg1(NULL, M_ERROR, 0, _("pthread_mutex_lock: ERR=%s\n"), be.strerror(stat)); - return stat; - } + P(jq->mutex); jq->valid = 0; /* prevent any more operations */ - /* - * If any threads are active, wake them + /* + * If any threads are active, wake them */ if (jq->num_workers > 0) { jq->quit = true; if (jq->idle_workers) { if ((stat = pthread_cond_broadcast(&jq->work)) != 0) { berrno be; - Jmsg1(NULL, M_ERROR, 0, _("pthread_cond_broadcast: ERR=%s\n"), be.strerror(stat)); - pthread_mutex_unlock(&jq->mutex); + Jmsg1(NULL, M_ERROR, 0, _("pthread_cond_broadcast: ERR=%s\n"), be.bstrerror(stat)); + V(jq->mutex); return stat; } } while (jq->num_workers > 0) { if ((stat = pthread_cond_wait(&jq->work, &jq->mutex)) != 0) { berrno be; - Jmsg1(NULL, M_ERROR, 0, _("pthread_cond_wait: ERR=%s\n"), be.strerror(stat)); - pthread_mutex_unlock(&jq->mutex); + Jmsg1(NULL, M_ERROR, 0, _("pthread_cond_wait: ERR=%s\n"), be.bstrerror(stat)); + V(jq->mutex); return stat; } } } - if ((stat = pthread_mutex_unlock(&jq->mutex)) != 0) { - berrno be; - Jmsg1(NULL, M_ERROR, 0, _("pthread_mutex_unlock: ERR=%s\n"), be.strerror(stat)); - return stat; - } + V(jq->mutex); stat = pthread_mutex_destroy(&jq->mutex); stat1 = pthread_cond_destroy(&jq->work); stat2 = pthread_attr_destroy(&jq->attr); @@ -171,7 +153,7 @@ struct wait_pkt { * this routine is only used for jobs started from the console * for which the user explicitly specified a start time. Otherwise * most jobs are put into the job queue only when their - * scheduled time arives. + * scheduled time arrives. 
*/ extern "C" void *sched_wait(void *arg) @@ -179,10 +161,11 @@ void *sched_wait(void *arg) JCR *jcr = ((wait_pkt *)arg)->jcr; jobq_t *jq = ((wait_pkt *)arg)->jq; + set_jcr_in_tsd(INVALID_JCR); Dmsg0(2300, "Enter sched_wait.\n"); free(arg); time_t wtime = jcr->sched_time - time(NULL); - set_jcr_job_status(jcr, JS_WaitStartTime); + jcr->setJobStatus(JS_WaitStartTime); /* Wait until scheduled time arrives */ if (wtime > 0) { Jmsg(jcr, M_INFO, 0, _("Job %s waiting %d seconds for scheduled start time.\n"), @@ -190,7 +173,7 @@ void *sched_wait(void *arg) } /* Check every 30 seconds if canceled */ while (wtime > 0) { - Dmsg3(2300, "Waiting on sched time, jobid=%d secs=%d use=%d\n", + Dmsg3(2300, "Waiting on sched time, jobid=%d secs=%d use=%d\n", jcr->JobId, wtime, jcr->use_count()); if (wtime > 30) { wtime = 30; @@ -208,6 +191,32 @@ void *sched_wait(void *arg) return NULL; } +/* Procedure to update the client->NumConcurrentJobs */ +static void update_client_numconcurrentjobs(JCR *jcr, int val) +{ + int num; + if (!jcr->client) { + return; + } + + switch (jcr->getJobType()) + { + case JT_MIGRATE: + case JT_COPY: + case JT_ADMIN: + break; + case JT_BACKUP: + /* Fall through wanted */ + default: + if (jcr->no_client_used() || jcr->wasVirtualFull) { + break; + } + num = jcr->client->getNumConcurrentJobs(); + jcr->client->setNumConcurrentJobs(num + val); + break; + } +} + /* * Add a job to the queue * jq is a queue that was created with jobq_init @@ -221,16 +230,16 @@ int jobq_add(jobq_t *jq, JCR *jcr) pthread_t id; wait_pkt *sched_pkt; - if (!jcr->term_wait_inited) { + if (!jcr->term_wait_inited) { /* Initialize termination condition variable */ if ((stat = pthread_cond_init(&jcr->term_wait, NULL)) != 0) { berrno be; - Jmsg1(jcr, M_FATAL, 0, _("Unable to init job cond variable: ERR=%s\n"), be.strerror(stat)); + Jmsg1(jcr, M_FATAL, 0, _("Unable to init job cond variable: ERR=%s\n"), be.bstrerror(stat)); return stat; } jcr->term_wait_inited = true; - } - + } + Dmsg3(2300, "jobq_add jobid=%d jcr=0x%x use_count=%d\n", jcr->JobId, jcr, jcr->use_count()); if (jq->valid != JOBQ_VALID) { Jmsg0(jcr, M_ERROR, 0, "Jobq_add queue not initialized.\n"); @@ -244,20 +253,15 @@ int jobq_add(jobq_t *jq, JCR *jcr) sched_pkt = (wait_pkt *)malloc(sizeof(wait_pkt)); sched_pkt->jcr = jcr; sched_pkt->jq = jq; - stat = pthread_create(&id, &jq->attr, sched_wait, (void *)sched_pkt); + stat = pthread_create(&id, &jq->attr, sched_wait, (void *)sched_pkt); if (stat != 0) { /* thread not created */ berrno be; - Jmsg1(jcr, M_ERROR, 0, _("pthread_thread_create: ERR=%s\n"), be.strerror(stat)); + Jmsg1(jcr, M_ERROR, 0, _("pthread_thread_create: ERR=%s\n"), be.bstrerror(stat)); } return stat; } - if ((stat = pthread_mutex_lock(&jq->mutex)) != 0) { - berrno be; - Jmsg1(jcr, M_ERROR, 0, _("pthread_mutex_lock: ERR=%s\n"), be.strerror(stat)); - free_jcr(jcr); /* release jcr */ - return stat; - } + P(jq->mutex); if ((item = (jobq_item_t *)malloc(sizeof(jobq_item_t))) == NULL) { free_jcr(jcr); /* release jcr */ @@ -265,6 +269,8 @@ int jobq_add(jobq_t *jq, JCR *jcr) } item->jcr = jcr; + /* While waiting in a queue this job is not attached to a thread */ + set_jcr_in_tsd(INVALID_JCR); if (job_canceled(jcr)) { /* Add job to ready queue so that it is canceled quickly */ jq->ready_jobs->prepend(item); @@ -292,7 +298,7 @@ int jobq_add(jobq_t *jq, JCR *jcr) /* Ensure that at least one server looks at the queue. 
*/ stat = start_server(jq); - pthread_mutex_unlock(&jq->mutex); + V(jq->mutex); Dmsg0(2300, "Return jobq_add\n"); return stat; } @@ -317,12 +323,7 @@ int jobq_remove(jobq_t *jq, JCR *jcr) return EINVAL; } - if ((stat = pthread_mutex_lock(&jq->mutex)) != 0) { - berrno be; - Jmsg1(NULL, M_ERROR, 0, _("pthread_mutex_lock: ERR=%s\n"), be.strerror(stat)); - return stat; - } - + P(jq->mutex); foreach_dlist(item, jq->waiting_jobs) { if (jcr == item->jcr) { found = true; @@ -330,7 +331,7 @@ int jobq_remove(jobq_t *jq, JCR *jcr) } } if (!found) { - pthread_mutex_unlock(&jq->mutex); + V(jq->mutex); Dmsg2(2300, "jobq_remove jobid=%d jcr=0x%x not in wait queue\n", jcr->JobId, jcr); return EINVAL; } @@ -342,7 +343,7 @@ int jobq_remove(jobq_t *jq, JCR *jcr) stat = start_server(jq); - pthread_mutex_unlock(&jq->mutex); + V(jq->mutex); Dmsg0(2300, "Return jobq_remove\n"); return stat; } @@ -357,24 +358,26 @@ static int start_server(jobq_t *jq) pthread_t id; /* - * if any threads are idle, wake one -- - * actually we do a broadcast because on /lib/tls + * if any threads are idle, wake one. + * Actually we do a broadcast because on /lib/tls * these signals seem to get lost from time to time. */ if (jq->idle_workers > 0) { Dmsg0(2300, "Signal worker to wake up\n"); if ((stat = pthread_cond_broadcast(&jq->work)) != 0) { berrno be; - Jmsg1(NULL, M_ERROR, 0, _("pthread_cond_signal: ERR=%s\n"), be.strerror(stat)); + Jmsg1(NULL, M_ERROR, 0, _("pthread_cond_signal: ERR=%s\n"), be.bstrerror(stat)); return stat; } } else if (jq->num_workers < jq->max_workers) { Dmsg0(2300, "Create worker thread\n"); /* No idle threads so create a new one */ set_thread_concurrency(jq->max_workers + 1); + jq->num_workers++; if ((stat = pthread_create(&id, &jq->attr, jobq_server, (void *)jq)) != 0) { berrno be; - Jmsg1(NULL, M_ERROR, 0, _("pthread_create: ERR=%s\n"), be.strerror(stat)); + jq->num_workers--; + Jmsg1(NULL, M_ERROR, 0, _("pthread_create: ERR=%s\n"), be.bstrerror(stat)); return stat; } } @@ -397,13 +400,9 @@ void *jobq_server(void *arg) bool timedout = false; bool work = true; + set_jcr_in_tsd(INVALID_JCR); Dmsg0(2300, "Start jobq_server\n"); - if ((stat = pthread_mutex_lock(&jq->mutex)) != 0) { - berrno be; - Jmsg1(NULL, M_ERROR, 0, _("pthread_mutex_lock: ERR=%s\n"), be.strerror(stat)); - return NULL; - } - jq->num_workers++; + P(jq->mutex); for (;;) { struct timeval tv; @@ -429,7 +428,7 @@ void *jobq_server(void *arg) /* This shouldn't happen */ Dmsg0(2300, "This shouldn't happen\n"); jq->num_workers--; - pthread_mutex_unlock(&jq->mutex); + V(jq->mutex); return NULL; } break; @@ -448,21 +447,30 @@ void *jobq_server(void *arg) Dmsg0(2300, "ready queue not empty start server\n"); if (start_server(jq) != 0) { jq->num_workers--; - pthread_mutex_unlock(&jq->mutex); + V(jq->mutex); return NULL; } } jq->running_jobs->append(je); + + /* Attach jcr to this thread while we run the job */ + jcr->my_thread_id = pthread_self(); + jcr->set_killable(true); + set_jcr_in_tsd(jcr); Dmsg1(2300, "Took jobid=%d from ready and appended to run\n", jcr->JobId); /* Release job queue lock */ V(jq->mutex); /* Call user's routine here */ - Dmsg2(2300, "Calling user engine for jobid=%d use=%d\n", jcr->JobId, - jcr->use_count()); + Dmsg3(2300, "Calling user engine for jobid=%d use=%d stat=%c\n", jcr->JobId, + jcr->use_count(), jcr->JobStatus); jq->engine(je->jcr); + /* Job finished detach from thread */ + remove_jcr_from_tsd(je->jcr); + je->jcr->set_killable(false); + Dmsg2(2300, "Back from user engine jobid=%d use=%d.\n", jcr->JobId, 
jcr->use_count()); @@ -476,93 +484,20 @@ void *jobq_server(void *arg) * put into the ready queue. */ if (jcr->acquired_resource_locks) { - if (jcr->rstore) { - jcr->rstore->NumConcurrentJobs = 0; - Dmsg1(200, "Dec rncj=%d\n", jcr->rstore->NumConcurrentJobs); - } - if (jcr->wstore) { - jcr->wstore->NumConcurrentJobs--; - Dmsg1(200, "Dec wncj=%d\n", jcr->wstore->NumConcurrentJobs); - } - jcr->client->NumConcurrentJobs--; - jcr->job->NumConcurrentJobs--; + int num; + dec_read_store(jcr); + dec_write_store(jcr); + update_client_numconcurrentjobs(jcr, -1); + num = jcr->job->getNumConcurrentJobs() - 1; + jcr->job->setNumConcurrentJobs(num); jcr->acquired_resource_locks = false; } - /* - * Reschedule the job if necessary and requested - */ - if (jcr->job->RescheduleOnError && - jcr->JobStatus != JS_Terminated && - jcr->JobStatus != JS_Canceled && - jcr->job->RescheduleTimes > 0 && - jcr->JobType == JT_BACKUP && - (jcr->job->RescheduleTimes == 0 || - jcr->reschedule_count < jcr->job->RescheduleTimes)) { - char dt[50], dt2[50]; - - /* - * Reschedule this job by cleaning it up, but - * reuse the same JobId if possible. - */ - time_t now = time(NULL); - jcr->reschedule_count++; - jcr->sched_time = now + jcr->job->RescheduleInterval; - bstrftime(dt, sizeof(dt), now); - bstrftime(dt2, sizeof(dt2), jcr->sched_time); - Dmsg4(2300, "Rescheduled Job %s to re-run in %d seconds.(now=%u,then=%u)\n", jcr->Job, - (int)jcr->job->RescheduleInterval, now, jcr->sched_time); - Jmsg(jcr, M_INFO, 0, _("Rescheduled Job %s at %s to re-run in %d seconds (%s).\n"), - jcr->Job, dt, (int)jcr->job->RescheduleInterval, dt2); - dird_free_jcr_pointers(jcr); /* partial cleanup old stuff */ - jcr->JobStatus = -1; - set_jcr_job_status(jcr, JS_WaitStartTime); - jcr->SDJobStatus = 0; - if (jcr->JobBytes == 0) { - Dmsg2(2300, "Requeue job=%d use=%d\n", jcr->JobId, jcr->use_count()); - V(jq->mutex); - jobq_add(jq, jcr); /* queue the job to run again */ - P(jq->mutex); - free_jcr(jcr); /* release jcr */ - free(je); /* free the job entry */ - continue; /* look for another job to run */ - } - /* - * Something was actually backed up, so we cannot reuse - * the old JobId or there will be database record - * conflicts. We now create a new job, copying the - * appropriate fields. 
- */ - JCR *njcr = new_jcr(sizeof(JCR), dird_free_jcr); - set_jcr_defaults(njcr, jcr->job); - njcr->reschedule_count = jcr->reschedule_count; - njcr->sched_time = jcr->sched_time; - njcr->JobLevel = jcr->JobLevel; - njcr->JobStatus = -1; - set_jcr_job_status(njcr, jcr->JobStatus); - if (jcr->rstore) { - copy_rstorage(njcr, jcr->rstorage, _("previous Job")); - } else { - free_rstorage(njcr); - } - if (jcr->wstore) { - copy_wstorage(njcr, jcr->wstorage, _("previous Job")); - } else { - free_wstorage(njcr); - } - njcr->messages = jcr->messages; - Dmsg0(2300, "Call to run new job\n"); - V(jq->mutex); - run_job(njcr); /* This creates a "new" job */ - free_jcr(njcr); /* release "new" jcr */ - P(jq->mutex); - Dmsg0(2300, "Back from running new job.\n"); + if (reschedule_job(jcr, jq, je)) { + continue; /* go look for more work */ } + /* Clean up and release old jcr */ - if (jcr->db) { - db_close_database(jcr, jcr->db); - jcr->db = NULL; - } Dmsg2(2300, "====== Termination job=%d use_cnt=%d\n", jcr->JobId, jcr->use_count()); jcr->SDJobStatus = 0; V(jq->mutex); /* release internal lock */ @@ -577,11 +512,26 @@ void *jobq_server(void *arg) Dmsg0(2300, "Done check ready, now check wait queue.\n"); if (!jq->waiting_jobs->empty() && !jq->quit) { int Priority; + bool running_allow_mix = false; je = (jobq_item_t *)jq->waiting_jobs->first(); jobq_item_t *re = (jobq_item_t *)jq->running_jobs->first(); if (re) { Priority = re->jcr->JobPriority; - Dmsg2(2300, "JobId %d is running. Look for pri=%d\n", re->jcr->JobId, Priority); + Dmsg2(2300, "JobId %d is running. Look for pri=%d\n", + re->jcr->JobId, Priority); + running_allow_mix = true; + for ( ; re; ) { + Dmsg2(2300, "JobId %d is also running with %s\n", + re->jcr->JobId, + re->jcr->job->allow_mixed_priority ? "mix" : "no mix"); + if (!re->jcr->job->allow_mixed_priority) { + running_allow_mix = false; + break; + } + re = (jobq_item_t *)jq->running_jobs->next(re); + } + Dmsg1(2300, "The running job(s) %s mixing priorities.\n", + running_allow_mix ? "allow" : "don't allow"); } else { Priority = je->jcr->JobPriority; Dmsg1(2300, "No job running. Look for Job pri=%d\n", Priority); @@ -595,12 +545,15 @@ void *jobq_server(void *arg) JCR *jcr = je->jcr; jobq_item_t *jn = (jobq_item_t *)jq->waiting_jobs->next(je); - Dmsg3(2300, "Examining Job=%d JobPri=%d want Pri=%d\n", - jcr->JobId, jcr->JobPriority, Priority); + Dmsg4(2300, "Examining Job=%d JobPri=%d want Pri=%d (%s)\n", + jcr->JobId, jcr->JobPriority, Priority, + jcr->job->allow_mixed_priority ? 
"mix" : "no mix"); /* Take only jobs of correct Priority */ - if (jcr->JobPriority != Priority) { - set_jcr_job_status(jcr, JS_WaitPriority); + if (!(jcr->JobPriority == Priority + || (jcr->JobPriority < Priority && + jcr->job->allow_mixed_priority && running_allow_mix))) { + jcr->setJobStatus(JS_WaitPriority); break; } @@ -674,6 +627,149 @@ void *jobq_server(void *arg) return NULL; } +/* + * Returns true if cleanup done and we should look for more work + */ +static bool reschedule_job(JCR *jcr, jobq_t *jq, jobq_item_t *je) +{ + bool resched = false; + /* + * Reschedule the job if requested and possible + */ + /* Basic condition is that more reschedule times remain */ + if (jcr->job->RescheduleTimes == 0 || + jcr->reschedule_count < jcr->job->RescheduleTimes) { + + /* Check for incomplete jobs */ + if (jcr->is_incomplete()) { + resched = (jcr->RescheduleIncompleteJobs && jcr->is_JobType(JT_BACKUP) && + !(jcr->HasBase||jcr->is_JobLevel(L_BASE))); + } else { + /* Check for failed jobs */ + resched = (jcr->job->RescheduleOnError && + !jcr->is_JobStatus(JS_Terminated) && + !jcr->is_JobStatus(JS_Canceled) && + jcr->is_JobType(JT_BACKUP)); + } + } + if (resched) { + char dt[50], dt2[50]; + + /* + * Reschedule this job by cleaning it up, but + * reuse the same JobId if possible. + */ + jcr->rerunning = jcr->is_incomplete(); /* save incomplete status */ + time_t now = time(NULL); + jcr->reschedule_count++; + jcr->sched_time = now + jcr->job->RescheduleInterval; + bstrftime(dt, sizeof(dt), now); + bstrftime(dt2, sizeof(dt2), jcr->sched_time); + Dmsg4(2300, "Rescheduled Job %s to re-run in %d seconds.(now=%u,then=%u)\n", jcr->Job, + (int)jcr->job->RescheduleInterval, now, jcr->sched_time); + Jmsg(jcr, M_INFO, 0, _("Rescheduled Job %s at %s to re-run in %d seconds (%s).\n"), + jcr->Job, dt, (int)jcr->job->RescheduleInterval, dt2); + dird_free_jcr_pointers(jcr); /* partial cleanup old stuff */ + jcr->JobStatus = -1; + jcr->setJobStatus(JS_WaitStartTime); + jcr->SDJobStatus = 0; + jcr->JobErrors = 0; + if (!allow_duplicate_job(jcr)) { + return false; + } + /* Only jobs with no output or Incomplete jobs can run on same JCR */ + if (jcr->JobBytes == 0 || jcr->rerunning) { + Dmsg2(2300, "Requeue job=%d use=%d\n", jcr->JobId, jcr->use_count()); + V(jq->mutex); + /* + * Special test here since a Virtual Full gets marked + * as a Full, so we look at the resource record + */ + if (jcr->wasVirtualFull) { + jcr->setJobLevel(L_VIRTUAL_FULL); + } + /* + * When we are using the same jcr then make sure to reset + * RealEndTime back to zero. + */ + jcr->jr.RealEndTime = 0; + jobq_add(jq, jcr); /* queue the job to run again */ + P(jq->mutex); + free_jcr(jcr); /* release jcr */ + free(je); /* free the job entry */ + return true; /* we already cleaned up */ + } + /* + * Something was actually backed up, so we cannot reuse + * the old JobId or there will be database record + * conflicts. We now create a new job, copying the + * appropriate fields. + */ + JCR *njcr = new_jcr(sizeof(JCR), dird_free_jcr); + set_jcr_defaults(njcr, jcr->job); + /* + * Eliminate the new job_end_push, then copy the one from + * the old job, and set the old one to be empty. 
+ */ + void *v; + lock_jobs(); /* protect ourself from reload_config() */ + LockRes(); + foreach_alist(v, (&jcr->job_end_push)) { + njcr->job_end_push.append(v); + } + jcr->job_end_push.destroy(); + jcr->job_end_push.init(1, false); + UnlockRes(); + unlock_jobs(); + + njcr->reschedule_count = jcr->reschedule_count; + njcr->sched_time = jcr->sched_time; + njcr->initial_sched_time = jcr->initial_sched_time; + /* + * Special test here since a Virtual Full gets marked + * as a Full, so we look at the resource record + */ + if (jcr->wasVirtualFull) { + njcr->setJobLevel(L_VIRTUAL_FULL); + } else { + njcr->setJobLevel(jcr->getJobLevel()); + } + njcr->pool = jcr->pool; + njcr->run_pool_override = jcr->run_pool_override; + njcr->next_pool = jcr->next_pool; + njcr->run_next_pool_override = jcr->run_next_pool_override; + njcr->full_pool = jcr->full_pool; + njcr->vfull_pool = jcr->vfull_pool; + njcr->run_full_pool_override = jcr->run_full_pool_override; + njcr->run_vfull_pool_override = jcr->run_vfull_pool_override; + njcr->inc_pool = jcr->inc_pool; + njcr->run_inc_pool_override = jcr->run_inc_pool_override; + njcr->diff_pool = jcr->diff_pool; + njcr->JobStatus = -1; + njcr->setJobStatus(jcr->JobStatus); + if (jcr->rstore) { + copy_rstorage(njcr, jcr->rstorage, _("previous Job")); + } else { + free_rstorage(njcr); + } + if (jcr->wstore) { + copy_wstorage(njcr, jcr->wstorage, _("previous Job")); + } else { + free_wstorage(njcr); + } + njcr->messages = jcr->messages; + njcr->spool_data = jcr->spool_data; + njcr->write_part_after_job = jcr->write_part_after_job; + Dmsg0(2300, "Call to run new job\n"); + V(jq->mutex); + run_job(njcr); /* This creates a "new" job */ + free_jcr(njcr); /* release "new" jcr */ + P(jq->mutex); + Dmsg0(2300, "Back from running new job.\n"); + } + return false; +} + /* * See if we can acquire all the necessary resources for the job (JCR) * @@ -685,83 +781,126 @@ static bool acquire_resources(JCR *jcr) bool skip_this_jcr = false; jcr->acquired_resource_locks = false; +/* + * Turning this code off is likely to cause some deadlocks, + * but we do not really have enough information here to + * know if this is really a deadlock (it may be a dual drive + * autochanger), and in principle, the SD reservation system + * should detect these deadlocks, so push the work off on it. + */ +#ifdef xxx + if (jcr->rstore && jcr->rstore == jcr->wstore) { /* possible deadlock */ + Jmsg(jcr, M_FATAL, 0, _("Job canceled. Attempt to read and write same device.\n" + " Read storage \"%s\" (From %s) -- Write storage \"%s\" (From %s)\n"), + jcr->rstore->name(), jcr->rstore_source, jcr->wstore->name(), jcr->wstore_source); + jcr->setJobStatus(JS_Canceled); + return false; + } +#endif if (jcr->rstore) { Dmsg1(200, "Rstore=%s\n", jcr->rstore->name()); - /* - * Let only one Restore/verify job run at a time regardless - * of MaxConcurrentJobs. - */ - if (jcr->rstore->NumConcurrentJobs == 0) { - jcr->rstore->NumConcurrentJobs = 1; - Dmsg0(200, "Set rncj=1\n"); - } else { - Dmsg1(200, "Fail rncj=%d\n", jcr->rstore->NumConcurrentJobs); - set_jcr_job_status(jcr, JS_WaitStoreRes); + if (!inc_read_store(jcr)) { + Dmsg1(200, "Fail rncj=%d\n", jcr->rstore->getNumConcurrentJobs()); + jcr->setJobStatus(JS_WaitStoreRes); return false; } } - + if (jcr->wstore) { - if (jcr->rstore == jcr->wstore) { /* deadlock */ - jcr->rstore->NumConcurrentJobs = 0; /* back out rstore */ - Jmsg(jcr, M_FATAL, 0, _("Job canceled. 
Attempt to read and write same device.\n")); - set_jcr_job_status(jcr, JS_Canceled); - return false; - } - if (jcr->wstore->NumConcurrentJobs == 0 && - jcr->wstore->NumConcurrentJobs < jcr->wstore->MaxConcurrentJobs) { - /* Simple case, first job */ - jcr->wstore->NumConcurrentJobs = 1; - Dmsg0(200, "Set wncj=1\n"); - } else if (jcr->wstore->NumConcurrentJobs < jcr->wstore->MaxConcurrentJobs) { - jcr->wstore->NumConcurrentJobs++; - Dmsg1(200, "Inc wncj=%d\n", jcr->wstore->NumConcurrentJobs); + Dmsg1(200, "Wstore=%s\n", jcr->wstore->name()); + int num = jcr->wstore->getNumConcurrentJobs(); + if (num < jcr->wstore->MaxConcurrentJobs) { + Dmsg1(200, "Inc wncj=%d\n", num + 1); + jcr->wstore->setNumConcurrentJobs(num + 1); } else if (jcr->rstore) { - jcr->rstore->NumConcurrentJobs = 0; /* back out rstore */ - Dmsg1(200, "Fail wncj=%d\n", jcr->wstore->NumConcurrentJobs); + dec_read_store(jcr); skip_this_jcr = true; } else { - Dmsg1(200, "Fail wncj=%d\n", jcr->wstore->NumConcurrentJobs); + Dmsg1(200, "Fail wncj=%d\n", num); skip_this_jcr = true; } } if (skip_this_jcr) { - set_jcr_job_status(jcr, JS_WaitStoreRes); + jcr->setJobStatus(JS_WaitStoreRes); return false; } - if (jcr->client->NumConcurrentJobs < jcr->client->MaxConcurrentJobs) { - jcr->client->NumConcurrentJobs++; - } else { - /* Back out previous locks */ - if (jcr->wstore) { - jcr->wstore->NumConcurrentJobs--; - Dmsg1(200, "Dec wncj=%d\n", jcr->wstore->NumConcurrentJobs); - } - if (jcr->rstore) { - jcr->rstore->NumConcurrentJobs = 0; - Dmsg1(200, "Dec rncj=%d\n", jcr->rstore->NumConcurrentJobs); + if (jcr->client) { + if (jcr->client->getNumConcurrentJobs() < jcr->client->MaxConcurrentJobs) { + update_client_numconcurrentjobs(jcr, 1); + } else { + /* Back out previous locks */ + dec_write_store(jcr); + dec_read_store(jcr); + jcr->setJobStatus(JS_WaitClientRes); + return false; } - set_jcr_job_status(jcr, JS_WaitClientRes); - return false; } - if (jcr->job->NumConcurrentJobs < jcr->job->MaxConcurrentJobs) { - jcr->job->NumConcurrentJobs++; + if (jcr->job->getNumConcurrentJobs() < jcr->job->MaxConcurrentJobs) { + int num; + num = jcr->job->getNumConcurrentJobs() + 1; + jcr->job->setNumConcurrentJobs(num); } else { /* Back out previous locks */ - if (jcr->wstore) { - jcr->wstore->NumConcurrentJobs--; - Dmsg1(200, "Dec wncj=%d\n", jcr->wstore->NumConcurrentJobs); - } - if (jcr->rstore) { - jcr->rstore->NumConcurrentJobs = 0; - Dmsg1(200, "Dec rncj=%d\n", jcr->rstore->NumConcurrentJobs); - } - jcr->client->NumConcurrentJobs--; - set_jcr_job_status(jcr, JS_WaitJobRes); + dec_write_store(jcr); + dec_read_store(jcr); + update_client_numconcurrentjobs(jcr, -1); + jcr->setJobStatus(JS_WaitJobRes); return false; } jcr->acquired_resource_locks = true; return true; } + +static pthread_mutex_t rstore_mutex = PTHREAD_MUTEX_INITIALIZER; + +/* + * Note: inc_read_store() and dec_read_store() are + * called from select_rstore() in src/dird/restore.c + */ +bool inc_read_store(JCR *jcr) +{ + P(rstore_mutex); + int num = jcr->rstore->getNumConcurrentJobs(); + int numread = jcr->rstore->getNumConcurrentReadJobs(); + int maxread = jcr->rstore->MaxConcurrentReadJobs; + if (num < jcr->rstore->MaxConcurrentJobs && + (jcr->getJobType() == JT_RESTORE || + numread == 0 || + maxread == 0 || /* No limit set */ + numread < maxread)) /* Below the limit */ + { + num++; + numread++; + jcr->rstore->setNumConcurrentReadJobs(numread); + jcr->rstore->setNumConcurrentJobs(num); + Dmsg1(200, "Inc rncj=%d\n", num); + V(rstore_mutex); + return true; + } + V(rstore_mutex); + 
return false; +} + +void dec_read_store(JCR *jcr) +{ + if (jcr->rstore) { + P(rstore_mutex); + int numread = jcr->rstore->getNumConcurrentReadJobs() - 1; + int num = jcr->rstore->getNumConcurrentJobs() - 1; + jcr->rstore->setNumConcurrentReadJobs(numread); + jcr->rstore->setNumConcurrentJobs(num); + Dmsg1(200, "Dec rncj=%d\n", num); + V(rstore_mutex); + } +} + +static void dec_write_store(JCR *jcr) +{ + if (jcr->wstore) { + int num = jcr->wstore->getNumConcurrentJobs() - 1; + Dmsg1(200, "Dec wncj=%d\n", num); + jcr->wstore->setNumConcurrentJobs(num); + } +}
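
The patch replaces the hand-rolled pthread_mutex_lock()/pthread_mutex_unlock() error handling in jobq_destroy(), jobq_add(), jobq_remove() and jobq_server() with Bacula's P()/V() helpers, which is why the per-call Jmsg reporting disappears. The following is only a minimal sketch of what such wrappers typically do (lock or unlock, and treat failure as fatal); it is not Bacula's actual definition, which carries additional lock-manager bookkeeping.

/*
 * Approximate stand-in for the P()/V() mutex helpers used above.
 * A failed lock is treated as unrecoverable, so callers need no
 * per-call error handling.
 */
#include <pthread.h>
#include <cstdio>
#include <cstdlib>

static inline void P(pthread_mutex_t &m)
{
   int stat = pthread_mutex_lock(&m);
   if (stat != 0) {
      fprintf(stderr, "pthread_mutex_lock: ERR=%d\n", stat);
      abort();
   }
}

static inline void V(pthread_mutex_t &m)
{
   int stat = pthread_mutex_unlock(&m);
   if (stat != 0) {
      fprintf(stderr, "pthread_mutex_unlock: ERR=%d\n", stat);
      abort();
   }
}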
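
The new waiting-queue scan in jobq_server() lets a better-priority job start alongside running work only when every party opts in via AllowMixedPriority. The predicate below restates that rule in isolation; job_info, running_pri and can_start() are illustrative names, not Bacula structures (note that a lower number means a higher priority).

/*
 * Sketch of the mixed-priority admission test: a candidate may start
 * while jobs of priority running_pri are active if it has the same
 * priority, or a better (numerically lower) one and both it and every
 * running job allow priority mixing.
 */
#include <vector>

struct job_info {
   int priority;
   bool allow_mixed_priority;
};

bool can_start(const job_info &cand, int running_pri,
               const std::vector<job_info> &running)
{
   bool running_allow_mix = !running.empty();
   for (const job_info &r : running) {
      if (!r.allow_mixed_priority) {
         running_allow_mix = false;
         break;
      }
   }
   return cand.priority == running_pri ||
          (cand.priority < running_pri &&
           cand.allow_mixed_priority && running_allow_mix);
}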
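
reschedule_job() requeues a failed backup (or, with RescheduleIncompleteJobs, an incomplete one) while reschedule_count is below RescheduleTimes, pushing sched_time forward by RescheduleInterval; a job that wrote no data is requeued on the same JCR and JobId, otherwise a fresh JCR is cloned so database records do not collide. The sketch below condenses only the decision and the next start time; resched_policy and its fields are simplified stand-ins, and RescheduleTimes == 0 is read as "no limit" as in the new code.

/*
 * Reduced model of the reschedule decision and timing.
 */
#include <ctime>

struct resched_policy {
   bool reschedule_on_error;
   int  reschedule_times;          /* 0 = no limit in this sketch */
   int  reschedule_interval;       /* seconds between attempts */
};

bool should_reschedule(const resched_policy &p, int attempts_so_far,
                       bool terminated_ok, bool canceled, bool is_backup)
{
   if (p.reschedule_times != 0 && attempts_so_far >= p.reschedule_times) {
      return false;                /* no attempts left */
   }
   return p.reschedule_on_error && is_backup && !terminated_ok && !canceled;
}

time_t next_start_time(const resched_policy &p)
{
   return time(NULL) + p.reschedule_interval;
}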
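
acquire_resources() claims the concurrency limits in a fixed order (read store, write store, client, job) and, when any step fails, releases everything already taken before recording what the job is waiting on. A generic sketch of that back-out ordering follows; counted, wait_reason and acquire_all() are placeholders, not the Bacula resource records.

/*
 * Take several bounded counters in order; on failure, roll back the
 * ones already taken and report the first missing resource.
 */
enum wait_reason { WAIT_NONE, WAIT_STORE, WAIT_CLIENT, WAIT_JOB };

struct counted {
   int num;                        /* currently in use */
   int max;                        /* configured maximum */
};

static bool take(counted &c)
{
   if (c.num < c.max) {
      c.num++;
      return true;
   }
   return false;
}

static void give_back(counted &c)
{
   c.num--;
}

wait_reason acquire_all(counted &rstore, counted &wstore,
                        counted &client, counted &job)
{
   if (!take(rstore)) {
      return WAIT_STORE;
   }
   if (!take(wstore)) {
      give_back(rstore);
      return WAIT_STORE;
   }
   if (!take(client)) {
      give_back(wstore);
      give_back(rstore);
      return WAIT_CLIENT;
   }
   if (!take(job)) {
      give_back(client);
      give_back(wstore);
      give_back(rstore);
      return WAIT_JOB;
   }
   return WAIT_NONE;               /* caller must release all four later */
}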
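
inc_read_store() and dec_read_store() keep the read-storage counters consistent under their own rstore_mutex, since select_rstore() in src/dird/restore.c updates them as well; a job only takes a slot while the configured maximums allow it, and otherwise the caller backs off with JS_WaitStoreRes instead of blocking. Below is a reduced sketch of that check-and-bump pattern with an illustrative STORE_LIMITS type in place of the STORE resource, and without the separate read-job limit.

/*
 * Check-and-bump a shared usage counter under one dedicated mutex;
 * the caller retries later rather than waiting on the lock.
 */
#include <pthread.h>

struct STORE_LIMITS {
   int num_jobs;                   /* jobs currently using this store */
   int max_jobs;                   /* configured Maximum Concurrent Jobs */
};

static pthread_mutex_t store_mutex = PTHREAD_MUTEX_INITIALIZER;

bool try_inc_store(STORE_LIMITS *st)
{
   bool ok = false;
   pthread_mutex_lock(&store_mutex);
   if (st->num_jobs < st->max_jobs) {
      st->num_jobs++;
      ok = true;
   }
   pthread_mutex_unlock(&store_mutex);
   return ok;
}

void dec_store(STORE_LIMITS *st)
{
   pthread_mutex_lock(&store_mutex);
   st->num_jobs--;
   pthread_mutex_unlock(&store_mutex);
}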