/*
- Bacula® - The Network Backup Solution
-
- Copyright (C) 2008-2008 Free Software Foundation Europe e.V.
-
- The main author of Bacula is Kern Sibbald, with contributions from
- many others, a complete list can be found in the file AUTHORS.
- This program is Free Software; you can redistribute it and/or
- modify it under the terms of version two of the GNU General Public
- License as published by the Free Software Foundation and included
- in the file LICENSE.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA.
-
- Bacula® is a registered trademark of Kern Sibbald.
- The licensor of Bacula is the Free Software Foundation Europe
- (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
- Switzerland, email:ftf@fsfeurope.org.
+ Bacula(R) - The Network Backup Solution
+
+ Copyright (C) 2000-2016 Kern Sibbald
+
+ The original author of Bacula is Kern Sibbald, with contributions
+ from many others, a complete list can be found in the file AUTHORS.
+
+ You may use this file and others of this release according to the
+ license defined in the LICENSE file, which includes the Affero General
+ Public License, v3.0 ("AGPLv3") and some additional permissions and
+ terms pursuant to its AGPLv3 Section 7.
+
+ This notice must be preserved when any source code is
+ conveyed and/or propagated.
+
+ Bacula(R) is a registered trademark of Kern Sibbald.
*/
/*
*
* to do the backup.
* When the File daemon finishes the job, update the DB.
*
- * Version $Id: $
*/
#include "bacula.h"
static const int dbglevel = 10;
-static char OKbootstrap[] = "3000 OK bootstrap\n";
-
-static bool create_bootstrap_file(JCR *jcr, POOLMEM *jobids);
+static bool create_bootstrap_file(JCR *jcr, char *jobids);
void vbackup_cleanup(JCR *jcr, int TermCode);
-/*
+/*
* Called here before the job is run to do the job
* specific setup.
*/
bool do_vbackup_init(JCR *jcr)
{
- if (!get_or_create_fileset_record(jcr)) {
- Dmsg1(dbglevel, "JobId=%d no FileSet\n", (int)jcr->JobId);
- return false;
- }
- apply_pool_overrides(jcr);
-
- if (!allow_duplicate_job(jcr)) {
- return false;
- }
+ /*
+ * If the read pool has not been allocated yet, due to the job
+ * being upgraded to a virtual full, then allocate it now.
+ */
+ if (!jcr->rpool_source)
+ jcr->rpool_source = get_pool_memory(PM_MESSAGE);
/*
* Note, at this point, pool is the pool for this job. We
* transfer it to rpool (read pool), and a bit later,
- * pool will be changed to point to the write pool,
+ * pool will be changed to point to the write pool,
* which comes from pool->NextPool.
*/
jcr->rpool = jcr->pool; /* save read pool */
pm_strcpy(jcr->rpool_source, jcr->pool_source);
+ /* If pool storage specified, use it for virtual full */
+ copy_rstorage(jcr, jcr->pool->storage, _("Pool resource"));
Dmsg2(dbglevel, "Read pool=%s (From %s)\n", jcr->rpool->name(), jcr->rpool_source);
Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db));
}
- POOLMEM *jobids = get_pool_memory(PM_FNAME);
- db_accurate_get_jobids(jcr, jcr->db, &jcr->jr, jobids);
- Dmsg1(000, "Accurate jobids=%s\n", jobids);
- if (*jobids == 0) {
- free_pool_memory(jobids);
- Jmsg(jcr, M_FATAL, 0, _("Cannot find previous JobIds.\n"));
- return false;
- }
-
- if (!create_bootstrap_file(jcr, jobids)) {
- Jmsg(jcr, M_FATAL, 0, _("Could not get or create the FileSet record.\n"));
- free_pool_memory(jobids);
- return false;
- }
- free_pool_memory(jobids);
-
- /*
- * If the original backup pool has a NextPool, make sure a
- * record exists in the database. Note, in this case, we
- * will be backing up from pool to pool->NextPool.
- */
- if (jcr->pool->NextPool) {
- jcr->jr.PoolId = get_or_create_pool_record(jcr, jcr->pool->NextPool->name());
- if (jcr->jr.PoolId == 0) {
- return false;
- }
- }
-
- if (!set_migration_wstorage(jcr, jcr->pool)) {
+ if (!apply_wstorage_overrides(jcr, jcr->pool)) {
return false;
}
- pm_strcpy(jcr->pool_source, _("Job Pool's NextPool resource"));
Dmsg2(dbglevel, "Write pool=%s read rpool=%s\n", jcr->pool->name(), jcr->rpool->name());
- create_clones(jcr);
-
return true;
}
/*
- * Do a backup of the specified FileSet
+ * Do a virtual backup, which consolidates all previous backups into
+ * a sort of synthetic Full.
*
* Returns: false on failure
* true on success
*/
bool do_vbackup(JCR *jcr)
{
- char ed1[100];
- BSOCK *sd;
+ char level_computed = L_FULL;
+ char ed1[100];
+ BSOCK *sd;
+ char *p;
+ sellist sel;
+ db_list_ctx jobids;
+
+ Dmsg2(100, "rstorage=%p wstorage=%p\n", jcr->rstorage, jcr->wstorage);
+ Dmsg2(100, "Read store=%s, write store=%s\n",
+ ((STORE *)jcr->rstorage->first())->name(),
+ ((STORE *)jcr->wstorage->first())->name());
+
+ jcr->wasVirtualFull = true; /* remember where we came from */
/* Print Job Start message */
- Jmsg(jcr, M_INFO, 0, _("Start Vbackup JobId %s, Job=%s\n"),
+ Jmsg(jcr, M_INFO, 0, _("Start Virtual Backup JobId %s, Job=%s\n"),
edit_uint64(jcr->JobId, ed1), jcr->Job);
+ if (!jcr->accurate) {
+ Jmsg(jcr, M_WARNING, 0,
+_("This Job is not an Accurate backup so is not equivalent to a Full backup.\n"));
+ }
+
+ if (jcr->JobIds && *jcr->JobIds) {
+ JOB_DBR jr;
+ db_list_ctx status;
+ POOL_MEM query(PM_MESSAGE);
+
+ memset(&jr, 0, sizeof(jr));
+
+ if (is_an_integer(jcr->JobIds)) {
+ /* Single JobId, so start the accurate code based on this id */
+
+ jr.JobId = str_to_int64(jcr->JobIds);
+ if (!db_get_job_record(jcr, jcr->db, &jr)) {
+ Jmsg(jcr, M_ERROR, 0,
+ _("Unable to get Job record for JobId=%s: ERR=%s\n"),
+ jcr->JobIds, db_strerror(jcr->db));
+ return false;
+ }
+ Jmsg(jcr, M_INFO,0,_("Selecting jobs to build the Full state at %s\n"),
+ jr.cStartTime);
+
+ jr.JobLevel = L_INCREMENTAL; /* Take Full+Diff+Incr */
+ db_get_accurate_jobids(jcr, jcr->db, &jr, &jobids);
+
+ } else if (sel.set_string(jcr->JobIds, true)) {
+ /* Found alljobid keyword */
+ if (jcr->use_all_JobIds) {
+ jobids.count = sel.size();
+ pm_strcpy(jobids.list, sel.get_expanded_list());
+
+ /* Need to apply some filter on the job name */
+ } else {
+ Mmsg(query,
+ "SELECT JobId FROM Job "
+ "WHERE Job.Name = '%s' "
+ "AND Job.JobId IN (%s) "
+ "ORDER BY JobTDate ASC",
+ jcr->job->name(),
+ sel.get_expanded_list());
+
+ db_sql_query(jcr->db, query.c_str(), db_list_handler, &jobids);
+ }
+
+ if (jobids.count == 0) {
+ Jmsg(jcr, M_FATAL, 0, _("No valid Jobs found from user selection.\n"));
+ return false;
+ }
+
+ Jmsg(jcr, M_INFO, 0, _("Using user supplied JobIds=%s\n"),
+ jobids.list);
+
+ /* Check status */
+ Mmsg(query,
+ "SELECT Level FROM Job "
+ "WHERE Job.JobId IN (%s) "
+ "GROUP BY Level",
+ jobids.list);
+
+ /* Will produce something like F,D,I or F,I */
+ db_sql_query(jcr->db, query.c_str(), db_list_handler, &status);
+
+ /* If no full found in the list, we build a "virtualdiff" or
+ * a "virtualinc".
+ */
+ if (strchr(status.list, L_FULL) == NULL) {
+ if (strchr(status.list, L_DIFFERENTIAL)) {
+ level_computed = L_DIFFERENTIAL;
+ Jmsg(jcr, M_INFO, 0, _("No previous Full found in list, "
+ "using Differential level\n"));
+
+ } else {
+ level_computed = L_INCREMENTAL;
+ Jmsg(jcr, M_INFO, 0, _("No previous Full found in list, "
+ "using Incremental level\n"));
+ }
+ }
+ }
+
+ } else { /* No argument provided */
+ jcr->jr.JobLevel = L_VIRTUAL_FULL;
+ db_get_accurate_jobids(jcr, jcr->db, &jcr->jr, &jobids);
+ Dmsg1(10, "Accurate jobids=%s\n", jobids.list);
+ }
+
+ if (jobids.count == 0) {
+ Jmsg(jcr, M_FATAL, 0, _("No previous Jobs found.\n"));
+ return false;
+ }
+
+ /* Full by default, or might be Incr/Diff when jobid= is used */
+ jcr->jr.JobLevel = level_computed;
+
+ /*
+ * Now we find the last job that ran and store its info in
+ * the previous_jr record. We will set our times to the
+ * values from that job so that anything changed after that
+ * time will be picked up on the next backup.
+ */
+ p = strrchr(jobids.list, ','); /* find last jobid */
+ if (p != NULL) {
+ p++;
+ } else {
+ p = jobids.list;
+ }
+ memset(&jcr->previous_jr, 0, sizeof(jcr->previous_jr));
+ jcr->previous_jr.JobId = str_to_int64(p);
+ Dmsg1(10, "Previous JobId=%s\n", p);
+ if (!db_get_job_record(jcr, jcr->db, &jcr->previous_jr)) {
+ Jmsg(jcr, M_FATAL, 0, _("Error getting Job record for previous Job: ERR=%s"),
+ db_strerror(jcr->db));
+ return false;
+ }
+
+ if (!create_bootstrap_file(jcr, jobids.list)) {
+ Jmsg(jcr, M_FATAL, 0, _("Could not get or create the FileSet record.\n"));
+ return false;
+ }
/*
* Open a message channel connection with the Storage
*
*/
Dmsg0(110, "Open connection with storage daemon\n");
- set_jcr_job_status(jcr, JS_WaitSD);
+ jcr->setJobStatus(JS_WaitSD);
/*
* Start conversation with Storage daemon
*/
return false;
}
sd = jcr->store_bsock;
+
/*
* Now start a job with the Storage daemon
*/
- Dmsg2(000, "rstorage=%p wstorage=%p\n", jcr->rstorage, jcr->wstorage);
- Dmsg2(000, "Read store=%s, write store=%s\n",
- ((STORE *)jcr->rstorage->first())->name(),
- ((STORE *)jcr->wstorage->first())->name());
- if (((STORE *)jcr->rstorage->first())->name() == ((STORE *)jcr->wstorage->first())->name()) {
- Jmsg(jcr, M_FATAL, 0, _("Read storage \"%s\" same as write storage.\n"),
- ((STORE *)jcr->rstorage->first())->name());
- return false;
- }
- if (!start_storage_daemon_job(jcr, jcr->rstorage, jcr->wstorage)) {
- return false;
- }
- Dmsg0(000, "Storage daemon connection OK\n");
-
- if (!send_bootstrap_file(jcr, sd) ||
- !response(jcr, sd, OKbootstrap, "Bootstrap", DISPLAY_ERROR)) {
+ if (!start_storage_daemon_job(jcr, jcr->rstorage, jcr->wstorage, /*send_bsr*/true)) {
return false;
}
+ Dmsg0(100, "Storage daemon connection OK\n");
- Dmsg0(000, "Bootstrap file sent\n");
-
- /*
+ /*
* We re-update the job start record so that the start
- * time is set after the run before job. This avoids
+ * time is set after the run before job. This avoids
* that any files created by the run before job will
* be saved twice. They will be backed up in the current
* job, but not in the next one unless they are changed.
* Without this, they will be backed up in this job and
- * in the next job run because in that case, their date
+ * in the next job run because in that case, their date
* is after the start of this run.
*/
jcr->start_time = time(NULL);
jcr->jr.StartTime = jcr->start_time;
jcr->jr.JobTDate = jcr->start_time;
- set_jcr_job_status(jcr, JS_Running);
+ jcr->setJobStatus(JS_Running);
+
+ /* Add the following when support for base jobs is added to virtual full */
+ //jcr->HasBase = jcr->job->base != NULL;
+ //jcr->jr.HasBase = jcr->HasBase;
- /* Update job start record for this migration control job */
+ /* Update job start record */
if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) {
Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db));
return false;
}
+ /* Declare the job started to start the MaxRunTime check */
+ jcr->setJobStarted();
+
/*
* Start the job prior to starting the message thread below
* to avoid two threads from using the BSOCK structure at
return false;
}
- set_jcr_job_status(jcr, JS_Running);
+ jcr->setJobStatus(JS_Running);
/* Pickup Job termination data */
- /* Note, the SD stores in jcr->JobFiles/ReadBytes/JobBytes/Errors */
+ /* Note, the SD stores in jcr->JobFiles/ReadBytes/JobBytes/JobErrors */
wait_for_storage_daemon_termination(jcr);
- set_jcr_job_status(jcr, jcr->SDJobStatus);
+ jcr->setJobStatus(jcr->SDJobStatus);
db_write_batch_file_records(jcr); /* used by bulk batch file insert */
if (jcr->JobStatus != JS_Terminated) {
return false;
char sdt[50], edt[50], schedt[50];
char ec1[30], ec3[30], ec4[30], compress[50];
char ec7[30], ec8[30], elapsed[50];
- char term_code[100], fd_term_msg[100], sd_term_msg[100];
+ char term_code[100], sd_term_msg[100];
const char *term_msg;
int msg_type = M_INFO;
MEDIA_DBR mr;
CLIENT_DBR cr;
double kbps, compression;
utime_t RunTime;
+ POOL_MEM query(PM_MESSAGE);
Dmsg2(100, "Enter backup_cleanup %d %c\n", TermCode, TermCode);
- memset(&mr, 0, sizeof(mr));
memset(&cr, 0, sizeof(cr));
- jcr->set_JobLevel(L_FULL); /* we want this to appear as a Full backup */
+ jcr->setJobLevel(L_FULL); /* we want this to appear as a Full backup */
jcr->jr.JobLevel = L_FULL; /* we want this to appear as a Full backup */
jcr->JobFiles = jcr->SDJobFiles;
jcr->JobBytes = jcr->SDJobBytes;
update_job_end(jcr, TermCode);
-#ifdef xxx
- /* ***FIXME*** set to time of last incremental */
/* Update final items to set them to the previous job's values */
Mmsg(query, "UPDATE Job SET StartTime='%s',EndTime='%s',"
- "JobTDate=%s WHERE JobId=%s",
- jcr->previous_jr.cStartTime, jcr->previous_jr.cEndTime,
+ "JobTDate=%s WHERE JobId=%s",
+ jcr->previous_jr.cStartTime, jcr->previous_jr.cEndTime,
edit_uint64(jcr->previous_jr.JobTDate, ec1),
- edit_uint64(mig_jcr->jr.JobId, ec2));
- db_sql_query(mig_jcr->db, query.c_str(), NULL, NULL);
-#endif
+ edit_uint64(jcr->JobId, ec3));
+ db_sql_query(jcr->db, query.c_str(), NULL, NULL);
+ /* Get the fully updated job record */
if (!db_get_job_record(jcr, jcr->db, &jcr->jr)) {
Jmsg(jcr, M_WARNING, 0, _("Error getting Job record for Job report: ERR=%s"),
db_strerror(jcr->db));
- set_jcr_job_status(jcr, JS_ErrorTerminated);
+ jcr->setJobStatus(JS_ErrorTerminated);
}
bstrncpy(cr.Name, jcr->client->name(), sizeof(cr.Name));
if (!db_get_media_record(jcr, jcr->db, &mr)) {
Jmsg(jcr, M_WARNING, 0, _("Error getting Media record for Volume \"%s\": ERR=%s"),
mr.VolumeName, db_strerror(jcr->db));
- set_jcr_job_status(jcr, JS_ErrorTerminated);
+ jcr->setJobStatus(JS_ErrorTerminated);
}
update_bootstrap_file(jcr);
switch (jcr->JobStatus) {
case JS_Terminated:
- if (jcr->Errors || jcr->SDErrors) {
+ if (jcr->JobErrors || jcr->SDErrors) {
term_msg = _("Backup OK -- with warnings");
} else {
term_msg = _("Backup OK");
msg_type = M_ERROR; /* Generate error message */
if (jcr->store_bsock) {
jcr->store_bsock->signal(BNET_TERMINATE);
- if (jcr->SD_msg_chan) {
+ if (jcr->SD_msg_chan_started) {
pthread_cancel(jcr->SD_msg_chan);
}
}
term_msg = _("Backup Canceled");
if (jcr->store_bsock) {
jcr->store_bsock->signal(BNET_TERMINATE);
- if (jcr->SD_msg_chan) {
+ if (jcr->SD_msg_chan_started) {
pthread_cancel(jcr->SD_msg_chan);
}
}
bsnprintf(compress, sizeof(compress), "%.1f %%", compression);
}
}
- jobstatus_to_ascii(jcr->FDJobStatus, fd_term_msg, sizeof(fd_term_msg));
jobstatus_to_ascii(jcr->SDJobStatus, sd_term_msg, sizeof(sd_term_msg));
-// bmicrosleep(15, 0); /* for debugging SIGHUP */
-
- Jmsg(jcr, msg_type, 0, _("Bacula %s %s (%s): %s\n"
+ Jmsg(jcr, msg_type, 0, _("%s %s %s (%s):\n"
" Build OS: %s %s %s\n"
" JobId: %d\n"
" Job: %s\n"
-" Backup Level: %s%s\n"
+" Backup Level: Virtual Full\n"
" Client: \"%s\" %s\n"
" FileSet: \"%s\" %s\n"
" Pool: \"%s\" (From %s)\n"
" SD Files Written: %s\n"
" SD Bytes Written: %s (%sB)\n"
" Rate: %.1f KB/s\n"
-" Software Compression: %s\n"
-" VSS: %s\n"
-" Encryption: %s\n"
-" Accurate: %s\n"
" Volume name(s): %s\n"
" Volume Session Id: %d\n"
" Volume Session Time: %d\n"
" SD Errors: %d\n"
" SD termination status: %s\n"
" Termination: %s\n\n"),
- my_name, VERSION, LSMDATE, edt,
+ BACULA, my_name, VERSION, LSMDATE,
HOST_OS, DISTNAME, DISTVER,
jcr->jr.JobId,
jcr->jr.Job,
- level_to_str(jcr->get_JobLevel()), jcr->since,
jcr->client->name(), cr.Uname,
jcr->fileset->name(), jcr->FSCreateTime,
jcr->pool->name(), jcr->pool_source,
edit_uint64_with_commas(jcr->jr.JobBytes, ec3),
edit_uint64_with_suffix(jcr->jr.JobBytes, ec4),
kbps,
- compress,
- jcr->VSS?_("yes"):_("no"),
- jcr->Encrypt?_("yes"):_("no"),
- jcr->accurate?_("yes"):_("no"),
jcr->VolumeName,
jcr->VolSessionId,
jcr->VolSessionTime,
}
-static bool create_bootstrap_file(JCR *jcr, POOLMEM *jobids)
+static bool create_bootstrap_file(JCR *jcr, char *jobids)
{
RESTORE_CTX rx;
UAContext *ua;
#define new_get_file_list
#ifdef new_get_file_list
- if (!db_get_file_list(jcr, ua->db, jobids, insert_bootstrap_handler, (void *)rx.bsr)) {
- Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(ua->db));
+ if (!db_open_batch_connexion(jcr, jcr->db)) {
+ Jmsg0(jcr, M_FATAL, 0, "Can't get batch sql connexion");
+ return false;
+ }
+
+ if (!db_get_file_list(jcr, jcr->db_batch, jobids, false /* don't use md5 */,
+ true /* use delta */,
+ insert_bootstrap_handler, (void *)rx.bsr))
+ {
+ Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(jcr->db_batch));
}
#else
char *p;
* Find files for this JobId and insert them in the tree
*/
Mmsg(rx.query, uar_sel_files, edit_int64(JobId, ed1));
- Dmsg1(000, "uar_sel_files=%s\n", rx.query);
+ Dmsg1(100, "uar_sel_files=%s\n", rx.query);
if (!db_sql_query(ua->db, rx.query, insert_bootstrap_handler, (void *)rx.bsr)) {
Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(ua->db));
}
#endif
complete_bsr(ua, rx.bsr);
- Dmsg0(000, "Print bsr\n");
- print_bsr(ua, rx.bsr);
-
jcr->ExpectedFiles = write_bsr_file(ua, rx);
- Dmsg1(000, "Found %d files to consolidate.\n", jcr->ExpectedFiles);
- if (jcr->ExpectedFiles == 0) {
- free_ua_context(ua);
- free_bsr(rx.bsr);
- return false;
- }
+ Jmsg(jcr, M_INFO, 0, _("Found %d files to consolidate into Virtual Full.\n"),
+ jcr->ExpectedFiles);
free_ua_context(ua);
free_bsr(rx.bsr);
- return true;
+ return jcr->ExpectedFiles==0?false:true;
}