2 Bacula® - The Network Backup Solution
4 Copyright (C) 2008-2008 Free Software Foundation Europe e.V.
6 The main author of Bacula is Kern Sibbald, with contributions from
7 many others, a complete list can be found in the file AUTHORS.
8 This program is Free Software; you can redistribute it and/or
9 modify it under the terms of version two of the GNU General Public
10 License as published by the Free Software Foundation and included
13 This program is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
23 Bacula® is a registered trademark of Kern Sibbald.
24 The licensor of Bacula is the Free Software Foundation Europe
25 (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
26 Switzerland, email:ftf@fsfeurope.org.
30 * Bacula Director -- vbackup.c -- responsible for doing virtual
31 * backup jobs or in other words, consolidation or synthetic
34 * Kern Sibbald, July MMVIII
36 * Basic tasks done here:
37 * Open DB and create records for this job.
38 * Figure out what Jobs to copy.
39 * Open Message Channel with Storage daemon to tell him a job will be starting.
40 * Open connection with File daemon and pass him commands
42 * When the File daemon finishes the job, update the DB.
/* Debug level used for Dmsg() trace output throughout this module */
51 static const int dbglevel = 10;
/* Exact reply expected from the Storage daemon after the bootstrap file is sent */
53 static char OKbootstrap[] = "3000 OK bootstrap\n";
/* Forward declarations for this translation unit */
55 static bool create_bootstrap_file(JCR *jcr, POOLMEM *jobids);
56 void vbackup_cleanup(JCR *jcr, int TermCode);
/*
 * Initialize a virtual (consolidation) backup job before it runs:
 * resolve the FileSet record, apply pool overrides, save the job's
 * pool as the read pool, collect the previous JobIds to consolidate,
 * build the bootstrap file from them, and switch the write pool to
 * pool->NextPool when one is configured.
 *
 * NOTE(review): this view of the file is truncated -- several original
 * lines (error returns, closing braces) are not visible here.
 */
59 * Called here before the job is run to do the job
62 bool do_vbackup_init(JCR *jcr)
64 if (!get_or_create_fileset_record(jcr)) {
65 Dmsg1(dbglevel, "JobId=%d no FileSet\n", (int)jcr->JobId);
69 apply_pool_overrides(jcr);
71 if (!allow_duplicate_job(jcr)) {
76 * Note, at this point, pool is the pool for this job. We
77 * transfer it to rpool (read pool), and a bit later,
78 * pool will be changed to point to the write pool,
79 * which comes from pool->NextPool.
81 jcr->rpool = jcr->pool; /* save read pool */
82 pm_strcpy(jcr->rpool_source, jcr->pool_source);
85 Dmsg2(dbglevel, "Read pool=%s (From %s)\n", jcr->rpool->name(), jcr->rpool_source);
/* Ask the catalog for the list of JobIds that make up the accurate
 * backup set to be consolidated; jobids is a pool-memory string. */
87 POOLMEM *jobids = get_pool_memory(PM_FNAME);
88 db_accurate_get_jobids(jcr, jcr->db, &jcr->jr, jobids);
/* Presumably inside an "empty jobids" check (condition line not visible):
 * no previous jobs found is fatal for a virtual backup. */
90 free_pool_memory(jobids);
91 Jmsg(jcr, M_FATAL, 0, _("Cannot find previous JobIds.\n"));
96 if (!create_bootstrap_file(jcr, jobids)) {
/* NOTE(review): wrong error text -- this is the bootstrap-file failure
 * path, but the message talks about the FileSet record (copy-paste
 * from get_or_create_fileset_record). Should say the bootstrap file
 * could not be created. */
97 Jmsg(jcr, M_FATAL, 0, _("Could not get or create the FileSet record.\n"));
98 free_pool_memory(jobids);
101 free_pool_memory(jobids);
104 * If the original backup pool has a NextPool, make sure a
105 * record exists in the database. Note, in this case, we
106 * will be backing up from pool to pool->NextPool.
108 if (jcr->pool->NextPool) {
109 jcr->jr.PoolId = get_or_create_pool_record(jcr, jcr->pool->NextPool->name());
110 if (jcr->jr.PoolId == 0) {
114 /* ***FIXME*** this is probably not needed */
115 if (!set_migration_wstorage(jcr, jcr->pool)) {
118 pm_strcpy(jcr->pool_source, _("Job Pool's NextPool resource"));
/* Trace the final read/write pool selection for debugging */
120 Dmsg2(dbglevel, "Write pool=%s read rpool=%s\n", jcr->pool->name(), jcr->rpool->name());
/*
 * Run the virtual backup proper: open the message channel to the
 * Storage daemon, send the bootstrap file, start the SD job, record
 * job start times for both the control job (jcr) and the real backup
 * job (mig_jcr), wait for the SD to finish, then clean up and purge
 * the file records of the consolidated source jobs.
 *
 * NOTE(review): this view of the file is truncated -- many original
 * lines (returns, closing braces, declarations such as 'sd', 'ed1',
 * 'jobid') are not visible here.
 */
128 * Do a backup of the specified FileSet
130 * Returns: false on failure
133 bool do_vbackup(JCR *jcr)
137 JCR *mig_jcr = jcr->mig_jcr; /* newly backed up job */
140 * If mig_jcr is NULL, there is nothing to do for this job,
141 * so set a normal status, cleanup and return OK.
144 set_jcr_job_status(jcr, JS_Terminated);
145 vbackup_cleanup(jcr, jcr->JobStatus);
149 /* Print Job Start message */
150 Jmsg(jcr, M_INFO, 0, _("Start Vbackup JobId %s, Job=%s\n"),
151 edit_uint64(jcr->JobId, ed1), jcr->Job);
156 * Open a message channel connection with the Storage
157 * daemon. This is to let him know that our client
158 * will be contacting him for a backup session.
161 Dmsg0(110, "Open connection with storage daemon\n");
162 set_jcr_job_status(jcr, JS_WaitSD);
163 set_jcr_job_status(mig_jcr, JS_WaitSD);
165 * Start conversation with Storage daemon
167 if (!connect_to_storage_daemon(jcr, 10, SDConnectTimeout, 1)) {
170 sd = jcr->store_bsock;
172 * Now start a job with the Storage daemon
174 Dmsg2(dbglevel, "Read store=%s, write store=%s\n",
175 ((STORE *)jcr->rstorage->first())->name(),
176 ((STORE *)jcr->wstorage->first())->name());
/* NOTE(review): BUG -- name() returns a C string (char*), so '==' here
 * compares pointer identity, not string contents. The "read storage
 * same as write storage" check should use strcmp(...) == 0; as written
 * it only fires when both sides happen to share the same buffer. */
177 if (((STORE *)jcr->rstorage->first())->name() == ((STORE *)jcr->wstorage->first())->name()) {
178 Jmsg(jcr, M_FATAL, 0, _("Read storage \"%s\" same as write storage.\n"),
179 ((STORE *)jcr->rstorage->first())->name());
182 if (!start_storage_daemon_job(jcr, jcr->rstorage, jcr->wstorage)) {
185 Dmsg0(150, "Storage daemon connection OK\n");
/* Send the bootstrap (list of files/volumes to read) and require the
 * "3000 OK bootstrap" acknowledgement before proceeding. */
187 if (!send_bootstrap_file(jcr, sd) ||
188 !response(jcr, sd, OKbootstrap, "Bootstrap", DISPLAY_ERROR)) {
193 * We re-update the job start record so that the start
194 * time is set after the run before job. This avoids
195 * that any files created by the run before job will
196 * be saved twice. They will be backed up in the current
197 * job, but not in the next one unless they are changed.
198 * Without this, they will be backed up in this job and
199 * in the next job run because in that case, their date
200 * is after the start of this run.
202 jcr->start_time = time(NULL);
203 jcr->jr.StartTime = jcr->start_time;
204 jcr->jr.JobTDate = jcr->start_time;
205 set_jcr_job_status(jcr, JS_Running);
207 /* Update job start record for this migration control job */
208 if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) {
209 Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db));
/* Mirror the same start-time bookkeeping for the real backup job */
214 mig_jcr->start_time = time(NULL);
215 mig_jcr->jr.StartTime = mig_jcr->start_time;
216 mig_jcr->jr.JobTDate = mig_jcr->start_time;
217 set_jcr_job_status(mig_jcr, JS_Running);
219 /* Update job start record for the real migration backup job */
220 if (!db_update_job_start_record(mig_jcr, mig_jcr->db, &mig_jcr->jr)) {
221 Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(mig_jcr->db));
225 Dmsg4(dbglevel, "mig_jcr: Name=%s JobId=%d Type=%c Level=%c\n",
226 mig_jcr->jr.Name, (int)mig_jcr->jr.JobId,
227 mig_jcr->jr.JobType, mig_jcr->jr.JobLevel);
231 * Start the job prior to starting the message thread below
232 * to avoid two threads from using the BSOCK structure at
235 if (!sd->fsend("run")) {
240 * Now start a Storage daemon message thread
242 if (!start_storage_daemon_message_thread(jcr)) {
247 set_jcr_job_status(jcr, JS_Running);
248 set_jcr_job_status(mig_jcr, JS_Running);
250 /* Pickup Job termination data */
251 /* Note, the SD stores in jcr->JobFiles/ReadBytes/JobBytes/Errors */
252 wait_for_storage_daemon_termination(jcr);
253 set_jcr_job_status(jcr, jcr->SDJobStatus);
254 db_write_batch_file_records(jcr); /* used by bulk batch file insert */
255 if (jcr->JobStatus != JS_Terminated) {
259 vbackup_cleanup(jcr, jcr->JobStatus);
/* On success (presumably -- the guarding condition is not visible),
 * purge the file records of the jobs that were consolidated, keeping
 * their Job records. */
262 UAContext *ua = new_ua_context(jcr);
263 edit_uint64(jcr->previous_jr.JobId, jobid);
264 /* Purge all old file records, but leave Job record */
265 purge_files_from_jobs(ua, jobid);
/*
 * Finalize a virtual backup job: write the job-end record, fetch the
 * Job/Client/Media catalog records needed for the report, map the
 * termination code to a human-readable status, compute rate and
 * compression figures, and emit the final job report via Jmsg().
 *
 * NOTE(review): this view of the file is truncated -- switch labels,
 * some break statements and closing braces are not visible here.
 */
273 * Release resources allocated during backup.
275 void vbackup_cleanup(JCR *jcr, int TermCode)
277 char sdt[50], edt[50], schedt[50];
278 char ec1[30], ec2[30], ec3[30], ec4[30], ec5[30], compress[50];
279 char ec6[30], ec7[30], ec8[30], elapsed[50];
280 char term_code[100], fd_term_msg[100], sd_term_msg[100];
281 const char *term_msg;
282 int msg_type = M_INFO;
285 double kbps, compression;
288 Dmsg2(100, "Enter backup_cleanup %d %c\n", TermCode, TermCode);
289 memset(&mr, 0, sizeof(mr));
290 memset(&cr, 0, sizeof(cr));
/* Write the final Job record (end time, status, byte counts) */
292 update_job_end(jcr, TermCode);
294 if (!db_get_job_record(jcr, jcr->db, &jcr->jr)) {
295 Jmsg(jcr, M_WARNING, 0, _("Error getting Job record for Job report: ERR=%s"),
296 db_strerror(jcr->db));
297 set_jcr_job_status(jcr, JS_ErrorTerminated);
300 bstrncpy(cr.Name, jcr->client->name(), sizeof(cr.Name));
301 if (!db_get_client_record(jcr, jcr->db, &cr)) {
302 Jmsg(jcr, M_WARNING, 0, _("Error getting Client record for Job report: ERR=%s"),
303 db_strerror(jcr->db));
306 bstrncpy(mr.VolumeName, jcr->VolumeName, sizeof(mr.VolumeName));
307 if (!db_get_media_record(jcr, jcr->db, &mr)) {
308 Jmsg(jcr, M_WARNING, 0, _("Error getting Media record for Volume \"%s\": ERR=%s"),
309 mr.VolumeName, db_strerror(jcr->db));
310 set_jcr_job_status(jcr, JS_ErrorTerminated);
313 update_bootstrap_file(jcr);
/* Translate the final JobStatus into report text; error/cancel paths
 * also tear down the SD connection and its message thread. */
315 switch (jcr->JobStatus) {
317 if (jcr->Errors || jcr->SDErrors) {
318 term_msg = _("Backup OK -- with warnings");
320 term_msg = _("Backup OK");
324 case JS_ErrorTerminated:
325 term_msg = _("*** Backup Error ***");
326 msg_type = M_ERROR; /* Generate error message */
327 if (jcr->store_bsock) {
328 jcr->store_bsock->signal(BNET_TERMINATE);
329 if (jcr->SD_msg_chan) {
330 pthread_cancel(jcr->SD_msg_chan);
335 term_msg = _("Backup Canceled");
336 if (jcr->store_bsock) {
337 jcr->store_bsock->signal(BNET_TERMINATE);
338 if (jcr->SD_msg_chan) {
339 pthread_cancel(jcr->SD_msg_chan);
/* Default case: unexpected status code -- report it verbatim.
 * NOTE(review): term_msg is pointed at term_code before sprintf fills
 * it; harmless only because term_msg is not read until the Jmsg below. */
344 term_msg = term_code;
345 sprintf(term_code, _("Inappropriate term code: %c\n"), jcr->JobStatus);
348 bstrftimes(schedt, sizeof(schedt), jcr->jr.SchedTime);
349 bstrftimes(sdt, sizeof(sdt), jcr->jr.StartTime);
350 bstrftimes(edt, sizeof(edt), jcr->jr.EndTime);
351 RunTime = jcr->jr.EndTime - jcr->jr.StartTime;
/* Throughput in KB/s; a RunTime==0 guard is presumably on a line not
 * visible here -- otherwise this divides by zero. */
355 kbps = ((double)jcr->jr.JobBytes) / (1000.0 * (double)RunTime);
357 if (!db_get_job_volume_names(jcr, jcr->db, jcr->jr.JobId, &jcr->VolumeName)) {
359 * Note, if the job has erred, most likely it did not write any
360 * tape, so suppress this "error" message since in that case
361 * it is normal. Or look at it the other way, only for a
362 * normal exit should we complain about this error.
364 if (jcr->JobStatus == JS_Terminated && jcr->jr.JobBytes) {
365 Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(jcr->db));
367 jcr->VolumeName[0] = 0; /* none */
/* Compute software-compression ratio from bytes read vs written */
370 if (jcr->ReadBytes == 0) {
371 bstrncpy(compress, "None", sizeof(compress));
373 compression = (double)100 - 100.0 * ((double)jcr->JobBytes / (double)jcr->ReadBytes);
374 if (compression < 0.5) {
375 bstrncpy(compress, "None", sizeof(compress));
377 bsnprintf(compress, sizeof(compress), "%.1f %%", compression);
380 jobstatus_to_ascii(jcr->FDJobStatus, fd_term_msg, sizeof(fd_term_msg));
381 jobstatus_to_ascii(jcr->SDJobStatus, sd_term_msg, sizeof(sd_term_msg));
383 // bmicrosleep(15, 0); /* for debugging SIGHUP */
/* Emit the standard multi-line job report */
385 Jmsg(jcr, msg_type, 0, _("Bacula %s %s (%s): %s\n"
386 " Build OS: %s %s %s\n"
389 " Backup Level: %s%s\n"
390 " Client: \"%s\" %s\n"
391 " FileSet: \"%s\" %s\n"
392 " Pool: \"%s\" (From %s)\n"
393 " Catalog: \"%s\" (From %s)\n"
394 " Storage: \"%s\" (From %s)\n"
395 " Scheduled time: %s\n"
398 " Elapsed time: %s\n"
400 " FD Files Written: %s\n"
401 " SD Files Written: %s\n"
402 " FD Bytes Written: %s (%sB)\n"
403 " SD Bytes Written: %s (%sB)\n"
405 " Software Compression: %s\n"
409 " Volume name(s): %s\n"
410 " Volume Session Id: %d\n"
411 " Volume Session Time: %d\n"
412 " Last Volume Bytes: %s (%sB)\n"
413 " Non-fatal FD errors: %d\n"
415 " FD termination status: %s\n"
416 " SD termination status: %s\n"
417 " Termination: %s\n\n"),
418 my_name, VERSION, LSMDATE, edt,
419 HOST_OS, DISTNAME, DISTVER,
422 level_to_str(jcr->JobLevel), jcr->since,
423 jcr->client->name(), cr.Uname,
424 jcr->fileset->name(), jcr->FSCreateTime,
425 jcr->pool->name(), jcr->pool_source,
426 jcr->catalog->name(), jcr->catalog_source,
427 jcr->wstore->name(), jcr->wstore_source,
431 edit_utime(RunTime, elapsed, sizeof(elapsed)),
433 edit_uint64_with_commas(jcr->jr.JobFiles, ec1),
434 edit_uint64_with_commas(jcr->SDJobFiles, ec2),
435 edit_uint64_with_commas(jcr->jr.JobBytes, ec3),
436 edit_uint64_with_suffix(jcr->jr.JobBytes, ec4),
437 edit_uint64_with_commas(jcr->SDJobBytes, ec5),
438 edit_uint64_with_suffix(jcr->SDJobBytes, ec6),
441 jcr->VSS?_("yes"):_("no"),
442 jcr->Encrypt?_("yes"):_("no"),
443 jcr->accurate?_("yes"):_("no"),
447 edit_uint64_with_commas(mr.VolBytes, ec7),
448 edit_uint64_with_suffix(mr.VolBytes, ec8),
455 Dmsg0(100, "Leave vbackup_cleanup()\n");
/*
 * SQL-result callback: for each row of the file-selection query, add
 * the (JobId, FileIndex) pair to the restore bootstrap structure
 * passed via ctx.
 *
 * NOTE(review): the JobId/FileIndex declarations and the return
 * statement are on lines not visible in this truncated view.
 */
459 * This callback routine is responsible for inserting the
460 * items it gets into the bootstrap structure. For each JobId selected
461 * this routine is called once for each file. We do not allow
462 * duplicate filenames, but instead keep the info from the most
463 * recent file entered (i.e. the JobIds are assumed to be sorted)
465 * See uar_sel_files in sql_cmds.c for query that calls us.
466 * row[0]=Path, row[1]=Filename, row[2]=FileIndex
467 * row[3]=JobId row[4]=LStat
469 int insert_bootstrap_handler(void *ctx, int num_fields, char **row)
473 RBSR *bsr = (RBSR *)ctx;
475 JobId = str_to_int64(row[3]);
476 FileIndex = str_to_int64(row[2]);
477 add_findex(bsr, JobId, FileIndex);
/*
 * Build the bootstrap (.bsr) file for the given comma-separated list
 * of JobIds: query the catalog for every file in those jobs, feed each
 * row into the bsr tree, complete the bsr, and write it to disk,
 * recording the expected file count in jcr->ExpectedFiles.
 *
 * NOTE(review): the function continues past the end of this view, and
 * the #else branch mixes rx.bsr with rx->JobIds / rx->query access --
 * cannot confirm rx's type usage from the visible lines alone.
 */
482 static bool create_bootstrap_file(JCR *jcr, POOLMEM *jobids)
487 memset(&rx, 0, sizeof(rx));
489 ua = new_ua_context(jcr);
491 #define new_get_file_list
492 #ifdef new_get_file_list
/* Preferred path: one query over all JobIds, rows handled by
 * insert_bootstrap_handler above. */
493 if (!db_get_file_list(jcr, ua->db, jobids, insert_bootstrap_handler, (void *)&rx.bsr)) {
494 Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(ua->db));
/* Legacy path (compiled out): iterate JobIds one at a time */
498 for (p=rx->JobIds; get_next_jobid_from_list(&p, &JobId) > 0; ) {
501 if (JobId == last_JobId) {
502 continue; /* eliminate duplicate JobIds */
506 * Find files for this JobId and insert them in the tree
508 Mmsg(rx->query, uar_sel_files, edit_int64(JobId, ed1));
509 if (!db_sql_query(ua->db, rx->query, insert_tree_handler, (void *)&rx.bsr)) {
510 Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(ua->db));
515 complete_bsr(ua, rx.bsr);
516 jcr->ExpectedFiles = write_bsr_file(ua, rx);
517 if (jcr->ExpectedFiles == 0) {