2 Bacula® - The Network Backup Solution
4 Copyright (C) 2004-2014 Free Software Foundation Europe e.V.
6 The main author of Bacula is Kern Sibbald, with contributions from many
7 others, a complete list can be found in the file AUTHORS.
9 You may use this file and others of this release according to the
10 license defined in the LICENSE file, which includes the Affero General
11 Public License, v3.0 ("AGPLv3") and some additional permissions and
12 terms pursuant to its AGPLv3 Section 7.
14 Bacula® is a registered trademark of Kern Sibbald.
18 * Bacula Director -- mac.c -- responsible for doing
19 * migration and copy jobs.
21 * Also handles Copy jobs (March MMVIII)
23 * Written by Kern Sibbald, September MMIV
25 * Basic tasks done here:
26 * Open DB and create records for this job.
27 * Open Message Channel with Storage daemon to tell him a job will be starting.
28 * Open connection with Storage daemon and pass him commands
30 * When the Storage daemon finishes the job, update the DB.
/* Debug level used for all Dmsg tracing in this file */
38 static const int dbglevel = 10;
/* Command sent to the SD: storage address, port, TLS flag, job name, auth key */
39 static char storaddr[] = "storage address=%s port=%d ssl=%d Job=%s Authentication=%s\n";
/* Expected SD acknowledgement for the storage command above */
40 static char OKstore[] = "2000 OK storage\n";
42 /* Imported subroutines */
43 extern int getJob_to_migrate(JCR *jcr);
44 extern bool regex_find_jobids(JCR *jcr, idpkt *ids, const char *query1,
45 const char *query2, const char *type);
46 extern bool find_mediaid_then_jobids(JCR *jcr, idpkt *ids, const char *query1,
48 extern bool find_jobids_of_pool_uncopied_jobs(JCR *jcr, idpkt *ids);
/* Forward reference: resolves the NextPool (write pool/storage) for this job */
50 static bool set_mac_next_pool(JCR *jcr, POOL **pool);
53 * Called here before the job is run to do the job
54 * specific setup. Note, one of the important things to
55 * complete in this init code is to make the definitive
56 * choice of input and output storage devices. This is
57 * because immediately after the init, the job is queued
58 * in the jobq.c code, and it checks that all the resources
59 * (storage resources in particular) are available, so these
60 * must all be properly defined.
62 * previous_jr refers to the job DB record of the Job that is
63 * going to be migrated.
64 * prev_job refers to the job resource of the Job that is
65 * going to be migrated.
66 * jcr is the jcr for the current "migration" job. It is a
67 * control job that is put in the DB as a migration job, which
68 * means that this job migrated a previous job to a new job.
69 * No Volume or File data is associated with this control
71 * wjcr refers to the migrate/copy job that is writing and is run by
72 * the current jcr. It is a backup job that writes the
73 * data written for the previous_jr into the new pool. This
74 * job (wjcr) becomes the new backup job that replaces
75 * the original backup job. Note, this jcr is not really run. It
76 * is simply attached to the current jcr. It will show up in
77 * the Director's status output, but not in the SD or FD, both of
78 * which deal only with the current migration job (i.e. jcr).
/*
 * Initialize a Migration/Copy control job: create DB records, pick the
 * job to migrate, and build the write jcr (wjcr) that represents the
 * new backup job.  See the block comment above for the jcr/wjcr roles.
 * Returns true on success or "no work"; fatal errors are reported via Jmsg.
 */
80 bool do_mac_init(JCR *jcr)
84 JCR *wjcr; /* jcr of writing job */
88 apply_pool_overrides(jcr);
/* Honor Allow Duplicate Jobs for the control job itself */
90 if (!allow_duplicate_job(jcr)) {
/* Make sure the (read) pool exists in the catalog */
94 jcr->jr.PoolId = get_or_create_pool_record(jcr, jcr->pool->name());
95 if (jcr->jr.PoolId == 0) {
96 Dmsg1(dbglevel, "JobId=%d no PoolId\n", (int)jcr->JobId);
97 Jmsg(jcr, M_FATAL, 0, _("Could not get or create a Pool record.\n"));
101 * Note, at this point, pool is the pool for this job. We
102 * transfer it to rpool (read pool), and a bit later,
103 * pool will be changed to point to the write pool,
104 * which comes from pool->NextPool.
106 jcr->rpool = jcr->pool; /* save read pool */
107 pm_strcpy(jcr->rpool_source, jcr->pool_source);
108 Dmsg2(dbglevel, "Read pool=%s (From %s)\n", jcr->rpool->name(), jcr->rpool_source);
110 if (!get_or_create_fileset_record(jcr)) {
111 Dmsg1(dbglevel, "JobId=%d no FileSet\n", (int)jcr->JobId);
112 Jmsg(jcr, M_FATAL, 0, _("Could not get or create the FileSet record.\n"));
116 /* If we find a job or jobs to migrate it is previous_jr.JobId */
117 count = getJob_to_migrate(jcr);
/* Nothing selected: still resolve NextPool so status output is sane */
122 set_mac_next_pool(jcr, &pool);
123 return true; /* no work */
126 Dmsg1(dbglevel, "Back from getJob_to_migrate JobId=%d\n", (int)jcr->JobId);
128 if (jcr->previous_jr.JobId == 0) {
129 Dmsg1(dbglevel, "JobId=%d no previous JobId\n", (int)jcr->JobId);
130 Jmsg(jcr, M_INFO, 0, _("No previous Job found to %s.\n"), jcr->get_ActionName(0));
131 set_mac_next_pool(jcr, &pool);
132 return true; /* no work */
/* Bootstrap drives the read side: which volumes/files the reading SD restores */
135 if (create_restore_bootstrap_file(jcr) < 0) {
136 Jmsg(jcr, M_FATAL, 0, _("Create bootstrap file failed.\n"));
/* Previous job exists but has no data: terminate normally with no work */
140 if (jcr->previous_jr.JobId == 0 || jcr->ExpectedFiles == 0) {
141 jcr->setJobStatus(JS_Terminated);
142 Dmsg1(dbglevel, "JobId=%d expected files == 0\n", (int)jcr->JobId);
143 if (jcr->previous_jr.JobId == 0) {
144 Jmsg(jcr, M_INFO, 0, _("No previous Job found to %s.\n"), jcr->get_ActionName(0));
146 Jmsg(jcr, M_INFO, 0, _("Previous Job has no data to %s.\n"), jcr->get_ActionName(0));
148 set_mac_next_pool(jcr, &pool);
149 return true; /* no work */
153 Dmsg5(dbglevel, "JobId=%d: Current: Name=%s JobId=%d Type=%c Level=%c\n",
155 jcr->jr.Name, (int)jcr->jr.JobId,
156 jcr->jr.JobType, jcr->jr.JobLevel);
/* Look up both resources; fatal if either is missing from the config */
159 job = (JOB *)GetResWithName(R_JOB, jcr->jr.Name);
160 prev_job = (JOB *)GetResWithName(R_JOB, jcr->previous_jr.Name);
163 Jmsg(jcr, M_FATAL, 0, _("Job resource not found for \"%s\".\n"), jcr->jr.Name);
167 Jmsg(jcr, M_FATAL, 0, _("Previous Job resource not found for \"%s\".\n"),
168 jcr->previous_jr.Name);
173 /* Create a write jcr */
174 wjcr = jcr->wjcr = new_jcr(sizeof(JCR), dird_free_jcr);
175 memcpy(&wjcr->previous_jr, &jcr->previous_jr, sizeof(wjcr->previous_jr));
178 * Turn the wjcr into a "real" job that takes on the aspects of
179 * the previous backup job "prev_job".
181 set_jcr_defaults(wjcr, prev_job);
182 if (!setup_job(wjcr)) {
183 Jmsg(jcr, M_FATAL, 0, _("setup job failed.\n"));
187 /* Now reset the job record from the previous job */
188 memcpy(&wjcr->jr, &jcr->previous_jr, sizeof(wjcr->jr));
189 /* Update the jr to reflect the new values of PoolId and JobId. */
190 wjcr->jr.PoolId = jcr->jr.PoolId;
191 wjcr->jr.JobId = wjcr->JobId;
192 wjcr->sd_client = true;
193 //wjcr->setJobType(jcr->getJobType());
194 wjcr->spool_data = job->spool_data; /* turn on spooling if requested in job */
195 wjcr->spool_size = jcr->spool_size;
198 /* Don't let the watchdog check Max*Time values on this Job */
199 wjcr->no_maxtime = true;
200 /* Don't check for duplicates on this job */
201 wjcr->job->IgnoreDuplicateJobChecking = true;
202 Dmsg4(dbglevel, "wjcr: Name=%s JobId=%d Type=%c Level=%c\n",
203 wjcr->jr.Name, (int)wjcr->jr.JobId,
204 wjcr->jr.JobType, wjcr->jr.JobLevel);
/* Resolve the write pool (NextPool) and propagate its storage to both jcrs */
206 if (set_mac_next_pool(jcr, &pool)) {
207 /* If pool storage specified, use it for restore */
208 copy_rstorage(wjcr, pool->storage, _("Pool resource"));
209 copy_rstorage(jcr, pool->storage, _("Pool resource"));
211 wjcr->pool = jcr->pool;
212 wjcr->next_pool = jcr->next_pool;
213 wjcr->jr.PoolId = jcr->jr.PoolId;
220 * set_mac_next_pool() called by do_mac_init()
221 * at different stages.
222 * The idea here is to make a common subroutine for the
223 * NextPool's search code and to permit do_mac_init()
224 * to return with NextPool set in the jcr struct.
/*
 * Resolve the write pool (NextPool) for a Migration/Copy job and apply
 * any write-storage overrides.  On success *retpool points at the POOL
 * resource of the original job's pool; failures are reported via Jmsg.
 */
226 static bool set_mac_next_pool(JCR *jcr, POOL **retpool)
233 * Get the PoolId used with the original job. Then
234 * find the pool name from the database record.
236 memset(&pr, 0, sizeof(pr));
237 pr.PoolId = jcr->jr.PoolId;
238 if (!db_get_pool_record(jcr, jcr->db, &pr)) {
239 Jmsg(jcr, M_FATAL, 0, _("Pool for JobId %s not in database. ERR=%s\n"),
240 edit_int64(pr.PoolId, ed1), db_strerror(jcr->db));
243 /* Get the pool resource corresponding to the original job */
244 pool = (POOL *)GetResWithName(R_POOL, pr.Name);
247 Jmsg(jcr, M_FATAL, 0, _("Pool resource \"%s\" not found.\n"), pr.Name);
/* Switch jcr->pool/wstore to the NextPool's settings (write side) */
251 if (!apply_wstorage_overrides(jcr, pool)) {
255 Dmsg2(dbglevel, "Write pool=%s read rpool=%s\n", jcr->pool->name(), jcr->rpool->name());
261 * Send storage address and authentication to deblock the other
/*
 * Send the peer SD's address, port, TLS requirement and auth key over
 * jcr->store_bsock (the "storaddr" command) and wait for the "2000 OK
 * storage" acknowledgement.  Used to connect the reading and writing
 * SDs to each other.  Returns false and logs M_FATAL on a bad response.
 */
264 static bool send_store_addr_to_sd(JCR *jcr, char *Job, char *sd_auth_key,
265 STORE *store, char *store_address, uint32_t store_port)
267 int tls_need = BNET_TLS_NONE;
269 /* TLS Requirement */
270 if (store->tls_enable) {
271 if (store->tls_require) {
272 tls_need = BNET_TLS_REQUIRED;
274 tls_need = BNET_TLS_OK;
279 * Send Storage address to the SD client
281 Dmsg2(200, "=== Job=%s sd auth key=%s\n", Job, sd_auth_key);
282 jcr->store_bsock->fsend(storaddr, store_address, store_port,
283 tls_need, Job, sd_auth_key);
284 if (!response(jcr, jcr->store_bsock, OKstore, "Storage", DISPLAY_ERROR)) {
285 Dmsg4(050, "Response fail for: JobId=%d storeaddr=%s:%d Job=%s\n",
286 jcr->JobId, store_address, store_port, Job);
/* NOTE(review): "storeddr" below looks like a typo for "storaddr" in the
 * user-visible fatal message — confirm and fix in a behavior change. */
287 Jmsg3(jcr, M_FATAL, 0, "Response failure: storeddr=%s:%d Job=%s\n",
288 store_address, store_port, Job);
296 * Do a Migration and Copy of a previous job
298 * Returns: false on failure
/*
 * Run a Migration or Copy job: connect the reading SD (jcr) and the
 * writing SD (wjcr), exchange addresses/auth keys so the two SDs talk
 * to each other, then wait for both to terminate and clean up.
 * Returns false on failure (error paths not all visible in this listing).
 */
301 bool do_mac(JCR *jcr)
305 JCR *wjcr = jcr->wjcr; /* newly migrated job */
312 * If wjcr is NULL, there is nothing to do for this job,
313 * so set a normal status, cleanup and return OK.
316 jcr->setJobStatus(JS_Terminated);
317 mac_cleanup(jcr, JS_Terminated, JS_Terminated);
/* Re-read the previous job's record; it may have changed since selection */
321 if (!db_get_job_record(jcr, jcr->db, &jcr->previous_jr)) {
322 Jmsg(jcr, M_FATAL, 0, _("Could not get job record for JobId %s to %s. ERR=%s"),
323 edit_int64(jcr->previous_jr.JobId, ed1),
324 jcr->get_ActionName(0),
325 db_strerror(jcr->db));
326 jcr->setJobStatus(JS_Terminated);
327 mac_cleanup(jcr, JS_Terminated, JS_Terminated);
330 /* Make sure this job was not already migrated */
331 if (jcr->previous_jr.JobType != JT_BACKUP &&
332 jcr->previous_jr.JobType != JT_JOB_COPY) {
333 Jmsg(jcr, M_INFO, 0, _("JobId %s already %s probably by another Job. %s stopped.\n"),
334 edit_int64(jcr->previous_jr.JobId, ed1),
335 jcr->get_ActionName(1),
336 jcr->get_OperationName());
337 jcr->setJobStatus(JS_Terminated);
338 mac_cleanup(jcr, JS_Terminated, JS_Terminated);
342 /* Print Job Start message */
343 Jmsg(jcr, M_INFO, 0, _("Start %s JobId %s, Job=%s\n"),
344 jcr->get_OperationName(), edit_uint64(jcr->JobId, ed1), jcr->Job);
346 Dmsg3(200, "Start %s JobId %s, Job=%s\n",
347 jcr->get_OperationName(), edit_uint64(jcr->JobId, ed1), jcr->Job);
351 * Now separate the read and write storages. jcr has no wstor...
352 * they all go into wjcr.
354 free_rwstorage(wjcr);
356 wjcr->wstore = jcr->wstore;
358 wjcr->wstorage = jcr->wstorage;
359 jcr->wstorage = NULL;
361 /* TODO: See priority with bandwidth parameter */
362 if (jcr->job->max_bandwidth > 0) {
363 jcr->max_bandwidth = jcr->job->max_bandwidth;
364 } else if (jcr->client->max_bandwidth > 0) {
365 jcr->max_bandwidth = jcr->client->max_bandwidth;
368 if (jcr->max_bandwidth > 0) {
369 send_bwlimit(jcr, jcr->Job); /* Old clients don't have this command */
373 * Open a message channel connection with the Storage
374 * daemon. This is to let him know that our client
375 * will be contacting him for a backup session.
378 jcr->setJobStatus(JS_WaitSD);
379 wjcr->setJobStatus(JS_WaitSD);
382 * Start conversation with write Storage daemon
384 Dmsg0(200, "Connect to write (wjcr) storage daemon.\n");
385 if (!connect_to_storage_daemon(wjcr, 10, SDConnectTimeout, 1)) {
388 wsd = wjcr->store_bsock;
391 * Start conversation with read Storage daemon
393 Dmsg1(200, "Connect to read (jcr) storage daemon. Jid=%d\n", jcr->JobId);
394 if (!connect_to_storage_daemon(jcr, 10, SDConnectTimeout, 1)) {
397 sd = jcr->store_bsock;
398 jcr->sd_calls_client = jcr->client->sd_calls_client;
400 Dmsg2(dbglevel, "Read store=%s, write store=%s\n",
401 ((STORE *)jcr->rstorage->first())->name(),
402 ((STORE *)wjcr->wstorage->first())->name());
405 * Now start a job with the read Storage daemon sending the bsr.
406 * This call returns the sd_auth_key
408 Dmsg1(200, "Start job with read (jcr) storage daemon. Jid=%d\n", jcr->JobId);
409 if (!start_storage_daemon_job(jcr, jcr->rstorage, NULL, /*send_bsr*/true)) {
412 Dmsg0(150, "Read storage daemon connection OK\n");
/* Decide which SD initiates the data connection; both visible branches
 * set sd_calls_client=true and differ only in sd_client */
414 if (jcr->sd_calls_client) {
415 wjcr->sd_calls_client = true;
416 wjcr->sd_client = false;
418 wjcr->sd_calls_client = true;
419 wjcr->sd_client = true;
423 * Now start a job with the write Storage daemon sending.
425 Dmsg1(200, "Start Job with write (wjcr) storage daemon. Jid=%d\n", jcr->JobId);
426 if (!start_storage_daemon_job(wjcr, NULL, wjcr->wstorage, /*no_send_bsr*/false)) {
429 Dmsg0(150, "Write storage daemon connection OK\n");
432 /* Declare the job started to start the MaxRunTime check */
433 jcr->setJobStarted();
436 * We re-update the job start record so that the start
437 * time is set after the run before job. This avoids
438 * that any files created by the run before job will
439 * be saved twice. They will be backed up in the current
440 * job, but not in the next one unless they are changed.
441 * Without this, they will be backed up in this job and
442 * in the next job run because in that case, their date
443 * is after the start of this run.
445 jcr->start_time = time(NULL);
446 jcr->jr.StartTime = jcr->start_time;
447 jcr->jr.JobTDate = jcr->start_time;
448 jcr->setJobStatus(JS_Running);
450 /* Update job start record for this mac control job */
451 if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) {
452 Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db));
456 /* Declare the job started to start the MaxRunTime check */
457 jcr->setJobStarted();
459 wjcr->start_time = time(NULL);
460 wjcr->jr.StartTime = wjcr->start_time;
461 wjcr->jr.JobTDate = wjcr->start_time;
462 wjcr->setJobStatus(JS_Running);
465 /* Update job start record for the real mac backup job */
466 if (!db_update_job_start_record(wjcr, wjcr->db, &wjcr->jr)) {
467 Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(wjcr->db));
471 Dmsg4(dbglevel, "wjcr: Name=%s JobId=%d Type=%c Level=%c\n",
472 wjcr->jr.Name, (int)wjcr->jr.JobId,
473 wjcr->jr.JobType, wjcr->jr.JobLevel);
475 store = wjcr->wstore;
/* SDDport is the SD-to-SD data port; fall back to the control port */
476 if (store->SDDport == 0) {
477 store->SDDport = store->SDport;
480 if (jcr->sd_calls_client) {
482 * Reading SD must call the "client" i.e. the writing SD
484 if (jcr->SDVersion < 3) {
485 Jmsg(jcr, M_FATAL, 0, _("The Storage daemon does not support SDCallsClient.\n"));
489 store_address = store->address; /* note: store points to wstore */
491 Dmsg2(200, "Start write message thread jid=%d Job=%s\n", wjcr->JobId, wjcr->Job);
492 if (!run_storage_and_start_message_thread(wjcr, wsd)) {
496 store_port = store->SDDport;
499 * Send writing SD address to the reading SD
501 /* Send and wait for connection */
502 /* ***FIXME*** this should probably be jcr->rstore, store_address, ...
503 * to get TLS right */
504 if (!send_store_addr_to_sd(jcr, wjcr->Job, wjcr->sd_auth_key,
505 store, store_address, store_port)) {
509 /* Start read message thread */
510 Dmsg2(200, "Start read message thread jid=%d Job=%s\n", jcr->JobId, jcr->Job);
511 if (!run_storage_and_start_message_thread(jcr, sd)) {
517 * Writing SD must simulate an FD and call the reading SD
519 * Send Storage daemon address to the writing SD
521 store_address = get_storage_address(jcr->client, store);
522 store_port = store->SDDport;
524 /* Start read message thread */
525 Dmsg2(200, "Start read message thread jid=%d Job=%s\n", jcr->JobId, jcr->Job);
526 if (!run_storage_and_start_message_thread(jcr, sd)) {
530 /* Attempt connection for one hour */
531 if (!send_store_addr_to_sd(wjcr, jcr->Job, jcr->sd_auth_key,
532 store, store_address, store_port)) {
535 /* Start write message thread */
536 Dmsg2(200, "Start write message thread jid=%d Job=%s\n", wjcr->JobId, wjcr->Job);
537 if (!run_storage_and_start_message_thread(wjcr, wsd)) {
542 jcr->setJobStatus(JS_Running);
543 wjcr->setJobStatus(JS_Running);
545 /* Pickup Job termination data */
546 /* Note, the SD stores in jcr->JobFiles/ReadBytes/JobBytes/JobErrors */
547 wait_for_storage_daemon_termination(wjcr);
548 wjcr->setJobStatus(wjcr->SDJobStatus);
549 wait_for_storage_daemon_termination(jcr);
550 jcr->setJobStatus(jcr->SDJobStatus);
551 db_write_batch_file_records(wjcr); /* used by bulk batch file insert */
553 ok = jcr->is_JobStatus(JS_Terminated) && wjcr->is_JobStatus(JS_Terminated);
556 /* Put back jcr write storages for proper cleanup */
557 jcr->wstorage = wjcr->wstorage;
558 jcr->wstore = wjcr->wstore;
560 wjcr->wstorage = NULL;
561 wjcr->file_bsock = NULL;
564 mac_cleanup(jcr, jcr->JobStatus, wjcr->JobStatus);
570 * Called from mac_sql.c for each migration/copy job to start
/*
 * Start one migration/copy worker job by building a console "run"
 * command (with ignoreduplicatecheck and the selected pool, plus
 * nextpool if set) and executing it through a UA context.
 * Called from mac_sql.c once per job to migrate/copy.
 */
572 void start_mac_job(JCR *jcr)
574 UAContext *ua = new_ua_context(jcr);
576 char args[MAX_NAME_LENGTH + 50];
579 Mmsg(ua->cmd, "run job=\"%s\" jobid=%s ignoreduplicatecheck=yes pool=\"%s\"",
580 jcr->job->name(), edit_uint64(jcr->MigrateJobId, ed1),
/* Append nextpool override only when one is configured on this jcr */
582 if (jcr->next_pool) {
583 bsnprintf(args, sizeof(args), " nextpool=\"%s\"", jcr->next_pool->name());
584 pm_strcat(ua->cmd, args);
586 Dmsg2(dbglevel, "=============== %s cmd=%s\n", jcr->get_OperationName(), ua->cmd);
587 parse_ua_args(ua); /* parse command */
588 JobId_t jobid = run_cmd(ua, ua->cmd);
590 Jmsg(jcr, M_ERROR, 0, _("Could not start migration/copy job.\n"));
592 Jmsg(jcr, M_INFO, 0, _("%s JobId %d started.\n"), jcr->get_OperationName(), (int)jobid);
598 * Release resources allocated during backup.
600 /* ***FIXME*** implement writeTermCode */
/*
 * Finalize a Migration/Copy job: close out both job records, rewrite
 * catalog rows (Job type, Log records, purges), and emit the final
 * job report.  TermCode is the control job's status; writeTermCode is
 * the write job's status (currently unused — see FIXME above).
 */
601 void mac_cleanup(JCR *jcr, int TermCode, int writeTermCode)
603 char sdt[MAX_TIME_LENGTH], edt[MAX_TIME_LENGTH];
604 char ec1[30], ec2[30], ec3[30], ec4[30], ec5[30], elapsed[50];
605 char ec6[50], ec7[50], ec8[50];
606 char term_code[100], sd_term_msg[100];
607 const char *term_msg;
608 int msg_type = M_INFO;
612 JCR *wjcr = jcr->wjcr;
613 POOL_MEM query(PM_MESSAGE);
616 Dmsg2(100, "Enter mac_cleanup %d %c\n", TermCode, TermCode);
617 update_job_end(jcr, TermCode);
620 * Check if we actually did something.
621 * wjcr is jcr of the newly migrated job.
624 char old_jobid[50], new_jobid[50];
626 edit_uint64(jcr->previous_jr.JobId, old_jobid);
627 edit_uint64(wjcr->jr.JobId, new_jobid);
/* The SD's counters are authoritative: copy them to both jcrs */
629 wjcr->JobFiles = jcr->JobFiles = wjcr->SDJobFiles;
630 wjcr->JobBytes = jcr->JobBytes = wjcr->SDJobBytes;
631 wjcr->jr.RealEndTime = 0;
632 wjcr->jr.PriorJobId = jcr->previous_jr.JobId;
634 update_job_end(wjcr, TermCode);
636 /* Update final items to set them to the previous job's values */
637 Mmsg(query, "UPDATE Job SET StartTime='%s',EndTime='%s',"
638 "JobTDate=%s WHERE JobId=%s",
639 jcr->previous_jr.cStartTime, jcr->previous_jr.cEndTime,
640 edit_uint64(jcr->previous_jr.JobTDate, ec1),
642 db_sql_query(wjcr->db, query.c_str(), NULL, NULL);
645 * If we terminated a migration normally:
646 * - mark the previous job as migrated
647 * - move any Log records to the new JobId
648 * - Purge the File records from the previous job
650 if (jcr->getJobType() == JT_MIGRATE && jcr->JobStatus == JS_Terminated) {
651 Mmsg(query, "UPDATE Job SET Type='%c' WHERE JobId=%s",
652 (char)JT_MIGRATED_JOB, old_jobid);
653 db_sql_query(wjcr->db, query.c_str(), NULL, NULL);
654 UAContext *ua = new_ua_context(jcr);
655 /* Move JobLog to new JobId */
656 Mmsg(query, "UPDATE Log SET JobId=%s WHERE JobId=%s",
657 new_jobid, old_jobid);
658 db_sql_query(wjcr->db, query.c_str(), NULL, NULL);
660 if (jcr->job->PurgeMigrateJob) {
661 /* Purge old Job record */
662 purge_jobs_from_catalog(ua, old_jobid);
664 /* Purge all old file records, but leave Job record */
665 purge_files_from_jobs(ua, old_jobid);
672 * If we terminated a Copy (rather than a Migration) normally:
673 * - copy any Log records to the new JobId
674 * - set type="Job Copy" for the new job
676 if (jcr->getJobType() == JT_COPY && jcr->JobStatus == JS_Terminated) {
677 /* Copy JobLog to new JobId */
678 Mmsg(query, "INSERT INTO Log (JobId, Time, LogText ) "
679 "SELECT %s, Time, LogText FROM Log WHERE JobId=%s",
680 new_jobid, old_jobid);
681 db_sql_query(wjcr->db, query.c_str(), NULL, NULL);
682 Mmsg(query, "UPDATE Job SET Type='%c' WHERE JobId=%s",
683 (char)JT_JOB_COPY, new_jobid);
684 db_sql_query(wjcr->db, query.c_str(), NULL, NULL);
/* Re-read our own Job record so the report below shows final values */
687 if (!db_get_job_record(jcr, jcr->db, &jcr->jr)) {
688 Jmsg(jcr, M_WARNING, 0, _("Error getting Job record for Job report: ERR=%s"),
689 db_strerror(jcr->db));
690 jcr->setJobStatus(JS_ErrorTerminated);
693 update_bootstrap_file(wjcr);
695 if (!db_get_job_volume_names(wjcr, wjcr->db, wjcr->jr.JobId, &wjcr->VolumeName)) {
697 * Note, if the job has failed, most likely it did not write any
698 * tape, so suppress this "error" message since in that case
699 * it is normal. Or look at it the other way, only for a
700 * normal exit should we complain about this error.
702 if (jcr->JobStatus == JS_Terminated && jcr->jr.JobBytes) {
703 Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(wjcr->db));
705 wjcr->VolumeName[0] = 0; /* none */
708 if (wjcr->VolumeName[0]) {
709 /* Find last volume name. Multiple vols are separated by | */
710 char *p = strrchr(wjcr->VolumeName, '|');
714 p = wjcr->VolumeName; /* no |, take full name */
716 bstrncpy(mr.VolumeName, p, sizeof(mr.VolumeName));
717 if (!db_get_media_record(jcr, jcr->db, &mr)) {
718 Jmsg(jcr, M_WARNING, 0, _("Error getting Media record for Volume \"%s\": ERR=%s"),
719 mr.VolumeName, db_strerror(jcr->db));
/* Map the final JobStatus to a report message; error/cancel paths also
 * terminate both SD sockets and cancel their message-channel threads */
723 switch (jcr->JobStatus) {
725 if (jcr->JobErrors || jcr->SDErrors) {
726 term_msg = _("%s OK -- with warnings");
728 term_msg = _("%s OK");
732 case JS_ErrorTerminated:
733 term_msg = _("*** %s Error ***");
734 msg_type = M_ERROR; /* Generate error message */
735 if (jcr->store_bsock) {
736 jcr->store_bsock->signal(BNET_TERMINATE);
737 if (jcr->SD_msg_chan) {
738 pthread_cancel(jcr->SD_msg_chan);
741 if (wjcr->store_bsock) {
742 wjcr->store_bsock->signal(BNET_TERMINATE);
743 if (wjcr->SD_msg_chan) {
744 pthread_cancel(wjcr->SD_msg_chan);
749 term_msg = _("%s Canceled");
750 if (jcr->store_bsock) {
751 jcr->store_bsock->signal(BNET_TERMINATE);
752 if (jcr->SD_msg_chan) {
753 pthread_cancel(jcr->SD_msg_chan);
756 if (wjcr->store_bsock) {
757 wjcr->store_bsock->signal(BNET_TERMINATE);
758 if (wjcr->SD_msg_chan) {
759 pthread_cancel(wjcr->SD_msg_chan);
764 term_msg = _("Inappropriate %s term code");
/* "No work" path: still mark the previous job migrated if one was chosen */
768 if (jcr->getJobType() == JT_MIGRATE && jcr->previous_jr.JobId != 0) {
769 /* Mark previous job as migrated */
770 Mmsg(query, "UPDATE Job SET Type='%c' WHERE JobId=%s",
771 (char)JT_MIGRATED_JOB, edit_uint64(jcr->previous_jr.JobId, ec1));
772 db_sql_query(jcr->db, query.c_str(), NULL, NULL);
774 term_msg = _("%s -- no files to %s");
777 bsnprintf(term_code, sizeof(term_code), term_msg, jcr->get_OperationName(), jcr->get_ActionName(0));
778 bstrftimes(sdt, sizeof(sdt), jcr->jr.StartTime);
779 bstrftimes(edt, sizeof(edt), jcr->jr.EndTime);
780 RunTime = jcr->jr.EndTime - jcr->jr.StartTime;
784 kbps = (double)jcr->SDJobBytes / (1000 * RunTime);
787 jobstatus_to_ascii(jcr->SDJobStatus, sd_term_msg, sizeof(sd_term_msg));
789 /* Edit string for last volume size */
790 Mmsg(vol_info, _("%s (%sB)"),
791 edit_uint64_with_commas(mr.VolBytes, ec4),
792 edit_uint64_with_suffix(mr.VolBytes, ec5));
/* Final user-visible job report */
794 Jmsg(jcr, msg_type, 0, _("%s %s %s (%s):\n"
795 " Build OS: %s %s %s\n"
796 " Prev Backup JobId: %s\n"
797 " Prev Backup Job: %s\n"
798 " New Backup JobId: %s\n"
799 " Current JobId: %s\n"
801 " Backup Level: %s%s\n"
803 " FileSet: \"%s\" %s\n"
804 " Read Pool: \"%s\" (From %s)\n"
805 " Read Storage: \"%s\" (From %s)\n"
806 " Write Pool: \"%s\" (From %s)\n"
807 " Write Storage: \"%s\" (From %s)\n"
808 " Catalog: \"%s\" (From %s)\n"
811 " Elapsed time: %s\n"
813 " SD Files Written: %s\n"
814 " SD Bytes Written: %s (%sB)\n"
816 " Volume name(s): %s\n"
817 " Volume Session Id: %d\n"
818 " Volume Session Time: %d\n"
819 " Last Volume Bytes: %s\n"
821 " SD termination status: %s\n"
822 " Termination: %s\n\n"),
823 BACULA, my_name, VERSION, LSMDATE,
824 HOST_OS, DISTNAME, DISTVER,
825 edit_uint64(jcr->previous_jr.JobId, ec6),
826 jcr->previous_jr.Job,
827 wjcr ? edit_uint64(wjcr->jr.JobId, ec7) : "0",
828 edit_uint64(jcr->jr.JobId, ec8),
830 level_to_str(jcr->getJobLevel()), jcr->since,
832 jcr->fileset->name(), jcr->FSCreateTime,
833 jcr->rpool->name(), jcr->rpool_source,
834 jcr->rstore?jcr->rstore->name():"*None*",
835 NPRT(jcr->rstore_source),
836 jcr->pool->name(), jcr->pool_source,
837 jcr->wstore?jcr->wstore->name():"*None*",
838 NPRT(jcr->wstore_source),
839 jcr->catalog->name(), jcr->catalog_source,
842 edit_utime(RunTime, elapsed, sizeof(elapsed)),
844 edit_uint64_with_commas(jcr->SDJobFiles, ec1),
845 edit_uint64_with_commas(jcr->SDJobBytes, ec2),
846 edit_uint64_with_suffix(jcr->SDJobBytes, ec3),
848 wjcr ? wjcr->VolumeName : "",
856 Dmsg1(100, "migrate_cleanup() wjcr=0x%x\n", jcr->wjcr);
861 Dmsg0(100, "Leave migrate_cleanup()\n");
864 bool set_mac_wstorage(UAContext *ua, JCR *jcr, POOL *pool, POOL *next_pool,
869 ua->error_msg(_("No Next Pool specification found in Pool \"%s\".\n"),
872 Jmsg(jcr, M_FATAL, 0, _("No Next Pool specification found in Pool \"%s\".\n"),
878 if (!next_pool->storage || next_pool->storage->size() == 0) {
879 Jmsg(jcr, M_FATAL, 0, _("No Storage specification found in Next Pool \"%s\".\n"),
884 /* If pool storage specified, use it instead of job storage for backup */
885 copy_wstorage(jcr, next_pool->storage, source);