3 * Bacula Director -- migrate.c -- responsible for doing
6 * Kern Sibbald, September MMIV
8 * Basic tasks done here:
9 * Open DB and create records for this job.
10 * Open Message Channel with Storage daemon to tell him a job will be starting.
11 * Open connection with Storage daemon and pass him commands
13 * When the Storage daemon finishes the job, update the DB.
18 Copyright (C) 2004-2006 Kern Sibbald
20 This program is free software; you can redistribute it and/or
21 modify it under the terms of the GNU General Public License
22 version 2 as amended with additional clauses defined in the
23 file LICENSE in the main source directory.
25 This program is distributed in the hope that it will be useful,
26 but WITHOUT ANY WARRANTY; without even the implied warranty of
27 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 file LICENSE for additional details.
36 #include "lib/bregex.h"
/* Debug level used for most trace (Dmsg) output in this file. */
41 static const int dbglevel = 100;
/* Expected Storage daemon reply after the bootstrap file is sent. */
43 static char OKbootstrap[] = "3000 OK bootstrap\n";
/* Forward referenced functions */
44 static bool get_job_to_migrate(JCR *jcr);
46 static bool regex_find_jobids(JCR *jcr, idpkt *ids, const char *query1,
47 const char *query2, const char *type);
48 static void start_migration_job(JCR *jcr);
51 * Called here before the job is run to do the job
/*
 * Pre-run setup for a migration job: locate the job to migrate
 * (stored in jcr->previous_jr.JobId), create the FileSet and Pool DB
 * records, apply pool overrides, choose the write storage (Pool
 * storage overrides Job storage) and build the restore bootstrap file.
 *
 * Returns: true on success — including the "no work" case where
 *          previous_jr.JobId stays 0; false presumably on fatal error.
 * NOTE(review): several lines (braces / error returns) are not visible
 * in this extract — confirm error paths against the full source.
 */
54 bool do_migration_init(JCR *jcr)
56 /* If we find a job to migrate it is previous_jr.JobId */
57 if (!get_job_to_migrate(jcr)) {
61 if (jcr->previous_jr.JobId == 0) {
62 return true; /* no work */
65 if (!get_or_create_fileset_record(jcr)) {
69 apply_pool_overrides(jcr);
71 jcr->jr.PoolId = get_or_create_pool_record(jcr, jcr->pool->hdr.name);
72 if (jcr->jr.PoolId == 0) {
76 /* If pool storage specified, use it instead of job storage */
77 copy_wstorage(jcr, jcr->pool->storage, _("Pool resource"));
80 Jmsg(jcr, M_FATAL, 0, _("No Storage specification found in Job or Pool.\n"));
84 create_restore_bootstrap_file(jcr);
89 * Do a Migration of a previous job
91 * Returns: false on failure
/*
 * Do a Migration of a previous job.
 *
 * Drives the Storage daemon to copy the previous job's data, while a
 * second JCR (prev_jcr) is created to represent the new "real" backup
 * job that will replace the original one in the catalog.
 *
 * Returns: false on failure
 *          true  on success (or when there is no work to do)
 *
 * NOTE(review): many interior lines (braces, error returns, local
 * declarations for job/prev_job/pool/pr/sd/ed1) are not visible in
 * this extract.
 */
94 bool do_migration(JCR *jcr)
101 JCR *prev_jcr; /* newly migrated job */
/* Nothing previously selected, or nothing to copy: terminate cleanly. */
103 if (jcr->previous_jr.JobId == 0 || jcr->ExpectedFiles == 0) {
104 set_jcr_job_status(jcr, JS_Terminated);
105 migration_cleanup(jcr, jcr->JobStatus);
106 return true; /* no work */
109 Dmsg4(dbglevel, "Previous: Name=%s JobId=%d Type=%c Level=%c\n",
110 jcr->previous_jr.Name, jcr->previous_jr.JobId,
111 jcr->previous_jr.JobType, jcr->previous_jr.JobLevel);
113 Dmsg4(dbglevel, "Current: Name=%s JobId=%d Type=%c Level=%c\n",
114 jcr->jr.Name, jcr->jr.JobId,
115 jcr->jr.JobType, jcr->jr.JobLevel);
/* Look up both Job resources by name; both must exist. */
118 job = (JOB *)GetResWithName(R_JOB, jcr->jr.Name);
119 prev_job = (JOB *)GetResWithName(R_JOB, jcr->previous_jr.Name);
121 if (!job || !prev_job) {
126 * prev_jcr is the new Job that corresponds to the original
127 * job. It "runs" at the same time as the current
128 * migration job and becomes a new backup job that replaces
129 * the original backup job. Most operations on the current
130 * migration jcr are also done on the prev_jcr.
132 prev_jcr = jcr->previous_jcr = new_jcr(sizeof(JCR), dird_free_jcr);
133 memcpy(&prev_jcr->previous_jr, &jcr->previous_jr, sizeof(prev_jcr->previous_jr));
135 /* Turn the prev_jcr into a "real" job */
136 set_jcr_defaults(prev_jcr, prev_job);
137 if (!setup_job(prev_jcr)) {
141 /* Now reset the job record from the previous job */
142 memcpy(&prev_jcr->jr, &jcr->previous_jr, sizeof(prev_jcr->jr));
143 /* Update the jr to reflect the new values of PoolId, FileSetId, and JobId. */
144 prev_jcr->jr.PoolId = jcr->jr.PoolId;
145 prev_jcr->jr.FileSetId = jcr->jr.FileSetId;
146 prev_jcr->jr.JobId = prev_jcr->JobId;
148 Dmsg4(dbglevel, "Prev_jcr: Name=%s JobId=%d Type=%c Level=%c\n",
149 prev_jcr->jr.Name, prev_jcr->jr.JobId,
150 prev_jcr->jr.JobType, prev_jcr->jr.JobLevel);
153 * Get the PoolId used with the original job. Then
154 * find the pool name from the database record.
156 memset(&pr, 0, sizeof(pr));
157 pr.PoolId = prev_jcr->previous_jr.PoolId;
158 if (!db_get_pool_record(jcr, jcr->db, &pr)) {
159 Jmsg(jcr, M_FATAL, 0, _("Pool for JobId %s not in database. ERR=%s\n"),
160 edit_int64(pr.PoolId, ed1), db_strerror(jcr->db));
163 /* Get the pool resource corresponding to the original job */
164 pool = (POOL *)GetResWithName(R_POOL, pr.Name);
166 Jmsg(jcr, M_FATAL, 0, _("Pool resource \"%s\" not found.\n"), pr.Name);
170 /* Check Migration time and High/Low water marks */
173 /* If pool storage specified, use it for restore */
174 copy_rstorage(prev_jcr, pool->storage, _("Pool resource"));
175 copy_rstorage(jcr, pool->storage, _("Pool resource"));
177 /* If the original backup pool has a NextPool, make sure a
178 * record exists in the database.
180 if (pool->NextPool) {
181 jcr->jr.PoolId = get_or_create_pool_record(jcr, pool->NextPool->hdr.name);
182 if (jcr->jr.PoolId == 0) {
186 * put the "NextPool" resource pointer in our jcr so that we
187 * can pull the Storage reference from it.
189 prev_jcr->pool = jcr->pool = pool->NextPool;
190 prev_jcr->jr.PoolId = jcr->jr.PoolId;
191 pm_strcpy(jcr->pool_source, _("NextPool in Pool resource"));
194 /* If pool storage specified, use it instead of job storage for backup */
195 copy_wstorage(jcr, jcr->pool->storage, _("Pool resource"));
197 /* Print Job Start message */
198 Jmsg(jcr, M_INFO, 0, _("Start Migration JobId %s, Job=%s\n"),
199 edit_uint64(jcr->JobId, ed1), jcr->Job);
201 set_jcr_job_status(jcr, JS_Running);
202 set_jcr_job_status(prev_jcr, JS_Running);
203 Dmsg2(dbglevel, "JobId=%d JobLevel=%c\n", jcr->jr.JobId, jcr->jr.JobLevel);
205 /* Update job start record for this migration job */
206 if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) {
207 Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db));
211 Dmsg4(dbglevel, "Prev_jcr: Name=%s JobId=%d Type=%c Level=%c\n",
212 prev_jcr->jr.Name, prev_jcr->jr.JobId,
213 prev_jcr->jr.JobType, prev_jcr->jr.JobLevel);
215 /* Update job start record for migrated job */
216 if (!db_update_job_start_record(prev_jcr, prev_jcr->db, &prev_jcr->jr)) {
217 Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(prev_jcr->db));
223 * Open a message channel connection with the Storage
224 * daemon. This is to let him know that our client
225 * will be contacting him for a backup session.
228 Dmsg0(110, "Open connection with storage daemon\n");
229 set_jcr_job_status(jcr, JS_WaitSD);
230 set_jcr_job_status(prev_jcr, JS_WaitSD);
232 * Start conversation with Storage daemon
234 if (!connect_to_storage_daemon(jcr, 10, SDConnectTimeout, 1)) {
237 sd = jcr->store_bsock;
239 * Now start a job with the Storage daemon
241 Dmsg2(dbglevel, "Read store=%s, write store=%s\n",
242 ((STORE *)jcr->rstorage->first())->name(),
243 ((STORE *)jcr->wstorage->first())->name());
244 if (!start_storage_daemon_job(jcr, jcr->rstorage, jcr->wstorage)) {
247 Dmsg0(150, "Storage daemon connection OK\n");
/* Send bootstrap (what to read) and wait for the SD's acknowledgement. */
249 if (!send_bootstrap_file(jcr, sd) ||
250 !response(jcr, sd, OKbootstrap, "Bootstrap", DISPLAY_ERROR)) {
254 if (!bnet_fsend(sd, "run")) {
259 * Now start a Storage daemon message thread
261 if (!start_storage_daemon_message_thread(jcr)) {
266 set_jcr_job_status(jcr, JS_Running);
267 set_jcr_job_status(prev_jcr, JS_Running);
269 /* Pickup Job termination data */
270 /* Note, the SD stores in jcr->JobFiles/ReadBytes/JobBytes/Errors */
271 wait_for_storage_daemon_termination(jcr);
273 set_jcr_job_status(jcr, jcr->SDJobStatus);
274 if (jcr->JobStatus != JS_Terminated) {
277 migration_cleanup(jcr, jcr->JobStatus);
/* On success, purge the file records of the now-migrated original job. */
279 UAContext *ua = new_ua_context(jcr);
280 purge_files_from_job(ua, jcr->previous_jr.JobId);
292 * Callback handler make list of DB Ids
/*
 * DB query callback: append each returned DB Id (row[0]) to the
 * comma-separated list held in the idpkt context.
 * NOTE(review): the lines that increment ids->count and return are not
 * visible in this extract.
 */
294 static int dbid_handler(void *ctx, int num_fields, char **row)
296 idpkt *ids = (idpkt *)ctx;
298 Dmsg3(dbglevel, "count=%d Ids=%p %s\n", ids->count, ids->list, ids->list);
/* Only emit a comma separator after the first entry */
299 if (ids->count == 0) {
302 pm_strcat(ids->list, ",");
304 pm_strcat(ids->list, row[0]);
/*
 * dlist comparison callback: order uitems lexicographically by their
 * item string (used by binary_insert to keep names sorted and unique).
 */
315 static int item_compare(void *item1, void *item2)
317 uitem *i1 = (uitem *)item1;
318 uitem *i2 = (uitem *)item2;
319 return strcmp(i1->item, i2->item);
/*
 * DB query callback: insert row[0] into the dlist of unique names.
 * binary_insert returns the pre-existing node on a duplicate, in which
 * case the freshly allocated copy is released.
 */
322 static int unique_name_handler(void *ctx, int num_fields, char **row)
324 dlist *list = (dlist *)ctx;
326 uitem *new_item = (uitem *)malloc(sizeof(uitem));
329 memset(new_item, 0, sizeof(uitem));
330 new_item->item = bstrdup(row[0]);
331 Dmsg1(dbglevel, "Item=%s\n", row[0]);
332 item = (uitem *)list->binary_insert((void *)new_item, item_compare);
/* Duplicate name: free the temporary node we just allocated */
333 if (item != new_item) { /* already in list */
334 free(new_item->item);
335 free((char *)new_item);
/* Get Job names in Pool; %s = pool name */
342 const char *sql_job =
343 "SELECT DISTINCT Job.Name from Job,Pool"
344 " WHERE Pool.Name='%s' AND Job.PoolId=Pool.PoolId";
/* Get JobIds from regex'ed Job names; %s,%s = job name, pool name */
347 const char *sql_jobids_from_job =
348 "SELECT DISTINCT Job.JobId,Job.StartTime FROM Job,Pool"
349 " WHERE Job.Name='%s' AND Pool.Name='%s' AND Job.PoolId=Pool.PoolId"
350 " ORDER by Job.StartTime";
/* Get Client names in Pool; %s = pool name */
353 const char *sql_client =
354 "SELECT DISTINCT Client.Name from Client,Pool,Job"
355 " WHERE Pool.Name='%s' AND Job.ClientId=Client.ClientId AND"
356 " Job.PoolId=Pool.PoolId";
/* Get JobIds from regex'ed Client names; %s,%s = client name, pool name.
 *
 * BUG FIX: the Client table was referenced in the WHERE clause
 * (Client.Name, Job.ClientId=Client.ClientId) but missing from the
 * FROM clause, which is a SQL error on conforming engines.
 */
const char *sql_jobids_from_client =
   "SELECT DISTINCT Job.JobId,Job.StartTime FROM Job,Pool,Client"
   " WHERE Client.Name='%s' AND Pool.Name='%s' AND Job.PoolId=Pool.PoolId"
   " AND Job.ClientId=Client.ClientId "
   " ORDER by Job.StartTime";
/* Get Volume names in Pool (only finished volumes); %s = pool name */
366 const char *sql_vol =
367 "SELECT DISTINCT VolumeName FROM Media,Pool WHERE"
368 " VolStatus in ('Full','Used','Error') AND"
369 " Media.PoolId=Pool.PoolId AND Pool.Name='%s'";
/* Get JobIds from regex'ed Volume names; %s = volume name */
372 const char *sql_jobids_from_vol =
373 "SELECT DISTINCT Job.JobId,Job.StartTime FROM Media,JobMedia,Job"
374 " WHERE Media.VolumeName='%s' AND Media.MediaId=JobMedia.MediaId"
375 " AND JobMedia.JobId=Job.JobId"
376 " ORDER by Job.StartTime";
/* MediaId of the smallest (by bytes) finished Volume in the Pool; %s = pool name */
382 const char *sql_smallest_vol =
383 "SELECT MediaId FROM Media,Pool WHERE"
384 " VolStatus in ('Full','Used','Error') AND"
385 " Media.PoolId=Pool.PoolId AND Pool.Name='%s'"
386 " ORDER BY VolBytes ASC LIMIT 1";
/* MediaId of the least recently written finished Volume in the Pool; %s = pool name */
388 const char *sql_oldest_vol =
389 "SELECT MediaId FROM Media,Pool WHERE"
390 " VolStatus in ('Full','Used','Error') AND"
391 " Media.PoolId=Pool.PoolId AND Pool.Name='%s'"
392 " ORDER BY LastWritten ASC LIMIT 1";
/* JobIds that wrote to a given MediaId; %s = media id */
394 const char *sql_jobids_from_mediaid =
395 "SELECT DISTINCT Job.JobId,Job.StartTime FROM JobMedia,Job"
396 " WHERE JobMedia.JobId=Job.JobId AND JobMedia.MediaId=%s"
397 " ORDER by Job.StartTime";
/* Total bytes stored in the Pool (includes Append volumes); %s = pool name */
399 const char *sql_pool_bytes =
400 "SELECT SUM(VolBytes) FROM Media,Pool WHERE"
401 " VolStatus in ('Full','Used','Error','Append') AND"
402 " Media.PoolId=Pool.PoolId AND Pool.Name='%s'";
/* Oldest finished Volume smaller than a byte threshold; %s,%s = pool name, byte limit */
404 const char *sql_vol_bytes =
405 "SELECT MediaId FROM Media,Pool WHERE"
406 " VolStatus in ('Full','Used','Error') AND"
407 " Media.PoolId=Pool.PoolId AND Pool.Name='%s' AND"
408 " VolBytes<%s ORDER BY LastWritten ASC LIMIT 1";
/* Get unique Job names (ujobid) for jobs stored in the given Pool;
 * %s = pool name.
 *
 * BUG FIX: the Client table was listed in the FROM clause but never
 * joined or otherwise referenced, producing a Cartesian product with
 * every Client row; it is dropped here.
 */
const char *sql_ujobid =
   "SELECT DISTINCT Job.Job from Pool,Media,Job,JobMedia "
   " WHERE Media.PoolId=Pool.PoolId AND Pool.Name='%s' AND"
   " JobMedia.JobId=Job.JobId AND Job.PoolId=Media.PoolId";
/*
 * Select the JobId(s) to migrate according to the Job resource's
 * selection type (Job/Client/Volume regex, raw SQL, or the
 * volume/occupancy selectors, which are not implemented yet).
 * All selected JobIds except the last are handed to
 * start_migration_job() (one new migration job each); the last JobId
 * is handled by the current job and loaded into jcr->previous_jr.
419 * Returns: false on error
420 * true if OK and jcr->previous_jr filled in
 *
 * NOTE(review): many interior lines (case labels, braces, bail-out
 * gotos, declarations of ids/stat/p/JobId/ed1) are not visible in
 * this extract.
 */
422 static bool get_job_to_migrate(JCR *jcr)
425 POOL_MEM query(PM_MESSAGE);
431 ids.list = get_pool_memory(PM_MESSAGE);
432 Dmsg1(dbglevel, "list=%p\n", ids.list);
/* A JobId given at job start overrides the configured selection. */
436 if (jcr->MigrateJobId != 0) {
437 Dmsg1(000, "At Job start previous jobid=%u\n", jcr->MigrateJobId);
438 edit_uint64(jcr->MigrateJobId, ids.list);
441 switch (jcr->job->selection_type) {
443 if (!regex_find_jobids(jcr, &ids, sql_job, sql_jobids_from_job, "Job")) {
448 if (!regex_find_jobids(jcr, &ids, sql_client, sql_jobids_from_client, "Client")) {
453 if (!regex_find_jobids(jcr, &ids, sql_vol, sql_jobids_from_vol, "Volume")) {
/* MT_SQLQUERY: run the user-supplied SQL directly to collect JobIds. */
458 if (!jcr->job->selection_pattern) {
459 Jmsg(jcr, M_FATAL, 0, _("No Migration SQL selection pattern specified.\n"));
462 Dmsg1(dbglevel, "SQL=%s\n", jcr->job->selection_pattern);
463 if (!db_sql_query(jcr->db, jcr->job->selection_pattern,
464 dbid_handler, (void *)&ids)) {
465 Jmsg(jcr, M_FATAL, 0,
466 _("SQL failed. ERR=%s\n"), db_strerror(jcr->db));
472 /***** Below not implemented yet *********/
473 case MT_SMALLEST_VOL:
474 Mmsg(query, sql_smallest_vol, jcr->pool->hdr.name);
475 // Mmsg(query2, sql_jobids_from_mediaid, JobIds);
476 // Dmsg1(000, "Smallest Vol Jobids=%s\n", JobIds);
479 Mmsg(query, sql_oldest_vol, jcr->pool->hdr.name);
480 // Mmsg(query2, sql_jobids_from_mediaid, JobIds);
481 // Dmsg1(000, "Oldest Vol Jobids=%s\n", JobIds);
483 case MT_POOL_OCCUPANCY:
484 Mmsg(query, sql_pool_bytes, jcr->pool->hdr.name);
485 // Dmsg1(000, "Pool Occupancy Jobids=%s\n", JobIds);
488 Dmsg0(000, "Pool time not implemented\n");
491 Jmsg(jcr, M_FATAL, 0, _("Unknown Migration Selection Type.\n"));
497 * Loop over all jobids except the last one, sending
498 * them to start_migration_job(), which will start a job
499 * for each of them. For the last JobId, we handle it below.
502 for (int i=1; i < (int)ids.count; i++) {
504 stat = get_next_jobid_from_list(&p, &JobId);
505 Dmsg2(000, "get_next_jobid stat=%d JobId=%u\n", stat, JobId);
506 jcr->MigrateJobId = JobId;
507 start_migration_job(jcr);
509 Jmsg(jcr, M_FATAL, 0, _("Invalid JobId found.\n"));
511 } else if (stat == 0) {
512 Jmsg(jcr, M_INFO, 0, _("No JobIds found to migrate.\n"));
517 /* Now get the last JobId and handle it in the current job */
519 stat = get_next_jobid_from_list(&p, &JobId);
520 Dmsg2(000, "Last get_next_jobid stat=%d JobId=%u\n", stat, JobId);
522 Jmsg(jcr, M_FATAL, 0, _("Invalid JobId found.\n"));
524 } else if (stat == 0) {
525 Jmsg(jcr, M_INFO, 0, _("No JobIds found to migrate.\n"));
529 jcr->previous_jr.JobId = JobId;
530 Dmsg1(100, "Previous jobid=%d\n", jcr->previous_jr.JobId);
532 if (!db_get_job_record(jcr, jcr->db, &jcr->previous_jr)) {
533 Jmsg(jcr, M_FATAL, 0, _("Could not get job record for JobId %s to migrate. ERR=%s"),
534 edit_int64(jcr->previous_jr.JobId, ed1),
535 db_strerror(jcr->db));
538 Jmsg(jcr, M_INFO, 0, _("Migration using JobId=%d Job=%s\n"),
539 jcr->previous_jr.JobId, jcr->previous_jr.Job);
/* Success and error paths both release the JobId list memory. */
542 free_pool_memory(ids.list);
546 free_pool_memory(ids.list);
/*
 * Start one additional migration job for jcr->MigrateJobId by feeding
 * a "run <job> jobid=<id>" command through a console (UA) context.
 * run_cmd() presumably returns the started JobId (0 on failure).
 * NOTE(review): the declaration of ed1 and the if/else around the two
 * Jmsg calls are not visible in this extract.
 */
550 static void start_migration_job(JCR *jcr)
552 UAContext *ua = new_ua_context(jcr);
555 Mmsg(ua->cmd, "run %s jobid=%s", jcr->job->hdr.name,
556 edit_uint64(jcr->MigrateJobId, ed1));
557 Dmsg1(dbglevel, "=============== Migration cmd=%s\n", ua->cmd);
558 parse_ua_args(ua); /* parse command */
559 int stat = run_cmd(ua, ua->cmd);
560 // int stat = (int)jcr->MigrateJobId;
562 Jmsg(jcr, M_ERROR, 0, _("Could not start migration job.\n"));
564 Jmsg(jcr, M_INFO, 0, _("Migration JobId %d started.\n"), stat);
/*
 * Two-phase JobId selection:
 *  1. query1 (with the pool name) collects unique names (Job, Client
 *     or Volume, per 'type') into a sorted dlist, then the Job's
 *     regex selection pattern filters that list;
 *  2. query2 is run per surviving name to gather JobIds into 'ids'.
 * Returns true with ids filled on success, false on fatal error.
 * NOTE(review): declarations (rc, preg, prbuf, item, item_chain), the
 * bail-out labels and the regfree/delete cleanup are not visible in
 * this extract.
 */
570 static bool regex_find_jobids(JCR *jcr, idpkt *ids, const char *query1,
571 const char *query2, const char *type) {
574 uitem *last_item = NULL;
579 POOL_MEM query(PM_MESSAGE);
581 item_chain = New(dlist(item, &item->link));
582 if (!jcr->job->selection_pattern) {
583 Jmsg(jcr, M_FATAL, 0, _("No Migration %s selection pattern specified.\n"),
587 Dmsg1(dbglevel, "regex=%s\n", jcr->job->selection_pattern);
588 /* Compile regex expression */
589 rc = regcomp(&preg, jcr->job->selection_pattern, REG_EXTENDED);
591 regerror(rc, &preg, prbuf, sizeof(prbuf));
592 Jmsg(jcr, M_FATAL, 0, _("Could not compile regex pattern \"%s\" ERR=%s\n"),
593 jcr->job->selection_pattern, prbuf);
596 /* Basic query for names */
597 Mmsg(query, query1, jcr->pool->hdr.name);
598 Dmsg1(dbglevel, "query1=%s\n", query.c_str());
599 if (!db_sql_query(jcr->db, query.c_str(), unique_name_handler,
600 (void *)item_chain)) {
601 Jmsg(jcr, M_FATAL, 0,
602 _("SQL to get %s failed. ERR=%s\n"), type, db_strerror(jcr->db));
605 /* Now apply the regex to the names and remove any item not matched */
/* Removal is deferred one iteration (via last_item) so the dlist is
 * never mutated at the node the foreach cursor currently points to. */
606 foreach_dlist(item, item_chain) {
607 const int nmatch = 30;
608 regmatch_t pmatch[nmatch];
610 Dmsg1(dbglevel, "Remove item %s\n", last_item->item);
611 free(last_item->item);
612 item_chain->remove(last_item);
614 Dmsg1(dbglevel, "Item=%s\n", item->item);
615 rc = regexec(&preg, item->item, nmatch, pmatch, 0);
617 last_item = NULL; /* keep this one */
/* NOTE(review): use-after-free — item is freed and then referenced by
 * the Dmsg1 on the next line; the free() should come after the Dmsg1
 * (compare the correct order at lines 610-611 above). */
623 free(last_item->item);
624 Dmsg1(dbglevel, "Remove item %s\n", last_item->item);
625 item_chain->remove(last_item);
629 * At this point, we have a list of items in item_chain
630 * that have been matched by the regex, so now we need
631 * to look up their jobids.
634 foreach_dlist(item, item_chain) {
635 Dmsg2(dbglevel, "Got %s: %s\n", type, item->item);
636 Mmsg(query, query2, item->item, jcr->pool->hdr.name);
637 Dmsg1(dbglevel, "query2=%s\n", query.c_str());
638 if (!db_sql_query(jcr->db, query.c_str(), dbid_handler, (void *)ids)) {
639 Jmsg(jcr, M_FATAL, 0,
640 _("SQL failed. ERR=%s\n"), db_strerror(jcr->db));
644 if (ids->count == 0) {
645 Jmsg(jcr, M_INFO, 0, _("No %ss found to migrate.\n"), type);
649 Dmsg2(dbglevel, "Count=%d Jobids=%s\n", ids->count, ids->list);
651 Dmsg0(dbglevel, "After delete item_chain\n");
/*
 * End-of-job cleanup and reporting for a migration job.
 * Finalizes both the migration JCR and the new backup JCR (prev_jcr):
 * writes the job-end records, rewrites the new job's Start/End times
 * and JobTDate to those of the original job, marks the original job
 * as migrated (Type JT_MIGRATED_JOB), then emits the job report.
657 * Release resources allocated during backup.
 *
 * NOTE(review): many interior lines (braces, declarations of mr,
 * msg_type, RunTime, kbps, and several report format lines) are not
 * visible in this extract.
 */
659 void migration_cleanup(JCR *jcr, int TermCode)
661 char sdt[MAX_TIME_LENGTH], edt[MAX_TIME_LENGTH];
662 char ec1[30], ec2[30], ec3[30], ec4[30], ec5[30], elapsed[50];
663 char ec6[50], ec7[50], ec8[50];
664 char term_code[100], sd_term_msg[100];
665 const char *term_msg;
670 JCR *prev_jcr = jcr->previous_jcr;
671 POOL_MEM query(PM_MESSAGE);
673 Dmsg2(100, "Enter migrate_cleanup %d %c\n", TermCode, TermCode);
674 dequeue_messages(jcr); /* display any queued messages */
675 memset(&mr, 0, sizeof(mr));
676 set_jcr_job_status(jcr, TermCode);
677 update_job_end_record(jcr); /* update database */
680 * Check if we actually did something.
681 * prev_jcr is jcr of the newly migrated job.
/* Propagate the SD's counters and volume session into the new job. */
684 prev_jcr->JobFiles = jcr->JobFiles = jcr->SDJobFiles;
685 prev_jcr->JobBytes = jcr->JobBytes = jcr->SDJobBytes;
686 prev_jcr->VolSessionId = jcr->VolSessionId;
687 prev_jcr->VolSessionTime = jcr->VolSessionTime;
688 prev_jcr->jr.RealEndTime = 0;
689 prev_jcr->jr.PriorJobId = jcr->previous_jr.JobId;
691 set_jcr_job_status(prev_jcr, TermCode);
694 update_job_end_record(prev_jcr);
696 /* Update final items to set them to the previous job's values */
697 Mmsg(query, "UPDATE Job SET StartTime='%s',EndTime='%s',"
698 "JobTDate=%s WHERE JobId=%s",
699 jcr->previous_jr.cStartTime, jcr->previous_jr.cEndTime,
700 edit_uint64(jcr->previous_jr.JobTDate, ec1),
701 edit_uint64(prev_jcr->jr.JobId, ec2));
702 db_sql_query(prev_jcr->db, query.c_str(), NULL, NULL);
704 /* Now mark the previous job as migrated */
705 Mmsg(query, "UPDATE Job SET Type='%c' WHERE JobId=%s",
706 (char)JT_MIGRATED_JOB, edit_uint64(jcr->previous_jr.JobId, ec1));
707 db_sql_query(prev_jcr->db, query.c_str(), NULL, NULL);
/* Re-read our own job record for accurate statistics in the report. */
709 if (!db_get_job_record(jcr, jcr->db, &jcr->jr)) {
710 Jmsg(jcr, M_WARNING, 0, _("Error getting job record for stats: %s"),
711 db_strerror(jcr->db));
712 set_jcr_job_status(jcr, JS_ErrorTerminated);
715 bstrncpy(mr.VolumeName, jcr->VolumeName, sizeof(mr.VolumeName));
716 if (!db_get_media_record(jcr, jcr->db, &mr)) {
717 Jmsg(jcr, M_WARNING, 0, _("Error getting Media record for Volume \"%s\": ERR=%s"),
718 mr.VolumeName, db_strerror(jcr->db));
719 set_jcr_job_status(jcr, JS_ErrorTerminated);
722 update_bootstrap_file(prev_jcr);
724 if (!db_get_job_volume_names(prev_jcr, prev_jcr->db, prev_jcr->jr.JobId, &prev_jcr->VolumeName)) {
726 * Note, if the job has erred, most likely it did not write any
727 * tape, so suppress this "error" message since in that case
728 * it is normal. Or look at it the other way, only for a
729 * normal exit should we complain about this error.
731 if (jcr->JobStatus == JS_Terminated && jcr->jr.JobBytes) {
732 Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(prev_jcr->db));
734 prev_jcr->VolumeName[0] = 0; /* none */
/* Choose the termination message/severity for the report. */
738 msg_type = M_INFO; /* by default INFO message */
739 switch (jcr->JobStatus) {
741 if (jcr->Errors || jcr->SDErrors) {
742 term_msg = _("%s OK -- with warnings");
744 term_msg = _("%s OK");
748 case JS_ErrorTerminated:
749 term_msg = _("*** %s Error ***");
750 msg_type = M_ERROR; /* Generate error message */
/* Tear down the SD connection and its message thread on error/cancel. */
751 if (jcr->store_bsock) {
752 bnet_sig(jcr->store_bsock, BNET_TERMINATE);
753 if (jcr->SD_msg_chan) {
754 pthread_cancel(jcr->SD_msg_chan);
759 term_msg = _("%s Canceled");
760 if (jcr->store_bsock) {
761 bnet_sig(jcr->store_bsock, BNET_TERMINATE);
762 if (jcr->SD_msg_chan) {
763 pthread_cancel(jcr->SD_msg_chan);
768 term_msg = _("Inappropriate %s term code");
771 bsnprintf(term_code, sizeof(term_code), term_msg, "Migration");
772 bstrftimes(sdt, sizeof(sdt), jcr->jr.StartTime);
773 bstrftimes(edt, sizeof(edt), jcr->jr.EndTime);
774 RunTime = jcr->jr.EndTime - jcr->jr.StartTime;
778 kbps = (double)jcr->SDJobBytes / (1000 * RunTime);
782 jobstatus_to_ascii(jcr->SDJobStatus, sd_term_msg, sizeof(sd_term_msg));
/* Final job report sent to the configured Messages destinations. */
784 Jmsg(jcr, msg_type, 0, _("Bacula %s (%s): %s\n"
785 " Prev Backup JobId: %s\n"
786 " New Backup JobId: %s\n"
787 " Migration JobId: %s\n"
788 " Migration Job: %s\n"
789 " Backup Level: %s%s\n"
791 " FileSet: \"%s\" %s\n"
792 " Pool: \"%s\" (From %s)\n"
793 " Storage: \"%s\" (From %s)\n"
796 " Elapsed time: %s\n"
798 " SD Files Written: %s\n"
799 " SD Bytes Written: %s (%sB)\n"
801 " Volume name(s): %s\n"
802 " Volume Session Id: %d\n"
803 " Volume Session Time: %d\n"
804 " Last Volume Bytes: %s (%sB)\n"
806 " SD termination status: %s\n"
807 " Termination: %s\n\n"),
811 prev_jcr ? edit_uint64(jcr->previous_jr.JobId, ec6) : "0",
812 prev_jcr ? edit_uint64(prev_jcr->jr.JobId, ec7) : "0",
813 edit_uint64(jcr->jr.JobId, ec8),
815 level_to_str(jcr->JobLevel), jcr->since,
817 jcr->fileset->name(), jcr->FSCreateTime,
818 jcr->pool->name(), jcr->pool_source,
819 jcr->wstore->name(), jcr->storage_source,
822 edit_utime(RunTime, elapsed, sizeof(elapsed)),
824 edit_uint64_with_commas(jcr->SDJobFiles, ec1),
825 edit_uint64_with_commas(jcr->SDJobBytes, ec2),
826 edit_uint64_with_suffix(jcr->SDJobBytes, ec3),
828 prev_jcr ? prev_jcr->VolumeName : "",
831 edit_uint64_with_commas(mr.VolBytes, ec4),
832 edit_uint64_with_suffix(mr.VolBytes, ec5),
/* NOTE(review): 0x%x with a pointer truncates on 64-bit — %p would be
 * correct; confirm against project Dmsg conventions. */
837 Dmsg1(100, "migrate_cleanup() previous_jcr=0x%x\n", jcr->previous_jcr);
838 if (jcr->previous_jcr) {
839 free_jcr(jcr->previous_jcr);
840 jcr->previous_jcr = NULL;
842 Dmsg0(100, "Leave migrate_cleanup()\n");