3 * Bacula Director -- migrate.c -- responsible for doing
6 * Kern Sibbald, September MMIV
8 * Basic tasks done here:
9 * Open DB and create records for this job.
10 * Open Message Channel with Storage daemon to tell him a job will be starting.
11 * Open connection with Storage daemon and pass him commands
13 * When the Storage daemon finishes the job, update the DB.
18 Copyright (C) 2004-2006 Kern Sibbald
20 This program is free software; you can redistribute it and/or
21 modify it under the terms of the GNU General Public License
22 version 2 as amended with additional clauses defined in the
23 file LICENSE in the main source directory.
25 This program is distributed in the hope that it will be useful,
26 but WITHOUT ANY WARRANTY; without even the implied warranty of
27 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 file LICENSE for additional details.
36 #include "lib/bregex.h"
41 static const int dbglevel = 100;
43 static char OKbootstrap[] = "3000 OK bootstrap\n";
44 static bool get_job_to_migrate(JCR *jcr);
46 static bool regex_find_jobids(JCR *jcr, idpkt *ids, const char *query1,
47 const char *query2, const char *type);
48 static void start_migration_job(JCR *jcr);
51 * Called here before the job is run to do the job
/*
 * do_migration_init -- pre-run initialization for a migration job.
 *
 * Visible steps: find the previous job to migrate (stored in
 * jcr->previous_jr.JobId), succeed early when there is nothing to do,
 * ensure a FileSet record exists, apply pool overrides, create or look
 * up the Pool record, prefer pool storage over job storage, and write
 * the restore bootstrap file.
 *
 * NOTE(review): this listing is fragmentary -- error-return paths and
 * closing braces between the numbered statements are not shown here.
 */
54 bool do_migration_init(JCR *jcr)
56 /* If we find a job to migrate it is previous_jr.JobId */
57 if (!get_job_to_migrate(jcr)) {
/* JobId == 0 means no eligible previous job was found -- nothing to do */
61 if (jcr->previous_jr.JobId == 0) {
62 return true; /* no work */
65 if (!get_or_create_fileset_record(jcr)) {
69 apply_pool_overrides(jcr);
/* PoolId == 0 indicates the pool record could not be created or found */
71 jcr->jr.PoolId = get_or_create_pool_record(jcr, jcr->pool->hdr.name);
72 if (jcr->jr.PoolId == 0) {
76 /* If pool storage specified, use it instead of job storage */
77 copy_wstorage(jcr, jcr->pool->storage, _("Pool resource"));
80 Jmsg(jcr, M_FATAL, 0, _("No Storage specification found in Job or Pool.\n"));
84 create_restore_bootstrap_file(jcr);
89 * Do a Migration of a previous job
91 * Returns: false on failure
/*
 * do_migration -- run the migration of a previously-run job.
 *
 * Visible flow: terminate cleanly when there is no work; create a new
 * "prev_jcr" that represents the migrated (replacement) backup job;
 * resolve the original job's Pool from the catalog; honor the Pool's
 * NextPool for write storage; open the Storage daemon connection, send
 * the bootstrap, start the SD job, and wait for SD termination before
 * calling migration_cleanup().
 *
 * NOTE(review): fragmentary listing -- declarations (job, prev_job, pool,
 * pr, sd, ed1) and many error-return lines fall in the gaps between the
 * numbered statements shown.
 */
94 bool do_migration(JCR *jcr)
101 JCR *prev_jcr; /* newly migrated job */
/* Nothing to migrate: report success and clean up immediately */
103 if (jcr->previous_jr.JobId == 0 || jcr->ExpectedFiles == 0) {
104 set_jcr_job_status(jcr, JS_Terminated);
105 migration_cleanup(jcr, jcr->JobStatus);
106 return true; /* no work */
109 Dmsg4(dbglevel, "Previous: Name=%s JobId=%d Type=%c Level=%c\n",
110 jcr->previous_jr.Name, jcr->previous_jr.JobId,
111 jcr->previous_jr.JobType, jcr->previous_jr.JobLevel);
113 Dmsg4(dbglevel, "Current: Name=%s JobId=%d Type=%c Level=%c\n",
114 jcr->jr.Name, jcr->jr.JobId,
115 jcr->jr.JobType, jcr->jr.JobLevel);
/* Look up the Job resources for both the migration job and the original */
118 job = (JOB *)GetResWithName(R_JOB, jcr->jr.Name);
119 prev_job = (JOB *)GetResWithName(R_JOB, jcr->previous_jr.Name);
121 if (!job || !prev_job) {
126 * prev_jcr is the new Job that corresponds to the original
127 * job. It "runs" at the same time as the current
128 * migration job and becomes a new backup job that replaces
129 * the original backup job. Most operations on the current
130 * migration jcr are also done on the prev_jcr.
132 prev_jcr = jcr->previous_jcr = new_jcr(sizeof(JCR), dird_free_jcr);
133 memcpy(&prev_jcr->previous_jr, &jcr->previous_jr, sizeof(prev_jcr->previous_jr));
135 /* Turn the prev_jcr into a "real" job */
136 set_jcr_defaults(prev_jcr, prev_job);
137 if (!setup_job(prev_jcr)) {
141 /* Now reset the job record from the previous job */
142 memcpy(&prev_jcr->jr, &jcr->previous_jr, sizeof(prev_jcr->jr));
143 /* Update the jr to reflect the new values of PoolId, FileSetId, and JobId. */
144 prev_jcr->jr.PoolId = jcr->jr.PoolId;
145 prev_jcr->jr.FileSetId = jcr->jr.FileSetId;
146 prev_jcr->jr.JobId = prev_jcr->JobId;
148 Dmsg4(dbglevel, "Prev_jcr: Name=%s JobId=%d Type=%c Level=%c\n",
149 prev_jcr->jr.Name, prev_jcr->jr.JobId,
150 prev_jcr->jr.JobType, prev_jcr->jr.JobLevel);
153 * Get the PoolId used with the original job. Then
154 * find the pool name from the database record.
156 memset(&pr, 0, sizeof(pr));
157 pr.PoolId = prev_jcr->previous_jr.PoolId;
158 if (!db_get_pool_record(jcr, jcr->db, &pr)) {
159 Jmsg(jcr, M_FATAL, 0, _("Pool for JobId %s not in database. ERR=%s\n"),
160 edit_int64(pr.PoolId, ed1), db_strerror(jcr->db));
163 /* Get the pool resource corresponding to the original job */
164 pool = (POOL *)GetResWithName(R_POOL, pr.Name);
166 Jmsg(jcr, M_FATAL, 0, _("Pool resource \"%s\" not found.\n"), pr.Name);
170 /* Check Migration time and High/Low water marks */
173 /* If pool storage specified, use it for restore */
174 copy_rstorage(prev_jcr, pool->storage, _("Pool resource"));
175 copy_rstorage(jcr, pool->storage, _("Pool resource"));
177 /* If the original backup pool has a NextPool, make sure a
178 * record exists in the database.
180 if (pool->NextPool) {
181 jcr->jr.PoolId = get_or_create_pool_record(jcr, pool->NextPool->hdr.name);
182 if (jcr->jr.PoolId == 0) {
186 * put the "NextPool" resource pointer in our jcr so that we
187 * can pull the Storage reference from it.
189 prev_jcr->pool = jcr->pool = pool->NextPool;
190 prev_jcr->jr.PoolId = jcr->jr.PoolId;
191 pm_strcpy(jcr->pool_source, _("NextPool in Pool resource"));
194 /* If pool storage specified, use it instead of job storage for backup */
195 copy_wstorage(jcr, jcr->pool->storage, _("Pool resource"));
197 /* Print Job Start message */
198 Jmsg(jcr, M_INFO, 0, _("Start Migration JobId %s, Job=%s\n"),
199 edit_uint64(jcr->JobId, ed1), jcr->Job);
201 set_jcr_job_status(jcr, JS_Running);
202 set_jcr_job_status(prev_jcr, JS_Running);
203 Dmsg2(dbglevel, "JobId=%d JobLevel=%c\n", jcr->jr.JobId, jcr->jr.JobLevel);
205 /* Update job start record for this migration job */
206 if (!db_update_job_start_record(jcr, jcr->db, &jcr->jr)) {
207 Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db));
211 Dmsg4(dbglevel, "Prev_jcr: Name=%s JobId=%d Type=%c Level=%c\n",
212 prev_jcr->jr.Name, prev_jcr->jr.JobId,
213 prev_jcr->jr.JobType, prev_jcr->jr.JobLevel);
215 /* Update job start record for migrated job */
216 if (!db_update_job_start_record(prev_jcr, prev_jcr->db, &prev_jcr->jr)) {
217 Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(prev_jcr->db));
223 * Open a message channel connection with the Storage
224 * daemon. This is to let him know that our client
225 * will be contacting him for a backup session.
228 Dmsg0(110, "Open connection with storage daemon\n");
229 set_jcr_job_status(jcr, JS_WaitSD);
230 set_jcr_job_status(prev_jcr, JS_WaitSD);
232 * Start conversation with Storage daemon
234 if (!connect_to_storage_daemon(jcr, 10, SDConnectTimeout, 1)) {
237 sd = jcr->store_bsock;
239 * Now start a job with the Storage daemon
241 Dmsg2(dbglevel, "Read store=%s, write store=%s\n",
242 ((STORE *)jcr->rstorage->first())->name(),
243 ((STORE *)jcr->wstorage->first())->name());
244 if (!start_storage_daemon_job(jcr, jcr->rstorage, jcr->wstorage)) {
247 Dmsg0(150, "Storage daemon connection OK\n");
/* Send bootstrap and wait for the SD's "3000 OK bootstrap" response */
249 if (!send_bootstrap_file(jcr, sd) ||
250 !response(jcr, sd, OKbootstrap, "Bootstrap", DISPLAY_ERROR)) {
254 if (!bnet_fsend(sd, "run")) {
259 * Now start a Storage daemon message thread
261 if (!start_storage_daemon_message_thread(jcr)) {
266 set_jcr_job_status(jcr, JS_Running);
267 set_jcr_job_status(prev_jcr, JS_Running);
269 /* Pickup Job termination data */
270 /* Note, the SD stores in jcr->JobFiles/ReadBytes/JobBytes/Errors */
271 wait_for_storage_daemon_termination(jcr);
/* Propagate the SD's final status into our own JobStatus */
273 set_jcr_job_status(jcr, jcr->SDJobStatus);
274 if (jcr->JobStatus == JS_Terminated) {
275 migration_cleanup(jcr, jcr->JobStatus);
287 * Callback handler to make a comma-separated list of DB Ids
/*
 * dbid_handler -- DB query row callback.  Appends row[0] (an Id) to the
 * idpkt accumulator passed via ctx, inserting "," separators after the
 * first entry.  NOTE(review): fragmentary listing -- the count increment
 * and return statement fall in the gaps between the numbered lines.
 */
289 static int dbid_handler(void *ctx, int num_fields, char **row)
291 idpkt *ids = (idpkt *)ctx;
293 Dmsg3(dbglevel, "count=%d Ids=%p %s\n", ids->count, ids->list, ids->list);
/* First entry gets no leading separator */
294 if (ids->count == 0) {
297 pm_strcat(ids->list, ",");
299 pm_strcat(ids->list, row[0]);
/*
 * item_compare -- dlist ordering callback: compares two uitem entries
 * by their item strings (strcmp semantics).
 */
310 static int item_compare(void *item1, void *item2)
312 uitem *i1 = (uitem *)item1;
313 uitem *i2 = (uitem *)item2;
314 return strcmp(i1->item, i2->item);
/*
 * unique_name_handler -- DB query row callback.  Inserts row[0] into a
 * sorted dlist (passed via ctx) keeping only unique names: when
 * binary_insert returns an existing node, the freshly allocated
 * duplicate is freed.
 */
317 static int unique_name_handler(void *ctx, int num_fields, char **row)
319 dlist *list = (dlist *)ctx;
321 uitem *new_item = (uitem *)malloc(sizeof(uitem));
324 memset(new_item, 0, sizeof(uitem));
325 new_item->item = bstrdup(row[0]);
326 Dmsg1(dbglevel, "Item=%s\n", row[0]);
327 item = (uitem *)list->binary_insert((void *)new_item, item_compare);
/* binary_insert returns the existing node on duplicates -- discard ours */
328 if (item != new_item) { /* already in list */
329 free(new_item->item);
330 free((char *)new_item);
/*
 * Catalog SQL templates used by the migration selection code below.
 * Each contains Mmsg-style %s placeholders (pool name, regex-matched
 * name, or media id) filled in before execution.
 */
336 /* Get Job names in Pool */
337 const char *sql_job =
338 "SELECT DISTINCT Job.Name from Job,Pool"
339 " WHERE Pool.Name='%s' AND Job.PoolId=Pool.PoolId";
341 /* Get JobIds from regex'ed Job names */
342 const char *sql_jobids_from_job =
343 "SELECT DISTINCT Job.JobId FROM Job,Pool"
344 " WHERE Job.Name='%s' AND Pool.Name='%s' AND Job.PoolId=Pool.PoolId"
345 " ORDER by Job.StartTime";
347 /* Get Client names in Pool */
348 const char *sql_client =
349 "SELECT DISTINCT Client.Name from Client,Pool,Job"
350 " WHERE Pool.Name='%s' AND Job.ClientId=Client.ClientId AND"
351 " Job.PoolId=Pool.PoolId";
353 /* Get JobIds from regex'ed Client names */
354 const char *sql_jobids_from_client =
355 "SELECT DISTINCT Job.JobId FROM Job,Pool"
356 " WHERE Client.Name='%s' AND Pool.Name='%s' AND Job.PoolId=Pool.PoolId"
357 " AND Job.ClientId=Client.ClientId "
358 " ORDER by Job.StartTime";
360 /* Get Volume names in Pool */
361 const char *sql_vol =
362 "SELECT DISTINCT VolumeName FROM Media,Pool WHERE"
363 " VolStatus in ('Full','Used','Error') AND"
364 " Media.PoolId=Pool.PoolId AND Pool.Name='%s'";
366 /* Get JobIds from regex'ed Volume names */
367 const char *sql_jobids_from_vol =
368 "SELECT DISTINCT Job.JobId FROM Media,JobMedia,Job"
369 " WHERE Media.VolumeName='%s' AND Media.MediaId=JobMedia.MediaId"
370 " AND JobMedia.JobId=Job.JobId"
371 " ORDER by Job.StartTime";
/* Pick the single smallest (by VolBytes) eligible volume in the pool */
377 const char *sql_smallest_vol =
378 "SELECT MediaId FROM Media,Pool WHERE"
379 " VolStatus in ('Full','Used','Error') AND"
380 " Media.PoolId=Pool.PoolId AND Pool.Name='%s'"
381 " ORDER BY VolBytes ASC LIMIT 1";
/* Pick the single least-recently-written eligible volume in the pool */
383 const char *sql_oldest_vol =
384 "SELECT MediaId FROM Media,Pool WHERE"
385 " VolStatus in ('Full','Used','Error') AND"
386 " Media.PoolId=Pool.PoolId AND Pool.Name='%s'"
387 " ORDER BY LastWritten ASC LIMIT 1";
/* All JobIds that wrote to a given MediaId */
389 const char *sql_jobids_from_mediaid =
390 "SELECT DISTINCT Job.JobId FROM JobMedia,Job"
391 " WHERE JobMedia.JobId=Job.JobId AND JobMedia.MediaId=%s"
392 " ORDER by Job.StartTime";
/* Total bytes stored in the pool (includes Append volumes) */
394 const char *sql_pool_bytes =
395 "SELECT SUM(VolBytes) FROM Media,Pool WHERE"
396 " VolStatus in ('Full','Used','Error','Append') AND"
397 " Media.PoolId=Pool.PoolId AND Pool.Name='%s'";
/* Oldest volume smaller than a given byte count */
399 const char *sql_vol_bytes =
400 "SELECT MediaId FROM Media,Pool WHERE"
401 " VolStatus in ('Full','Used','Error') AND"
402 " Media.PoolId=Pool.PoolId AND Pool.Name='%s' AND"
403 " VolBytes<%s ORDER BY LastWritten ASC LIMIT 1";
/* Unique Job names of jobs on media in the pool */
406 const char *sql_ujobid =
407 "SELECT DISTINCT Job.Job from Client,Pool,Media,Job,JobMedia "
408 " WHERE Media.PoolId=Pool.PoolId AND Pool.Name='%s' AND"
409 " JobMedia.JobId=Job.JobId AND Job.PoolId=Media.PoolId";
414 * Returns: false on error
415 * true if OK and jcr->previous_jr filled in
/*
 * get_job_to_migrate -- select the JobId(s) to migrate.
 *
 * Visible flow: if jcr->MigrateJobId is preset, use it directly;
 * otherwise build the JobId list per the job's selection_type
 * (Job/Client/Volume regex, raw SQL pattern; the volume/pool-occupancy
 * selections are marked not implemented).  All selected JobIds except
 * the last are handed to start_migration_job() to spawn separate
 * migration jobs; the last JobId is loaded into jcr->previous_jr and
 * handled by the current job.
 *
 * NOTE(review): fragmentary listing -- case labels, break statements,
 * the idpkt/loop variable declarations, and several error-cleanup paths
 * fall in the gaps between the numbered lines.
 */
417 static bool get_job_to_migrate(JCR *jcr)
420 POOL_MEM query(PM_MESSAGE);
426 ids.list = get_pool_memory(PM_MESSAGE);
427 Dmsg1(dbglevel, "list=%p\n", ids.list);
/* Explicit JobId supplied at job start -- skip the selection logic */
431 if (jcr->MigrateJobId != 0) {
432 Dmsg1(000, "At Job start previous jobid=%u\n", jcr->MigrateJobId);
433 edit_uint64(jcr->MigrateJobId, ids.list);
436 switch (jcr->job->selection_type) {
438 if (!regex_find_jobids(jcr, &ids, sql_job, sql_jobids_from_job, "Job")) {
443 if (!regex_find_jobids(jcr, &ids, sql_client, sql_jobids_from_client, "Client")) {
448 if (!regex_find_jobids(jcr, &ids, sql_vol, sql_jobids_from_vol, "Volume")) {
453 if (!jcr->job->selection_pattern) {
454 Jmsg(jcr, M_FATAL, 0, _("No Migration SQL selection pattern specified.\n"));
457 Dmsg1(dbglevel, "SQL=%s\n", jcr->job->selection_pattern);
458 if (!db_sql_query(jcr->db, jcr->job->selection_pattern,
459 dbid_handler, (void *)&ids)) {
460 Jmsg(jcr, M_FATAL, 0,
461 _("SQL failed. ERR=%s\n"), db_strerror(jcr->db));
467 /***** Below not implemented yet *********/
468 case MT_SMALLEST_VOL:
469 Mmsg(query, sql_smallest_vol, jcr->pool->hdr.name);
470 // Mmsg(query2, sql_jobids_from_mediaid, JobIds);
471 // Dmsg1(000, "Smallest Vol Jobids=%s\n", JobIds);
474 Mmsg(query, sql_oldest_vol, jcr->pool->hdr.name);
475 // Mmsg(query2, sql_jobids_from_mediaid, JobIds);
476 // Dmsg1(000, "Oldest Vol Jobids=%s\n", JobIds);
478 case MT_POOL_OCCUPANCY:
479 Mmsg(query, sql_pool_bytes, jcr->pool->hdr.name);
480 // Dmsg1(000, "Pool Occupancy Jobids=%s\n", JobIds);
483 Dmsg0(000, "Pool time not implemented\n");
486 Jmsg(jcr, M_FATAL, 0, _("Unknown Migration Selection Type.\n"));
492 * Loop over all jobids except the last one, sending
493 * them to start_migration_job(), which will start a job
494 * for each of them. For the last JobId, we handle it below.
497 for (int i=1; i < (int)ids.count; i++) {
499 stat = get_next_jobid_from_list(&p, &JobId);
500 Dmsg2(000, "get_next_jobid stat=%d JobId=%u\n", stat, JobId);
501 jcr->MigrateJobId = JobId;
502 start_migration_job(jcr);
504 Jmsg(jcr, M_FATAL, 0, _("Invalid JobId found.\n"));
506 } else if (stat == 0) {
507 Jmsg(jcr, M_INFO, 0, _("No JobIds found to migrate.\n"));
512 /* Now get the last JobId and handle it in the current job */
514 stat = get_next_jobid_from_list(&p, &JobId);
515 Dmsg2(000, "Last get_next_jobid stat=%d JobId=%u\n", stat, JobId);
517 Jmsg(jcr, M_FATAL, 0, _("Invalid JobId found.\n"));
519 } else if (stat == 0) {
520 Jmsg(jcr, M_INFO, 0, _("No JobIds found to migrate.\n"));
/* Load the catalog record for the chosen previous job */
524 jcr->previous_jr.JobId = JobId;
525 Dmsg1(100, "Previous jobid=%d\n", jcr->previous_jr.JobId);
527 if (!db_get_job_record(jcr, jcr->db, &jcr->previous_jr)) {
528 Jmsg(jcr, M_FATAL, 0, _("Could not get job record for JobId %s to migrate. ERR=%s"),
529 edit_int64(jcr->previous_jr.JobId, ed1),
530 db_strerror(jcr->db));
533 Jmsg(jcr, M_INFO, 0, _("Migration using JobId=%d Job=%s\n"),
534 jcr->previous_jr.JobId, jcr->previous_jr.Job);
/* Success and failure paths both release the id list memory */
537 free_pool_memory(ids.list);
541 free_pool_memory(ids.list);
/*
 * start_migration_job -- spawn an additional migration job for
 * jcr->MigrateJobId by building and executing a console "run" command
 * through a temporary UAContext.  run_cmd() returns the new JobId
 * (0/negative visible here as the error path).
 */
545 static void start_migration_job(JCR *jcr)
547 UAContext *ua = new_ua_context(jcr);
550 Mmsg(ua->cmd, "run %s jobid=%s", jcr->job->hdr.name,
551 edit_uint64(jcr->MigrateJobId, ed1));
552 Dmsg1(dbglevel, "=============== Migration cmd=%s\n", ua->cmd);
553 parse_ua_args(ua); /* parse command */
554 int stat = run_cmd(ua, ua->cmd);
555 // int stat = (int)jcr->MigrateJobId;
557 Jmsg(jcr, M_ERROR, 0, _("Could not start migration job.\n"));
559 Jmsg(jcr, M_INFO, 0, _("Migration JobId %d started.\n"), stat);
/*
 * regex_find_jobids -- two-phase JobId selection by regex.
 *
 * Phase 1: run query1 (pool-name parameterized) to collect unique
 * Job/Client/Volume names into a sorted dlist, then prune every name
 * that does not match the job's selection_pattern (POSIX extended
 * regex).  Phase 2: for each surviving name, run query2 to append the
 * matching JobIds into *ids via dbid_handler.
 *
 * type is a label ("Job", "Client", "Volume") used only in messages.
 * Returns false on any SQL/regex failure or when no ids were found.
 *
 * NOTE(review): fragmentary listing -- the regfree() call, loop-exit
 * handling and some cleanup lines are not visible here; the deferred
 * "last_item" removal pattern exists because removing the current node
 * inside foreach_dlist would break iteration.
 */
565 static bool regex_find_jobids(JCR *jcr, idpkt *ids, const char *query1,
566 const char *query2, const char *type) {
569 uitem *last_item = NULL;
574 POOL_MEM query(PM_MESSAGE);
576 item_chain = New(dlist(item, &item->link));
577 if (!jcr->job->selection_pattern) {
578 Jmsg(jcr, M_FATAL, 0, _("No Migration %s selection pattern specified.\n"),
582 Dmsg1(dbglevel, "regex=%s\n", jcr->job->selection_pattern);
583 /* Compile regex expression */
584 rc = regcomp(&preg, jcr->job->selection_pattern, REG_EXTENDED);
586 regerror(rc, &preg, prbuf, sizeof(prbuf));
587 Jmsg(jcr, M_FATAL, 0, _("Could not compile regex pattern \"%s\" ERR=%s\n"),
588 jcr->job->selection_pattern, prbuf);
591 /* Basic query for names */
592 Mmsg(query, query1, jcr->pool->hdr.name);
593 Dmsg1(dbglevel, "query1=%s\n", query.c_str());
594 if (!db_sql_query(jcr->db, query.c_str(), unique_name_handler,
595 (void *)item_chain)) {
596 Jmsg(jcr, M_FATAL, 0,
597 _("SQL to get %s failed. ERR=%s\n"), type, db_strerror(jcr->db));
600 /* Now apply the regex to the names and remove any item not matched */
601 foreach_dlist(item, item_chain) {
602 const int nmatch = 30;
603 regmatch_t pmatch[nmatch];
/* Remove the previously-flagged non-matching item before testing this one */
605 Dmsg1(dbglevel, "Remove item %s\n", last_item->item);
606 free(last_item->item);
607 item_chain->remove(last_item);
609 Dmsg1(dbglevel, "Item=%s\n", item->item);
610 rc = regexec(&preg, item->item, nmatch, pmatch, 0);
612 last_item = NULL; /* keep this one */
/* After the loop: dispose of a trailing non-matching item, if any */
618 free(last_item->item);
619 Dmsg1(dbglevel, "Remove item %s\n", last_item->item);
620 item_chain->remove(last_item);
624 * At this point, we have a list of items in item_chain
625 * that have been matched by the regex, so now we need
626 * to look up their jobids.
629 foreach_dlist(item, item_chain) {
630 Dmsg2(dbglevel, "Got %s: %s\n", type, item->item);
631 Mmsg(query, query2, item->item, jcr->pool->hdr.name);
632 Dmsg1(dbglevel, "query2=%s\n", query.c_str());
633 if (!db_sql_query(jcr->db, query.c_str(), dbid_handler, (void *)ids)) {
634 Jmsg(jcr, M_FATAL, 0,
635 _("SQL failed. ERR=%s\n"), db_strerror(jcr->db));
639 if (ids->count == 0) {
640 Jmsg(jcr, M_INFO, 0, _("No %ss found to migrate.\n"), type);
644 Dmsg2(dbglevel, "Count=%d Jobids=%s\n", ids->count, ids->list);
646 Dmsg0(dbglevel, "After delete item_chain\n");
652 * Release resources allocated during backup.
/*
 * migration_cleanup -- finalize both the migration job (jcr) and the
 * newly-created replacement job (jcr->previous_jcr).
 *
 * Visible work: copy SD-reported counters onto both JCRs, update both
 * job end records, rewrite the new job's Start/End/JobTDate to match
 * the original, re-type the original job as JT_MIGRATED_JOB, fetch the
 * final job/media records for the report, signal/cancel the SD on
 * error or cancel, emit the multi-line termination report, and free
 * the previous JCR.
 *
 * NOTE(review): fragmentary listing -- several declarations (mr,
 * RunTime, kbps, msg_type), case labels, and closing braces fall in
 * the gaps between the numbered lines.
 */
654 void migration_cleanup(JCR *jcr, int TermCode)
656 char sdt[MAX_TIME_LENGTH], edt[MAX_TIME_LENGTH];
657 char ec1[30], ec2[30], ec3[30], ec4[30], ec5[30], elapsed[50];
658 char ec6[50], ec7[50], ec8[50];
659 char term_code[100], sd_term_msg[100];
660 const char *term_msg;
665 JCR *prev_jcr = jcr->previous_jcr;
666 POOL_MEM query(PM_MESSAGE);
668 Dmsg2(100, "Enter migrate_cleanup %d %c\n", TermCode, TermCode);
669 dequeue_messages(jcr); /* display any queued messages */
670 memset(&mr, 0, sizeof(mr));
671 set_jcr_job_status(jcr, TermCode);
672 update_job_end_record(jcr); /* update database */
675 * Check if we actually did something.
676 * prev_jcr is jcr of the newly migrated job.
/* Mirror the SD-reported counters onto both the old and new job records */
679 prev_jcr->JobFiles = jcr->JobFiles = jcr->SDJobFiles;
680 prev_jcr->JobBytes = jcr->JobBytes = jcr->SDJobBytes;
681 prev_jcr->VolSessionId = jcr->VolSessionId;
682 prev_jcr->VolSessionTime = jcr->VolSessionTime;
683 prev_jcr->jr.RealEndTime = 0;
684 prev_jcr->jr.PriorJobId = jcr->previous_jr.JobId;
686 set_jcr_job_status(prev_jcr, TermCode);
689 update_job_end_record(prev_jcr);
691 /* Update final items to set them to the previous job's values */
692 Mmsg(query, "UPDATE Job SET StartTime='%s',EndTime='%s',"
693 "JobTDate=%s WHERE JobId=%s",
694 jcr->previous_jr.cStartTime, jcr->previous_jr.cEndTime,
695 edit_uint64(jcr->previous_jr.JobTDate, ec1),
696 edit_uint64(prev_jcr->jr.JobId, ec2));
697 db_sql_query(prev_jcr->db, query.c_str(), NULL, NULL);
699 /* Now mark the previous job as migrated */
700 Mmsg(query, "UPDATE Job SET Type='%c' WHERE JobId=%s",
701 (char)JT_MIGRATED_JOB, edit_uint64(jcr->previous_jr.JobId, ec1));
702 db_sql_query(prev_jcr->db, query.c_str(), NULL, NULL);
704 if (!db_get_job_record(jcr, jcr->db, &jcr->jr)) {
705 Jmsg(jcr, M_WARNING, 0, _("Error getting job record for stats: %s"),
706 db_strerror(jcr->db));
707 set_jcr_job_status(jcr, JS_ErrorTerminated);
710 bstrncpy(mr.VolumeName, jcr->VolumeName, sizeof(mr.VolumeName));
711 if (!db_get_media_record(jcr, jcr->db, &mr)) {
712 Jmsg(jcr, M_WARNING, 0, _("Error getting Media record for Volume \"%s\": ERR=%s"),
713 mr.VolumeName, db_strerror(jcr->db));
714 set_jcr_job_status(jcr, JS_ErrorTerminated);
717 update_bootstrap_file(prev_jcr);
719 if (!db_get_job_volume_names(prev_jcr, prev_jcr->db, prev_jcr->jr.JobId, &prev_jcr->VolumeName)) {
721 * Note, if the job has erred, most likely it did not write any
722 * tape, so suppress this "error" message since in that case
723 * it is normal. Or look at it the other way, only for a
724 * normal exit should we complain about this error.
726 if (jcr->JobStatus == JS_Terminated && jcr->jr.JobBytes) {
727 Jmsg(jcr, M_ERROR, 0, "%s", db_strerror(prev_jcr->db));
729 prev_jcr->VolumeName[0] = 0; /* none */
/* Map the final JobStatus onto a termination message and severity */
733 msg_type = M_INFO; /* by default INFO message */
734 switch (jcr->JobStatus) {
736 if (jcr->Errors || jcr->SDErrors) {
737 term_msg = _("%s OK -- with warnings");
739 term_msg = _("%s OK");
743 case JS_ErrorTerminated:
744 term_msg = _("*** %s Error ***");
745 msg_type = M_ERROR; /* Generate error message */
/* On error/cancel, tell the SD to terminate and cancel its msg thread */
746 if (jcr->store_bsock) {
747 bnet_sig(jcr->store_bsock, BNET_TERMINATE);
748 if (jcr->SD_msg_chan) {
749 pthread_cancel(jcr->SD_msg_chan);
754 term_msg = _("%s Canceled");
755 if (jcr->store_bsock) {
756 bnet_sig(jcr->store_bsock, BNET_TERMINATE);
757 if (jcr->SD_msg_chan) {
758 pthread_cancel(jcr->SD_msg_chan);
763 term_msg = _("Inappropriate %s term code");
766 bsnprintf(term_code, sizeof(term_code), term_msg, "Migration");
767 bstrftimes(sdt, sizeof(sdt), jcr->jr.StartTime);
768 bstrftimes(edt, sizeof(edt), jcr->jr.EndTime);
769 RunTime = jcr->jr.EndTime - jcr->jr.StartTime;
773 kbps = (double)jcr->SDJobBytes / (1000 * RunTime);
777 jobstatus_to_ascii(jcr->SDJobStatus, sd_term_msg, sizeof(sd_term_msg));
779 Jmsg(jcr, msg_type, 0, _("Bacula %s (%s): %s\n"
780 " Prev Backup JobId: %s\n"
781 " New Backup JobId: %s\n"
782 " Migration JobId: %s\n"
783 " Migration Job: %s\n"
784 " Backup Level: %s%s\n"
786 " FileSet: \"%s\" %s\n"
787 " Pool: \"%s\" (From %s)\n"
788 " Storage: \"%s\" (From %s)\n"
791 " Elapsed time: %s\n"
793 " SD Files Written: %s\n"
794 " SD Bytes Written: %s (%sB)\n"
796 " Volume name(s): %s\n"
797 " Volume Session Id: %d\n"
798 " Volume Session Time: %d\n"
799 " Last Volume Bytes: %s (%sB)\n"
801 " SD termination status: %s\n"
802 " Termination: %s\n\n"),
806 prev_jcr ? edit_uint64(jcr->previous_jr.JobId, ec6) : "0",
807 prev_jcr ? edit_uint64(prev_jcr->jr.JobId, ec7) : "0",
808 edit_uint64(jcr->jr.JobId, ec8),
810 level_to_str(jcr->JobLevel), jcr->since,
812 jcr->fileset->name(), jcr->FSCreateTime,
813 jcr->pool->name(), jcr->pool_source,
814 jcr->wstore->name(), jcr->storage_source,
817 edit_utime(RunTime, elapsed, sizeof(elapsed)),
819 edit_uint64_with_commas(jcr->SDJobFiles, ec1),
820 edit_uint64_with_commas(jcr->SDJobBytes, ec2),
821 edit_uint64_with_suffix(jcr->SDJobBytes, ec3),
823 prev_jcr ? prev_jcr->VolumeName : "",
826 edit_uint64_with_commas(mr.VolBytes, ec4),
827 edit_uint64_with_suffix(mr.VolBytes, ec5),
/* Drop our reference to the replacement job's JCR */
832 Dmsg1(100, "migrate_cleanup() previous_jcr=0x%x\n", jcr->previous_jcr);
833 if (jcr->previous_jcr) {
834 free_jcr(jcr->previous_jcr);
835 jcr->previous_jcr = NULL;
837 Dmsg0(100, "Leave migrate_cleanup()\n");