#define FT_REPARSE 21 /* Win NTFS reparse point */
#define FT_PLUGIN 22 /* Plugin generated filename */
#define FT_DELETED 23 /* Deleted file entry */
+#define FT_BASE 24 /* Duplicate base file entry */
/* Definitions for upper part of type word (see above). */
#define AR_DATA_STREAM (1<<16) /* Data stream id present */
char *attr; /* attributes statp */
uint32_t FileIndex;
uint32_t Stream;
+ uint32_t FileType;
JobId_t JobId;
DBId_t ClientId;
DBId_t PathId;
bool my_batch_start(JCR *jcr, B_DB *mdb);
bool my_batch_end(JCR *jcr, B_DB *mdb, const char *error);
bool my_batch_insert(JCR *jcr, B_DB *mdb, ATTR_DBR *ar);
+bool db_create_attributes_record(JCR *jcr, B_DB *mdb, ATTR_DBR *ar);
+bool db_create_base_file_attributes_record(JCR *jcr, B_DB *mdb, ATTR_DBR *ar);
+bool db_commit_base_file_attributes_record(JCR *jcr, B_DB *mdb);
+bool db_create_base_file_list(JCR *jcr, B_DB *mdb, char *jobids);
/* sql_delete.c */
int db_delete_pool_record(JCR *jcr, B_DB *db, POOL_DBR *pool_dbr);
bool db_find_failed_job_since(JCR *jcr, B_DB *mdb, JOB_DBR *jr, POOLMEM *stime, int &JobLevel);
/* sql_get.c */
+bool db_get_base_file_list(JCR *jcr, B_DB *mdb,
+ DB_RESULT_HANDLER *result_handler,void *ctx);
int db_get_path_record(JCR *jcr, B_DB *mdb);
bool db_get_pool_record(JCR *jcr, B_DB *db, POOL_DBR *pdbr);
int db_get_client_record(JCR *jcr, B_DB *mdb, CLIENT_DBR *cr);
int db_get_counter_record(JCR *jcr, B_DB *mdb, COUNTER_DBR *cr);
bool db_get_query_dbids(JCR *jcr, B_DB *mdb, POOL_MEM &query, dbid_list &ids);
bool db_get_file_list(JCR *jcr, B_DB *mdb, char *jobids, DB_RESULT_HANDLER *result_handler, void *ctx);
+bool db_get_base_jobid(JCR *jcr, B_DB *mdb, JOB_DBR *jr, JobId_t *jobid);
bool db_accurate_get_jobids(JCR *jcr, B_DB *mdb, JOB_DBR *jr, db_list_ctx *jobids);
-
+bool db_get_used_base_jobids(JCR *jcr, B_DB *mdb, POOLMEM *jobids, db_list_ctx *result);
/* sql_list.c */
enum e_list_type {
HORZ_LIST,
int db_list_sql_query(JCR *jcr, B_DB *mdb, const char *query, DB_LIST_HANDLER *sendit, void *ctx, int verbose, e_list_type type);
void db_list_client_records(JCR *jcr, B_DB *mdb, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type);
void db_list_copies_records(JCR *jcr, B_DB *mdb, uint32_t limit, char *jobids, DB_LIST_HANDLER *sendit, void *ctx, e_list_type type);
+void
+db_list_base_files_for_job(JCR *jcr, B_DB *mdb, JobId_t jobid, DB_LIST_HANDLER *sendit, void *ctx);
+
/* sql_update.c */
bool db_update_job_start_record(JCR *jcr, B_DB *db, JOB_DBR *jr);
if (jcr && jcr->cached_attribute) {
Dmsg0(400, "Flush last cached attribute.\n");
- if (!db_create_file_attributes_record(jcr, mdb, jcr->ar)) {
+ if (!db_create_attributes_record(jcr, mdb, jcr->ar)) {
Jmsg1(jcr, M_FATAL, 0, _("Attribute create error. %s"), db_strerror(jcr->db));
}
jcr->cached_attribute = false;
"FROM Client,Job WHERE Client.ClientId=Job.ClientId AND JobStatus IN ('T','W') "
"AND Type='B' ORDER BY StartTime DESC LIMIT 20";
+const char *uar_print_jobs =
+ "SELECT DISTINCT JobId,Level,JobFiles,JobBytes,StartTime,VolumeName"
+ " FROM Job JOIN JobMedia USING (JobId) JOIN Media USING (MediaId) "
+ " WHERE JobId IN (%s) "
+ " ORDER BY StartTime ASC";
+
/*
* Find all files for a particular JobId and insert them into
* the tree during a restore.
#include "bacula.h"
#include "cats.h"
+/* Get the list of the most recent version of each file for a given
+ * jobid list.  This is tricky because in SQL the result of
+ *
+ * SELECT MAX(A), B, C, D FROM... GROUP BY (B,C)
+ *
+ * does not give the correct value for D.
+ *
+ * With PostgreSQL we can use DISTINCT ON(), but with MySQL or SQLite
+ * we need an extra join using JobTDate.  All %s placeholders are
+ * filled with the same comma-separated jobid list.
+ */
+const char *select_recent_version_with_basejob[4] = {
+ /* MySQL */
+"SELECT FileId, Job.JobId AS JobId, FileIndex, File.PathId AS PathId, "
+ "File.FilenameId AS FilenameId, LStat, MD5 "
+"FROM Job, File, ( "
+ "SELECT MAX(JobTDate) AS JobTDate, PathId, FilenameId "
+ "FROM ( "
+ "SELECT JobTDate, PathId, FilenameId " /* Get all normal files */
+ "FROM File JOIN Job USING (JobId) " /* from selected backup */
+ "WHERE JobId IN (%s) "
+ "UNION ALL "
+ "SELECT JobTDate, PathId, FilenameId " /* Get all files from */
+ "FROM BaseFiles " /* BaseJob */
+ "JOIN File USING (FileId) "
+ "JOIN Job ON (BaseJobId = Job.JobId) "
+ "WHERE BaseFiles.JobId IN (%s) " /* Use Max(JobTDate) to find */
+ ") AS tmp GROUP BY PathId, FilenameId " /* the latest file version */
+ ") AS T1 "
+"WHERE (Job.JobId IN ( " /* Security, we force JobId to be valid */
+ "SELECT DISTINCT BaseJobId FROM BaseFiles WHERE JobId IN (%s)) "
+ "OR Job.JobId IN (%s)) "
+ "AND T1.JobTDate = Job.JobTDate " /* Join on JobTDate to get the original */
+ "AND Job.JobId = File.JobId " /* Job/File record */
+ "AND T1.PathId = File.PathId "
+ "AND T1.FilenameId = File.FilenameId",
+
+ /* Postgresql */ /* The DISTINCT ON () lets us avoid the extra join */
+ /* NOTE(review): callers pass the jobid list 4 times; this variant
+  * only consumes the first 2 %s args (extra printf args are ignored) */
+ "SELECT DISTINCT ON (FilenameId, PathId) StartTime, JobId, FileId, "
+ "FileIndex, PathId, FilenameId, LStat, MD5 "
+ "FROM "
+ "(SELECT FileId, JobId, PathId, FilenameId, FileIndex, LStat, MD5 "
+ "FROM File WHERE JobId IN (%s) "
+ "UNION ALL "
+ "SELECT File.FileId, File.JobId, PathId, FilenameId, "
+ "File.FileIndex, LStat, MD5 "
+ "FROM BaseFiles JOIN File USING (FileId) "
+ "WHERE BaseFiles.JobId IN (%s) "
+ ") AS T JOIN Job USING (JobId) "
+ "ORDER BY FilenameId, PathId, StartTime DESC ",
+
+ /* SQLite */ /* See Mysql section for doc */
+"SELECT FileId, Job.JobId AS JobId, FileIndex, File.PathId AS PathId, "
+ "File.FilenameId AS FilenameId, LStat, MD5 "
+"FROM Job, File, ( "
+ "SELECT MAX(JobTDate) AS JobTDate, PathId, FilenameId "
+ "FROM ( "
+ "SELECT JobTDate, PathId, FilenameId "
+ "FROM File JOIN Job USING (JobId) "
+ "WHERE JobId IN (%s) "
+ "UNION ALL "
+ "SELECT JobTDate, PathId, FilenameId "
+ "FROM BaseFiles "
+ "JOIN File USING (FileId) "
+ "JOIN Job ON (BaseJobId = Job.JobId) "
+ "WHERE BaseFiles.JobId IN (%s) "
+ ") AS tmp GROUP BY PathId, FilenameId "
+ ") AS T1 "
+"WHERE (Job.JobId IN ( "
+ "SELECT DISTINCT BaseJobId FROM BaseFiles WHERE JobId IN (%s)) "
+ "OR Job.JobId IN (%s)) "
+ "AND T1.JobTDate = Job.JobTDate "
+ "AND Job.JobId = File.JobId "
+ "AND T1.PathId = File.PathId "
+ "AND T1.FilenameId = File.FilenameId",
+
+ /* SQLite3 */ /* See Mysql section for doc */
+"SELECT FileId, Job.JobId AS JobId, FileIndex, File.PathId AS PathId, "
+ "File.FilenameId AS FilenameId, LStat, MD5 "
+"FROM Job, File, ( "
+ "SELECT MAX(JobTDate) AS JobTDate, PathId, FilenameId "
+ "FROM ( "
+ "SELECT JobTDate, PathId, FilenameId "
+ "FROM File JOIN Job USING (JobId) "
+ "WHERE JobId IN (%s) "
+ "UNION ALL "
+ "SELECT JobTDate, PathId, FilenameId "
+ "FROM BaseFiles "
+ "JOIN File USING (FileId) "
+ "JOIN Job ON (BaseJobId = Job.JobId) "
+ "WHERE BaseFiles.JobId IN (%s) "
+ ") AS tmp GROUP BY PathId, FilenameId "
+ ") AS T1 "
+"WHERE (Job.JobId IN ( "
+ "SELECT DISTINCT BaseJobId FROM BaseFiles WHERE JobId IN (%s)) "
+ "OR Job.JobId IN (%s)) "
+ "AND T1.JobTDate = Job.JobTDate "
+ "AND Job.JobId = File.JobId "
+ "AND T1.PathId = File.PathId "
+ "AND T1.FilenameId = File.FilenameId"
+};
+
+/* Get the most recent version of each file for a given BaseJob jobid
+ * list.  Both %s placeholders must receive the same comma-separated
+ * jobid list.  Every variant must return an MD5 column, because
+ * db_create_base_file_list() embeds this query as a subquery and
+ * selects Temp.MD5 from it.
+ */
+const char *select_recent_version[4] = {
+ /* MySQL */
+ "SELECT j1.JobId AS JobId, f1.FileId AS FileId, f1.FileIndex AS FileIndex, "
+ "f1.PathId AS PathId, f1.FilenameId AS FilenameId, "
+ "f1.LStat AS LStat, f1.MD5 AS MD5 "
+ "FROM ( " /* Choose the last version for each Path/Filename */
+ "SELECT max(JobTDate) AS JobTDate, PathId, FilenameId "
+ "FROM File JOIN Job USING (JobId) "
+ "WHERE JobId IN (%s) "
+ "GROUP BY PathId, FilenameId "
+ ") AS t1, Job AS j1, File AS f1 "
+ "WHERE t1.JobTDate = j1.JobTDate "
+ "AND j1.JobId IN (%s) "
+ "AND t1.FilenameId = f1.FilenameId "
+ "AND t1.PathId = f1.PathId "
+ "AND j1.JobId = f1.JobId",
+
+ /* Postgresql */
+ "SELECT DISTINCT ON (FilenameId, PathId) StartTime, JobId, FileId, "
+ "FileIndex, PathId, FilenameId, LStat, MD5 " /* MD5 was missing: db_create_base_file_list() reads Temp.MD5 */
+ "FROM File JOIN Job USING (JobId) "
+ "WHERE JobId IN (%s) " /* only the first jobid arg is consumed here */
+ "ORDER BY FilenameId, PathId, StartTime DESC ",
+
+ /* SQLite */
+ "SELECT j1.JobId AS JobId, f1.FileId AS FileId, f1.FileIndex AS FileIndex, "
+ "f1.PathId AS PathId, f1.FilenameId AS FilenameId, "
+ "f1.LStat AS LStat, f1.MD5 AS MD5 "
+ "FROM ( "
+ "SELECT max(JobTDate) AS JobTDate, PathId, FilenameId "
+ "FROM File JOIN Job USING (JobId) "
+ "WHERE JobId IN (%s) "
+ "GROUP BY PathId, FilenameId "
+ ") AS t1, Job AS j1, File AS f1 "
+ "WHERE t1.JobTDate = j1.JobTDate "
+ "AND j1.JobId IN (%s) "
+ "AND t1.FilenameId = f1.FilenameId "
+ "AND t1.PathId = f1.PathId "
+ "AND j1.JobId = f1.JobId",
+
+ /* SQLite3 */
+ "SELECT j1.JobId AS JobId, f1.FileId AS FileId, f1.FileIndex AS FileIndex, "
+ "f1.PathId AS PathId, f1.FilenameId AS FilenameId, "
+ "f1.LStat AS LStat, f1.MD5 AS MD5 "
+ "FROM ( "
+ "SELECT max(JobTDate) AS JobTDate, PathId, FilenameId "
+ "FROM File JOIN Job USING (JobId) "
+ "WHERE JobId IN (%s) "
+ "GROUP BY PathId, FilenameId "
+ ") AS t1, Job AS j1, File AS f1 "
+ "WHERE t1.JobTDate = j1.JobTDate "
+ "AND j1.JobId IN (%s) "
+ "AND t1.FilenameId = f1.FilenameId "
+ "AND t1.PathId = f1.PathId "
+ "AND j1.JobId = f1.JobId"
+};
+
/* ====== ua_prune.c */
/* List of SQL commands to create temp table and indicies */
extern const char CATS_IMP_EXP *cleanup_created_job;
extern const char CATS_IMP_EXP *cleanup_running_job;
extern const char CATS_IMP_EXP *uar_list_jobs;
+extern const char CATS_IMP_EXP *uar_print_jobs;
extern const char CATS_IMP_EXP *uar_count_files;
extern const char CATS_IMP_EXP *uar_sel_files;
extern const char CATS_IMP_EXP *uar_del_temp;
extern const char CATS_IMP_EXP *uar_jobid_fileindex_from_table;
extern const char CATS_IMP_EXP *uar_sel_jobid_temp;
+extern const char CATS_IMP_EXP *select_recent_version[4];
+extern const char CATS_IMP_EXP *select_recent_version_with_basejob[4];
extern const char CATS_IMP_EXP *create_deltabs[4];
extern const char CATS_IMP_EXP *uar_file[4];
#include "bacula.h"
#include "cats.h"
-static const int dbglevel = 500;
+static const int dbglevel = 10;
#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_DBI
*/
bool db_create_file_attributes_record(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
{
+ ASSERT(ar->FileType != FT_BASE);
+
Dmsg1(dbglevel, "Fname=%s\n", ar->fname);
Dmsg0(dbglevel, "put_file_into_catalog\n");
}
B_DB *bdb = jcr->db_batch;
- /*
- * Make sure we have an acceptable attributes record.
- */
- if (!(ar->Stream == STREAM_UNIX_ATTRIBUTES ||
- ar->Stream == STREAM_UNIX_ATTRIBUTES_EX)) {
- Mmsg1(&mdb->errmsg, _("Attempt to put non-attributes into catalog. Stream=%d\n"),
- ar->Stream);
- Jmsg(jcr, M_FATAL, 0, "%s", mdb->errmsg);
- return false;
- }
-
split_path_and_file(jcr, bdb, ar->fname);
db_lock(mdb);
Dmsg1(dbglevel, "Fname=%s\n", ar->fname);
Dmsg0(dbglevel, "put_file_into_catalog\n");
- /*
- * Make sure we have an acceptable attributes record.
- */
- if (!(ar->Stream == STREAM_UNIX_ATTRIBUTES ||
- ar->Stream == STREAM_UNIX_ATTRIBUTES_EX)) {
- Mmsg1(&mdb->errmsg, _("Attempt to put non-attributes into catalog. Stream=%d\n"),
- ar->Stream);
- Jmsg(jcr, M_ERROR, 0, "%s", mdb->errmsg);
- goto bail_out;
- }
-
split_path_and_file(jcr, mdb, ar->fname);
#endif /* ! HAVE_BATCH_FILE_INSERT */
+
+/* SQL commands to create the per-job temporary table basefile<JobId>.
+ * The %lld placeholder is filled with the current JobId, so each job
+ * gets its own table.  The commented-out non-TEMPORARY variants are
+ * presumably kept for debugging -- TODO confirm.
+ */
+const char *create_temp_basefile[4] = {
+ /* MySQL */
+ "CREATE TEMPORARY TABLE basefile%lld ("
+// "CREATE TABLE basefile%lld ("
+ "Path BLOB NOT NULL,"
+ "Name BLOB NOT NULL)",
+
+ /* Postgresql */
+ "CREATE TEMPORARY TABLE basefile%lld ("
+// "CREATE TABLE basefile%lld ("
+ "Path TEXT,"
+ "Name TEXT)",
+
+ /* SQLite */
+ "CREATE TEMPORARY TABLE basefile%lld ("
+ "Path TEXT,"
+ "Name TEXT)",
+
+ /* SQLite3 */
+ "CREATE TEMPORARY TABLE basefile%lld ("
+ "Path TEXT,"
+ "Name TEXT)"
+};
+
+/*
+ * Create a file attributes record, or a base file attributes record,
+ * depending on ar->FileType.
+ *
+ * Returns: true on success (also true in the unsupported
+ *          copy/migration-with-base case, which is only logged),
+ *          false on bad stream or database error.
+ */
+bool db_create_attributes_record(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
+{
+   bool ret;
+
+   /*
+    * Make sure we have an acceptable attributes record.
+    */
+   if (!(ar->Stream == STREAM_UNIX_ATTRIBUTES ||
+         ar->Stream == STREAM_UNIX_ATTRIBUTES_EX)) {
+      /* Pass ar->Stream: the %d conversion previously had no argument */
+      Jmsg(jcr, M_FATAL, 0, _("Attempt to put non-attributes into catalog. Stream=%d\n"),
+           ar->Stream);
+      return false;
+   }
+
+   if (ar->FileType != FT_BASE) {
+      ret = db_create_file_attributes_record(jcr, mdb, ar);
+
+   } else if (jcr->HasBase) {
+      ret = db_create_base_file_attributes_record(jcr, mdb, ar);
+
+   } else {
+      Jmsg0(jcr, M_FATAL, 0, _("Can't Copy/Migrate job using BaseJob"));
+      ret = true;          /* in copy/migration what do we do ? */
+   }
+
+   return ret;
+}
+
+/*
+ * Create Base File record in B_DB
+ *
+ * Insert the escaped path/filename of one base-file candidate into the
+ * per-job temporary table basefile<JobId>.  The permanent BaseFiles
+ * rows are written later by db_commit_base_file_attributes_record().
+ * Returns true if the INSERT succeeded.
+ */
+bool db_create_base_file_attributes_record(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
+{
+ bool ret;
+ Dmsg1(dbglevel, "create_base_file Fname=%s\n", ar->fname);
+ Dmsg0(dbglevel, "put_base_file_into_catalog\n");
+
+ db_lock(mdb);
+ split_path_and_file(jcr, mdb, ar->fname);
+
+ /* Escape name and path; worst case doubles the length (+1 for NUL) */
+ mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1);
+ db_escape_string(jcr, mdb, mdb->esc_name, mdb->fname, mdb->fnl);
+
+ mdb->esc_path = check_pool_memory_size(mdb->esc_path, mdb->pnl*2+1);
+ db_escape_string(jcr, mdb, mdb->esc_path, mdb->path, mdb->pnl);
+
+ Mmsg(mdb->cmd, "INSERT INTO basefile%lld (Path, Name) VALUES ('%s','%s')",
+ (uint64_t)jcr->JobId, mdb->esc_path, mdb->esc_name);
+
+ ret = INSERT_DB(jcr, mdb, mdb->cmd);
+ db_unlock(mdb);
+
+ return ret;
+}
+
+/*
+ * Cleanup the base file temporary tables
+ * Drops both per-job temp tables (new_basefile<JobId> and
+ * basefile<JobId>); the query results are deliberately ignored.
+ */
+static void db_cleanup_base_file(JCR *jcr, B_DB *mdb)
+{
+ POOL_MEM buf(PM_MESSAGE);
+ Mmsg(buf, "DROP TABLE new_basefile%lld", (uint64_t) jcr->JobId);
+ db_sql_query(mdb, buf.c_str(), NULL, NULL);
+
+ Mmsg(buf, "DROP TABLE basefile%lld", (uint64_t) jcr->JobId);
+ db_sql_query(mdb, buf.c_str(), NULL, NULL);
+}
+
+/*
+ * Put all base files seen in the backup into the BaseFiles table
+ * and clean up the temporary tables.
+ *
+ * Joins the names collected during the backup (basefile<JobId>, A)
+ * against the candidate versions (new_basefile<JobId>, B) and inserts
+ * the matches into BaseFiles.  ed1 holds the JobId string once and is
+ * reused for both temp-table name suffixes.
+ */
+bool db_commit_base_file_attributes_record(JCR *jcr, B_DB *mdb)
+{
+ bool ret;
+ char ed1[50];
+
+ db_lock(mdb);
+
+ Mmsg(mdb->cmd,
+ "INSERT INTO BaseFiles (BaseJobId, JobId, FileId, FileIndex) "
+ "SELECT B.JobId AS BaseJobId, %s AS JobId, "
+ "B.FileId, B.FileIndex "
+ "FROM basefile%s AS A, new_basefile%s AS B "
+ "WHERE A.Path = B.Path "
+ "AND A.Name = B.Name "
+ "ORDER BY B.FileId",
+ edit_uint64(jcr->JobId, ed1), ed1, ed1);
+ ret = QUERY_DB(jcr, mdb, mdb->cmd);
+ jcr->nb_base_files_used = sql_affected_rows(mdb); /* rows actually reused from base jobs */
+ db_cleanup_base_file(jcr, mdb);
+
+ db_unlock(mdb);
+ return ret;
+}
+
+/*
+ * Find the last "accurate" backup state with Base jobs
+ * 1) Get all files with jobid in list (F subquery)
+ * 2) Take only the last version of each file (Temp subquery) => accurate list is ok
+ * 3) Put the result in a temporary table for the end of job
+ *
+ * Creates two per-job temp tables: an empty basefile<JobId> (filled
+ * during backup by db_create_base_file_attributes_record()) and
+ * new_basefile<JobId> holding the latest version of every file from
+ * the given base jobids.  Returns false on empty jobids or SQL error.
+ */
+bool db_create_base_file_list(JCR *jcr, B_DB *mdb, char *jobids)
+{
+ POOL_MEM buf;
+ bool ret=false;
+
+ db_lock(mdb);
+
+ if (!*jobids) {
+ Mmsg(mdb->errmsg, _("ERR=JobIds are empty\n"));
+ goto bail_out;
+ }
+
+ /* Empty name-collection table for this job */
+ Mmsg(mdb->cmd, create_temp_basefile[db_type], (uint64_t) jcr->JobId);
+ if (!db_sql_query(mdb, mdb->cmd, NULL, NULL)) {
+ goto bail_out;
+ }
+ /* Latest version of each file from the base jobs (needs jobids twice) */
+ Mmsg(buf, select_recent_version[db_type], jobids, jobids);
+ Mmsg(mdb->cmd,
+"CREATE TEMPORARY TABLE new_basefile%lld AS "
+//"CREATE TABLE new_basefile%lld AS "
+ "SELECT Path.Path AS Path, Filename.Name AS Name, Temp.FileIndex AS FileIndex,"
+ "Temp.JobId AS JobId, Temp.LStat AS LStat, Temp.FileId AS FileId, "
+ "Temp.MD5 AS MD5 "
+ "FROM ( %s ) AS Temp "
+ "JOIN Filename ON (Filename.FilenameId = Temp.FilenameId) "
+ "JOIN Path ON (Path.PathId = Temp.PathId) "
+ "WHERE Temp.FileIndex > 0",
+ (uint64_t)jcr->JobId, buf.c_str());
+
+ ret = db_sql_query(mdb, mdb->cmd, NULL, NULL);
+bail_out:
+ db_unlock(mdb);
+ return ret;
+}
+
#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_DBI */
Mmsg(mdb->cmd, "SELECT VolSessionId,VolSessionTime,"
"PoolId,StartTime,EndTime,JobFiles,JobBytes,JobTDate,Job,JobStatus,"
"Type,Level,ClientId,Name,PriorJobId,RealEndTime,JobId,FileSetId,"
-"SchedTime,RealEndTime,ReadBytes "
+"SchedTime,RealEndTime,ReadBytes,HasBase "
"FROM Job WHERE Job='%s'", jr->Job);
} else {
Mmsg(mdb->cmd, "SELECT VolSessionId,VolSessionTime,"
"PoolId,StartTime,EndTime,JobFiles,JobBytes,JobTDate,Job,JobStatus,"
"Type,Level,ClientId,Name,PriorJobId,RealEndTime,JobId,FileSetId,"
-"SchedTime,RealEndTime,ReadBytes "
+"SchedTime,RealEndTime,ReadBytes,HasBase "
"FROM Job WHERE JobId=%s",
edit_int64(jr->JobId, ed1));
}
jr->SchedTime = str_to_utime(jr->cSchedTime);
jr->EndTime = str_to_utime(jr->cEndTime);
jr->RealEndTime = str_to_utime(jr->cRealEndTime);
+ jr->HasBase = str_to_int64(row[21]);
sql_free_result(mdb);
db_unlock(mdb);
}
/*
- * Find the last "accurate" backup state (that can take deleted files in account)
- * 1) Get all files with jobid in list (F subquery)
- * 2) Take only the last version of each file (Temp subquery) => accurate list is ok
+ * Find the last "accurate" backup state (that can take deleted files in
+ * account)
+ * 1) Get all files with jobid in list (F subquery)
+ * Get all files in BaseFiles with jobid in list
+ * 2) Take only the last version of each file (Temp subquery) => accurate list
+ * is ok
* 3) Join the result to file table to get fileindex, jobid and lstat information
*
* TODO: See if we can do the SORT only if needed (as an argument)
#define new_db_get_file_list
#ifdef new_db_get_file_list
- /* This is broken, at least if called from ua_restore.c */
+ POOL_MEM buf2(PM_MESSAGE);
+ Mmsg(buf2, select_recent_version_with_basejob[db_type],
+ jobids, jobids, jobids, jobids);
Mmsg(buf,
- "SELECT Path.Path, Filename.Name, File.FileIndex, File.JobId, File.LStat "
- "FROM ( "
- "SELECT max(FileId) as FileId, PathId, FilenameId "
- "FROM (SELECT FileId, PathId, FilenameId FROM File WHERE JobId IN (%s)) AS F "
- "GROUP BY PathId, FilenameId "
- ") AS Temp "
+"SELECT Path.Path, Filename.Name, Temp.FileIndex, Temp.JobId, LStat, MD5 "
+ "FROM ( %s ) AS Temp "
"JOIN Filename ON (Filename.FilenameId = Temp.FilenameId) "
"JOIN Path ON (Path.PathId = Temp.PathId) "
- "JOIN File ON (File.FileId = Temp.FileId) "
-"WHERE File.FileIndex > 0 ORDER BY JobId, FileIndex ASC", /* Return sorted by JobId, */
- /* FileIndex for restore code */
- jobids);
+"WHERE FileIndex > 0 "
+"ORDER BY Temp.JobId, FileIndex ASC",/* Return sorted by JobId, */
+ /* FileIndex for restore code */
+ buf2.c_str());
#else
/*
- * I am not sure that this works the same as the code in ua_restore.c
- * but it is very similar. The accurate-test fails in a restore. Bad file count.
+ * I am not sure that this works the same as the code in ua_restore.c but it
+ * is very similar. The accurate-test fails in a restore. Bad file count.
*/
Mmsg(buf, uar_sel_files, jobids);
#endif
return db_sql_query(mdb, buf.c_str(), result_handler, ctx);
}
+/*
+ * Get the list of base JobIds referenced (used) by the given JobIds.
+ * The BaseJobIds are delivered through db_list_handler into result.
+ */
+bool db_get_used_base_jobids(JCR *jcr, B_DB *mdb,
+ POOLMEM *jobids, db_list_ctx *result)
+{
+ POOL_MEM buf;
+ Mmsg(buf,
+ "SELECT DISTINCT BaseJobId "
+ " FROM Job JOIN BaseFiles USING (JobId) "
+ " WHERE Job.HasBase = 1 "
+ " AND JobId IN (%s) ", jobids);
+ return db_sql_query(mdb, buf.c_str(), db_list_handler, result);
+}
+
/* The decision do change an incr/diff was done before
* Full : do nothing
* Differential : get the last full id
return ret;
}
+/*
+ * Send the content of the per-job new_basefile<JobId> temp table
+ * (built by db_create_base_file_list()) to the result handler,
+ * ordered by JobId, FileIndex as the restore/FD code expects.
+ */
+bool db_get_base_file_list(JCR *jcr, B_DB *mdb,
+ DB_RESULT_HANDLER *result_handler, void *ctx)
+{
+ POOL_MEM buf(PM_MESSAGE);
+
+ Mmsg(buf,
+ "SELECT Path, Name, FileIndex, JobId, LStat, MD5 "
+ "FROM new_basefile%lld ORDER BY JobId, FileIndex ASC",
+ (uint64_t) jcr->JobId);
+
+ return db_sql_query(mdb, buf.c_str(), result_handler, ctx);
+}
+
+/*
+ * Find the JobId of the last good Base job (Level 'B', Type 'B',
+ * JobStatus T/W) with the same job Name, started no later than
+ * jr->StartTime.  *jobid is set to 0 first and stays 0 when no job
+ * matches; the return value is false only if the SQL query fails.
+ */
+bool db_get_base_jobid(JCR *jcr, B_DB *mdb, JOB_DBR *jr, JobId_t *jobid)
+{
+ char date[MAX_TIME_LENGTH];
+ int64_t id = *jobid = 0;
+ POOL_MEM query(PM_FNAME);
+
+// char clientid[50], filesetid[50];
+
+ utime_t StartTime = (jr->StartTime)?jr->StartTime:time(NULL);
+ bstrutime(date, sizeof(date), StartTime + 1); /* +1s so a job started in the same second matches -- TODO confirm */
+
+ /* we can take also client name, fileset, etc... */
+
+ Mmsg(query,
+ "SELECT JobId, Job, StartTime, EndTime, JobTDate, PurgedFiles "
+ "FROM Job "
+// "JOIN FileSet USING (FileSetId) JOIN Client USING (ClientId) "
+ "WHERE Job.Name = '%s' "
+ "AND Level='B' AND JobStatus IN ('T','W') AND Type='B' "
+// "AND FileSet.FileSet= '%s' "
+// "AND Client.Name = '%s' "
+ "AND StartTime<'%s' "
+ "ORDER BY Job.JobTDate DESC LIMIT 1",
+ jr->Name,
+// edit_uint64(jr->ClientId, clientid),
+// edit_uint64(jr->FileSetId, filesetid));
+ date);
+
+ Dmsg1(10, "db_get_base_jobid q=%s\n", query.c_str());
+ /* db_int64_handler stores the first column (JobId) of the single row */
+ if (!db_sql_query(mdb, query.c_str(), db_int64_handler, &id)) {
+ goto bail_out;
+ }
+ *jobid = (JobId_t) id;
+
+ Dmsg1(10, "db_get_base_jobid=%lld\n", id);
+ return true;
+
+bail_out:
+ return false;
+}
+
+/*
+ * Use to build a string of int list from a query. "10,20,30"
+ * ctx must point to a POOLMEM string, empty ("") for a fresh list;
+ * each single-column row is appended comma-separated.
+ * Always returns 0 so row fetching continues.
+ */
+int db_get_int_handler(void *ctx, int num_fields, char **row)
+{
+ POOLMEM *ret = (POOLMEM *)ctx;
+ if (num_fields == 1) {
+ if (ret[0]) { /* list already non-empty: need a separator */
+ pm_strcat(ret, ",");
+ }
+ pm_strcat(ret, row[0]);
+ }
+ return 0;
+}
+
#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_DBI */
* Stupid MySQL is NON-STANDARD !
*/
if (db_type == SQL_TYPE_MYSQL) {
- Mmsg(mdb->cmd, "SELECT CONCAT(Path.Path,Filename.Name) AS Filename FROM File,"
- "Filename,Path WHERE File.JobId=%s AND Filename.FilenameId=File.FilenameId "
- "AND Path.PathId=File.PathId",
- edit_int64(jobid, ed1));
+ Mmsg(mdb->cmd, "SELECT CONCAT(Path.Path,Filename.Name) AS Filename "
+ "FROM (SELECT PathId, FilenameId FROM File WHERE JobId=%s "
+ "UNION ALL "
+ "SELECT PathId, FilenameId "
+ "FROM BaseFiles JOIN File "
+ "ON (BaseFiles.FileId = File.FileId) "
+ "WHERE BaseFiles.JobId = %s"
+ ") AS F, Filename,Path "
+ "WHERE Filename.FilenameId=F.FilenameId "
+ "AND Path.PathId=F.PathId",
+ edit_int64(jobid, ed1), ed1);
} else {
- Mmsg(mdb->cmd, "SELECT Path.Path||Filename.Name AS Filename FROM File,"
- "Filename,Path WHERE File.JobId=%s AND Filename.FilenameId=File.FilenameId "
- "AND Path.PathId=File.PathId",
+ Mmsg(mdb->cmd, "SELECT Path.Path||Filename.Name AS Filename "
+ "FROM (SELECT PathId, FilenameId FROM File WHERE JobId=%s "
+ "UNION ALL "
+ "SELECT PathId, FilenameId "
+ "FROM BaseFiles JOIN File "
+ "ON (BaseFiles.FileId = File.FileId) "
+ "WHERE BaseFiles.JobId = %s"
+ ") AS F, Filename,Path "
+ "WHERE Filename.FilenameId=F.FilenameId "
+ "AND Path.PathId=F.PathId",
+ edit_int64(jobid, ed1), ed1);
+ }
+
+ if (!QUERY_DB(jcr, mdb, mdb->cmd)) {
+ db_unlock(mdb);
+ return;
+ }
+
+ list_result(jcr, mdb, sendit, ctx, HORZ_LIST);
+
+ sql_free_result(mdb);
+ db_unlock(mdb);
+}
+
+void
+db_list_base_files_for_job(JCR *jcr, B_DB *mdb, JobId_t jobid, DB_LIST_HANDLER *sendit, void *ctx)
+{
+ char ed1[50];
+ db_lock(mdb);
+
+ /*
+ * Stupid MySQL is NON-STANDARD !
+ */
+ if (db_type == SQL_TYPE_MYSQL) {
+ Mmsg(mdb->cmd, "SELECT CONCAT(Path.Path,Filename.Name) AS Filename "
+ "FROM BaseFiles, File, Filename, Path "
+ "WHERE BaseFiles.JobId=%s AND BaseFiles.BaseJobId = File.JobId "
+ "AND BaseFiles.FileId = File.FileId "
+ "AND Filename.FilenameId=File.FilenameId "
+ "AND Path.PathId=File.PathId",
edit_int64(jobid, ed1));
+ } else {
+ Mmsg(mdb->cmd, "SELECT Path.Path||Filename.Name AS Filename "
+ "FROM BaseFiles, File, Filename, Path "
+ "WHERE BaseFiles.JobId=%s AND BaseFiles.BaseJobId = File.JobId "
+ "AND BaseFiles.FileId = File.FileId "
+ "AND Filename.FilenameId=File.FilenameId "
+ "AND Path.PathId=File.PathId",
+ edit_int64(jobid, ed1));
}
if (!QUERY_DB(jcr, mdb, mdb->cmd)) {
"UPDATE Job SET JobStatus='%c',EndTime='%s',"
"ClientId=%u,JobBytes=%s,ReadBytes=%s,JobFiles=%u,JobErrors=%u,VolSessionId=%u,"
"VolSessionTime=%u,PoolId=%u,FileSetId=%u,JobTDate=%s,"
-"RealEndTime='%s',PriorJobId=%s WHERE JobId=%s",
+"RealEndTime='%s',PriorJobId=%s,HasBase=%u WHERE JobId=%s",
(char)(jr->JobStatus), dt, jr->ClientId, edit_uint64(jr->JobBytes, ed1),
edit_uint64(jr->ReadBytes, ed4),
jr->JobFiles, jr->JobErrors, jr->VolSessionId, jr->VolSessionTime,
jr->PoolId, jr->FileSetId, edit_uint64(JobTDate, ed2),
rdt,
PriorJobId,
+ jr->HasBase,
edit_int64(jr->JobId, ed3));
stat = UPDATE_DB(jcr, mdb, mdb->cmd);
return true;
}
+/* Take all base jobs from the job resource and find the
+ * last L_BASE jobid for each; build a comma-separated jobid list.
+ * Returns false when no Base job is configured or none was found,
+ * which stops accurate/base processing for this job.
+ */
+static bool get_base_jobids(JCR *jcr, db_list_ctx *jobids)
+{
+ JOB_DBR jr;
+ JOB *job;
+ JobId_t id;
+ char str_jobid[50];
+
+ if (!jcr->job->base) {
+ return false; /* no base job, stop accurate */
+ }
+
+ memset(&jr, 0, sizeof(JOB_DBR));
+ jr.StartTime = jcr->jr.StartTime;
+
+ foreach_alist(job, jcr->job->base) {
+ bstrncpy(jr.Name, job->name(), sizeof(jr.Name));
+ db_get_base_jobid(jcr, jcr->db, &jr, &id);
+
+ if (id) { /* id == 0: no usable base job for this name, skip it */
+ if (jobids->count) {
+ pm_strcat(jobids->list, ",");
+ }
+ pm_strcat(jobids->list, edit_uint64(id, str_jobid));
+ jobids->count++;
+ }
+ }
+
+ return jobids->count > 0;
+}
+
/*
- * Foreach files in currrent list, send "/path/fname\0LStat" to FD
+ * For each file in the current list, send "/path/fname\0LStat\0MD5" to FD
*/
static int accurate_list_handler(void *ctx, int num_fields, char **row)
{
return 1;
}
- if (row[2] > 0) { /* discard when file_index == 0 */
- jcr->file_bsock->fsend("%s%s%c%s", row[0], row[1], 0, row[4]);
+ if (row[2] == 0) { /* discard when file_index == 0 */
+ return 0;
+ }
+
+ /* sending with checksum */
+ if (jcr->use_accurate_chksum
+ && num_fields == 6
+ && row[5][0] /* skip checksum = '0' */
+ && row[5][1])
+ {
+ jcr->file_bsock->fsend("%s%s%c%s%c%s",
+ row[0], row[1], 0, row[4], 0, row[5]);
+ } else {
+ jcr->file_bsock->fsend("%s%s%c%s",
+ row[0], row[1], 0, row[4]);
}
return 0;
}
+/* In this procedure, we check if the current fileset is using checksum
+ * FileSet-> Include-> Options-> Accurate/Verify/BaseJob=checksum
+ *
+ * Scans the packed option-character strings of every Include/Options
+ * block; returns true if an MD5 ('5') or SHA1 ('1') request appears
+ * inside an Accurate/Verify/BaseJob option group relevant to the
+ * current job type/level.
+ */
+static bool is_checksum_needed_by_fileset(JCR *jcr)
+{
+ FILESET *f;
+ INCEXE *inc;
+ FOPTS *fopts;
+ bool in_block=false;
+ if (!jcr->job || !jcr->job->fileset) {
+ return false;
+ }
+
+ f = jcr->job->fileset;
+
+ for (int i=0; i < f->num_includes; i++) { /* Parse all Include {} */
+ inc = f->include_items[i];
+
+ for (int j=0; j < inc->num_opts; j++) { /* Parse all Options {} */
+ fopts = inc->opts_list[j];
+
+ for (char *k=fopts->opts; *k ; k++) { /* Try to find one request */
+ switch (*k) {
+ case 'V': /* verify */
+ in_block = (jcr->get_JobType() == JT_VERIFY); /* not used now */
+ break;
+ case 'J': /* Basejob keyword */
+ in_block = (jcr->get_JobLevel() == L_FULL);
+ break;
+ case 'C': /* Accurate keyword */
+ in_block = (jcr->get_JobLevel() != L_FULL);
+ break;
+ case ':': /* End of keyword */
+ in_block = false;
+ break;
+ case '5': /* MD5 */
+ case '1': /* SHA1 */
+ if (in_block) {
+ Dmsg0(50, "Checksum will be sent to FD\n");
+ return true;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ }
+ return false;
+}
+
/*
* Send current file list to FD
* DIR -> FD : accurate files=xxxx
- * DIR -> FD : /path/to/file\0Lstat
- * DIR -> FD : /path/to/dir/\0Lstat
+ * DIR -> FD : /path/to/file\0Lstat\0MD5
+ * DIR -> FD : /path/to/dir/\0Lstat\0MD5
* ...
* DIR -> FD : EOD
*/
bool send_accurate_current_files(JCR *jcr)
{
POOL_MEM buf;
+ bool ret=true;
db_list_ctx jobids;
db_list_ctx nb;
- if (!jcr->accurate || job_canceled(jcr) || jcr->get_JobLevel()==L_FULL) {
+ if (!jcr->accurate || job_canceled(jcr)) {
+ return true;
+ }
+ /* In base level, no previous job is used */
+ if (jcr->get_JobLevel() == L_BASE) {
return true;
}
- db_accurate_get_jobids(jcr, jcr->db, &jcr->jr, &jobids);
+
+ /* Don't send and store the checksum if fileset doesn't require it */
+ jcr->use_accurate_chksum = is_checksum_needed_by_fileset(jcr);
- if (jobids.count == 0) {
- Jmsg(jcr, M_FATAL, 0, _("Cannot find previous jobids.\n"));
- return false;
+ if (jcr->get_JobLevel() == L_FULL) {
+ /* On Full mode, if no previous base job, no accurate things */
+ if (!get_base_jobids(jcr, &jobids)) {
+ goto bail_out;
+ }
+ jcr->HasBase = true;
+ Jmsg(jcr, M_INFO, 0, _("Using BaseJobId(s): %s\n"), jobids.list);
+
+ } else {
+ /* For Incr/Diff level, we search for older jobs */
+ db_accurate_get_jobids(jcr, jcr->db, &jcr->jr, &jobids);
+
+ /* We are in Incr/Diff, but no Full to build the accurate list... */
+ if (jobids.count == 0) {
+ ret=false;
+ Jmsg(jcr, M_FATAL, 0, _("Cannot find previous jobids.\n"));
+ goto bail_out;
+ }
}
+
if (jcr->JobId) { /* display the message only for real jobs */
Jmsg(jcr, M_INFO, 0, _("Sending Accurate information.\n"));
}
+
/* to be able to allocate the right size for htable */
Mmsg(buf, "SELECT sum(JobFiles) FROM Job WHERE JobId IN (%s)", jobids.list);
db_sql_query(jcr->db, buf.c_str(), db_list_handler, &nb);
Jmsg0(jcr, M_FATAL, 0, "Can't get batch sql connexion");
return false;
}
+
+ if (jcr->HasBase) {
+ jcr->nb_base_files = str_to_int64(nb.list);
+ db_create_base_file_list(jcr, jcr->db, jobids.list);
+ db_get_base_file_list(jcr, jcr->db,
+ accurate_list_handler, (void *)jcr);
- db_get_file_list(jcr, jcr->db_batch, jobids.list, accurate_list_handler, (void *)jcr);
+ } else {
+ db_get_file_list(jcr, jcr->db_batch, jobids.list,
+ accurate_list_handler, (void *)jcr);
+ }
/* TODO: close the batch connexion ? (can be used very soon) */
jcr->file_bsock->signal(BNET_EOD);
- return true;
+bail_out:
+ return ret;
}
/*
/* Pickup Job termination data */
stat = wait_for_job_termination(jcr);
db_write_batch_file_records(jcr); /* used by bulk batch file insert */
+
+ if (jcr->HasBase &&
+ !db_commit_base_file_attributes_record(jcr, jcr->db))
+ {
+ Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db));
+ }
+
if (stat == JS_Terminated) {
backup_cleanup(jcr, stat);
return true;
jobstatus_to_ascii(jcr->FDJobStatus, fd_term_msg, sizeof(fd_term_msg));
jobstatus_to_ascii(jcr->SDJobStatus, sd_term_msg, sizeof(sd_term_msg));
+ if (jcr->HasBase) {
+ Dmsg3(0, "Base files/Used files %lld/%lld=%.2f%%\n", jcr->nb_base_files,
+ jcr->nb_base_files_used,
+ jcr->nb_base_files_used*100.0/jcr->nb_base_files);
+ }
// bmicrosleep(15, 0); /* for debugging SIGHUP */
Jmsg(jcr, msg_type, 0, _("%s %s %s (%s): %s\n"
uint32_t FileIndex;
uint32_t data_len;
char *p;
- int filetype;
int len;
char *fname, *attr;
ATTR_DBR *ar = NULL;
if (Stream == STREAM_UNIX_ATTRIBUTES || Stream == STREAM_UNIX_ATTRIBUTES_EX) {
if (jcr->cached_attribute) {
Dmsg2(400, "Cached attr. Stream=%d fname=%s\n", ar->Stream, ar->fname);
- if (!db_create_file_attributes_record(jcr, jcr->db, ar)) {
+ if (!db_create_attributes_record(jcr, jcr->db, ar)) {
Jmsg1(jcr, M_FATAL, 0, _("Attribute create error. %s"), db_strerror(jcr->db));
}
}
p = jcr->attr - msg + p; /* point p into jcr->attr */
skip_nonspaces(&p); /* skip FileIndex */
skip_spaces(&p);
- filetype = str_to_int32(p); /* TODO: choose between unserialize and str_to_int32 */
+ ar->FileType = str_to_int32(p); /* TODO: choose between unserialize and str_to_int32 */
skip_nonspaces(&p); /* skip FileType */
skip_spaces(&p);
fname = p;
Dmsg1(400, "dird<stored: attr=%s\n", attr);
ar->attr = attr;
ar->fname = fname;
- if (filetype == FT_DELETED) {
+ if (ar->FileType == FT_DELETED) {
ar->FileIndex = 0; /* special value */
} else {
ar->FileIndex = FileIndex;
}
bin_to_base64(digestbuf, sizeof(digestbuf), fname, len, true);
- Dmsg3(400, "DigestLen=%d Digest=%s type=%d\n", strlen(digestbuf), digestbuf, Stream);
+ Dmsg3(400, "DigestLen=%d Digest=%s type=%d\n", strlen(digestbuf),
+ digestbuf, Stream);
if (jcr->cached_attribute) {
ar->Digest = digestbuf;
ar->DigestType = type;
- Dmsg2(400, "Cached attr with digest. Stream=%d fname=%s\n", ar->Stream, ar->fname);
- if (!db_create_file_attributes_record(jcr, jcr->db, ar)) {
- Jmsg1(jcr, M_FATAL, 0, _("Attribute create error. %s"), db_strerror(jcr->db));
+ Dmsg2(400, "Cached attr with digest. Stream=%d fname=%s\n",
+ ar->Stream, ar->fname);
+
+ /* Update BaseFile table */
+ if (!db_create_attributes_record(jcr, jcr->db, ar)) {
+ Jmsg1(jcr, M_FATAL, 0, _("attribute create error. %s"),
+ db_strerror(jcr->db));
}
jcr->cached_attribute = false;
} else {
if (mode == UPDATE_AND_FIX) {
db_sql_query(db, cleanup_created_job, NULL, NULL);
db_sql_query(db, cleanup_running_job, NULL, NULL);
+ db_sql_query(db, "CREATE INDEX basefiles_jobid_idx ON BaseFiles ( JobId )" , NULL, NULL);
}
db_close_database(NULL, db);
{"cancelqueuedduplicates", store_bool, ITEM(res_job.CancelQueuedDuplicates), 0, ITEM_DEFAULT, false},
{"cancelrunningduplicates", store_bool, ITEM(res_job.CancelRunningDuplicates), 0, ITEM_DEFAULT, false},
{"pluginoptions", store_str, ITEM(res_job.PluginOptions), 0, 0, 0},
+ {"base", store_alist_res, ITEM(res_job.base), R_JOB, 0, 0},
{NULL, NULL, {0}, 0, 0, 0}
};
dump_resource(-R_STORAGE, (RES *)store, sendit, sock);
}
}
+ if (res->res_job.base) {
+ JOB *job;
+ foreach_alist(job, res->res_job.base) {
+ sendit(sock, _(" --> Base %s\n"), job->name());
+ }
+ }
if (res->res_job.RunScripts) {
RUNSCRIPT *script;
foreach_alist(script, res->res_job.RunScripts) {
if (res->res_job.storage) {
delete res->res_job.storage;
}
+ if (res->res_job.base) {
+ delete res->res_job.base;
+ }
if (res->res_job.RunScripts) {
free_runscripts(res->res_job.RunScripts);
delete res->res_job.RunScripts;
res->res_job.client = res_all.res_job.client;
res->res_job.fileset = res_all.res_job.fileset;
res->res_job.storage = res_all.res_job.storage;
+ res->res_job.base = res_all.res_job.base;
res->res_job.pool = res_all.res_job.pool;
res->res_job.full_pool = res_all.res_job.full_pool;
res->res_job.inc_pool = res_all.res_job.inc_pool;
bool AllowHigherDuplicates; /* Permit Higher Level */
bool CancelQueuedDuplicates; /* Cancel queued jobs */
bool CancelRunningDuplicates; /* Cancel Running jobs */
-
+ alist *base; /* Base jobs */
/* Methods */
char *name() const;
static RES_ITEM options_items[] = {
{"compression", store_opts, {0}, 0, 0, 0},
{"signature", store_opts, {0}, 0, 0, 0},
+ {"basejob", store_opts, {0}, 0, 0, 0},
{"accurate", store_opts, {0}, 0, 0, 0},
{"verify", store_opts, {0}, 0, 0, 0},
{"onefs", store_opts, {0}, 0, 0, 0},
INC_KW_DIGEST,
INC_KW_ENCRYPTION,
INC_KW_VERIFY,
+ INC_KW_BASEJOB,
INC_KW_ACCURATE,
INC_KW_ONEFS,
INC_KW_RECURSE,
{"signature", INC_KW_DIGEST},
{"encryption", INC_KW_ENCRYPTION},
{"verify", INC_KW_VERIFY},
+ {"basejob", INC_KW_BASEJOB},
{"accurate", INC_KW_ACCURATE},
{"onefs", INC_KW_ONEFS},
{"recurse", INC_KW_RECURSE},
bstrncat(opts, lc->str, optlen);
bstrncat(opts, ":", optlen); /* terminate it */
Dmsg3(900, "Catopts=%s option=%s optlen=%d\n", opts, option,optlen);
+ } else if (keyword == INC_KW_BASEJOB) { /* special case */
+ /* ***FIXME**** ensure these are in permitted set */
+ bstrncat(opts, "J", optlen); /* indicate BaseJob */
+ bstrncat(opts, lc->str, optlen);
+ bstrncat(opts, ":", optlen); /* terminate it */
+ Dmsg3(900, "Catopts=%s option=%s optlen=%d\n", opts, option,optlen);
} else if (keyword == INC_KW_STRIPPATH) { /* another special case */
if (!is_an_integer(lc->str)) {
scan_err1(lc, _("Expected a strip path positive integer, got:%s:"), lc->str);
jcr->jr.VolSessionId = jcr->VolSessionId;
jcr->jr.VolSessionTime = jcr->VolSessionTime;
jcr->jr.JobErrors = jcr->JobErrors;
+ jcr->jr.HasBase = jcr->HasBase;
if (!db_update_job_end_record(jcr, jcr->db, &jcr->jr)) {
Jmsg(jcr, M_WARNING, 0, _("Error updating job record. %s"),
db_strerror(jcr->db));
char RestoreClientName[MAX_NAME_LENGTH]; /* restore client */
char last_jobid[20];
POOLMEM *JobIds; /* User entered string of JobIds */
+ POOLMEM *BaseJobIds; /* Base jobids */
STORE *store;
JOB *restore_job;
POOL *pool;
jr.JobId = 0;
db_list_job_records(ua->jcr, ua->db, &jr, prtit, ua, llist);
+ /* List Base files */
+ } else if (strcasecmp(ua->argk[i], NT_("basefiles")) == 0) {
+ /* TODO: cleanup this block */
+ for (j=i+1; j<ua->argc; j++) {
+ if (strcasecmp(ua->argk[j], NT_("ujobid")) == 0 && ua->argv[j]) {
+ bstrncpy(jr.Job, ua->argv[j], MAX_NAME_LENGTH);
+ jr.JobId = 0;
+ db_get_job_record(ua->jcr, ua->db, &jr);
+ jobid = jr.JobId;
+ } else if (strcasecmp(ua->argk[j], NT_("jobid")) == 0 && ua->argv[j]) {
+ jobid = str_to_int64(ua->argv[j]);
+ } else {
+ continue;
+ }
+ if (jobid > 0) {
+ db_list_base_files_for_job(ua->jcr, ua->db, jobid, prtit, ua);
+ }
+ }
+
/* List FILES */
} else if (strcasecmp(ua->argk[i], NT_("files")) == 0) {
db_sql_query(ua->db, query.c_str(), NULL, (void *)NULL);
Dmsg1(050, "Delete File sql=%s\n", query.c_str());
+ Mmsg(query, "DELETE FROM BaseFiles WHERE JobId IN (%s)", jobs);
+ db_sql_query(ua->db, query.c_str(), NULL, (void *)NULL);
+ Dmsg1(050, "Delete BaseFiles sql=%s\n", query.c_str());
+
/*
* Now mark Job as having files purged. This is necessary to
* avoid having too many Jobs to process in future prunings. If
static int get_date(UAContext *ua, char *date, int date_len);
static int restore_count_handler(void *ctx, int num_fields, char **row);
static bool insert_table_into_findex_list(UAContext *ua, RESTORE_CTX *rx, char *table);
+static void get_and_display_basejobs(UAContext *ua, RESTORE_CTX *rx);
/*
* Restore files
rx.path = get_pool_memory(PM_FNAME);
rx.fname = get_pool_memory(PM_FNAME);
rx.JobIds = get_pool_memory(PM_FNAME);
+ rx.BaseJobIds = get_pool_memory(PM_FNAME);
rx.query = get_pool_memory(PM_FNAME);
rx.bsr = new_bsr();
case 0: /* error */
goto bail_out;
case 1: /* selected by jobid */
+ get_and_display_basejobs(ua, &rx);
if (!build_directory_tree(ua, &rx)) {
ua->send_msg(_("Restore not done.\n"));
goto bail_out;
}
+/*
+ * Look up the Base jobs referenced by the JobIds already selected in rx,
+ * store them in rx->BaseJobIds and show the resulting list to the user.
+ */
+static void get_and_display_basejobs(UAContext *ua, RESTORE_CTX *rx)
+{
+   db_list_ctx base_jobids;
+
+   if (!db_get_used_base_jobids(ua->jcr, ua->db, rx->JobIds, &base_jobids)) {
+      ua->warning_msg("%s", db_strerror(ua->db));
+   }
+
+   if (base_jobids.count > 0) {
+      POOL_MEM query;
+      Mmsg(query, uar_print_jobs, base_jobids.list);
+      ua->send_msg(_("The restore will use the following job(s) as Base\n"));
+      db_list_sql_query(ua->jcr, ua->db, query.c_str(), prtit, ua, 1, HORZ_LIST);
+   }
+   pm_strcpy(rx->BaseJobIds, base_jobids.list);
+}
+
static void free_rx(RESTORE_CTX *rx)
{
   free_bsr(rx->bsr);
   rx->bsr = NULL;
-   if (rx->JobIds) {
-      free_pool_memory(rx->JobIds);
-      rx->JobIds = NULL;
-   }
-   if (rx->fname) {
-      free_pool_memory(rx->fname);
-      rx->fname = NULL;
-   }
-   if (rx->path) {
-      free_pool_memory(rx->path);
-      rx->path = NULL;
-   }
-   if (rx->query) {
-      free_pool_memory(rx->query);
-      rx->query = NULL;
-   }
+   /* Replaces the open-coded "free + NULL" sequences above; also
+    * releases the new BaseJobIds buffer allocated in new_rx setup. */
+   free_and_null_pool_memory(rx->JobIds);
+   free_and_null_pool_memory(rx->BaseJobIds);
+   free_and_null_pool_memory(rx->fname);
+   free_and_null_pool_memory(rx->path);
+   free_and_null_pool_memory(rx->query);
   free_name_list(&rx->name_list);
}
NULL
};
- *rx->JobIds = 0;
+ rx->JobIds[0] = 0;
for (i=1; i<ua->argc; i++) { /* loop through arguments */
bool found_kw = false;
ua->warning_msg(_("No Jobs selected.\n"));
return 0;
}
+
if (strchr(rx->JobIds,',')) {
ua->info_msg(_("You have selected the following JobIds: %s\n"), rx->JobIds);
} else {
if (!db_get_file_list(ua->jcr, ua->db, rx->JobIds, insert_tree_handler, (void *)&tree)) {
ua->error_msg("%s", db_strerror(ua->db));
}
+ if (*rx->BaseJobIds) {
+ pm_strcat(rx->JobIds, ",");
+ pm_strcat(rx->JobIds, rx->BaseJobIds);
+ }
#else
for (p=rx->JobIds; get_next_jobid_from_list(&p, &JobId) > 0; ) {
char ed1[50];
for (TREE_NODE *node=first_tree_node(tree.root); node; node=next_tree_node(node)) {
Dmsg2(400, "FI=%d node=0x%x\n", node->FileIndex, node);
if (node->extract || node->extract_dir) {
- Dmsg2(400, "type=%d FI=%d\n", node->type, node->FileIndex);
+ Dmsg3(400, "JobId=%lld type=%d FI=%d\n", (uint64_t)node->JobId, node->type, node->FileIndex);
add_findex(rx->bsr, node->JobId, node->FileIndex);
if (node->extract && node->type != TN_NEWDIR) {
rx->selected_files++; /* count only saved files */
}
/* Get the JobIds from that list */
- rx->JobIds[0] = 0;
- rx->last_jobid[0] = 0;
+ rx->last_jobid[0] = rx->JobIds[0] = 0;
+
if (!db_sql_query(ua->db, uar_sel_jobid_temp, jobid_handler, (void *)rx)) {
ua->warning_msg("%s\n", db_strerror(ua->db));
}
prtit, ua, HORZ_LIST);
}
/* Display a list of Jobs selected for this restore */
- db_list_sql_query(ua->jcr, ua->db, uar_list_temp, prtit, ua, 1, HORZ_LIST);
+ db_list_sql_query(ua->jcr, ua->db, uar_list_temp, prtit, ua, 1,HORZ_LIST);
ok = true;
+
} else {
ua->warning_msg(_("No jobs found.\n"));
}
for (int i=0; i < name_list->num_ids; i++) {
free(name_list->name[i]);
}
- if (name_list->name) {
- free(name_list->name);
- name_list->name = NULL;
- }
+ bfree_and_null(name_list->name);
name_list->max_ids = 0;
name_list->num_ids = 0;
}
node = insert_tree_node(row[0], row[1], type, tree->root, NULL);
JobId = str_to_int64(row[3]);
FileIndex = str_to_int64(row[2]);
+ Dmsg2(400, "JobId=%s FileIndex=%s\n", row[3], row[2]);
/*
* - The first time we see a file (node->inserted==true), we accept it.
* - In the same JobId, we accept only the first copy of a
#include "bacula.h"
#include "filed.h"
-static int dbglvl=200;
+static int dbglvl=100;
typedef struct PrivateCurFile {
hlink link;
char *fname;
- utime_t ctime;
- utime_t mtime;
+ char *lstat;
+ char *chksum;
bool seen;
} CurFile;
return true;
}
+/*
+ * At the end of a Full backup that uses Base jobs, send one FT_BASE
+ * attribute record to the SD for every file that matched the base
+ * exactly (elt->seen set by accurate_check_file), so the catalog can
+ * reference the base copy instead of storing the data again.
+ * Always returns true; errors from encode_and_send_attributes() are
+ * not propagated here -- TODO confirm that is intended.
+ */
+static bool accurate_send_base_file_list(JCR *jcr)
+{
+   CurFile *elt;
+   struct stat statc;
+   int32_t LinkFIc;
+   FF_PKT *ff_pkt;
+   int stream = STREAM_UNIX_ATTRIBUTES;
+
+   /* Only meaningful for an accurate Full job with a previous file list */
+   if (!jcr->accurate || jcr->get_JobLevel() != L_FULL || jcr->file_list == NULL) {
+      return true;
+   }
+
+   ff_pkt = init_find_files();
+   ff_pkt->type = FT_BASE;
+
+   foreach_htable(elt, jcr->file_list) {
+      if (elt->seen) {
+         Dmsg2(dbglvl, "base file fname=%s seen=%i\n", elt->fname, elt->seen);
+         /* TODO: skip the decode and use directly the lstat field */
+         decode_stat(elt->lstat, &statc, &LinkFIc); /* decode catalog stat */
+         ff_pkt->fname = elt->fname;
+         ff_pkt->statp = statc;
+         encode_and_send_attributes(jcr, ff_pkt, stream);
+      }
+   }
+
+   term_find_files(ff_pkt);
+   return true;
+}
+
+
/* This function is called at the end of backup
* We walk over all hash disk element, and we check
* for elt.seen.
*/
-bool accurate_send_deleted_list(JCR *jcr)
+static bool accurate_send_deleted_list(JCR *jcr)
{
CurFile *elt;
+ struct stat statc;
+ int32_t LinkFIc;
FF_PKT *ff_pkt;
int stream = STREAM_UNIX_ATTRIBUTES;
- if (!jcr->accurate || jcr->get_JobLevel() == L_FULL) {
- goto bail_out;
+ if (!jcr->accurate) {
+ return true;
}
if (jcr->file_list == NULL) {
- goto bail_out;
+ return true;
}
ff_pkt = init_find_files();
continue;
}
Dmsg2(dbglvl, "deleted fname=%s seen=%i\n", elt->fname, elt->seen);
+ /* TODO: skip the decode and use directly the lstat field */
+ decode_stat(elt->lstat, &statc, &LinkFIc); /* decode catalog stat */
ff_pkt->fname = elt->fname;
- ff_pkt->statp.st_mtime = elt->mtime;
- ff_pkt->statp.st_ctime = elt->ctime;
+ ff_pkt->statp.st_mtime = statc.st_mtime;
+ ff_pkt->statp.st_ctime = statc.st_ctime;
encode_and_send_attributes(jcr, ff_pkt, stream);
// free(elt->fname);
}
term_find_files(ff_pkt);
-bail_out:
- /* TODO: clean htable when this function is not reached ? */
- accurate_free(jcr);
return true;
}
}
}
-static bool accurate_add_file(JCR *jcr, char *fname, char *lstat)
+/*
+ * Called at the end of the backup: send either the base file list
+ * (accurate Full job) or the deleted file list (Incr/Diff) to the SD,
+ * then release the accurate hash table.
+ * Returns the status of the send operation.
+ */
+bool accurate_finish(JCR *jcr)
+{
+   bool ret = true;
+
+   if (jcr->accurate) {
+      bool is_full = (jcr->get_JobLevel() == L_FULL);
+
+      if (is_full) {
+         ret = accurate_send_base_file_list(jcr);
+      } else {
+         ret = accurate_send_deleted_list(jcr);
+      }
+
+      accurate_free(jcr);
+      if (is_full) {
+         /* Cast: uint64_t is not necessarily long long, and %lld
+          * requires an exact long long argument (UB otherwise). */
+         Dmsg1(0, "Space saved with Base jobs: %lld MB\n",
+               (long long)(jcr->base_size/(1024*1024)));
+      }
+   }
+   return ret;
+}
+
+static bool accurate_add_file(JCR *jcr, uint32_t len,
+                              char *fname, char *lstat, char *chksum)
{
   bool ret = true;
   CurFile elt;
-   struct stat statp;
-   int32_t LinkFIc;
-   decode_stat(lstat, &statp, &LinkFIc); /* decode catalog stat */
-   elt.ctime = statp.st_ctime;
-   elt.mtime = statp.st_mtime;
   elt.seen = 0;
   CurFile *item;
   /* we store CurFile, fname and ctime/mtime in the same chunk */
-   item = (CurFile *)jcr->file_list->hash_malloc(sizeof(CurFile)+strlen(fname)+1);
+   /* New chunk layout: [CurFile][fname\0][lstat\0][chksum\0].
+    * len is the whole director message length; +3 leaves room for the
+    * three NUL terminators -- assumes len covers fname+lstat+chksum,
+    * TODO(review): confirm against the msglen passed by accurate_cmd(). */
+   item = (CurFile *)jcr->file_list->hash_malloc(sizeof(CurFile)+len+3);
   memcpy(item, &elt, sizeof(CurFile));
+
   item->fname = (char *)item+sizeof(CurFile);
   strcpy(item->fname, fname);
+
+   /* lstat string stored right after the fname terminator */
+   item->lstat = item->fname+strlen(item->fname)+1;
+   strcpy(item->lstat, lstat);
+
+   /* chksum may be empty ("") when the director sent no checksum */
+   item->chksum = item->lstat+strlen(item->lstat)+1;
+   strcpy(item->chksum, chksum);
+
   jcr->file_list->insert(item->fname, item);
-   Dmsg2(dbglvl, "add fname=<%s> lstat=%s\n", fname, lstat);
+   Dmsg3(dbglvl, "add fname=<%s> lstat=%s chksum=%s\n", fname, lstat, chksum);
   return ret;
}
*/
bool accurate_check_file(JCR *jcr, FF_PKT *ff_pkt)
{
+ int digest_stream = STREAM_NONE;
+ DIGEST *digest = NULL;
+
+ struct stat statc;
+ int32_t LinkFIc;
bool stat = false;
+ char *opts;
char *fname;
CurFile elt;
- if (!jcr->accurate || jcr->get_JobLevel() == L_FULL) {
+ if (!jcr->accurate) {
return true;
}
goto bail_out;
}
+ decode_stat(elt.lstat, &statc, &LinkFIc); /* decode catalog stat */
+
+ if (jcr->get_JobLevel() == L_FULL) {
+ opts = ff_pkt->BaseJobOpts;
+ } else {
+ opts = ff_pkt->AccurateOpts;
+ }
+
/*
- * We check only mtime/ctime like with the normal
- * incremental/differential mode
+ * Loop over options supplied by user and verify the
+ * fields he requests.
*/
- if (elt.mtime != ff_pkt->statp.st_mtime) {
-// Jmsg(jcr, M_SAVED, 0, _("%s st_mtime differs\n"), fname);
- Dmsg3(dbglvl, "%s st_mtime differs (%lld!=%lld)\n",
- fname, elt.mtime, (utime_t)ff_pkt->statp.st_mtime);
- stat = true;
- } else if (!(ff_pkt->flags & FO_MTIMEONLY)
- && (elt.ctime != ff_pkt->statp.st_ctime)) {
-// Jmsg(jcr, M_SAVED, 0, _("%s st_ctime differs\n"), fname);
- Dmsg3(dbglvl, "%s st_ctime differs\n",
- fname, elt.ctime, ff_pkt->statp.st_ctime);
- stat = true;
+ for (char *p=opts; !stat && *p; p++) {
+ char ed1[30], ed2[30];
+ switch (*p) {
+ case 'i': /* compare INODEs */
+ if (statc.st_ino != ff_pkt->statp.st_ino) {
+ Dmsg3(dbglvl-1, "%s st_ino differ. Cat: %s File: %s\n",
+ fname,
+ edit_uint64((uint64_t)statc.st_ino, ed1),
+ edit_uint64((uint64_t)ff_pkt->statp.st_ino, ed2));
+ stat = true;
+ }
+ break;
+ case 'p': /* permissions bits */
+ /* TODO: If something change only in perm, user, group
+ * Backup only the attribute stream
+ */
+ if (statc.st_mode != ff_pkt->statp.st_mode) {
+ Dmsg3(dbglvl-1, "%s st_mode differ. Cat: %x File: %x\n",
+ fname,
+ (uint32_t)statc.st_mode, (uint32_t)ff_pkt->statp.st_mode);
+ stat = true;
+ }
+ break;
+ case 'n': /* number of links */
+ if (statc.st_nlink != ff_pkt->statp.st_nlink) {
+ Dmsg3(dbglvl-1, "%s st_nlink differ. Cat: %d File: %d\n",
+ fname,
+ (uint32_t)statc.st_nlink, (uint32_t)ff_pkt->statp.st_nlink);
+ stat = true;
+ }
+ break;
+ case 'u': /* user id */
+ if (statc.st_uid != ff_pkt->statp.st_uid) {
+ Dmsg3(dbglvl-1, "%s st_uid differ. Cat: %u File: %u\n",
+ fname,
+ (uint32_t)statc.st_uid, (uint32_t)ff_pkt->statp.st_uid);
+ stat = true;
+ }
+ break;
+ case 'g': /* group id */
+ if (statc.st_gid != ff_pkt->statp.st_gid) {
+ Dmsg3(dbglvl-1, "%s st_gid differ. Cat: %u File: %u\n",
+ fname,
+ (uint32_t)statc.st_gid, (uint32_t)ff_pkt->statp.st_gid);
+ stat = true;
+ }
+ break;
+ case 's': /* size */
+ if (statc.st_size != ff_pkt->statp.st_size) {
+ Dmsg3(dbglvl-1, "%s st_size differ. Cat: %s File: %s\n",
+ fname,
+ edit_uint64((uint64_t)statc.st_size, ed1),
+ edit_uint64((uint64_t)ff_pkt->statp.st_size, ed2));
+ stat = true;
+ }
+ break;
+ case 'a': /* access time */
+ if (statc.st_atime != ff_pkt->statp.st_atime) {
+ Dmsg1(dbglvl-1, "%s st_atime differs\n", fname);
+ stat = true;
+ }
+ break;
+ case 'm': /* modification time */
+ if (statc.st_mtime != ff_pkt->statp.st_mtime) {
+ Dmsg1(dbglvl-1, "%s st_mtime differs\n", fname);
+ stat = true;
+ }
+ break;
+ case 'c': /* ctime */
+ if (statc.st_ctime != ff_pkt->statp.st_ctime) {
+ Dmsg1(dbglvl-1, "%s st_ctime differs\n", fname);
+ stat = true;
+ }
+ break;
+ case 'd': /* file size decrease */
+ if (statc.st_size > ff_pkt->statp.st_size) {
+ Dmsg3(dbglvl-1, "%s st_size decrease. Cat: %s File: %s\n",
+ fname,
+ edit_uint64((uint64_t)statc.st_size, ed1),
+ edit_uint64((uint64_t)ff_pkt->statp.st_size, ed2));
+ stat = true;
+ }
+ break;
+
+ /* TODO: cleanup and factorise this function with verify.c */
+ case '5': /* compare MD5 */
+ case '1': /* compare SHA1 */
+ /*
+ * The remainder of the function is all about getting the checksum.
+ * First we initialise, then we read files, other streams and Finder Info.
+ */
+ if (!stat && ff_pkt->type != FT_LNKSAVED &&
+ (S_ISREG(ff_pkt->statp.st_mode) &&
+ ff_pkt->flags & (FO_MD5|FO_SHA1|FO_SHA256|FO_SHA512)))
+ {
+
+ if (!*elt.chksum) {
+ Jmsg(jcr, M_WARNING, 0, _("Can't verify checksum for %s\n"),
+ ff_pkt->fname);
+ stat = true;
+ break;
+ }
+
+ /*
+ * Create our digest context. If this fails, the digest will be set
+ * to NULL and not used.
+ */
+ if (ff_pkt->flags & FO_MD5) {
+ digest = crypto_digest_new(jcr, CRYPTO_DIGEST_MD5);
+ digest_stream = STREAM_MD5_DIGEST;
+
+ } else if (ff_pkt->flags & FO_SHA1) {
+ digest = crypto_digest_new(jcr, CRYPTO_DIGEST_SHA1);
+ digest_stream = STREAM_SHA1_DIGEST;
+
+ } else if (ff_pkt->flags & FO_SHA256) {
+ digest = crypto_digest_new(jcr, CRYPTO_DIGEST_SHA256);
+ digest_stream = STREAM_SHA256_DIGEST;
+
+ } else if (ff_pkt->flags & FO_SHA512) {
+ digest = crypto_digest_new(jcr, CRYPTO_DIGEST_SHA512);
+ digest_stream = STREAM_SHA512_DIGEST;
+ }
+
+ /* Did digest initialization fail? */
+ if (digest_stream != STREAM_NONE && digest == NULL) {
+ Jmsg(jcr, M_WARNING, 0, _("%s digest initialization failed\n"),
+ stream_to_ascii(digest_stream));
+ }
+
+ /* compute MD5 or SHA1 hash */
+ if (digest) {
+ char md[CRYPTO_DIGEST_MAX_SIZE];
+ uint32_t size;
+
+ size = sizeof(md);
+
+ if (digest_file(jcr, ff_pkt, digest) != 0) {
+ jcr->JobErrors++;
+
+ } else if (crypto_digest_finalize(digest, (uint8_t *)md, &size)) {
+ char *digest_buf;
+ const char *digest_name;
+
+ digest_buf = (char *)malloc(BASE64_SIZE(size));
+ digest_name = crypto_digest_name(digest);
+
+ bin_to_base64(digest_buf, BASE64_SIZE(size), md, size, true);
+
+ if (strcmp(digest_buf, elt.chksum)) {
+ Dmsg3(dbglvl-1, "%s chksum diff. Cat: %s File: %s\n",
+ fname,
+ elt.chksum,
+ digest_buf);
+ stat = true;
+ }
+
+ free(digest_buf);
+ }
+ crypto_digest_free(digest);
+ }
+ }
+
+ break;
+ case ':':
+ case 'J':
+ case 'C':
+ default:
+ break;
+ }
}
- accurate_mark_file_as_seen(jcr, &elt);
-// Dmsg2(dbglvl, "accurate %s = %d\n", fname, stat);
+ /* In Incr/Diff accurate mode, we mark all files as seen
+ * When in Full+Base mode, we mark only if the file match exactly
+ */
+ if (jcr->get_JobLevel() == L_FULL) {
+ if (!stat) {
+ /* compute space saved with basefile */
+ jcr->base_size += ff_pkt->statp.st_size;
+ accurate_mark_file_as_seen(jcr, &elt);
+ }
+ } else {
+ accurate_mark_file_as_seen(jcr, &elt);
+ }
bail_out:
unstrip_path(ff_pkt);
int accurate_cmd(JCR *jcr)
{
BSOCK *dir = jcr->dir_bsock;
- int len;
+ int lstat_pos, chksum_pos;
int32_t nb;
- if (!jcr->accurate || job_canceled(jcr) || jcr->get_JobLevel()==L_FULL) {
+ if (job_canceled(jcr)) {
return true;
}
if (sscanf(dir->msg, "accurate files=%ld", &nb) != 1) {
return false;
}
+ jcr->accurate = true;
+
accurate_init(jcr, nb);
/*
* buffer = sizeof(CurFile) + dirmsg
- * dirmsg = fname + \0 + lstat
+ * dirmsg = fname + \0 + lstat + \0 + checksum + \0
*/
/* get current files */
while (dir->recv() >= 0) {
- len = strlen(dir->msg) + 1;
- if (len < dir->msglen) {
- accurate_add_file(jcr, dir->msg, dir->msg + len);
+ lstat_pos = strlen(dir->msg) + 1;
+ if (lstat_pos < dir->msglen) {
+ chksum_pos = lstat_pos + strlen(dir->msg + lstat_pos) + 1;
+
+ if (chksum_pos >= dir->msglen) {
+ chksum_pos = lstat_pos - 1; /* tweak: no checksum, point to the last \0 */
+ }
+
+ accurate_add_file(jcr, dir->msglen,
+ dir->msg, /* Path */
+ dir->msg + lstat_pos, /* LStat */
+ dir->msg + chksum_pos); /* CheckSum */
}
}
jcr->xattr_data->nr_errors);
}
- accurate_send_deleted_list(jcr); /* send deleted list to SD */
+ accurate_finish(jcr); /* send deleted or base file list to SD */
stop_heartbeat_monitor(jcr);
}
/*
- * Set up signature digest handling. If this fails, the signature digest will be set to
- * NULL and not used.
+ * Set up signature digest handling. If this fails, the signature digest
+ * will be set to NULL and not used.
+ */
+ /* TODO landonf: We should really only calculate the digest once, for
+ * both verification and signing.
*/
- // TODO landonf: We should really only calculate the digest once, for both verification and signing.
if (jcr->crypto.pki_sign) {
signing_digest = crypto_digest_new(jcr, signing_algorithm);
}
fo->AccurateOpts[j] = 0;
break;
+ case 'J': /* Basejob options */
+ /* Copy BaseJob Options */
+ for (j=0; *p && *p != ':'; p++) {
+ fo->BaseJobOpts[j] = *p;
+ if (j < (int)sizeof(fo->BaseJobOpts) - 1) {
+ j++;
+ }
+ }
+ fo->BaseJobOpts[j] = 0;
+ break;
case 'P': /* strip path */
/* Get integer */
p++; /* skip P */
level = get_memory(dir->msglen+1);
Dmsg1(100, "level_cmd: %s", dir->msg);
+
+ /* keep compatibility with older directors */
if (strstr(dir->msg, "accurate")) {
jcr->accurate = true;
}
bacl_exit_code parse_acl_streams(JCR *jcr, int stream);
/* from accurate.c */
-bool accurate_send_deleted_list(JCR *jcr);
+bool accurate_finish(JCR *jcr);
bool accurate_check_file(JCR *jcr, FF_PKT *ff_pkt);
bool accurate_mark_file_as_seen(JCR *jcr, char *fname);
void accurate_free(JCR *jcr);
}
crypto_digest_update(digest, (uint8_t *)buf, n);
- jcr->JobBytes += n;
- jcr->ReadBytes += n;
+
+ /* Can be used by BaseJobs, update only for Verify jobs */
+ if (jcr->get_JobLevel() != L_FULL) {
+ jcr->JobBytes += n;
+ jcr->ReadBytes += n;
+ }
}
if (n < 0) {
berrno be;
ff->flags = 0;
ff->VerifyOpts[0] = 'V';
ff->VerifyOpts[1] = 0;
+ strcpy(ff->AccurateOpts, "C:mcs"); /* mtime+ctime+size by default */
+ strcpy(ff->BaseJobOpts, "J:mspug5"); /* mtime+size+perm+user+group+chk */
for (i=0; i<fileset->include_list.size(); i++) {
findINCEXE *incexe = (findINCEXE *)fileset->include_list.get(i);
fileset->incexe = incexe;
ff->fstypes = fo->fstype;
ff->drivetypes = fo->drivetype;
bstrncat(ff->VerifyOpts, fo->VerifyOpts, sizeof(ff->VerifyOpts));
+ bstrncat(ff->AccurateOpts, fo->AccurateOpts, sizeof(ff->AccurateOpts));
+ bstrncat(ff->BaseJobOpts, fo->BaseJobOpts, sizeof(ff->BaseJobOpts));
}
dlistString *node;
foreach_dlist(node, &incexe->name_list) {
int strip_path; /* strip path count */
char VerifyOpts[MAX_FOPTS]; /* verify options */
char AccurateOpts[MAX_FOPTS]; /* accurate mode options */
+ char BaseJobOpts[MAX_FOPTS]; /* basejob mode options */
alist regex; /* regex string(s) */
alist regexdir; /* regex string(s) for directories */
alist regexfile; /* regex string(s) for files */
bool null_output_device; /* using null output device */
bool incremental; /* incremental save */
char VerifyOpts[20];
+ char AccurateOpts[20];
+ char BaseJobOpts[20];
struct s_included_file *included_files_list;
struct s_excluded_file *excluded_files_list;
struct s_excluded_file *excluded_paths_list;
* since our last "save_time", presumably the last Full save
* or Incremental.
*/
- if ( ff_pkt->incremental
- && !S_ISDIR(ff_pkt->statp.st_mode)
+ if ( !S_ISDIR(ff_pkt->statp.st_mode)
&& !check_changes(jcr, ff_pkt))
{
Dmsg1(500, "Non-directory incremental: %s\n", ff_pkt->fname);
link[len] = 0;
ff_pkt->link = link;
- if (ff_pkt->incremental && !check_changes(jcr, ff_pkt)) {
- /* Incremental option, directory entry not changed */
+ if (!check_changes(jcr, ff_pkt)) {
+ /* Incremental/Full+Base option, directory entry not changed */
ff_pkt->type = FT_DIRNOCHG;
} else {
ff_pkt->type = FT_DIRBEGIN;
B_DB *db; /* database pointer */
B_DB *db_batch; /* database pointer for batch and accurate */
bool batch_started; /* is batch mode already started ? */
+ bool HasBase; /* True if job use base jobs */
+ uint64_t nb_base_files; /* Number of base files */
+ uint64_t nb_base_files_used; /* Number of useful files in base */
+
ATTR_DBR *ar; /* DB attribute record */
guid_list *id_list; /* User/group id to name list */
bool accurate; /* true if job is accurate */
bool stats_enabled; /* Keep all job records in a table for long term statistics */
bool no_maxtime; /* Don't check Max*Time for this JCR */
bool keep_sd_auth_key; /* Clear or not the SD auth key after connection*/
+ bool use_accurate_chksum; /* Use or not checksum option in accurate code */
#endif /* DIRECTOR_DAEMON */
bool VSS; /* VSS used by FD */
bool multi_restore; /* Dir can do multiple storage restore */
htable *file_list; /* Previous file list (accurate mode) */
+ uint64_t base_size; /* compute space saved with base job */
#endif /* FILE_DAEMON */
Name = FS_TESTJOB
Include {
Options {
+ Signature = MD5
Verify = mc
+ Accurate = mcs5
}
File=<@tmpdir@/file-list
}
Name = FS_TESTJOB2
Include {
Options {
+ Signature = MD5
Verify = mc
strippath=1
}
Name = FS_TESTJOB_ADVANCE
Include {
Options {
+ Signature = MD5
Verify = mcpgu
}
File=<@tmpdir@/file-list
--- /dev/null
+#!/bin/sh
+#
+# Run a basejob backup of the Bacula build directory
+# then restore it.
+#
+
+TestName="base-job-test"
+JobName=backup
+. scripts/functions
+$rscripts/cleanup
+
+copy_test_confs
+# Rename the advance job to base_backup and make "backup" use it as Base
+echo 's/backup_advance/base_backup/' > $tmp/s
+echo 's/Name = backup/Name = backup; Base = base_backup, backup/' >> $tmp/s
+sed -f $tmp/s $rscripts/bacula-dir.conf.accurate > $conf/bacula-dir.conf
+rm -f $tmp/s
+
+# Enable "saved" messages in the FD config.  Use $tmp like the lines
+# above, not the CWD-relative "tmp/" which breaks when the test is run
+# from another directory.
+sed 's/all,/all,saved,/' $conf/bacula-fd.conf > $tmp/1
+cp $tmp/1 $conf/bacula-fd.conf
+
+change_jobname BackupClient1 $JobName
+
+p() {
+ echo "##############################################" >> ${cwd}/tmp/log1.out
+ echo "$*" >> ${cwd}/tmp/log1.out
+ echo "##############################################" >> ${cwd}/tmp/log2.out
+ echo "$*" >> ${cwd}/tmp/log2.out
+}
+
+# cleanup
+rm -rf ${cwd}/build/accurate.new
+rm -rf ${cwd}/build/accurate
+
+
+# add extra files
+mkdir ${cwd}/build/accurate
+mkdir ${cwd}/build/accurate/dirtest
+echo "test test" > ${cwd}/build/accurate/dirtest/hello
+echo "test test" > ${cwd}/build/accurate/xxx
+echo "test test" > ${cwd}/build/accurate/yyy
+echo "test test" > ${cwd}/build/accurate/zzz
+echo "test test" > ${cwd}/build/accurate/zzzzzz
+echo "test test" > ${cwd}/build/accurate/xxxxxx
+echo "test test" > ${cwd}/build/accurate/yyyyyy
+echo "test test" > ${cwd}/build/accurate/xxxxxxxxx
+echo "test test" > ${cwd}/build/accurate/yyyyyyyyy
+echo "test test" > ${cwd}/build/accurate/zzzzzzzzz
+echo ${cwd}/build > ${cwd}/tmp/file-list
+
+start_test
+
+cat <<END_OF_DATA >${cwd}/tmp/bconcmds
+@output /dev/null
+messages
+label volume=TestVolume001 storage=File pool=Default
+messages
+END_OF_DATA
+
+run_bacula
+
+################################################################
+p Now do a backup using base backup
+################################################################
+
+# The Base job backs up build + bin so the later Full of build is a subset
+echo ${cwd}/bin >> ${cwd}/tmp/file-list
+
+cat <<END_OF_DATA >${cwd}/tmp/bconcmds
+@$out ${cwd}/tmp/log1.out
+run job=base_backup level=base yes
+wait
+messages
+update volume=TestVolume001 volstatus=Used
+END_OF_DATA
+
+run_bconsole
+
+echo ${cwd}/build > ${cwd}/tmp/file-list
+
+cat <<END_OF_DATA >${cwd}/tmp/bconcmds
+@$out ${cwd}/tmp/log4.out
+label volume=TestVolume002 storage=File pool=Default
+run job=backup level=full yes
+wait
+messages
+@#
+@# now do a restore
+@#
+@$out ${cwd}/tmp/log2.out
+restore fileset=FS_TESTJOB where=${cwd}/tmp/bacula-restores select all done
+yes
+wait
+messages
+END_OF_DATA
+
+
+run_bconsole
+check_for_zombie_jobs storage=File
+
+check_two_logs
+check_restore_diff
+
+rm -rf ${cwd}/tmp/bacula-restores
+
+# Every file matched the base, so the Full job should have written 0 bytes
+grep -e 'FD Bytes Written: *0' ${cwd}/tmp/log4.out > /dev/null
+if [ $? -ne 0 ]; then
+    print_debug "The first full job should have 0 byte in log4.out"
+    bstat=2
+fi
+
+################################################################
+p Now do a backup after making few changes
+################################################################
+cat <<END_OF_DATA >${cwd}/tmp/bconcmds
+@$out ${cwd}/tmp/log1.out
+update volume=TestVolume002 volstatus=Used
+label volume=TestVolume003 storage=File pool=Default
+run job=backup level=incremental yes
+wait
+messages
+@#
+@# now do a restore
+@#
+@$out ${cwd}/tmp/log2.out
+restore fileset=FS_TESTJOB where=${cwd}/tmp/bacula-restores select all done
+yes
+wait
+messages
+END_OF_DATA
+
+rm ${cwd}/build/accurate/yyyyyy # delete a file
+rm -rf ${cwd}/build/accurate/dirtest
+
+
+run_bconsole
+check_for_zombie_jobs storage=File
+
+check_two_logs
+check_restore_diff
+check_files_written ${cwd}/tmp/log1.out 4
+
+rm -rf ${cwd}/tmp/bacula-restores
+
+################################################################
+p Test the job purge
+################################################################
+cat <<END_OF_DATA >${cwd}/tmp/bconcmds
+@$out ${cwd}/tmp/log3.out
+sql
+SELECT count(*) FROM BaseFiles;
+
+purge volume=TestVolume002
+messages
+sql
+SELECT count(*) FROM BaseFiles;
+
+END_OF_DATA
+
+run_bconsole
+
+# Purging the volume that holds the Full must empty the BaseFiles table
+grep -e ' 0 *|' ${cwd}/tmp/log3.out > /dev/null
+if [ $? -ne 0 ]; then
+    print_debug "Can't purge the base job"
+    dstat=2
+fi
+
+
+################################################################
+p Test list commands
+################################################################
+
+# Modify a file so it cannot be taken from the base in the next Full
+touch ${cwd}/build/po/fr.po
+
+# NOTE(review): jobid=6 assumes exactly 5 jobs ran before this point --
+# confirm if the test sequence above changes
+cat <<END_OF_DATA >${cwd}/tmp/bconcmds
+run level=full job=backup yes
+wait
+messages
+@$out ${cwd}/tmp/log5.out
+list basefiles jobid=6
+@$out ${cwd}/tmp/log6.out
+list files jobid=6
+messages
+END_OF_DATA
+
+run_bconsole
+
+# fr.po changed after the base job, so it must not appear as a base file
+grep po/fr.po ${cwd}/tmp/log5.out > /dev/null
+if [ $? -eq 0 ]; then
+    print_debug "Should not display fr.po as basefile"
+    bstat=2
+fi
+
+export bstat dstat
+
+stop_bacula
+end_test