X-Git-Url: https://git.sur5r.net/?a=blobdiff_plain;f=bacula%2Fsrc%2Fcats%2Fsql_create.c;h=5290b99e55dc26179c07d72fccce4fe0abb5e6de;hb=4e1e69c429661e10090b37c3c980f1cc6548ee86;hp=c51261a0acc2221e3d3747cec29804e5298d120d;hpb=22d4c45f6ecf91fcf6121cd20cfb725c4585739b;p=bacula%2Fbacula

diff --git a/bacula/src/cats/sql_create.c b/bacula/src/cats/sql_create.c
index c51261a0ac..5290b99e55 100644
--- a/bacula/src/cats/sql_create.c
+++ b/bacula/src/cats/sql_create.c
@@ -30,7 +30,7 @@
  *
  *   Kern Sibbald, March 2000
  *
- *   Version $Id$
+ *   Version $Id: sql_create.c 8407 2009-01-28 10:47:21Z ricozz $
  */
 
 /* The following is necessary so that we do not include
@@ -41,9 +41,9 @@
 #include "bacula.h"
 #include "cats.h"
 
-static const int dbglevel = 500;
+static const int dbglevel = 100;
 
-#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_DBI
+#if HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI
 
 /* -----------------------------------------------------------------------
  *
@@ -66,10 +66,12 @@ static int db_create_filename_record(JCR *jcr, B_DB *mdb, ATTR_DBR *ar);
 bool
 db_create_job_record(JCR *jcr, B_DB *mdb, JOB_DBR *jr)
 {
+   POOL_MEM buf;
    char dt[MAX_TIME_LENGTH];
    time_t stime;
    struct tm tm;
    bool ok;
+   int len;
    utime_t JobTDate;
    char ed1[30],ed2[30];
 
@@ -82,13 +84,18 @@ db_create_job_record(JCR *jcr, B_DB *mdb, JOB_DBR *jr)
    strftime(dt, sizeof(dt), "%Y-%m-%d %H:%M:%S", &tm);
    JobTDate = (utime_t)stime;
 
+   len = strlen(jcr->comment);
+   buf.check_size(len*2+1);
+   db_escape_string(jcr, mdb, buf.c_str(), jcr->comment, len);
+
    /* Must create it */
    Mmsg(mdb->cmd,
-"INSERT INTO Job (Job,Name,Type,Level,JobStatus,SchedTime,JobTDate,ClientId) "
-"VALUES ('%s','%s','%c','%c','%c','%s',%s,%s)",
+"INSERT INTO Job (Job,Name,Type,Level,JobStatus,SchedTime,JobTDate,"
+                 "ClientId,Comment) "
+"VALUES ('%s','%s','%c','%c','%c','%s',%s,%s,'%s')",
            jr->Job, jr->Name, (char)(jr->JobType), (char)(jr->JobLevel),
            (char)(jr->JobStatus), dt, edit_uint64(JobTDate, ed1),
-           edit_int64(jr->ClientId, ed2));
+           edit_int64(jr->ClientId, ed2), buf.c_str());
 
    if (!INSERT_DB(jcr, mdb, mdb->cmd)) {
       Mmsg2(&mdb->errmsg, _("Create DB Job record %s failed. ERR=%s\n"),
@@ -132,13 +139,12 @@
     */
    Mmsg(mdb->cmd,
        "INSERT INTO JobMedia (JobId,MediaId,FirstIndex,LastIndex,"
-       "StartFile,EndFile,StartBlock,EndBlock,VolIndex,Copy) "
-       "VALUES (%s,%s,%u,%u,%u,%u,%u,%u,%u,%u)",
+       "StartFile,EndFile,StartBlock,EndBlock,VolIndex) "
+       "VALUES (%s,%s,%u,%u,%u,%u,%u,%u,%u)",
        edit_int64(jm->JobId, ed1),
        edit_int64(jm->MediaId, ed2),
        jm->FirstIndex, jm->LastIndex,
-       jm->StartFile, jm->EndFile, jm->StartBlock, jm->EndBlock,count,
-       jm->Copy);
+       jm->StartFile, jm->EndFile, jm->StartBlock, jm->EndBlock,count);
 
    Dmsg0(300, mdb->cmd);
    if (!INSERT_DB(jcr, mdb, mdb->cmd)) {
@@ -219,6 +225,7 @@ db_create_pool_record(JCR *jcr, B_DB *mdb, POOL_DBR *pr)
       stat = true;
    }
    db_unlock(mdb);
+   Dmsg0(500, "Create Pool: done\n");
    return stat;
 }
 
@@ -286,6 +293,7 @@ bool db_create_storage_record(JCR *jcr, B_DB *mdb, STORAGE_DBR *sr)
    sr->StorageId = 0;
    sr->created = false;
 
+   /* Check if it already exists */
    if (QUERY_DB(jcr, mdb, mdb->cmd)) {
       mdb->num_rows = sql_num_rows(mdb);
       /* If more than one, report error, but return first row */
@@ -909,39 +917,33 @@ bool db_write_batch_file_records(JCR *jcr)
  * is a single FileName record and a single Path record, no matter
  * how many times it occurs.  This is this subroutine, we separate
  * the file and the path and fill temporary tables with this three records.
+ *
+ * Note: all routines that call this expect to be able to call
+ * db_strerror(mdb) to get the error message, so the error message
+ * MUST be edited into mdb->errmsg before returning an error status.
  */
 bool db_create_file_attributes_record(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
 {
+   ASSERT(ar->FileType != FT_BASE);
+
    Dmsg1(dbglevel, "Fname=%s\n", ar->fname);
    Dmsg0(dbglevel, "put_file_into_catalog\n");
 
    /* Open the dedicated connexion */
    if (!jcr->batch_started) {
-
       if (!db_open_batch_connexion(jcr, mdb)) {
-         return false;
+         return false; /* error already printed */
       }
       if (!sql_batch_start(jcr, jcr->db_batch)) {
          Mmsg1(&mdb->errmsg, "Can't start batch mode: ERR=%s", db_strerror(jcr->db_batch));
-         Jmsg1(jcr, M_FATAL, 0, "%s", mdb->errmsg);
+         Jmsg(jcr, M_FATAL, 0, "%s", mdb->errmsg);
          return false;
       }
       jcr->batch_started = true;
    }
    B_DB *bdb = jcr->db_batch;
 
-   /*
-    * Make sure we have an acceptable attributes record.
-    */
-   if (!(ar->Stream == STREAM_UNIX_ATTRIBUTES ||
-         ar->Stream == STREAM_UNIX_ATTRIBUTES_EX)) {
-      Mmsg1(&mdb->errmsg, _("Attempt to put non-attributes into catalog. Stream=%d\n"),
-         ar->Stream);
-      Jmsg(jcr, M_FATAL, 0, "%s", mdb->errmsg);
-      return false;
-   }
-
    split_path_and_file(jcr, bdb, ar->fname);
@@ -972,17 +974,6 @@ bool db_create_file_attributes_record(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
    db_lock(mdb);
    Dmsg1(dbglevel, "Fname=%s\n", ar->fname);
    Dmsg0(dbglevel, "put_file_into_catalog\n");
-   /*
-    * Make sure we have an acceptable attributes record.
-    */
-   if (!(ar->Stream == STREAM_UNIX_ATTRIBUTES ||
-         ar->Stream == STREAM_UNIX_ATTRIBUTES_EX)) {
-      Mmsg1(&mdb->errmsg, _("Attempt to put non-attributes into catalog. Stream=%d\n"),
-         ar->Stream);
-      Jmsg(jcr, M_ERROR, 0, "%s", mdb->errmsg);
-      goto bail_out;
-   }
-
    split_path_and_file(jcr, mdb, ar->fname);
@@ -1106,4 +1097,176 @@ bool db_write_batch_file_records(JCR *jcr)
 
 #endif /* ! HAVE_BATCH_FILE_INSERT */
 
-#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_DBI */
+
+/* List of SQL commands to create temp table and indices */
+const char *create_temp_basefile[5] = {
+   /* MySQL */
+   "CREATE TEMPORARY TABLE basefile%lld ("
+// "CREATE TABLE basefile%lld ("
+      "Path BLOB NOT NULL,"
+      "Name BLOB NOT NULL)",
+
+   /* Postgresql */
+   "CREATE TEMPORARY TABLE basefile%lld ("
+// "CREATE TABLE basefile%lld ("
+      "Path TEXT,"
+      "Name TEXT)",
+
+   /* SQLite */
+   "CREATE TEMPORARY TABLE basefile%lld ("
+      "Path TEXT,"
+      "Name TEXT)",
+
+   /* SQLite3 */
+   "CREATE TEMPORARY TABLE basefile%lld ("
+      "Path TEXT,"
+      "Name TEXT)",
+
+   /* Ingres */
+   "DECLARE GLOBAL TEMPORARY TABLE basefile%lld ("
+      "Path TEXT NOT NULL,"
+      "Name TEXT NOT NULL)"
+};
+
+/*
+ * Create file attributes record, or base file attributes record
+ */
+bool db_create_attributes_record(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
+{
+   bool ret;
+
+   /*
+    * Make sure we have an acceptable attributes record.
+    */
+   if (!(ar->Stream == STREAM_UNIX_ATTRIBUTES ||
+         ar->Stream == STREAM_UNIX_ATTRIBUTES_EX)) {
+      Jmsg(jcr, M_FATAL, 0, _("Attempt to put non-attributes into catalog. Stream=%d\n"), ar->Stream);
+      return false;
+   }
+
+   if (ar->FileType != FT_BASE) {
+      ret = db_create_file_attributes_record(jcr, mdb, ar);
+
+   } else if (jcr->HasBase) {
+      ret = db_create_base_file_attributes_record(jcr, mdb, ar);
+
+   } else {
+      Jmsg0(jcr, M_FATAL, 0, _("Can't Copy/Migrate job using BaseJob"));
+      ret = true;               /* in copy/migration what do we do ? */
+   }
+
+   return ret;
+}
+
+/*
+ * Create Base File record in B_DB
+ *
+ */
+bool db_create_base_file_attributes_record(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
+{
+   bool ret;
+   Dmsg1(dbglevel, "create_base_file Fname=%s\n", ar->fname);
+   Dmsg0(dbglevel, "put_base_file_into_catalog\n");
+
+   db_lock(mdb);
+   split_path_and_file(jcr, mdb, ar->fname);
+
+   mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1);
+   db_escape_string(jcr, mdb, mdb->esc_name, mdb->fname, mdb->fnl);
+
+   mdb->esc_path = check_pool_memory_size(mdb->esc_path, mdb->pnl*2+1);
+   db_escape_string(jcr, mdb, mdb->esc_path, mdb->path, mdb->pnl);
+
+   Mmsg(mdb->cmd, "INSERT INTO basefile%lld (Path, Name) VALUES ('%s','%s')",
+        (uint64_t)jcr->JobId, mdb->esc_path, mdb->esc_name);
+
+   ret = INSERT_DB(jcr, mdb, mdb->cmd);
+   db_unlock(mdb);
+
+   return ret;
+}
+
+/*
+ * Cleanup the base file temporary tables
+ */
+static void db_cleanup_base_file(JCR *jcr, B_DB *mdb)
+{
+   POOL_MEM buf(PM_MESSAGE);
+   Mmsg(buf, "DROP TABLE new_basefile%lld", (uint64_t) jcr->JobId);
+   db_sql_query(mdb, buf.c_str(), NULL, NULL);
+
+   Mmsg(buf, "DROP TABLE basefile%lld", (uint64_t) jcr->JobId);
+   db_sql_query(mdb, buf.c_str(), NULL, NULL);
+}
+
+/*
+ * Put all base files seen in the backup into the BaseFile table
+ * and cleanup temporary tables
+ */
+bool db_commit_base_file_attributes_record(JCR *jcr, B_DB *mdb)
+{
+   bool ret;
+   char ed1[50];
+
+   db_lock(mdb);
+
+   Mmsg(mdb->cmd,
+  "INSERT INTO BaseFiles (BaseJobId, JobId, FileId, FileIndex) "
+   "SELECT B.JobId AS BaseJobId, %s AS JobId, "
+          "B.FileId, B.FileIndex "
+     "FROM basefile%s AS A, new_basefile%s AS B "
+    "WHERE A.Path = B.Path "
+      "AND A.Name = B.Name "
+    "ORDER BY B.FileId",
+        edit_uint64(jcr->JobId, ed1), ed1, ed1);
+   ret = db_sql_query(mdb, mdb->cmd, NULL, NULL);
+   jcr->nb_base_files_used = sql_affected_rows(mdb);
+   db_cleanup_base_file(jcr, mdb);
+
+   db_unlock(mdb);
+   return ret;
+}
+
+/*
+ * Find the last "accurate" backup state with Base jobs
+ * 1) Get all files with jobid in list (F subquery)
+ * 2) Take only the last version of each file (Temp subquery) => accurate list is ok
+ * 3) Put the result in a temporary table for the end of job
+ *
+ */
+bool db_create_base_file_list(JCR *jcr, B_DB *mdb, char *jobids)
+{
+   POOL_MEM buf;
+   bool ret=false;
+
+   db_lock(mdb);
+
+   if (!*jobids) {
+      Mmsg(mdb->errmsg, _("ERR=JobIds are empty\n"));
+      goto bail_out;
+   }
+
+   Mmsg(mdb->cmd, create_temp_basefile[db_type], (uint64_t) jcr->JobId);
+   if (!db_sql_query(mdb, mdb->cmd, NULL, NULL)) {
+      goto bail_out;
+   }
+
+   Mmsg(buf, select_recent_version[db_type], jobids, jobids);
+   Mmsg(mdb->cmd,
+"CREATE TEMPORARY TABLE new_basefile%lld AS "
+//"CREATE TABLE new_basefile%lld AS "
+  "SELECT Path.Path AS Path, Filename.Name AS Name, Temp.FileIndex AS FileIndex,"
+         "Temp.JobId AS JobId, Temp.LStat AS LStat, Temp.FileId AS FileId, "
+         "Temp.MD5 AS MD5 "
+    "FROM ( %s ) AS Temp "
+    "JOIN Filename ON (Filename.FilenameId = Temp.FilenameId) "
+    "JOIN Path ON (Path.PathId = Temp.PathId) "
+   "WHERE Temp.FileIndex > 0",
+        (uint64_t)jcr->JobId, buf.c_str());
+
+   ret = db_sql_query(mdb, mdb->cmd, NULL, NULL);
+bail_out:
+   db_unlock(mdb);
+   return ret;
+}
+
+#endif /* HAVE_SQLITE3 || HAVE_MYSQL || HAVE_SQLITE || HAVE_POSTGRESQL || HAVE_INGRES || HAVE_DBI */
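
For readers following the base-job catalog API introduced by this patch, the sketch below shows the order in which a caller would be expected to drive the routines added above: db_create_base_file_list() once the list of base JobIds is known, db_create_attributes_record() for every attribute record, then db_write_batch_file_records() and db_commit_base_file_attributes_record() at job end. It is a minimal illustration only, assuming the declarations from bacula.h/cats.h; the helper name store_files_with_base(), the base_jobids/attrs parameters, and the surrounding error handling are hypothetical and not part of the patch.

#include "bacula.h"
#include "cats.h"

/*
 * Hypothetical driver (illustration only, not part of this patch) showing
 * the expected call order of the new base-file catalog routines.
 */
static bool store_files_with_base(JCR *jcr, B_DB *mdb, char *base_jobids,
                                  ATTR_DBR *attrs, int num_attrs)
{
   /* 1. Build the basefile<JobId>/new_basefile<JobId> work tables from the
    *    most recent version of each file in the listed base jobs. */
   if (jcr->HasBase && !db_create_base_file_list(jcr, mdb, base_jobids)) {
      return false;                  /* error text is left in mdb->errmsg */
   }

   /* 2. Insert each record: FT_BASE entries go to basefile<JobId>, all
    *    other attribute records go through the normal (batch) file path. */
   for (int i = 0; i < num_attrs; i++) {
      if (!db_create_attributes_record(jcr, mdb, &attrs[i])) {
         return false;
      }
   }

   /* 3. At job end, flush the batch tables, then match basefile<JobId>
    *    against new_basefile<JobId> to fill BaseFiles and drop both. */
   if (jcr->batch_started && !db_write_batch_file_records(jcr)) {
      return false;
   }
   if (jcr->HasBase && !db_commit_base_file_attributes_record(jcr, mdb)) {
      return false;
   }
   return true;
}

The division of labour follows the patch itself: db_create_base_file_list() only prepares the temporary tables, the per-file inserts stay cheap, and the single INSERT ... SELECT in db_commit_base_file_attributes_record() resolves all base-file references in one statement before the work tables are dropped.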