From 46205bab364750557e69071ba7d60b754069a334 Mon Sep 17 00:00:00 2001 From: Kern Sibbald Date: Sat, 10 Feb 2007 14:25:21 +0000 Subject: [PATCH] kes Apply Eric's scratch patch that moves a purged Volume to the RecyclePool. Question: how is RecyclePool set? what happens to the ScratchPool? kes Apply Eric's media patch that collects read/write media times as well as byte counts. This patch requires a simultaneous upgrade of the DIR and SD. Note, there should be some way to turn off timing. I'm not sure the times are in Bacula units. kes Apply Eric's batch-insert patch. git-svn-id: https://bacula.svn.sourceforge.net/svnroot/bacula/trunk@4157 91ce42f0-d328-0410-95d8-f526ca767f89 --- bacula/kernstodo | 2 + bacula/patches/testing/batch-insert.patch | 741 ------------------ bacula/patches/testing/cancel-bug.patch | 11 - bacula/patches/testing/cancel-bug.readme | 11 - bacula/patches/testing/media-stats.patch | 352 --------- bacula/patches/testing/media-stats.readme | 17 - .../project-include-jobid-in-spool-name.patch | 14 - ...project-include-jobid-in-spool-name.readme | 11 - bacula/patches/testing/scratch.patch | 47 -- bacula/patches/testing/scratch.readme | 12 - bacula/projects | 1 + bacula/src/baconfig.h | 4 +- bacula/src/cats/cats.h | 81 +- bacula/src/cats/mysql.c | 32 + bacula/src/cats/postgresql.c | 199 +++++ bacula/src/cats/protos.h | 4 + bacula/src/cats/sql_create.c | 201 +++++ bacula/src/cats/sql_find.c | 6 +- bacula/src/cats/sql_get.c | 6 +- bacula/src/cats/sqlite.c | 14 + bacula/src/dird/backup.c | 3 + bacula/src/dird/catreq.c | 2 +- bacula/src/dird/dird.c | 2 + bacula/src/dird/getmsg.c | 13 +- bacula/src/dird/job.c | 4 + bacula/src/dird/protos.h | 3 + bacula/src/dird/ua_cmds.c | 22 +- bacula/src/dird/ua_purge.c | 17 +- bacula/src/dird/ua_update.c | 2 +- bacula/src/dird/verify.c | 2 +- bacula/src/filed/verify.c | 22 +- bacula/src/jcr.h | 1 + bacula/src/lib/dlist.c | 22 +- bacula/src/lib/dlist.h | 6 +- bacula/src/qt-console/main.ui | 6 + bacula/src/qt-console/run/run.cpp | 51 +- bacula/src/qt-console/run/run.h | 1 + bacula/src/qt-console/run/run.ui | 347 ++++---- bacula/src/stored/acquire.c | 4 +- bacula/src/stored/ansi_label.c | 8 +- bacula/src/stored/askdir.c | 5 - bacula/src/stored/block.c | 14 +- bacula/src/stored/dev.c | 66 +- bacula/src/stored/dev.h | 12 + bacula/src/version.h | 6 +- bacula/technotes-2.1 | 10 + 46 files changed, 940 insertions(+), 1477 deletions(-) delete mode 100644 bacula/patches/testing/batch-insert.patch delete mode 100644 bacula/patches/testing/cancel-bug.patch delete mode 100644 bacula/patches/testing/cancel-bug.readme delete mode 100644 bacula/patches/testing/media-stats.patch delete mode 100644 bacula/patches/testing/media-stats.readme delete mode 100644 bacula/patches/testing/project-include-jobid-in-spool-name.patch delete mode 100644 bacula/patches/testing/project-include-jobid-in-spool-name.readme delete mode 100644 bacula/patches/testing/scratch.patch delete mode 100644 bacula/patches/testing/scratch.readme diff --git a/bacula/kernstodo b/bacula/kernstodo index ef54bfd5db..1195f92ff3 100644 --- a/bacula/kernstodo +++ b/bacula/kernstodo @@ -41,6 +41,8 @@ Document: Priority: +- Ensure that moving a purged Volume in ua_purge.c to the RecyclePool + does the right thing. - Why doesn't @"xxx abc" work in a conf file? - Figure out some way to "automatically" backup conf changes. - Look at using posix_fadvise(2) for backups -- see bug #751. 
diff --git a/bacula/patches/testing/batch-insert.patch b/bacula/patches/testing/batch-insert.patch deleted file mode 100644 index 706391518a..0000000000 --- a/bacula/patches/testing/batch-insert.patch +++ /dev/null @@ -1,741 +0,0 @@ -diff -Naur cvs/src/cats/cats.h my/src/cats/cats.h ---- cvs/src/cats/cats.h 2006-12-06 15:11:53.000000000 +0100 -+++ my/src/cats/cats.h 2007-01-10 19:21:42.000000000 +0100 -@@ -141,6 +141,7 @@ - POOLMEM *fname; /* Filename only */ - POOLMEM *path; /* Path only */ - POOLMEM *esc_name; /* Escaped file/path name */ -+ POOLMEM *esc_name2; /* Escaped file/path name */ - int fnl; /* file name length */ - int pnl; /* path name length */ - }; -@@ -170,8 +171,14 @@ - #define sql_fetch_field(x) my_sqlite_fetch_field(x) - #define sql_num_fields(x) ((x)->ncolumn) - #define SQL_ROW char** -- -- -+#define sql_batch_start(x) db_batch_start(x) -+#define sql_batch_end(x,y) db_batch_end(x,y) -+#define sql_batch_insert(x,y) db_batch_insert(x,y) -+#define sql_batch_lock_path_query my_sqlite_batch_lock_query -+#define sql_batch_lock_filename_query my_sqlite_batch_lock_query -+#define sql_batch_unlock_tables_query my_sqlite_batch_unlock_query -+#define sql_batch_fill_filename_query my_sqlite_batch_fill_filename_query -+#define sql_batch_fill_path_query my_sqlite_batch_fill_path_query - - /* In cats/sqlite.c */ - void my_sqlite_free_table(B_DB *mdb); -@@ -179,6 +186,10 @@ - int my_sqlite_query(B_DB *mdb, const char *cmd); - void my_sqlite_field_seek(B_DB *mdb, int field); - SQL_FIELD *my_sqlite_fetch_field(B_DB *mdb); -+extern char* my_sqlite_batch_lock_query; -+extern char* my_sqlite_batch_unlock_query; -+extern char* my_sqlite_batch_fill_filename_query; -+extern char* my_sqlite_batch_fill_path_query; - - - #else -@@ -249,6 +260,7 @@ - POOLMEM *fname; /* Filename only */ - POOLMEM *path; /* Path only */ - POOLMEM *esc_name; /* Escaped file/path name */ -+ POOLMEM *esc_name2; /* Escaped file/path name */ - int fnl; /* file name length */ - int pnl; /* path name length */ - }; -@@ -289,8 +301,14 @@ - #define sql_fetch_field(x) my_sqlite_fetch_field(x) - #define sql_num_fields(x) ((x)->ncolumn) - #define SQL_ROW char** -- -- -+#define sql_batch_start(x) db_batch_start(x) -+#define sql_batch_end(x,y) db_batch_end(x,y) -+#define sql_batch_insert(x,y) db_batch_insert(x,y) -+#define sql_batch_lock_path_query my_sqlite_batch_lock_query -+#define sql_batch_lock_filename_query my_sqlite_batch_lock_query -+#define sql_batch_unlock_tables_query my_sqlite_batch_unlock_query -+#define sql_batch_fill_filename_query my_sqlite_batch_fill_filename_query -+#define sql_batch_fill_path_query my_sqlite_batch_fill_path_query - - /* In cats/sqlite.c */ - void my_sqlite_free_table(B_DB *mdb); -@@ -298,6 +316,10 @@ - int my_sqlite_query(B_DB *mdb, const char *cmd); - void my_sqlite_field_seek(B_DB *mdb, int field); - SQL_FIELD *my_sqlite_fetch_field(B_DB *mdb); -+extern char* my_sqlite_batch_lock_query; -+extern char* my_sqlite_batch_unlock_query; -+extern char* my_sqlite_batch_fill_filename_query; -+extern char* my_sqlite_batch_fill_path_query; - - - #else -@@ -341,6 +363,7 @@ - POOLMEM *fname; /* Filename only */ - POOLMEM *path; /* Path only */ - POOLMEM *esc_name; /* Escaped file/path name */ -+ POOLMEM *esc_name2; /* Escaped file/path name */ - int fnl; /* file name length */ - int pnl; /* path name length */ - }; -@@ -362,9 +385,25 @@ - #define sql_field_seek(x, y) mysql_field_seek((x)->result, (y)) - #define sql_fetch_field(x) mysql_fetch_field((x)->result) - #define sql_num_fields(x) 
(int)mysql_num_fields((x)->result) -+#define sql_batch_start(x) db_batch_start(x) -+#define sql_batch_end(x,y) db_batch_end(x,y) -+#define sql_batch_insert(x,y) db_batch_insert(x,y) -+#define sql_batch_lock_path_query my_mysql_batch_lock_path_query -+#define sql_batch_lock_filename_query my_mysql_batch_lock_filename_query -+#define sql_batch_unlock_tables_query my_mysql_batch_unlock_tables_query -+#define sql_batch_fill_filename_query my_mysql_batch_fill_filename_query -+#define sql_batch_fill_path_query my_mysql_batch_fill_path_query - #define SQL_ROW MYSQL_ROW - #define SQL_FIELD MYSQL_FIELD - -+ -+int my_mysql_batch_start(B_DB *mdb); -+extern char* my_mysql_batch_lock_path_query; -+extern char* my_mysql_batch_lock_filename_query; -+extern char* my_mysql_batch_unlock_tables_query; -+extern char* my_mysql_batch_fill_filename_query; -+extern char* my_mysql_batch_fill_path_query; -+ - #else - - #ifdef HAVE_POSTGRESQL -@@ -425,6 +464,7 @@ - POOLMEM *fname; /* Filename only */ - POOLMEM *path; /* Path only */ - POOLMEM *esc_name; /* Escaped file/path name */ -+ POOLMEM *esc_name2; /* Escaped file/path name */ - int fnl; /* file name length */ - int pnl; /* path name length */ - }; -@@ -436,7 +476,19 @@ - int my_postgresql_currval (B_DB *mdb, char *table_name); - void my_postgresql_field_seek (B_DB *mdb, int row); - POSTGRESQL_FIELD * my_postgresql_fetch_field(B_DB *mdb); -- -+int my_postgresql_lock_table(B_DB *mdb, const char *table); -+int my_postgresql_unlock_table(B_DB *mdb); -+int my_postgresql_batch_start(B_DB *mdb); -+int my_postgresql_batch_end(B_DB *mdb, const char *error); -+typedef struct ATTR_DBR ATTR_DBR; -+int my_postgresql_batch_insert(B_DB *mdb, ATTR_DBR *ar); -+char *my_postgresql_copy_escape(char *dest, char *src, size_t len); -+ -+extern char* my_pg_batch_lock_path_query; -+extern char* my_pg_batch_lock_filename_query; -+extern char* my_pg_batch_unlock_tables_query; -+extern char* my_pg_batch_fill_filename_query; -+extern char* my_pg_batch_fill_path_query; - - /* "Generic" names for easier conversion */ - #define sql_store_result(x) ((x)->result) -@@ -452,6 +504,17 @@ - #define sql_field_seek(x, y) my_postgresql_field_seek((x), (y)) - #define sql_fetch_field(x) my_postgresql_fetch_field(x) - #define sql_num_fields(x) ((x)->num_fields) -+#define sql_batch_start(x) my_postgresql_batch_start(x) -+#define sql_batch_end(x,y) my_postgresql_batch_end(x,y) -+#define sql_batch_insert(x,y) my_postgresql_batch_insert(x,y) -+#define sql_lock_table(x,y) my_postgresql_lock_table(x, y) -+#define sql_unlock_table(x,y) my_postgresql_unlock_table(x) -+#define sql_batch_lock_path_query my_pg_batch_lock_path_query -+#define sql_batch_lock_filename_query my_pg_batch_lock_filename_query -+#define sql_batch_unlock_tables_query my_pg_batch_unlock_tables_query -+#define sql_batch_fill_filename_query my_pg_batch_fill_filename_query -+#define sql_batch_fill_path_query my_pg_batch_fill_path_query -+ - #define SQL_ROW POSTGRESQL_ROW - #define SQL_FIELD POSTGRESQL_FIELD - -diff -Naur cvs/src/cats/mysql.c my/src/cats/mysql.c ---- cvs/src/cats/mysql.c 2006-12-09 14:41:50.000000000 +0100 -+++ my/src/cats/mysql.c 2007-01-10 19:21:42.000000000 +0100 -@@ -121,6 +121,7 @@ - mdb->fname = get_pool_memory(PM_FNAME); - mdb->path = get_pool_memory(PM_FNAME); - mdb->esc_name = get_pool_memory(PM_FNAME); -+ mdb->esc_name2 = get_pool_memory(PM_FNAME); - qinsert(&db_list, &mdb->bq); /* put db in list */ - V(mutex); - return mdb; -@@ -231,6 +232,7 @@ - free_pool_memory(mdb->fname); - free_pool_memory(mdb->path); - 
free_pool_memory(mdb->esc_name); -+ free_pool_memory(mdb->esc_name2); - if (mdb->db_name) { - free(mdb->db_name); - } -@@ -372,4 +374,34 @@ - - } - -+char *my_mysql_batch_lock_path_query = "LOCK TABLES Path write, " -+ " batch write, " -+ " Path as p write "; -+ -+ -+char *my_mysql_batch_lock_filename_query = "LOCK TABLES Filename write, " -+ " batch write, " -+ " Filename as f write "; -+ -+char *my_mysql_batch_unlock_tables_query = "UNLOCK TABLES"; -+ -+char *my_mysql_batch_fill_path_query = "INSERT IGNORE INTO Path (Path) " -+ " SELECT a.Path FROM " -+ " (SELECT DISTINCT Path " -+ " FROM batch) AS a " -+ " WHERE NOT EXISTS " -+ " (SELECT Path " -+ " FROM Path AS p " -+ " WHERE p.Path = a.Path) "; -+ -+char *my_mysql_batch_fill_filename_query = "INSERT IGNORE INTO Filename (Name)" -+ " SELECT a.Name FROM " -+ " (SELECT DISTINCT Name " -+ " FROM batch) AS a " -+ " WHERE NOT EXISTS " -+ " (SELECT Name " -+ " FROM Filename AS f " -+ " WHERE f.Name = a.Name) "; -+ - #endif /* HAVE_MYSQL */ -+ -diff -Naur cvs/src/cats/postgresql.c my/src/cats/postgresql.c ---- cvs/src/cats/postgresql.c 2006-12-06 15:11:53.000000000 +0100 -+++ my/src/cats/postgresql.c 2007-01-10 19:25:47.000000000 +0100 -@@ -124,6 +124,7 @@ - mdb->fname = get_pool_memory(PM_FNAME); - mdb->path = get_pool_memory(PM_FNAME); - mdb->esc_name = get_pool_memory(PM_FNAME); -+ mdb->esc_name2 = get_pool_memory(PM_FNAME); - mdb->allow_transactions = mult_db_connections; - qinsert(&db_list, &mdb->bq); /* put db in list */ - V(mutex); -@@ -228,6 +229,7 @@ - free_pool_memory(mdb->fname); - free_pool_memory(mdb->path); - free_pool_memory(mdb->esc_name); -+ free_pool_memory(mdb->esc_name2); - if (mdb->db_name) { - free(mdb->db_name); - } -@@ -538,5 +540,202 @@ - return id; - } - -+int my_postgresql_lock_table(B_DB *mdb, const char *table) -+{ -+ my_postgresql_query(mdb, "BEGIN"); -+ Mmsg(mdb->cmd, "LOCK TABLE %s IN SHARE ROW EXCLUSIVE MODE", table); -+ return my_postgresql_query(mdb, mdb->cmd); -+} -+ -+int my_postgresql_unlock_table(B_DB *mdb) -+{ -+ return my_postgresql_query(mdb, "COMMIT"); -+} -+ -+int my_postgresql_batch_start(B_DB *mdb) -+{ -+ Dmsg0(500, "my_postgresql_batch_start started\n"); -+ -+ if (my_postgresql_query(mdb, -+ " CREATE TEMPORARY TABLE batch " -+ " (fileindex int, " -+ " jobid int, " -+ " path varchar, " -+ " name varchar, " -+ " lstat varchar, " -+ " md5 varchar)") == 1) -+ { -+ Dmsg0(500, "my_postgresql_batch_start failed\n"); -+ return 1; -+ } -+ -+ // We are starting a new query. reset everything. -+ mdb->num_rows = -1; -+ mdb->row_number = -1; -+ mdb->field_number = -1; -+ -+ if (mdb->result != NULL) { -+ my_postgresql_free_result(mdb); -+ } -+ -+ mdb->result = PQexec(mdb->db, "COPY batch FROM STDIN"); -+ mdb->status = PQresultStatus(mdb->result); -+ if (mdb->status == PGRES_COPY_IN) { -+ // how many fields in the set? -+ mdb->num_fields = (int) PQnfields(mdb->result); -+ mdb->num_rows = 0; -+ mdb->status = 0; -+ } else { -+ Dmsg0(500, "we failed\n"); -+ mdb->status = 1; -+ } -+ -+ Dmsg0(500, "my_postgresql_batch_start finishing\n"); -+ -+ return mdb->status; -+} -+ -+/* set error to something to abort operation */ -+int my_postgresql_batch_end(B_DB *mdb, const char *error) -+{ -+ int res; -+ int count=30; -+ Dmsg0(500, "my_postgresql_batch_end started\n"); -+ -+ if (!mdb) { /* no files ? 
*/ -+ return 0; -+ } -+ -+ do { -+ res = PQputCopyEnd(mdb->db, error); -+ } while (res == 0 && --count > 0); -+ -+ if (res == 1) { -+ Dmsg0(500, "ok\n"); -+ mdb->status = 0; -+ } -+ -+ if (res <= 0) { -+ Dmsg0(500, "we failed\n"); -+ mdb->status = 1; -+ Mmsg1(&mdb->errmsg, _("error ending batch mode: %s\n"), PQerrorMessage(mdb->db)); -+ } -+ -+ Dmsg0(500, "my_postgresql_batch_end finishing\n"); -+ -+ return mdb->status; -+} -+ -+int my_postgresql_batch_insert(B_DB *mdb, ATTR_DBR *ar) -+{ -+ int res; -+ int count=30; -+ size_t len; -+ char *digest; -+ char ed1[50]; -+ -+ mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1); -+ my_postgresql_copy_escape(mdb->esc_name, mdb->fname, mdb->fnl); -+ -+ mdb->esc_name2 = check_pool_memory_size(mdb->esc_name2, mdb->pnl*2+1); -+ my_postgresql_copy_escape(mdb->esc_name2, mdb->path, mdb->pnl); -+ -+ if (ar->Digest == NULL || ar->Digest[0] == 0) { -+ digest = "0"; -+ } else { -+ digest = ar->Digest; -+ } -+ -+ len = Mmsg(mdb->cmd, "%u\t%s\t%s\t%s\t%s\t%s\n", -+ ar->FileIndex, edit_int64(ar->JobId, ed1), mdb->path, -+ mdb->fname, ar->attr, digest); -+ -+ do { -+ res = PQputCopyData(mdb->db, -+ mdb->cmd, -+ len); -+ } while (res == 0 && --count > 0); -+ -+ if (res == 1) { -+ Dmsg0(500, "ok\n"); -+ mdb->changes++; -+ mdb->status = 0; -+ } -+ -+ if (res <= 0) { -+ Dmsg0(500, "we failed\n"); -+ mdb->status = 1; -+ Mmsg1(&mdb->errmsg, _("error ending batch mode: %s\n"), PQerrorMessage(mdb->db)); -+ } -+ -+ Dmsg0(500, "my_postgresql_batch_insert finishing\n"); -+ -+ return mdb->status; -+} -+ -+/* -+ * Escape strings so that PostgreSQL is happy on COPY -+ * -+ * NOTE! len is the length of the old string. Your new -+ * string must be long enough (max 2*old+1) to hold -+ * the escaped output. -+ */ -+char *my_postgresql_copy_escape(char *dest, char *src, size_t len) -+{ -+ /* we have to escape \t, \n, \r, \ */ -+ char c = '\0' ; -+ -+ while (len > 0 && *src) { -+ switch (*src) { -+ case '\n': -+ c = 'n'; -+ break; -+ case '\\': -+ c = '\\'; -+ break; -+ case '\t': -+ c = 't'; -+ break; -+ case '\r': -+ c = 'r'; -+ break; -+ default: -+ c = '\0' ; -+ } -+ -+ if (c) { -+ *dest = '\\'; -+ dest++; -+ *dest = c; -+ } else { -+ *dest = *src; -+ } -+ -+ len--; -+ src++; -+ dest++; -+ } -+ -+ *dest = '\0'; -+ return dest; -+} -+ -+char *my_pg_batch_lock_path_query = "BEGIN; LOCK TABLE Path IN SHARE ROW EXCLUSIVE MODE"; -+ -+ -+char *my_pg_batch_lock_filename_query = "BEGIN; LOCK TABLE Filename IN SHARE ROW EXCLUSIVE MODE"; -+ -+char *my_pg_batch_unlock_tables_query = "COMMIT"; -+ -+char *my_pg_batch_fill_path_query = "INSERT INTO Path (Path) " -+ " SELECT a.Path FROM " -+ " (SELECT DISTINCT Path FROM batch) AS a " -+ " WHERE NOT EXISTS (SELECT Path FROM Path WHERE Path = a.Path) "; -+ - -+char *my_pg_batch_fill_filename_query = "INSERT INTO Filename (Name) " -+ " SELECT a.Name FROM " -+ " (SELECT DISTINCT Name FROM batch) as a " -+ " WHERE NOT EXISTS " -+ " (SELECT Name FROM Filename WHERE Name = a.Name)"; - #endif /* HAVE_POSTGRESQL */ -diff -Naur cvs/src/cats/protos.h my/src/cats/protos.h ---- cvs/src/cats/protos.h 2006-12-06 15:11:53.000000000 +0100 -+++ my/src/cats/protos.h 2007-01-10 19:21:42.000000000 +0100 -@@ -67,6 +67,10 @@ - bool db_create_device_record(JCR *jcr, B_DB *mdb, DEVICE_DBR *dr); - bool db_create_storage_record(JCR *jcr, B_DB *mdb, STORAGE_DBR *sr); - bool db_create_mediatype_record(JCR *jcr, B_DB *mdb, MEDIATYPE_DBR *mr); -+int db_create_batch_file_record(JCR *jcr); -+int db_batch_start(B_DB *mdb); -+int db_batch_end(B_DB *mdb, const 
char *error); -+int db_batch_insert(B_DB *mdb, ATTR_DBR *ar); - - /* delete.c */ - int db_delete_pool_record(JCR *jcr, B_DB *db, POOL_DBR *pool_dbr); -diff -Naur cvs/src/cats/sql_create.c my/src/cats/sql_create.c ---- cvs/src/cats/sql_create.c 2006-12-06 15:11:53.000000000 +0100 -+++ my/src/cats/sql_create.c 2007-01-10 21:46:59.000000000 +0100 -@@ -664,9 +664,208 @@ - * }; - */ - -+/* All db_batch_* functions are used to do bulk batch insert in File/Filename/Path -+ * tables. This code can be activated by adding "#define HAVE_BATCH_FILE_INSERT 1" -+ * in baconfig.h -+ * -+ * To sum up : -+ * - bulk load a temp table -+ * - insert missing filenames into filename with a single query (lock filenames -+ * - table before that to avoid possible duplicate inserts with concurrent update) -+ * - insert missing paths into path with another single query -+ * - then insert the join between the temp, filename and path tables into file. -+ */ -+ -+int db_batch_start(B_DB *mdb) -+{ -+ return sql_query(mdb, -+ " CREATE TEMPORARY TABLE batch " -+ " (fileindex integer, " -+ " jobid integer, " -+ " path blob, " -+ " name blob, " -+ " lstat tinyblob, " -+ " md5 tinyblob) "); -+} -+ -+int db_batch_insert(B_DB *mdb, ATTR_DBR *ar) -+{ -+ size_t len; -+ char *digest; -+ char ed1[50]; -+ -+ mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1); -+ db_escape_string(mdb->esc_name, mdb->fname, mdb->fnl); -+ -+ mdb->esc_name2 = check_pool_memory_size(mdb->esc_name2, mdb->pnl*2+1); -+ db_escape_string(mdb->esc_name2, mdb->path, mdb->pnl); -+ -+ if (ar->Digest == NULL || ar->Digest[0] == 0) { -+ digest = "0"; -+ } else { -+ digest = ar->Digest; -+ } -+ -+ len = Mmsg(mdb->cmd, "INSERT INTO batch VALUES (%u,%s,'%s','%s','%s','%s')", -+ ar->FileIndex, edit_int64(ar->JobId,ed1), mdb->path, -+ mdb->fname, ar->attr, digest); -+ -+ sql_query(mdb, mdb->cmd); -+ -+ return mdb->status; -+} -+ -+/* set error to something to abort operation */ -+int db_batch_end(B_DB *mdb, const char *error) -+{ -+ -+ Dmsg0(50, "db_batch_end started"); -+ -+ if (mdb) { -+ mdb->status = 0; -+ return mdb->status; -+ } -+ return 0; -+} -+ -+int db_create_batch_file_record(JCR *jcr) -+{ -+ Dmsg0(50,"db_create_file_record : no files"); -+ -+ if (!jcr->db_batch) { /* no files to backup ? 
*/ -+ Dmsg0(50,"db_create_file_record : no files\n"); -+ return 0; -+ } -+ -+ if (sql_batch_end(jcr->db_batch, NULL)) { -+ Jmsg(jcr, M_FATAL, 0, "Bad batch end %s\n", jcr->db_batch->errmsg); -+ return 1; -+ } -+ -+ /* we have to lock tables */ -+ if (sql_query(jcr->db_batch, sql_batch_lock_path_query)) -+ { -+ Jmsg(jcr, M_FATAL, 0, "Can't lock Path table %s\n", jcr->db_batch->errmsg); -+ return 1; -+ } -+ -+ if (sql_query(jcr->db_batch, sql_batch_fill_path_query)) -+ { -+ Jmsg(jcr, M_FATAL, 0, "Can't fill Path table %s\n",jcr->db_batch->errmsg); -+ sql_query(jcr->db_batch, sql_batch_unlock_tables_query); -+ return 1; -+ } -+ -+ if (sql_query(jcr->db_batch, sql_batch_unlock_tables_query)) -+ { -+ Jmsg(jcr, M_FATAL, 0, "Can't unlock Path table %s\n", jcr->db_batch->errmsg); -+ return 1; -+ } -+ -+ /* we have to lock tables */ -+ if (sql_query(jcr->db_batch, sql_batch_lock_filename_query)) -+ { -+ Jmsg(jcr, M_FATAL, 0, "Can't lock Filename table %s\n", jcr->db_batch->errmsg); -+ return 1; -+ } -+ -+ if (sql_query(jcr->db_batch, sql_batch_fill_filename_query)) -+ { -+ Jmsg(jcr,M_FATAL,0,"Can't fill Filename table %s\n",jcr->db_batch->errmsg); -+ sql_query(jcr->db_batch, sql_batch_unlock_tables_query); -+ return 1; -+ } -+ -+ if (sql_query(jcr->db_batch, sql_batch_unlock_tables_query)) { -+ Jmsg(jcr, M_FATAL, 0, "Can't unlock Filename table %s\n", jcr->db_batch->errmsg); -+ return 1; -+ } -+ -+ if (sql_query(jcr->db_batch, -+ " INSERT INTO File (FileIndex, JobId, PathId, FilenameId, LStat, MD5)" -+ " SELECT batch.FileIndex, batch.JobId, Path.PathId, " -+ " Filename.FilenameId,batch.LStat, batch.MD5 " -+ " FROM batch " -+ " JOIN Path ON (batch.Path = Path.Path) " -+ " JOIN Filename ON (batch.Name = Filename.Name) ")) -+ { -+ Jmsg(jcr, M_FATAL, 0, "Can't fill File table %s\n", jcr->db_batch->errmsg); -+ return 1; -+ } -+ -+ sql_query(jcr->db_batch, "DROP TABLE batch"); -+ -+ return 0; -+} -+ -+#ifdef HAVE_BATCH_FILE_INSERT -+/* -+ * Create File record in B_DB -+ * -+ * In order to reduce database size, we store the File attributes, -+ * the FileName, and the Path separately. In principle, there -+ * is a single FileName record and a single Path record, no matter -+ * how many times it occurs. This is this subroutine, we separate -+ * the file and the path and fill temporary tables with this three records. -+ */ -+int db_create_file_attributes_record(JCR *jcr, B_DB *_mdb, ATTR_DBR *ar) -+{ -+ -+ Dmsg1(dbglevel, "Fname=%s\n", ar->fname); -+ Dmsg0(dbglevel, "put_file_into_catalog\n"); -+ -+ if (!jcr->db_batch) { -+ jcr->db_batch = db_init_database(jcr, -+ jcr->db->db_name, -+ jcr->db->db_user, -+ jcr->db->db_password, -+ jcr->db->db_address, -+ jcr->db->db_port, -+ jcr->db->db_socket, -+ 1 /* multi_db = true */); -+ -+ if (!jcr->db_batch || !db_open_database(jcr, jcr->db_batch)) { -+ Jmsg(jcr, M_FATAL, 0, _("Could not open database \"%s\".\n"), -+ jcr->db->db_name); -+ if (jcr->db_batch) { -+ Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db_batch)); -+ } -+ return 0; -+ } -+ -+ sql_batch_start(jcr->db_batch); -+ } -+ -+ B_DB *mdb = jcr->db_batch; -+ -+ /* -+ * Make sure we have an acceptable attributes record. -+ */ -+ if (!(ar->Stream == STREAM_UNIX_ATTRIBUTES || -+ ar->Stream == STREAM_UNIX_ATTRIBUTES_EX)) { -+ Mmsg1(&mdb->errmsg, _("Attempt to put non-attributes into catalog. 
Stream=%d\n"), -+ ar->Stream); -+ Jmsg(jcr, M_ERROR, 0, "%s", mdb->errmsg); -+ return 0; -+ } -+ -+ split_path_and_file(jcr, mdb, ar->fname); - - - /* -+ if (jcr->changes > 100000) { -+ sql_batch_end(mdb, NULL); -+ sql_batch_start(mdb); -+ jcr->changes = 0; -+ } -+*/ -+ -+ return (sql_batch_insert(mdb, ar) == 0); -+} -+ -+#else /* ! HAVE_BATCH_FILE_INSERT */ -+ -+/* - * Create File record in B_DB - * - * In order to reduce database size, we store the File attributes, -@@ -721,6 +920,8 @@ - return 0; - } - -+#endif /* ! HAVE_BATCH_FILE_INSERT */ -+ - /* - * This is the master File entry containing the attributes. - * The filename and path records have already been created. -diff -Naur cvs/src/cats/sqlite.c my/src/cats/sqlite.c ---- cvs/src/cats/sqlite.c 2006-12-06 15:11:53.000000000 +0100 -+++ my/src/cats/sqlite.c 2007-01-10 19:21:42.000000000 +0100 -@@ -108,6 +108,7 @@ - mdb->fname = get_pool_memory(PM_FNAME); - mdb->path = get_pool_memory(PM_FNAME); - mdb->esc_name = get_pool_memory(PM_FNAME); -+ mdb->esc_name2 = get_pool_memory(PM_FNAME); - mdb->allow_transactions = mult_db_connections; - qinsert(&db_list, &mdb->bq); /* put db in list */ - V(mutex); -@@ -213,6 +214,7 @@ - free_pool_memory(mdb->fname); - free_pool_memory(mdb->path); - free_pool_memory(mdb->esc_name); -+ free_pool_memory(mdb->esc_name2); - if (mdb->db_name) { - free(mdb->db_name); - } -@@ -433,4 +435,16 @@ - return mdb->fields[mdb->field++]; - } - -+char *my_sqlite_batch_lock_query = "BEGIN"; -+char *my_sqlite_batch_unlock_query = "COMMIT"; -+char *my_sqlite_batch_fill_path_query = "INSERT INTO Path (Path) " -+ " SELECT DISTINCT Path FROM batch " -+ " EXCEPT SELECT Path FROM Path "; -+ -+char *my_sqlite_batch_fill_filename_query = "INSERT INTO Filename (Name) " -+ " SELECT DISTINCT Name FROM batch " -+ " EXCEPT SELECT Name FROM Filename "; -+ -+ -+ - #endif /* HAVE_SQLITE */ -diff -Naur cvs/src/dird/backup.c my/src/dird/backup.c ---- cvs/src/dird/backup.c 2006-12-13 11:57:52.000000000 +0100 -+++ my/src/dird/backup.c 2007-01-10 19:21:42.000000000 +0100 -@@ -233,6 +233,9 @@ - - /* Pickup Job termination data */ - stat = wait_for_job_termination(jcr); -+#ifdef HAVE_BATCH_FILE_INSERT -+ db_create_batch_file_record(jcr); /* used by bulk batch file insert */ -+#endif - if (stat == JS_Terminated) { - backup_cleanup(jcr, stat); - return true; -diff -Naur cvs/src/dird/jobq.c my/src/dird/jobq.c ---- cvs/src/dird/jobq.c 2006-11-24 11:29:37.000000000 +0100 -+++ my/src/dird/jobq.c 2007-01-10 19:21:42.000000000 +0100 -@@ -563,6 +563,10 @@ - db_close_database(jcr, jcr->db); - jcr->db = NULL; - } -+ if (jcr->db_batch) { -+ db_close_database(jcr, jcr->db_batch); -+ jcr->db_batch = NULL; -+ } - Dmsg2(2300, "====== Termination job=%d use_cnt=%d\n", jcr->JobId, jcr->use_count()); - jcr->SDJobStatus = 0; - V(jq->mutex); /* release internal lock */ -diff -Naur cvs/src/jcr.h my/src/jcr.h ---- cvs/src/jcr.h 2006-12-19 21:57:38.000000000 +0100 -+++ my/src/jcr.h 2007-01-10 19:21:42.000000000 +0100 -@@ -184,6 +184,7 @@ - bool cached_attribute; /* set if attribute is cached */ - POOLMEM *attr; /* Attribute string from SD */ - B_DB *db; /* database pointer */ -+ B_DB *db_batch; /* database pointer for batch insert */ - ATTR_DBR *ar; /* DB attribute record */ - - /* Daemon specific part of JCR */ diff --git a/bacula/patches/testing/cancel-bug.patch b/bacula/patches/testing/cancel-bug.patch deleted file mode 100644 index 4b6f55453f..0000000000 --- a/bacula/patches/testing/cancel-bug.patch +++ /dev/null @@ -1,11 +0,0 @@ ---- src/dird/msgchan.c 8 Dec 2006 
14:27:10 -0000 1.66 -+++ src/dird/msgchan.c 5 Feb 2007 14:15:52 -0000 -@@ -316,7 +316,7 @@ - Jmsg1(jcr, M_ABORT, 0, _("Cannot create message thread: %s\n"), be.strerror(status)); - } - /* Wait for thread to start */ -- while (jcr->SD_msg_chan == 0) { -+ while (jcr->SD_msg_chan == 0 && cr->sd_msg_thread_done == false) { - bmicrosleep(0, 50); - } - Dmsg1(100, "SD msg_thread started. use=%d\n", jcr->use_count()); diff --git a/bacula/patches/testing/cancel-bug.readme b/bacula/patches/testing/cancel-bug.readme deleted file mode 100644 index 36d22b8935..0000000000 --- a/bacula/patches/testing/cancel-bug.readme +++ /dev/null @@ -1,11 +0,0 @@ -From: Eric Bollengier - -Sometime, when my system is very busy, if i cancel a job -before i was initialised, bacula fall in a race case. -and the job thread never exits from start_storage_daemon_message_thread(). - - -$Log$ -Revision 1.1 2007/02/05 14:19:22 ricozz -ebl add - diff --git a/bacula/patches/testing/media-stats.patch b/bacula/patches/testing/media-stats.patch deleted file mode 100644 index 0b505e5cef..0000000000 --- a/bacula/patches/testing/media-stats.patch +++ /dev/null @@ -1,352 +0,0 @@ -diff -Naur org/bacula-2.0.0/src/baconfig.h bacula-2.0.0/src/baconfig.h ---- org/bacula-2.0.0/src/baconfig.h 2006-12-17 14:36:35.000000000 +0100 -+++ bacula-2.0.0/src/baconfig.h 2007-01-09 16:10:02.000000000 +0100 -@@ -106,8 +106,8 @@ - #define OSDependentInit() - #define tape_open open - #define tape_ioctl ioctl --#define tape_read read --#define tape_write write -+#define tape_read ::read -+#define tape_write ::write - #define tape_close ::close - - #endif -diff -Naur org/bacula-2.0.0/src/cats/sql_find.c bacula-2.0.0/src/cats/sql_find.c ---- org/bacula-2.0.0/src/cats/sql_find.c 2006-11-27 11:02:59.000000000 +0100 -+++ bacula-2.0.0/src/cats/sql_find.c 2007-01-09 16:10:02.000000000 +0100 -@@ -283,7 +283,7 @@ - "VolBytes,VolMounts,VolErrors,VolWrites,MaxVolBytes,VolCapacityBytes," - "VolRetention,VolUseDuration,MaxVolJobs,MaxVolFiles,Recycle,Slot," - "FirstWritten,LastWritten,VolStatus,InChanger,VolParts," -- "LabelType " -+ "LabelType,VolReadTime,VolWriteTime " - "FROM Media WHERE PoolId=%s AND MediaType='%s' AND VolStatus IN ('Full'," - "'Recycle','Purged','Used','Append') AND Enabled=1 " - "ORDER BY LastWritten LIMIT 1", -@@ -308,7 +308,7 @@ - "VolBytes,VolMounts,VolErrors,VolWrites,MaxVolBytes,VolCapacityBytes," - "VolRetention,VolUseDuration,MaxVolJobs,MaxVolFiles,Recycle,Slot," - "FirstWritten,LastWritten,VolStatus,InChanger,VolParts," -- "LabelType " -+ "LabelType,VolReadTime,VolWriteTime " - "FROM Media WHERE PoolId=%s AND MediaType='%s' AND Enabled=1 " - "AND VolStatus='%s' " - "%s " -@@ -371,6 +371,8 @@ - mr->InChanger = str_to_int64(row[20]); - mr->VolParts = str_to_int64(row[21]); - mr->LabelType = str_to_int64(row[22]); -+ mr->VolReadTime = str_to_uint64(row[23]); -+ mr->VolWriteTime = str_to_uint64(row[24]); - mr->Enabled = 1; /* ensured via query */ - sql_free_result(mdb); - -diff -Naur org/bacula-2.0.0/src/cats/sql_get.c bacula-2.0.0/src/cats/sql_get.c ---- org/bacula-2.0.0/src/cats/sql_get.c 2006-11-27 11:02:59.000000000 +0100 -+++ bacula-2.0.0/src/cats/sql_get.c 2007-01-09 16:10:02.000000000 +0100 -@@ -872,7 +872,7 @@ - "MaxVolFiles,Recycle,Slot,FirstWritten,LastWritten,InChanger," - "EndFile,EndBlock,VolParts,LabelType,LabelDate,StorageId," - "Enabled,LocationId,RecycleCount,InitialWrite," -- "ScratchPoolId,RecyclePoolId " -+ "ScratchPoolId,RecyclePoolId,VolReadTime,VolWriteTime " - "FROM Media WHERE MediaId=%s", - edit_int64(mr->MediaId, 
ed1)); - } else { /* find by name */ -@@ -882,7 +882,7 @@ - "MaxVolFiles,Recycle,Slot,FirstWritten,LastWritten,InChanger," - "EndFile,EndBlock,VolParts,LabelType,LabelDate,StorageId," - "Enabled,LocationId,RecycleCount,InitialWrite," -- "ScratchPoolId,RecyclePoolId " -+ "ScratchPoolId,RecyclePoolId,VolReadTime,VolWriteTime " - "FROM Media WHERE VolumeName='%s'", mr->VolumeName); - } - -@@ -938,6 +938,8 @@ - mr->InitialWrite = (time_t)str_to_utime(mr->cInitialWrite); - mr->ScratchPoolId = str_to_int64(row[33]); - mr->RecyclePoolId = str_to_int64(row[34]); -+ mr->VolReadTime = str_to_int64(row[35]); -+ mr->VolWriteTime = str_to_int64(row[36]); - - ok = true; - } -diff -Naur org/bacula-2.0.0/src/dird/catreq.c bacula-2.0.0/src/dird/catreq.c ---- org/bacula-2.0.0/src/dird/catreq.c 2006-12-23 17:33:52.000000000 +0100 -+++ bacula-2.0.0/src/dird/catreq.c 2007-01-09 16:10:02.000000000 +0100 -@@ -280,7 +280,7 @@ - mr.VolWriteTime = sdmr.VolWriteTime; - mr.VolParts = sdmr.VolParts; - bstrncpy(mr.VolStatus, sdmr.VolStatus, sizeof(mr.VolStatus)); -- if (jcr->wstore->StorageId) { -+ if (jcr->wstore && jcr->wstore->StorageId) { - mr.StorageId = jcr->wstore->StorageId; - } - -diff -Naur org/bacula-2.0.0/src/dird/dird.c bacula-2.0.0/src/dird/dird.c ---- org/bacula-2.0.0/src/dird/dird.c 2006-12-22 16:01:05.000000000 +0100 -+++ bacula-2.0.0/src/dird/dird.c 2007-01-09 16:10:02.000000000 +0100 -@@ -269,6 +269,8 @@ - - init_job_server(director->MaxConcurrentJobs); - -+// init_device_resources(); -+ - Dmsg0(200, "wait for next job\n"); - /* Main loop -- call scheduler to get next job to run */ - while ( (jcr = wait_for_next_job(runjob)) ) { -diff -Naur org/bacula-2.0.0/src/dird/getmsg.c bacula-2.0.0/src/dird/getmsg.c ---- org/bacula-2.0.0/src/dird/getmsg.c 2006-11-21 14:20:09.000000000 +0100 -+++ bacula-2.0.0/src/dird/getmsg.c 2007-01-09 16:10:02.000000000 +0100 -@@ -62,7 +62,9 @@ - "open=%d labeled=%d offline=%d " - "reserved=%d max_writers=%d " - "autoselect=%d autochanger=%d " -- "changer_name=%127s media_type=%127s volume_name=%127s\n"; -+ "changer_name=%127s media_type=%127s volume_name=%127s " -+ "DevReadTime=%d DevWriteTime=%d DevReadBytes=%d " -+ "DevWriteBytes=%d\n"; - #endif - - -@@ -243,6 +245,7 @@ - int dev_open, dev_append, dev_read, dev_labeled; - int dev_offline, dev_autochanger, dev_autoselect; - int dev_num_writers, dev_max_writers, dev_reserved; -+ uint64_t dev_read_time, dev_write_time, dev_write_bytes, dev_read_bytes; - uint64_t dev_PoolId; - Dmsg1(100, "msg); - if (sscanf(bs->msg, Device_update, -@@ -253,7 +256,9 @@ - &dev_max_writers, &dev_autoselect, - &dev_autochanger, - changer_name.c_str(), media_type.c_str(), -- volume_name.c_str()) != 15) { -+ volume_name.c_str(), -+ &dev_read_time, &dev_write_time, &dev_read_bytes, -+ &dev_write_bytes) != 19) { - Emsg1(M_ERROR, 0, _("Malformed message: %s\n"), bs->msg); - } else { - unbash_spaces(dev_name); -@@ -283,6 +288,10 @@ - dev->max_writers = dev_max_writers; - dev->reserved = dev_reserved; - dev->found = true; -+ dev->DevReadTime = dev_read_time; /* TODO : have to update database */ -+ dev->DevWriteTime = dev_write_time; -+ dev->DevReadBytes = dev_read_bytes; -+ dev->DevWriteBytes = dev_write_bytes; - } - continue; - } -diff -Naur org/bacula-2.0.0/src/stored/acquire.c bacula-2.0.0/src/stored/acquire.c ---- org/bacula-2.0.0/src/stored/acquire.c 2006-12-16 16:30:22.000000000 +0100 -+++ bacula-2.0.0/src/stored/acquire.c 2007-01-09 16:10:02.000000000 +0100 -@@ -461,8 +461,8 @@ - - if (dev->can_read()) { - dev->clear_read(); /* clear read bit */ 
-- -- /******FIXME**** send read volume usage statistics to director */ -+ Dmsg0(100, "dir_update_vol_info. Release0\n"); -+ dir_update_volume_info(dcr, false); /* send Volume info to Director */ - - } else if (dev->num_writers > 0) { - /* -diff -Naur org/bacula-2.0.0/src/stored/ansi_label.c bacula-2.0.0/src/stored/ansi_label.c ---- org/bacula-2.0.0/src/stored/ansi_label.c 2006-11-21 18:03:45.000000000 +0100 -+++ bacula-2.0.0/src/stored/ansi_label.c 2007-01-09 16:10:02.000000000 +0100 -@@ -87,7 +87,7 @@ - /* Read a maximum of 5 records VOL1, HDR1, ... HDR4 */ - for (i=0; i < 6; i++) { - do { -- stat = tape_read(dev->fd, label, sizeof(label)); -+ stat = dev->read(label, sizeof(label)); - } while (stat == -1 && errno == EINTR); - if (stat < 0) { - berrno be; -@@ -309,7 +309,7 @@ - } else { - label[79] = '3'; /* ANSI label flag */ - } -- stat = tape_write(dev->fd, label, sizeof(label)); -+ stat = dev->write(label, sizeof(label)); - if (stat != sizeof(label)) { - berrno be; - Jmsg1(jcr, M_FATAL, 0, _("Could not write ANSI VOL1 label. ERR=%s\n"), -@@ -341,7 +341,7 @@ - * This could come at the end of a tape, ignore - * EOT errors. - */ -- stat = tape_write(dev->fd, label, sizeof(label)); -+ stat = dev->write(label, sizeof(label)); - if (stat != sizeof(label)) { - berrno be; - if (stat == -1) { -@@ -370,7 +370,7 @@ - label[4] = 'V'; - ascii_to_ebcdic(label, label, sizeof(label)); - } -- stat = tape_write(dev->fd, label, sizeof(label)); -+ stat = dev->write(label, sizeof(label)); - if (stat != sizeof(label)) { - berrno be; - if (stat == -1) { -diff -Naur org/bacula-2.0.0/src/stored/askdir.c bacula-2.0.0/src/stored/askdir.c ---- org/bacula-2.0.0/src/stored/askdir.c 2006-12-08 15:27:10.000000000 +0100 -+++ bacula-2.0.0/src/stored/askdir.c 2007-01-09 16:10:02.000000000 +0100 -@@ -308,11 +308,6 @@ - Pmsg0(000, _("NULL Volume name. 
This shouldn't happen!!!\n")); - return false; - } -- if (dev->can_read()) { -- Jmsg0(jcr, M_FATAL, 0, _("Attempt to update_volume_info in read mode!!!\n")); -- Pmsg0(000, _("Attempt to update_volume_info in read mode!!!\n")); -- return false; -- } - - Dmsg1(100, "Update cat VolFiles=%d\n", dev->file); - /* Just labeled or relabeled the tape */ -diff -Naur org/bacula-2.0.0/src/stored/block.c bacula-2.0.0/src/stored/block.c ---- org/bacula-2.0.0/src/stored/block.c 2006-12-16 12:10:17.000000000 +0100 -+++ bacula-2.0.0/src/stored/block.c 2007-01-09 16:10:02.000000000 +0100 -@@ -537,11 +537,8 @@ - bmicrosleep(5, 0); /* pause a bit if busy or lots of errors */ - dev->clrerror(-1); - } -- if (dev->is_tape()) { -- stat = tape_write(dev->fd, block->buf, (size_t)wlen); -- } else { -- stat = write(dev->fd, block->buf, (size_t)wlen); -- } -+ stat = dev->write(block->buf, (size_t)wlen); -+ - } while (stat == -1 && (errno == EBUSY || errno == EIO) && retry++ < 3); - - #ifdef DEBUG_BLOCK_ZEROING -@@ -979,11 +976,8 @@ - bmicrosleep(10, 0); /* pause a bit if busy or lots of errors */ - dev->clrerror(-1); - } -- if (dev->is_tape()) { -- stat = tape_read(dev->fd, block->buf, (size_t)block->buf_len); -- } else { -- stat = read(dev->fd, block->buf, (size_t)block->buf_len); -- } -+ stat = dev->read(block->buf, (size_t)block->buf_len); -+ - } while (stat == -1 && (errno == EBUSY || errno == EINTR || errno == EIO) && retry++ < 3); - if (stat < 0) { - berrno be; -diff -Naur org/bacula-2.0.0/src/stored/dev.c bacula-2.0.0/src/stored/dev.c ---- org/bacula-2.0.0/src/stored/dev.c 2006-12-22 16:01:05.000000000 +0100 -+++ bacula-2.0.0/src/stored/dev.c 2007-01-09 16:10:02.000000000 +0100 -@@ -1325,7 +1325,7 @@ - mt_com.mt_count = 1; - while (num-- && !at_eot()) { - Dmsg0(100, "Doing read before fsf\n"); -- if ((stat = tape_read(fd, (char *)rbuf, rbuf_len)) < 0) { -+ if ((stat = this->read((char *)rbuf, rbuf_len)) < 0) { - if (errno == ENOMEM) { /* tape record exceeds buf len */ - stat = rbuf_len; /* This is OK */ - /* -@@ -2192,6 +2192,68 @@ - } - } - -+/* return the last timer interval (ms) */ -+int DEVICE::get_timer_count() -+{ -+ uint64_t old = last_timer; -+ struct timeval tv; -+ gettimeofday(&tv, NULL); -+ last_timer = tv.tv_usec + tv.tv_sec * 1000000; -+ -+ return last_timer - old; -+} -+ -+/* read from fd */ -+ssize_t DEVICE::read(void *buf, size_t len) -+{ -+ ssize_t read_len ; -+ -+ get_timer_count(); -+ -+ if (this->is_tape()) { -+ read_len = tape_read(fd, buf, len); -+ } else { -+ read_len = ::read(fd, buf, len); -+ } -+ -+ last_tick = get_timer_count(); -+ -+ DevReadTime += last_tick; -+ VolCatInfo.VolReadTime += last_tick; -+ -+ if (read_len > 0) { /* skip error */ -+ DevReadBytes += read_len; -+ VolCatInfo.VolCatRBytes += read_len; -+ } -+ -+ return read_len; -+} -+ -+/* write to fd */ -+ssize_t DEVICE::write(const void *buf, size_t len) -+{ -+ ssize_t write_len ; -+ -+ get_timer_count(); -+ -+ if (this->is_tape()) { -+ write_len = tape_write(fd, buf, len); -+ } else { -+ write_len = ::write(fd, buf, len); -+ } -+ -+ last_tick = get_timer_count(); -+ -+ DevWriteTime += last_tick; -+ VolCatInfo.VolWriteTime += last_tick; -+ -+ if (write_len > 0) { /* skip error */ -+ DevWriteBytes += write_len; -+ VolCatInfo.VolCatBytes += write_len; -+ } -+ -+ return write_len; -+} - - /* Return the resource name for the device */ - const char *DEVICE::name() const -diff -Naur org/bacula-2.0.0/src/stored/dev.h bacula-2.0.0/src/stored/dev.h ---- org/bacula-2.0.0/src/stored/dev.h 2006-12-14 12:41:01.000000000 +0100 -+++ 
bacula-2.0.0/src/stored/dev.h 2007-01-09 16:10:02.000000000 +0100 -@@ -280,7 +280,17 @@ - int rem_wait_sec; - int num_wait; - -+ uint64_t last_timer; /* used by read/write/seek to get stats (usec) */ -+ int last_tick; /* contains last read/write time (usec) */ -+ -+ uint64_t DevReadTime; -+ uint64_t DevWriteTime; -+ uint64_t DevWriteBytes; -+ uint64_t DevReadBytes; -+ - /* Methods */ -+ int get_timer_count(); /* return the last timer interval (ms) */ -+ - int has_cap(int cap) const { return capabilities & cap; } - void clear_cap(int cap) { capabilities &= ~cap; } - void set_cap(int cap) { capabilities |= cap; } -@@ -363,6 +373,8 @@ - bool truncate(DCR *dcr); /* in dev.c */ - int open(DCR *dcr, int mode); /* in dev.c */ - void term(void); /* in dev.c */ -+ ssize_t read(void *buf, size_t len); /* in dev.c */ -+ ssize_t write(const void *buf, size_t len); /* in dev.c */ - bool rewind(DCR *dcr); /* in dev.c */ - bool mount(int timeout); /* in dev.c */ - bool unmount(int timeout); /* in dev.c */ diff --git a/bacula/patches/testing/media-stats.readme b/bacula/patches/testing/media-stats.readme deleted file mode 100644 index 7b63639bd6..0000000000 --- a/bacula/patches/testing/media-stats.readme +++ /dev/null @@ -1,17 +0,0 @@ -From: Eric Bollengier - -This patch allow you to have media statistics - - VolReadTime - - VolWriteTime - - ... - -The next step, is to have Device statistics. - -$Log$ -Revision 1.1 2006/12/20 18:47:42 ricozz -ebl works with 1.39.30 - -Revision 1.1 2006/12/19 21:33:06 ricozz -ebl ok against 1.39.30 - - diff --git a/bacula/patches/testing/project-include-jobid-in-spool-name.patch b/bacula/patches/testing/project-include-jobid-in-spool-name.patch deleted file mode 100644 index e4c990ecc2..0000000000 --- a/bacula/patches/testing/project-include-jobid-in-spool-name.patch +++ /dev/null @@ -1,14 +0,0 @@ -diff -Naur org/bacula-2.0.0/src/stored/spool.c bacula-2.0.0/src/stored/spool.c ---- org/bacula-2.0.0/src/stored/spool.c 2006-11-27 11:03:03.000000000 +0100 -+++ bacula-2.0.0/src/stored/spool.c 2007-01-09 16:15:02.000000000 +0100 -@@ -154,8 +154,8 @@ - } else { - dir = working_directory; - } -- Mmsg(name, "%s/%s.data.%s.%s.spool", dir, my_name, dcr->jcr->Job, -- dcr->device->hdr.name); -+ Mmsg(name, "%s/%s.data.%u.%s.%s.spool", dir, my_name, dcr->jcr->JobId, -+ dcr->jcr->Job, dcr->device->hdr.name); - } - - diff --git a/bacula/patches/testing/project-include-jobid-in-spool-name.readme b/bacula/patches/testing/project-include-jobid-in-spool-name.readme deleted file mode 100644 index a2a3fee8be..0000000000 --- a/bacula/patches/testing/project-include-jobid-in-spool-name.readme +++ /dev/null @@ -1,11 +0,0 @@ -From: Eric Bollengier - -This patch implements the include JobID in spool file name project - -$Log$ -Revision 1.2 2006/12/30 17:18:33 ricozz -ebl Works with 1.39.35 - -Revision 1.1 2006/12/30 09:30:03 ricozz -ebl works against 1.39.34 - diff --git a/bacula/patches/testing/scratch.patch b/bacula/patches/testing/scratch.patch deleted file mode 100644 index 81166aa413..0000000000 --- a/bacula/patches/testing/scratch.patch +++ /dev/null @@ -1,47 +0,0 @@ -diff -Naur org/bacula-2.0.0/src/dird/protos.h bacula-2.0.0/src/dird/protos.h ---- org/bacula-2.0.0/src/dird/protos.h 2006-12-23 17:33:52.000000000 +0100 -+++ bacula-2.0.0/src/dird/protos.h 2007-01-09 16:12:18.000000000 +0100 -@@ -195,6 +195,9 @@ - int get_num_drives_from_SD(UAContext *ua); - void update_slots(UAContext *ua); - -+/* ua_update.c */ -+void update_vol_pool(UAContext *ua, char *val, MEDIA_DBR *mr, POOL_DBR *opr); -+ - 
/* ua_output.c */ - void prtit(void *ctx, const char *msg); - int complete_jcr_for_job(JCR *jcr, JOB *job, POOL *pool); -diff -Naur org/bacula-2.0.0/src/dird/ua_purge.c bacula-2.0.0/src/dird/ua_purge.c ---- org/bacula-2.0.0/src/dird/ua_purge.c 2006-12-22 16:01:05.000000000 +0100 -+++ bacula-2.0.0/src/dird/ua_purge.c 2007-01-09 16:12:18.000000000 +0100 -@@ -605,6 +605,18 @@ - } - pm_strcpy(jcr->VolumeName, mr->VolumeName); - generate_job_event(jcr, "VolumePurged"); -+ if (mr->RecyclePoolId && mr->RecyclePoolId != mr->PoolId) { -+ POOL_DBR oldpr, newpr; -+ memset(&oldpr, 0, sizeof(POOL_DBR)); -+ memset(&newpr, 0, sizeof(POOL_DBR)); -+ newpr.PoolId = mr->RecyclePoolId; -+ oldpr.PoolId = mr->PoolId; -+ if (db_get_pool_record(jcr, ua->db, &oldpr) && db_get_pool_record(jcr, ua->db, &newpr)) { -+ update_vol_pool(ua, newpr.Name, mr, &oldpr); -+ } else { -+ bsendmsg(ua, "%s", db_strerror(ua->db)); -+ } -+ } - /* Send message to Job report, if it is a *real* job */ - if (jcr && jcr->JobId > 0) { - Jmsg1(jcr, M_INFO, 0, _("All records pruned from Volume \"%s\"; marking it \"Purged\"\n"), -diff -Naur org/bacula-2.0.0/src/dird/ua_update.c bacula-2.0.0/src/dird/ua_update.c ---- org/bacula-2.0.0/src/dird/ua_update.c 2006-12-23 17:33:52.000000000 +0100 -+++ bacula-2.0.0/src/dird/ua_update.c 2007-01-09 16:12:18.000000000 +0100 -@@ -290,7 +290,7 @@ - } - - /* Modify the Pool in which this Volume is located */ --static void update_vol_pool(UAContext *ua, char *val, MEDIA_DBR *mr, POOL_DBR *opr) -+void update_vol_pool(UAContext *ua, char *val, MEDIA_DBR *mr, POOL_DBR *opr) - { - POOL_DBR pr; - POOLMEM *query; diff --git a/bacula/patches/testing/scratch.readme b/bacula/patches/testing/scratch.readme deleted file mode 100644 index 3fbed90cce..0000000000 --- a/bacula/patches/testing/scratch.readme +++ /dev/null @@ -1,12 +0,0 @@ -From: Eric Bollengier - -This patch allow you to : - - move Purged media to there RecyclePool - -$Log$ -Revision 1.2 2007/01/09 15:18:20 ricozz -ebl works with 2.0.0 - -Revision 1.1 2006/12/20 18:47:42 ricozz -ebl works with 1.39.30 - diff --git a/bacula/projects b/bacula/projects index 9bf174feaa..24c6573c08 100644 --- a/bacula/projects +++ b/bacula/projects @@ -574,6 +574,7 @@ Item 17: Restore only file attributes (permissions, ACL, owner, group...) Notes: If the file is here, we skip restore and we change rights. If the file isn't here, we can create an empty one and apply rights or do nothing. + Item 18: Quick release of FD-SD connection after backup. 
Origin: Frank Volf (frank at deze dot org) Date: 17 November 2005 diff --git a/bacula/src/baconfig.h b/bacula/src/baconfig.h index 4c7ae5ef31..b5d5afb48d 100644 --- a/bacula/src/baconfig.h +++ b/bacula/src/baconfig.h @@ -106,8 +106,8 @@ void InitWinAPIWrapper(); #define OSDependentInit() #define tape_open open #define tape_ioctl ioctl -#define tape_read read -#define tape_write write +#define tape_read ::read +#define tape_write ::write #define tape_close ::close #endif diff --git a/bacula/src/cats/cats.h b/bacula/src/cats/cats.h index 609dd43931..6b77929e8c 100644 --- a/bacula/src/cats/cats.h +++ b/bacula/src/cats/cats.h @@ -140,7 +140,8 @@ struct B_DB { int changes; /* changes during transaction */ POOLMEM *fname; /* Filename only */ POOLMEM *path; /* Path only */ - POOLMEM *esc_name; /* Escaped file/path name */ + POOLMEM *esc_name; /* Escaped file name */ + POOLMEM *esc_name2; /* Escaped path name */ int fnl; /* file name length */ int pnl; /* path name length */ }; @@ -170,8 +171,14 @@ struct B_DB { #define sql_fetch_field(x) my_sqlite_fetch_field(x) #define sql_num_fields(x) ((x)->ncolumn) #define SQL_ROW char** - - +#define sql_batch_start(x) db_batch_start(x) +#define sql_batch_end(x,y) db_batch_end(x,y) +#define sql_batch_insert(x,y) db_batch_insert(x,y) +#define sql_batch_lock_path_query my_sqlite_batch_lock_query +#define sql_batch_lock_filename_query my_sqlite_batch_lock_query +#define sql_batch_unlock_tables_query my_sqlite_batch_unlock_query +#define sql_batch_fill_filename_query my_sqlite_batch_fill_filename_query +#define sql_batch_fill_path_query my_sqlite_batch_fill_path_query /* In cats/sqlite.c */ void my_sqlite_free_table(B_DB *mdb); @@ -179,6 +186,10 @@ SQL_ROW my_sqlite_fetch_row(B_DB *mdb); int my_sqlite_query(B_DB *mdb, const char *cmd); void my_sqlite_field_seek(B_DB *mdb, int field); SQL_FIELD *my_sqlite_fetch_field(B_DB *mdb); +extern char* my_sqlite_batch_lock_query; +extern char* my_sqlite_batch_unlock_query; +extern char* my_sqlite_batch_fill_filename_query; +extern char* my_sqlite_batch_fill_path_query; #else @@ -248,7 +259,8 @@ struct B_DB { int changes; /* changes during transaction */ POOLMEM *fname; /* Filename only */ POOLMEM *path; /* Path only */ - POOLMEM *esc_name; /* Escaped file/path name */ + POOLMEM *esc_name; /* Escaped file name */ + POOLMEM *esc_name2; /* Escaped path name */ int fnl; /* file name length */ int pnl; /* path name length */ }; @@ -289,8 +301,14 @@ struct B_DB { #define sql_fetch_field(x) my_sqlite_fetch_field(x) #define sql_num_fields(x) ((x)->ncolumn) #define SQL_ROW char** - - +#define sql_batch_start(x) db_batch_start(x) +#define sql_batch_end(x,y) db_batch_end(x,y) +#define sql_batch_insert(x,y) db_batch_insert(x,y) +#define sql_batch_lock_path_query my_sqlite_batch_lock_query +#define sql_batch_lock_filename_query my_sqlite_batch_lock_query +#define sql_batch_unlock_tables_query my_sqlite_batch_unlock_query +#define sql_batch_fill_filename_query my_sqlite_batch_fill_filename_query +#define sql_batch_fill_path_query my_sqlite_batch_fill_path_query /* In cats/sqlite.c */ void my_sqlite_free_table(B_DB *mdb); @@ -298,6 +316,10 @@ SQL_ROW my_sqlite_fetch_row(B_DB *mdb); int my_sqlite_query(B_DB *mdb, const char *cmd); void my_sqlite_field_seek(B_DB *mdb, int field); SQL_FIELD *my_sqlite_fetch_field(B_DB *mdb); +extern char* my_sqlite_batch_lock_query; +extern char* my_sqlite_batch_unlock_query; +extern char* my_sqlite_batch_fill_filename_query; +extern char* my_sqlite_batch_fill_path_query; #else @@ -340,7 +362,8 @@ struct 
B_DB { int changes; /* changes made to db */ POOLMEM *fname; /* Filename only */ POOLMEM *path; /* Path only */ - POOLMEM *esc_name; /* Escaped file/path name */ + POOLMEM *esc_name; /* Escaped file name */ + POOLMEM *esc_name2; /* Escaped path name */ int fnl; /* file name length */ int pnl; /* path name length */ }; @@ -362,9 +385,25 @@ struct B_DB { #define sql_field_seek(x, y) mysql_field_seek((x)->result, (y)) #define sql_fetch_field(x) mysql_fetch_field((x)->result) #define sql_num_fields(x) (int)mysql_num_fields((x)->result) +#define sql_batch_start(x) db_batch_start(x) +#define sql_batch_end(x,y) db_batch_end(x,y) +#define sql_batch_insert(x,y) db_batch_insert(x,y) +#define sql_batch_lock_path_query my_mysql_batch_lock_path_query +#define sql_batch_lock_filename_query my_mysql_batch_lock_filename_query +#define sql_batch_unlock_tables_query my_mysql_batch_unlock_tables_query +#define sql_batch_fill_filename_query my_mysql_batch_fill_filename_query +#define sql_batch_fill_path_query my_mysql_batch_fill_path_query #define SQL_ROW MYSQL_ROW #define SQL_FIELD MYSQL_FIELD + +int my_mysql_batch_start(B_DB *mdb); +extern char* my_mysql_batch_lock_path_query; +extern char* my_mysql_batch_lock_filename_query; +extern char* my_mysql_batch_unlock_tables_query; +extern char* my_mysql_batch_fill_filename_query; +extern char* my_mysql_batch_fill_path_query; + #else #ifdef HAVE_POSTGRESQL @@ -424,7 +463,8 @@ struct B_DB { int changes; /* changes made to db */ POOLMEM *fname; /* Filename only */ POOLMEM *path; /* Path only */ - POOLMEM *esc_name; /* Escaped file/path name */ + POOLMEM *esc_name; /* Escaped file name */ + POOLMEM *esc_name2; /* Escaped path name */ int fnl; /* file name length */ int pnl; /* path name length */ }; @@ -436,7 +476,19 @@ void my_postgresql_data_seek (B_DB *mdb, int row); int my_postgresql_currval (B_DB *mdb, char *table_name); void my_postgresql_field_seek (B_DB *mdb, int row); POSTGRESQL_FIELD * my_postgresql_fetch_field(B_DB *mdb); - +int my_postgresql_lock_table(B_DB *mdb, const char *table); +int my_postgresql_unlock_table(B_DB *mdb); +int my_postgresql_batch_start(B_DB *mdb); +int my_postgresql_batch_end(B_DB *mdb, const char *error); +typedef struct ATTR_DBR ATTR_DBR; +int my_postgresql_batch_insert(B_DB *mdb, ATTR_DBR *ar); +char *my_postgresql_copy_escape(char *dest, char *src, size_t len); + +extern char* my_pg_batch_lock_path_query; +extern char* my_pg_batch_lock_filename_query; +extern char* my_pg_batch_unlock_tables_query; +extern char* my_pg_batch_fill_filename_query; +extern char* my_pg_batch_fill_path_query; /* "Generic" names for easier conversion */ #define sql_store_result(x) ((x)->result) @@ -452,6 +504,17 @@ POSTGRESQL_FIELD * my_postgresql_fetch_field(B_DB *mdb); #define sql_field_seek(x, y) my_postgresql_field_seek((x), (y)) #define sql_fetch_field(x) my_postgresql_fetch_field(x) #define sql_num_fields(x) ((x)->num_fields) +#define sql_batch_start(x) my_postgresql_batch_start(x) +#define sql_batch_end(x,y) my_postgresql_batch_end(x,y) +#define sql_batch_insert(x,y) my_postgresql_batch_insert(x,y) +#define sql_lock_table(x,y) my_postgresql_lock_table(x, y) +#define sql_unlock_table(x,y) my_postgresql_unlock_table(x) +#define sql_batch_lock_path_query my_pg_batch_lock_path_query +#define sql_batch_lock_filename_query my_pg_batch_lock_filename_query +#define sql_batch_unlock_tables_query my_pg_batch_unlock_tables_query +#define sql_batch_fill_filename_query my_pg_batch_fill_filename_query +#define sql_batch_fill_path_query 
my_pg_batch_fill_path_query + #define SQL_ROW POSTGRESQL_ROW #define SQL_FIELD POSTGRESQL_FIELD diff --git a/bacula/src/cats/mysql.c b/bacula/src/cats/mysql.c index 946cdb39f2..fc2d008e68 100644 --- a/bacula/src/cats/mysql.c +++ b/bacula/src/cats/mysql.c @@ -121,6 +121,7 @@ db_init_database(JCR *jcr, const char *db_name, const char *db_user, const char mdb->fname = get_pool_memory(PM_FNAME); mdb->path = get_pool_memory(PM_FNAME); mdb->esc_name = get_pool_memory(PM_FNAME); + mdb->esc_name2 = get_pool_memory(PM_FNAME); qinsert(&db_list, &mdb->bq); /* put db in list */ V(mutex); return mdb; @@ -231,6 +232,7 @@ db_close_database(JCR *jcr, B_DB *mdb) free_pool_memory(mdb->fname); free_pool_memory(mdb->path); free_pool_memory(mdb->esc_name); + free_pool_memory(mdb->esc_name2); if (mdb->db_name) { free(mdb->db_name); } @@ -372,4 +374,34 @@ int db_sql_query(B_DB *mdb, const char *query, DB_RESULT_HANDLER *result_handler } +char *my_mysql_batch_lock_path_query = "LOCK TABLES Path write, " + " batch write, " + " Path as p write "; + + +char *my_mysql_batch_lock_filename_query = "LOCK TABLES Filename write, " + " batch write, " + " Filename as f write "; + +char *my_mysql_batch_unlock_tables_query = "UNLOCK TABLES"; + +char *my_mysql_batch_fill_path_query = "INSERT IGNORE INTO Path (Path) " + " SELECT a.Path FROM " + " (SELECT DISTINCT Path " + " FROM batch) AS a " + " WHERE NOT EXISTS " + " (SELECT Path " + " FROM Path AS p " + " WHERE p.Path = a.Path) "; + +char *my_mysql_batch_fill_filename_query = "INSERT IGNORE INTO Filename (Name)" + " SELECT a.Name FROM " + " (SELECT DISTINCT Name " + " FROM batch) AS a " + " WHERE NOT EXISTS " + " (SELECT Name " + " FROM Filename AS f " + " WHERE f.Name = a.Name) "; + #endif /* HAVE_MYSQL */ + diff --git a/bacula/src/cats/postgresql.c b/bacula/src/cats/postgresql.c index 33769d9642..d573b09f42 100644 --- a/bacula/src/cats/postgresql.c +++ b/bacula/src/cats/postgresql.c @@ -124,6 +124,7 @@ db_init_database(JCR *jcr, const char *db_name, const char *db_user, const char mdb->fname = get_pool_memory(PM_FNAME); mdb->path = get_pool_memory(PM_FNAME); mdb->esc_name = get_pool_memory(PM_FNAME); + mdb->esc_name2 = get_pool_memory(PM_FNAME); mdb->allow_transactions = mult_db_connections; qinsert(&db_list, &mdb->bq); /* put db in list */ V(mutex); @@ -228,6 +229,7 @@ db_close_database(JCR *jcr, B_DB *mdb) free_pool_memory(mdb->fname); free_pool_memory(mdb->path); free_pool_memory(mdb->esc_name); + free_pool_memory(mdb->esc_name2); if (mdb->db_name) { free(mdb->db_name); } @@ -538,5 +540,202 @@ int my_postgresql_currval(B_DB *mdb, char *table_name) return id; } +int my_postgresql_lock_table(B_DB *mdb, const char *table) +{ + my_postgresql_query(mdb, "BEGIN"); + Mmsg(mdb->cmd, "LOCK TABLE %s IN SHARE ROW EXCLUSIVE MODE", table); + return my_postgresql_query(mdb, mdb->cmd); +} + +int my_postgresql_unlock_table(B_DB *mdb) +{ + return my_postgresql_query(mdb, "COMMIT"); +} + +int my_postgresql_batch_start(B_DB *mdb) +{ + Dmsg0(500, "my_postgresql_batch_start started\n"); + + if (my_postgresql_query(mdb, + " CREATE TEMPORARY TABLE batch " + " (fileindex int, " + " jobid int, " + " path varchar, " + " name varchar, " + " lstat varchar, " + " md5 varchar)") == 1) + { + Dmsg0(500, "my_postgresql_batch_start failed\n"); + return 1; + } + + // We are starting a new query. reset everything. 
+ mdb->num_rows = -1; + mdb->row_number = -1; + mdb->field_number = -1; + + if (mdb->result != NULL) { + my_postgresql_free_result(mdb); + } + + mdb->result = PQexec(mdb->db, "COPY batch FROM STDIN"); + mdb->status = PQresultStatus(mdb->result); + if (mdb->status == PGRES_COPY_IN) { + // how many fields in the set? + mdb->num_fields = (int) PQnfields(mdb->result); + mdb->num_rows = 0; + mdb->status = 0; + } else { + Dmsg0(500, "we failed\n"); + mdb->status = 1; + } + + Dmsg0(500, "my_postgresql_batch_start finishing\n"); + + return mdb->status; +} + +/* set error to something to abort operation */ +int my_postgresql_batch_end(B_DB *mdb, const char *error) +{ + int res; + int count=30; + Dmsg0(500, "my_postgresql_batch_end started\n"); + + if (!mdb) { /* no files ? */ + return 0; + } + + do { + res = PQputCopyEnd(mdb->db, error); + } while (res == 0 && --count > 0); + + if (res == 1) { + Dmsg0(500, "ok\n"); + mdb->status = 0; + } + + if (res <= 0) { + Dmsg0(500, "we failed\n"); + mdb->status = 1; + Mmsg1(&mdb->errmsg, _("error ending batch mode: %s\n"), PQerrorMessage(mdb->db)); + } + + Dmsg0(500, "my_postgresql_batch_end finishing\n"); + + return mdb->status; +} + +int my_postgresql_batch_insert(B_DB *mdb, ATTR_DBR *ar) +{ + int res; + int count=30; + size_t len; + char *digest; + char ed1[50]; + + mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1); + my_postgresql_copy_escape(mdb->esc_name, mdb->fname, mdb->fnl); + + mdb->esc_name2 = check_pool_memory_size(mdb->esc_name2, mdb->pnl*2+1); + my_postgresql_copy_escape(mdb->esc_name2, mdb->path, mdb->pnl); + + if (ar->Digest == NULL || ar->Digest[0] == 0) { + digest = "0"; + } else { + digest = ar->Digest; + } + + len = Mmsg(mdb->cmd, "%u\t%s\t%s\t%s\t%s\t%s\n", + ar->FileIndex, edit_int64(ar->JobId, ed1), mdb->path, + mdb->fname, ar->attr, digest); + + do { + res = PQputCopyData(mdb->db, + mdb->cmd, + len); + } while (res == 0 && --count > 0); + + if (res == 1) { + Dmsg0(500, "ok\n"); + mdb->changes++; + mdb->status = 0; + } + + if (res <= 0) { + Dmsg0(500, "we failed\n"); + mdb->status = 1; + Mmsg1(&mdb->errmsg, _("error ending batch mode: %s\n"), PQerrorMessage(mdb->db)); + } + + Dmsg0(500, "my_postgresql_batch_insert finishing\n"); + + return mdb->status; +} + +/* + * Escape strings so that PostgreSQL is happy on COPY + * + * NOTE! len is the length of the old string. Your new + * string must be long enough (max 2*old+1) to hold + * the escaped output. 
+ */ +char *my_postgresql_copy_escape(char *dest, char *src, size_t len) +{ + /* we have to escape \t, \n, \r, \ */ + char c = '\0' ; + + while (len > 0 && *src) { + switch (*src) { + case '\n': + c = 'n'; + break; + case '\\': + c = '\\'; + break; + case '\t': + c = 't'; + break; + case '\r': + c = 'r'; + break; + default: + c = '\0' ; + } + + if (c) { + *dest = '\\'; + dest++; + *dest = c; + } else { + *dest = *src; + } + + len--; + src++; + dest++; + } + + *dest = '\0'; + return dest; +} + +char *my_pg_batch_lock_path_query = "BEGIN; LOCK TABLE Path IN SHARE ROW EXCLUSIVE MODE"; + + +char *my_pg_batch_lock_filename_query = "BEGIN; LOCK TABLE Filename IN SHARE ROW EXCLUSIVE MODE"; + +char *my_pg_batch_unlock_tables_query = "COMMIT"; + +char *my_pg_batch_fill_path_query = "INSERT INTO Path (Path) " + " SELECT a.Path FROM " + " (SELECT DISTINCT Path FROM batch) AS a " + " WHERE NOT EXISTS (SELECT Path FROM Path WHERE Path = a.Path) "; + +char *my_pg_batch_fill_filename_query = "INSERT INTO Filename (Name) " + " SELECT a.Name FROM " + " (SELECT DISTINCT Name FROM batch) as a " + " WHERE NOT EXISTS " + " (SELECT Name FROM Filename WHERE Name = a.Name)"; #endif /* HAVE_POSTGRESQL */ diff --git a/bacula/src/cats/protos.h b/bacula/src/cats/protos.h index 7a7d727e0f..4b29aed94d 100644 --- a/bacula/src/cats/protos.h +++ b/bacula/src/cats/protos.h @@ -67,6 +67,10 @@ int db_create_counter_record(JCR *jcr, B_DB *mdb, COUNTER_DBR *cr); bool db_create_device_record(JCR *jcr, B_DB *mdb, DEVICE_DBR *dr); bool db_create_storage_record(JCR *jcr, B_DB *mdb, STORAGE_DBR *sr); bool db_create_mediatype_record(JCR *jcr, B_DB *mdb, MEDIATYPE_DBR *mr); +int db_create_batch_file_record(JCR *jcr); +int db_batch_start(B_DB *mdb); +int db_batch_end(B_DB *mdb, const char *error); +int db_batch_insert(B_DB *mdb, ATTR_DBR *ar); /* delete.c */ int db_delete_pool_record(JCR *jcr, B_DB *db, POOL_DBR *pool_dbr); diff --git a/bacula/src/cats/sql_create.c b/bacula/src/cats/sql_create.c index e27d98151b..cac0e2f4fa 100644 --- a/bacula/src/cats/sql_create.c +++ b/bacula/src/cats/sql_create.c @@ -665,7 +665,206 @@ bool db_create_fileset_record(JCR *jcr, B_DB *mdb, FILESET_DBR *fsr) * }; */ +/* All db_batch_* functions are used to do bulk batch insert in File/Filename/Path + * tables. This code can be activated by adding "#define HAVE_BATCH_FILE_INSERT 1" + * in baconfig.h + * + * To sum up : + * - bulk load a temp table + * - insert missing filenames into filename with a single query (lock filenames + * - table before that to avoid possible duplicate inserts with concurrent update) + * - insert missing paths into path with another single query + * - then insert the join between the temp, filename and path tables into file. 
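
Aside (illustration only, not part of the patch): put end to end, and using the PostgreSQL variants of the query strings defined earlier in this patch, the sequence listed above works out, in SQL terms, to roughly:

   CREATE TEMPORARY TABLE batch (fileindex int, jobid int, path varchar,
                                 name varchar, lstat varchar, md5 varchar);
   -- one row per backed-up file is streamed in with COPY batch FROM STDIN
   BEGIN; LOCK TABLE Filename IN SHARE ROW EXCLUSIVE MODE;
   INSERT INTO Filename (Name)
          SELECT a.Name FROM (SELECT DISTINCT Name FROM batch) AS a
           WHERE NOT EXISTS (SELECT Name FROM Filename WHERE Name = a.Name);
   COMMIT;
   -- same lock/fill/commit for Path, then the join into File:
   INSERT INTO File (FileIndex, JobId, PathId, FilenameId, LStat, MD5)
          SELECT batch.FileIndex, batch.JobId, Path.PathId,
                 Filename.FilenameId, batch.LStat, batch.MD5
            FROM batch
            JOIN Path     ON (batch.Path = Path.Path)
            JOIN Filename ON (batch.Name = Filename.Name);
   DROP TABLE batch;
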
+ */ + +int db_batch_start(B_DB *mdb) +{ + return sql_query(mdb, + " CREATE TEMPORARY TABLE batch " + " (fileindex integer, " + " jobid integer, " + " path blob, " + " name blob, " + " lstat tinyblob, " + " md5 tinyblob) "); +} + +int db_batch_insert(B_DB *mdb, ATTR_DBR *ar) +{ + size_t len; + char *digest; + char ed1[50]; + + mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1); + db_escape_string(mdb->esc_name, mdb->fname, mdb->fnl); + + mdb->esc_name2 = check_pool_memory_size(mdb->esc_name2, mdb->pnl*2+1); + db_escape_string(mdb->esc_name2, mdb->path, mdb->pnl); + + if (ar->Digest == NULL || ar->Digest[0] == 0) { + digest = "0"; + } else { + digest = ar->Digest; + } + + len = Mmsg(mdb->cmd, "INSERT INTO batch VALUES (%u,%s,'%s','%s','%s','%s')", + ar->FileIndex, edit_int64(ar->JobId,ed1), mdb->path, + mdb->fname, ar->attr, digest); + + sql_query(mdb, mdb->cmd); + + return mdb->status; +} + +/* set error to something to abort operation */ +int db_batch_end(B_DB *mdb, const char *error) +{ + + Dmsg0(50, "db_batch_end started"); + + if (mdb) { + mdb->status = 0; + return mdb->status; + } + return 0; +} + +int db_create_batch_file_record(JCR *jcr) +{ + Dmsg0(50,"db_create_file_record : no files"); + + if (!jcr->db_batch) { /* no files to backup ? */ + Dmsg0(50,"db_create_file_record : no files\n"); + return 0; + } + + if (sql_batch_end(jcr->db_batch, NULL)) { + Jmsg(jcr, M_FATAL, 0, "Bad batch end %s\n", jcr->db_batch->errmsg); + return 1; + } + + /* we have to lock tables */ + if (sql_query(jcr->db_batch, sql_batch_lock_path_query)) + { + Jmsg(jcr, M_FATAL, 0, "Can't lock Path table %s\n", jcr->db_batch->errmsg); + return 1; + } + + if (sql_query(jcr->db_batch, sql_batch_fill_path_query)) + { + Jmsg(jcr, M_FATAL, 0, "Can't fill Path table %s\n",jcr->db_batch->errmsg); + sql_query(jcr->db_batch, sql_batch_unlock_tables_query); + return 1; + } + + if (sql_query(jcr->db_batch, sql_batch_unlock_tables_query)) + { + Jmsg(jcr, M_FATAL, 0, "Can't unlock Path table %s\n", jcr->db_batch->errmsg); + return 1; + } + + /* we have to lock tables */ + if (sql_query(jcr->db_batch, sql_batch_lock_filename_query)) + { + Jmsg(jcr, M_FATAL, 0, "Can't lock Filename table %s\n", jcr->db_batch->errmsg); + return 1; + } + + if (sql_query(jcr->db_batch, sql_batch_fill_filename_query)) + { + Jmsg(jcr,M_FATAL,0,"Can't fill Filename table %s\n",jcr->db_batch->errmsg); + sql_query(jcr->db_batch, sql_batch_unlock_tables_query); + return 1; + } + + if (sql_query(jcr->db_batch, sql_batch_unlock_tables_query)) { + Jmsg(jcr, M_FATAL, 0, "Can't unlock Filename table %s\n", jcr->db_batch->errmsg); + return 1; + } + + if (sql_query(jcr->db_batch, + " INSERT INTO File (FileIndex, JobId, PathId, FilenameId, LStat, MD5)" + " SELECT batch.FileIndex, batch.JobId, Path.PathId, " + " Filename.FilenameId,batch.LStat, batch.MD5 " + " FROM batch " + " JOIN Path ON (batch.Path = Path.Path) " + " JOIN Filename ON (batch.Name = Filename.Name) ")) + { + Jmsg(jcr, M_FATAL, 0, "Can't fill File table %s\n", jcr->db_batch->errmsg); + return 1; + } + sql_query(jcr->db_batch, "DROP TABLE batch"); + + return 0; +} + +#ifdef HAVE_BATCH_FILE_INSERT +/* + * Create File record in B_DB + * + * In order to reduce database size, we store the File attributes, + * the FileName, and the Path separately. In principle, there + * is a single FileName record and a single Path record, no matter + * how many times it occurs. This is this subroutine, we separate + * the file and the path and fill temporary tables with this three records. 
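
Aside (illustration only, not part of the patch): a concrete example of the split performed by split_path_and_file() below, with the values assumed here for illustration. A file sent by the FD as /etc/passwd would be queued for the temporary batch table as

   ar->fname    "/etc/passwd"     as received from the File daemon
   mdb->path    "/etc/"           row destined for the Path table
   mdb->fname   "passwd"          row destined for the Filename table

after which sql_batch_insert(mdb, ar) queues the single batch row, as wired up by the sql_batch_* macros (PQputCopyData on PostgreSQL, an INSERT INTO batch on the other backends).
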
+ */ +int db_create_file_attributes_record(JCR *jcr, B_DB *_mdb, ATTR_DBR *ar) +{ + + Dmsg1(dbglevel, "Fname=%s\n", ar->fname); + Dmsg0(dbglevel, "put_file_into_catalog\n"); + + if (!jcr->db_batch) { + jcr->db_batch = db_init_database(jcr, + jcr->db->db_name, + jcr->db->db_user, + jcr->db->db_password, + jcr->db->db_address, + jcr->db->db_port, + jcr->db->db_socket, + 1 /* multi_db = true */); + + if (!jcr->db_batch || !db_open_database(jcr, jcr->db_batch)) { + Jmsg(jcr, M_FATAL, 0, _("Could not open database \"%s\".\n"), + jcr->db->db_name); + if (jcr->db_batch) { + Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db_batch)); + } + return 0; + } + + sql_batch_start(jcr->db_batch); + } + + B_DB *mdb = jcr->db_batch; + + /* + * Make sure we have an acceptable attributes record. + */ + if (!(ar->Stream == STREAM_UNIX_ATTRIBUTES || + ar->Stream == STREAM_UNIX_ATTRIBUTES_EX)) { + Mmsg1(&mdb->errmsg, _("Attempt to put non-attributes into catalog. Stream=%d\n"), + ar->Stream); + Jmsg(jcr, M_ERROR, 0, "%s", mdb->errmsg); + return 0; + } + + split_path_and_file(jcr, mdb, ar->fname); + + +/* + if (jcr->changes > 100000) { + sql_batch_end(mdb, NULL); + sql_batch_start(mdb); + jcr->changes = 0; + } +*/ + + return (sql_batch_insert(mdb, ar) == 0); +} + +#else /* ! HAVE_BATCH_FILE_INSERT */ /* * Create File record in B_DB @@ -722,6 +921,8 @@ bail_out: return 0; } +#endif /* ! HAVE_BATCH_FILE_INSERT */ + /* * This is the master File entry containing the attributes. * The filename and path records have already been created. diff --git a/bacula/src/cats/sql_find.c b/bacula/src/cats/sql_find.c index f1e6f4cae1..a9438ca6cb 100644 --- a/bacula/src/cats/sql_find.c +++ b/bacula/src/cats/sql_find.c @@ -283,7 +283,7 @@ db_find_next_volume(JCR *jcr, B_DB *mdb, int item, bool InChanger, MEDIA_DBR *mr "VolBytes,VolMounts,VolErrors,VolWrites,MaxVolBytes,VolCapacityBytes," "VolRetention,VolUseDuration,MaxVolJobs,MaxVolFiles,Recycle,Slot," "FirstWritten,LastWritten,VolStatus,InChanger,VolParts," - "LabelType " + "LabelType,VolReadTime,VolWriteTime " "FROM Media WHERE PoolId=%s AND MediaType='%s' AND VolStatus IN ('Full'," "'Recycle','Purged','Used','Append') AND Enabled=1 " "ORDER BY LastWritten LIMIT 1", @@ -308,7 +308,7 @@ db_find_next_volume(JCR *jcr, B_DB *mdb, int item, bool InChanger, MEDIA_DBR *mr "VolBytes,VolMounts,VolErrors,VolWrites,MaxVolBytes,VolCapacityBytes," "VolRetention,VolUseDuration,MaxVolJobs,MaxVolFiles,Recycle,Slot," "FirstWritten,LastWritten,VolStatus,InChanger,VolParts," - "LabelType " + "LabelType,VolReadTime,VolWriteTime " "FROM Media WHERE PoolId=%s AND MediaType='%s' AND Enabled=1 " "AND VolStatus='%s' " "%s " @@ -371,6 +371,8 @@ db_find_next_volume(JCR *jcr, B_DB *mdb, int item, bool InChanger, MEDIA_DBR *mr mr->InChanger = str_to_int64(row[20]); mr->VolParts = str_to_int64(row[21]); mr->LabelType = str_to_int64(row[22]); + mr->VolReadTime = str_to_uint64(row[23]); + mr->VolWriteTime = str_to_uint64(row[24]); mr->Enabled = 1; /* ensured via query */ sql_free_result(mdb); diff --git a/bacula/src/cats/sql_get.c b/bacula/src/cats/sql_get.c index 9176853496..9174a14d1a 100644 --- a/bacula/src/cats/sql_get.c +++ b/bacula/src/cats/sql_get.c @@ -876,7 +876,7 @@ bool db_get_media_record(JCR *jcr, B_DB *mdb, MEDIA_DBR *mr) "MaxVolFiles,Recycle,Slot,FirstWritten,LastWritten,InChanger," "EndFile,EndBlock,VolParts,LabelType,LabelDate,StorageId," "Enabled,LocationId,RecycleCount,InitialWrite," - "ScratchPoolId,RecyclePoolId " + "ScratchPoolId,RecyclePoolId,VolReadTime,VolWriteTime " "FROM Media WHERE 
MediaId=%s", edit_int64(mr->MediaId, ed1)); } else { /* find by name */ @@ -886,7 +886,7 @@ bool db_get_media_record(JCR *jcr, B_DB *mdb, MEDIA_DBR *mr) "MaxVolFiles,Recycle,Slot,FirstWritten,LastWritten,InChanger," "EndFile,EndBlock,VolParts,LabelType,LabelDate,StorageId," "Enabled,LocationId,RecycleCount,InitialWrite," - "ScratchPoolId,RecyclePoolId " + "ScratchPoolId,RecyclePoolId,VolReadTime,VolWriteTime " "FROM Media WHERE VolumeName='%s'", mr->VolumeName); } @@ -942,6 +942,8 @@ bool db_get_media_record(JCR *jcr, B_DB *mdb, MEDIA_DBR *mr) mr->InitialWrite = (time_t)str_to_utime(mr->cInitialWrite); mr->ScratchPoolId = str_to_int64(row[33]); mr->RecyclePoolId = str_to_int64(row[34]); + mr->VolReadTime = str_to_int64(row[35]); + mr->VolWriteTime = str_to_int64(row[36]); ok = true; } diff --git a/bacula/src/cats/sqlite.c b/bacula/src/cats/sqlite.c index d5aac93a5a..6c9b7f498a 100644 --- a/bacula/src/cats/sqlite.c +++ b/bacula/src/cats/sqlite.c @@ -108,6 +108,7 @@ db_init_database(JCR *jcr, const char *db_name, const char *db_user, const char mdb->fname = get_pool_memory(PM_FNAME); mdb->path = get_pool_memory(PM_FNAME); mdb->esc_name = get_pool_memory(PM_FNAME); + mdb->esc_name2 = get_pool_memory(PM_FNAME); mdb->allow_transactions = mult_db_connections; qinsert(&db_list, &mdb->bq); /* put db in list */ V(mutex); @@ -215,6 +216,7 @@ db_close_database(JCR *jcr, B_DB *mdb) free_pool_memory(mdb->fname); free_pool_memory(mdb->path); free_pool_memory(mdb->esc_name); + free_pool_memory(mdb->esc_name2); if (mdb->db_name) { free(mdb->db_name); } @@ -435,4 +437,16 @@ SQL_FIELD *my_sqlite_fetch_field(B_DB *mdb) return mdb->fields[mdb->field++]; } +char *my_sqlite_batch_lock_query = "BEGIN"; +char *my_sqlite_batch_unlock_query = "COMMIT"; +char *my_sqlite_batch_fill_path_query = "INSERT INTO Path (Path) " + " SELECT DISTINCT Path FROM batch " + " EXCEPT SELECT Path FROM Path "; + +char *my_sqlite_batch_fill_filename_query = "INSERT INTO Filename (Name) " + " SELECT DISTINCT Name FROM batch " + " EXCEPT SELECT Name FROM Filename "; + + + #endif /* HAVE_SQLITE */ diff --git a/bacula/src/dird/backup.c b/bacula/src/dird/backup.c index 8b9dfd8db2..480a5918fb 100644 --- a/bacula/src/dird/backup.c +++ b/bacula/src/dird/backup.c @@ -233,6 +233,9 @@ bool do_backup(JCR *jcr) /* Pickup Job termination data */ stat = wait_for_job_termination(jcr); +#ifdef HAVE_BATCH_FILE_INSERT + db_create_batch_file_record(jcr); /* used by bulk batch file insert */ +#endif if (stat == JS_Terminated) { backup_cleanup(jcr, stat); return true; diff --git a/bacula/src/dird/catreq.c b/bacula/src/dird/catreq.c index eeae8ed4e5..87e4293277 100644 --- a/bacula/src/dird/catreq.c +++ b/bacula/src/dird/catreq.c @@ -280,7 +280,7 @@ void catalog_request(JCR *jcr, BSOCK *bs) mr.VolWriteTime = sdmr.VolWriteTime; mr.VolParts = sdmr.VolParts; bstrncpy(mr.VolStatus, sdmr.VolStatus, sizeof(mr.VolStatus)); - if (jcr->wstore->StorageId) { + if (jcr->wstore && jcr->wstore->StorageId) { mr.StorageId = jcr->wstore->StorageId; } diff --git a/bacula/src/dird/dird.c b/bacula/src/dird/dird.c index 5081f08561..c4b4433d8e 100644 --- a/bacula/src/dird/dird.c +++ b/bacula/src/dird/dird.c @@ -269,6 +269,8 @@ int main (int argc, char *argv[]) init_job_server(director->MaxConcurrentJobs); +// init_device_resources(); + Dmsg0(200, "wait for next job\n"); /* Main loop -- call scheduler to get next job to run */ while ( (jcr = wait_for_next_job(runjob)) ) { diff --git a/bacula/src/dird/getmsg.c b/bacula/src/dird/getmsg.c index 7d50a1b944..4a7b758c93 100644 --- 
a/bacula/src/dird/getmsg.c +++ b/bacula/src/dird/getmsg.c @@ -62,7 +62,9 @@ static char Device_update[] = "DevUpd Job=%127s " "open=%d labeled=%d offline=%d " "reserved=%d max_writers=%d " "autoselect=%d autochanger=%d " - "changer_name=%127s media_type=%127s volume_name=%127s\n"; + "changer_name=%127s media_type=%127s volume_name=%127s " + "DevReadTime=%d DevWriteTime=%d DevReadBytes=%d " + "DevWriteBytes=%d\n"; #endif @@ -243,6 +245,7 @@ int bget_dirmsg(BSOCK *bs) int dev_open, dev_append, dev_read, dev_labeled; int dev_offline, dev_autochanger, dev_autoselect; int dev_num_writers, dev_max_writers, dev_reserved; + uint64_t dev_read_time, dev_write_time, dev_write_bytes, dev_read_bytes; uint64_t dev_PoolId; Dmsg1(100, "msg); if (sscanf(bs->msg, Device_update, @@ -253,7 +256,9 @@ int bget_dirmsg(BSOCK *bs) &dev_max_writers, &dev_autoselect, &dev_autochanger, changer_name.c_str(), media_type.c_str(), - volume_name.c_str()) != 15) { + volume_name.c_str(), + &dev_read_time, &dev_write_time, &dev_read_bytes, + &dev_write_bytes) != 19) { Emsg1(M_ERROR, 0, _("Malformed message: %s\n"), bs->msg); } else { unbash_spaces(dev_name); @@ -283,6 +288,10 @@ int bget_dirmsg(BSOCK *bs) dev->max_writers = dev_max_writers; dev->reserved = dev_reserved; dev->found = true; + dev->DevReadTime = dev_read_time; /* TODO : have to update database */ + dev->DevWriteTime = dev_write_time; + dev->DevReadBytes = dev_read_bytes; + dev->DevWriteBytes = dev_write_bytes; } continue; } diff --git a/bacula/src/dird/job.c b/bacula/src/dird/job.c index 806888779b..b158ce2d1f 100644 --- a/bacula/src/dird/job.c +++ b/bacula/src/dird/job.c @@ -886,6 +886,10 @@ void dird_free_jcr(JCR *jcr) db_close_database(jcr, jcr->db); jcr->db = NULL; } + if (jcr->db_batch) { + db_close_database(jcr, jcr->db_batch); + jcr->db_batch = NULL; + } if (jcr->stime) { Dmsg0(200, "Free JCR stime\n"); free_pool_memory(jcr->stime); diff --git a/bacula/src/dird/protos.h b/bacula/src/dird/protos.h index 793c259e95..a7870e7fad 100644 --- a/bacula/src/dird/protos.h +++ b/bacula/src/dird/protos.h @@ -195,6 +195,9 @@ bool is_volume_name_legal(UAContext *ua, const char *name); int get_num_drives_from_SD(UAContext *ua); void update_slots(UAContext *ua); +/* ua_update.c */ +void update_vol_pool(UAContext *ua, char *val, MEDIA_DBR *mr, POOL_DBR *opr); + /* ua_output.c */ void prtit(void *ctx, const char *msg); int complete_jcr_for_job(JCR *jcr, JOB *job, POOL *pool); diff --git a/bacula/src/dird/ua_cmds.c b/bacula/src/dird/ua_cmds.c index 268e5158b2..93b3357d33 100644 --- a/bacula/src/dird/ua_cmds.c +++ b/bacula/src/dird/ua_cmds.c @@ -384,10 +384,6 @@ static int cancel_cmd(UAContext *ua, const char *cmd) JCR *jcr = NULL; char JobName[MAX_NAME_LENGTH]; - if (!open_client_db(ua)) { - return 1; - } - for (i=1; iargc; i++) { if (strcasecmp(ua->argk[i], NT_("jobid")) == 0) { uint32_t JobId; @@ -434,28 +430,38 @@ static int cancel_cmd(UAContext *ua, const char *cmd) * throw up a list and ask the user to select one. 
*/ char buf[1000]; + int tjobs = 0; /* total # number jobs */ /* Count Jobs running */ foreach_jcr(jcr) { if (jcr->JobId == 0) { /* this is us */ continue; } + tjobs++; /* count of all jobs */ if (!acl_access_ok(ua, Job_ACL, jcr->job->name())) { continue; /* skip not authorized */ } - njobs++; + njobs++; /* count of authorized jobs */ } endeach_jcr(jcr); - if (njobs == 0) { - bsendmsg(ua, _("No Jobs running.\n")); + if (njobs == 0) { /* no authorized */ + if (tjobs == 0) { + bsendmsg(ua, _("No Jobs running.\n")); + } else { + bsendmsg(ua, _("None of your jobs are running.\n")); + } return 1; } + start_prompt(ua, _("Select Job:\n")); foreach_jcr(jcr) { char ed1[50]; if (jcr->JobId == 0) { /* this is us */ continue; } + if (!acl_access_ok(ua, Job_ACL, jcr->job->name())) { + continue; /* skip not authorized */ + } bsnprintf(buf, sizeof(buf), _("JobId=%s Job=%s"), edit_int64(jcr->JobId, ed1), jcr->Job); add_prompt(ua, buf); } @@ -472,7 +478,7 @@ static int cancel_cmd(UAContext *ua, const char *cmd) sscanf(buf, "JobId=%d Job=%127s", &njobs, JobName); jcr = get_jcr_by_full_name(JobName); if (!jcr) { - bsendmsg(ua, _("Job %s not found.\n"), JobName); + bsendmsg(ua, _("Job \"%s\" not found.\n"), JobName); return 1; } } diff --git a/bacula/src/dird/ua_purge.c b/bacula/src/dird/ua_purge.c index 91ddc1ea29..14c77edafe 100644 --- a/bacula/src/dird/ua_purge.c +++ b/bacula/src/dird/ua_purge.c @@ -13,7 +13,7 @@ /* Bacula® - The Network Backup Solution - Copyright (C) 2002-2006 Free Software Foundation Europe e.V. + Copyright (C) 2002-2007 Free Software Foundation Europe e.V. The main author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. @@ -605,6 +605,21 @@ bool mark_media_purged(UAContext *ua, MEDIA_DBR *mr) } pm_strcpy(jcr->VolumeName, mr->VolumeName); generate_job_event(jcr, "VolumePurged"); + /* + * If the RecyclePool is defined, move the volume there + */ + if (mr->RecyclePoolId && mr->RecyclePoolId != mr->PoolId) { + POOL_DBR oldpr, newpr; + memset(&oldpr, 0, sizeof(POOL_DBR)); + memset(&newpr, 0, sizeof(POOL_DBR)); + newpr.PoolId = mr->RecyclePoolId; + oldpr.PoolId = mr->PoolId; + if (db_get_pool_record(jcr, ua->db, &oldpr) && db_get_pool_record(jcr, ua->db, &newpr)) { + update_vol_pool(ua, newpr.Name, mr, &oldpr); + } else { + bsendmsg(ua, "%s", db_strerror(ua->db)); + } + } /* Send message to Job report, if it is a *real* job */ if (jcr && jcr->JobId > 0) { Jmsg1(jcr, M_INFO, 0, _("All records pruned from Volume \"%s\"; marking it \"Purged\"\n"), diff --git a/bacula/src/dird/ua_update.c b/bacula/src/dird/ua_update.c index 5e2688c38d..e81d67bfea 100644 --- a/bacula/src/dird/ua_update.c +++ b/bacula/src/dird/ua_update.c @@ -290,7 +290,7 @@ static void update_volslot(UAContext *ua, char *val, MEDIA_DBR *mr) } /* Modify the Pool in which this Volume is located */ -static void update_vol_pool(UAContext *ua, char *val, MEDIA_DBR *mr, POOL_DBR *opr) +void update_vol_pool(UAContext *ua, char *val, MEDIA_DBR *mr, POOL_DBR *opr) { POOL_DBR pr; POOLMEM *query; diff --git a/bacula/src/dird/verify.c b/bacula/src/dird/verify.c index 0997fb6fb4..674cdd8c58 100644 --- a/bacula/src/dird/verify.c +++ b/bacula/src/dird/verify.c @@ -756,7 +756,7 @@ static int missing_handler(void *ctx, int num_fields, char **row) } if (!jcr->fn_printed) { Jmsg(jcr, M_INFO, 0, "\n"); - Jmsg(jcr, M_INFO, 0, _("The following files are missing:\n")); + Jmsg(jcr, M_INFO, 0, _("The following files are in the Catalog but not on disk:\n")); jcr->fn_printed = true; } 
Jmsg(jcr, M_INFO, 0, " %s%s\n", row[0]?row[0]:"", row[1]?row[1]:""); diff --git a/bacula/src/filed/verify.c b/bacula/src/filed/verify.c index 14a667106d..86403d533f 100644 --- a/bacula/src/filed/verify.c +++ b/bacula/src/filed/verify.c @@ -1,15 +1,7 @@ -/* - * Bacula File Daemon verify.c Verify files. - * - * Kern Sibbald, October MM - * - * Version $Id$ - * - */ /* Bacula® - The Network Backup Solution - Copyright (C) 2000-2006 Free Software Foundation Europe e.V. + Copyright (C) 2000-2007 Free Software Foundation Europe e.V. The main author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. @@ -33,6 +25,14 @@ (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich, Switzerland, email:ftf@fsfeurope.org. */ +/* + * Bacula File Daemon verify.c Verify files. + * + * Kern Sibbald, October MM + * + * Version $Id$ + * + */ #include "bacula.h" #include "filed.h" @@ -102,6 +102,7 @@ static int verify_file(FF_PKT *ff_pkt, void *pkt, bool top_level) Dmsg2(30, "FT_LNK saving: %s -> %s\n", ff_pkt->fname, ff_pkt->link); break; case FT_DIRBEGIN: + jcr->num_files_examined--; /* correct file count */ return 1; /* ignored */ case FT_DIREND: Dmsg1(30, "FT_DIR saving: %s\n", ff_pkt->fname); @@ -145,7 +146,8 @@ static int verify_file(FF_PKT *ff_pkt, void *pkt, bool top_level) return 1; case FT_NORECURSE: Jmsg(jcr, M_SKIPPED, 1, _(" Recursion turned off. Directory skipped: %s\n"), ff_pkt->fname); - return 1; + ff_pkt->type = FT_DIREND; /* directory entry was backed up */ + break; case FT_NOFSCHG: Jmsg(jcr, M_SKIPPED, 1, _(" File system change prohibited. Directory skipped: %s\n"), ff_pkt->fname); return 1; diff --git a/bacula/src/jcr.h b/bacula/src/jcr.h index cb3cb885f0..9e48272f5f 100644 --- a/bacula/src/jcr.h +++ b/bacula/src/jcr.h @@ -184,6 +184,7 @@ public: bool cached_attribute; /* set if attribute is cached */ POOLMEM *attr; /* Attribute string from SD */ B_DB *db; /* database pointer */ + B_DB *db_batch; /* database pointer for batch insert */ ATTR_DBR *ar; /* DB attribute record */ /* Daemon specific part of JCR */ diff --git a/bacula/src/lib/dlist.c b/bacula/src/lib/dlist.c index 1dd5be68d8..f89ba9c942 100644 --- a/bacula/src/lib/dlist.c +++ b/bacula/src/lib/dlist.c @@ -79,7 +79,7 @@ void dlist::prepend(void *item) void dlist::insert_before(void *item, void *where) { - dlink *where_link = (dlink *)((char *)where+loffset); + dlink *where_link = get_link(where); set_next(item, where); set_prev(item, where_link->prev); @@ -96,7 +96,7 @@ void dlist::insert_before(void *item, void *where) void dlist::insert_after(void *item, void *where) { - dlink *where_link = (dlink *)((char *)where+loffset); + dlink *where_link = get_link(where); set_next(item, where_link->next); set_prev(item, where); @@ -290,7 +290,7 @@ void *dlist::binary_search(void *item, int compare(void *item1, void *item2)) void dlist::remove(void *item) { void *xitem; - dlink *ilink = (dlink *)(((char *)item)+loffset); /* item's link */ + dlink *ilink = get_link(item); /* item's link */ if (item == head) { head = ilink->next; if (head) { @@ -316,20 +316,20 @@ void dlist::remove(void *item) } } -void * dlist::next(const void *item) const +void *dlist::next(void *item) { if (item == NULL) { return head; } - return ((dlink *)(((char *)item)+loffset))->next; + return get_next(item); } -void * dlist::prev(const void *item) const +void *dlist::prev(void *item) { if (item == NULL) { return tail; } - return ((dlink *)(((char *)item)+loffset))->prev; + return get_prev(item); } @@ -337,7 
+337,7 @@ void * dlist::prev(const void *item) const void dlist::destroy() { for (void *n=head; n; ) { - void *ni = ((dlink *)(((char *)n)+loffset))->next; + void *ni = get_next(n); free(n); n = ni; } @@ -521,7 +521,7 @@ int main() * it. */ dlist chain; - dlistString *node; + chain.append(new_dlistString("This is a long test line")); #define CNT 26 printf("append %d dlistString items\n", CNT*CNT*CNT); strcpy(buf, "ZZZ"); @@ -533,8 +533,7 @@ int main() if ((count & 0x3FF) == 0) { Dmsg1(000, "At %d\n", count); } - node = new_dlistString(buf); - chain.append(node); + chain.append(new_dlistString(buf)); buf[1]--; } buf[1] = 'Z'; @@ -544,6 +543,7 @@ int main() buf[0]--; } printf("dlistString items appended, walking chain\n"); + dlistString *node; foreach_dlist(node, &chain) { printf("%s\n", node->c_str()); } diff --git a/bacula/src/lib/dlist.h b/bacula/src/lib/dlist.h index e78b1dc906..f24efc5bf6 100644 --- a/bacula/src/lib/dlist.h +++ b/bacula/src/lib/dlist.h @@ -93,8 +93,8 @@ public: void remove(void *item); bool empty() const; int size() const; - void *next(const void *item) const; - void *prev(const void *item) const; + void *next(void *item); + void *prev(void *item); void destroy(); void *first() const; void *last() const; @@ -165,7 +165,7 @@ inline void *dlist::get_next(void *item) inline dlink *dlist::get_link(void *item) { - return (dlink *)((dlink *)(((char *)item)+loffset)); + return (dlink *)(((char *)item)+loffset); } diff --git a/bacula/src/qt-console/main.ui b/bacula/src/qt-console/main.ui index 747b73ae34..33e7cf90ea 100644 --- a/bacula/src/qt-console/main.ui +++ b/bacula/src/qt-console/main.ui @@ -50,6 +50,9 @@ 16777215 + + Qt::ClickFocus + Page Selector @@ -109,6 +112,9 @@ 0 + + Qt::ClickFocus + -1 diff --git a/bacula/src/qt-console/run/run.cpp b/bacula/src/qt-console/run/run.cpp index a733dc3879..7711c98c7c 100644 --- a/bacula/src/qt-console/run/run.cpp +++ b/bacula/src/qt-console/run/run.cpp @@ -38,42 +38,47 @@ runDialog::runDialog(Console *console) { + QDateTime dt; + m_console = console; setupUi(this); - storageCombo->addItems(console->storage_list); + jobCombo->addItems(console->job_list); + filesetCombo->addItems(console->fileset_list); + levelCombo->addItems(console->level_list); + clientCombo->addItems(console->client_list); poolCombo->addItems(console->pool_list); + storageCombo->addItems(console->storage_list); + dateTimeEdit->setDateTime(dt.currentDateTime()); this->show(); } void runDialog::accept() { - printf("Storage=%s\n" - "Pool=%s\n", - storageCombo->currentText().toUtf8().data(), - poolCombo->currentText().toUtf8().data()); - this->hide(); - delete this; - -#ifdef xxx - volume = get_entry_text(label_dialog, "label_entry_volume"); - - slot = get_spin_text(label_dialog, "label_slot"); - - if (!pool || !storage || !volume || !(*volume)) { - set_status_ready(); - return; - } + char cmd[1000]; + this->hide(); + bsnprintf(cmd, sizeof(cmd), - "label volume=\"%s\" pool=\"%s\" storage=\"%s\" slot=%s\n", - volume, pool, storage, slot); - write_director(cmd); - set_text(cmd, strlen(cmd)); -#endif + "run job=\"%s\" fileset=\"%s\" level=%s client=\"%s\" pool=\"%s\" " + "when=\"%s\" storage=\"%s\" priority=\"%d\" yes\n", + jobCombo->currentText().toUtf8().data(), + filesetCombo->currentText().toUtf8().data(), + levelCombo->currentText().toUtf8().data(), + clientCombo->currentText().toUtf8().data(), + poolCombo->currentText().toUtf8().data(), +// dateTimeEdit->textFromDateTime(dateTimeEdit->dateTime()).toUtf8().data(), + "", + storageCombo->currentText().toUtf8().data(), + 
prioritySpin->value()); + +// m_console->write(cmd); + m_console->set_text(cmd); + delete this; } + void runDialog::reject() { - printf("Rejected\n"); + mainWin->set_status(" Canceled"); this->hide(); delete this; } diff --git a/bacula/src/qt-console/run/run.h b/bacula/src/qt-console/run/run.h index 3d33268931..4d22b5fdf4 100644 --- a/bacula/src/qt-console/run/run.h +++ b/bacula/src/qt-console/run/run.h @@ -18,6 +18,7 @@ public slots: void reject(); private: + Console *m_console; }; diff --git a/bacula/src/qt-console/run/run.ui b/bacula/src/qt-console/run/run.ui index 8976c02903..4924a28916 100644 --- a/bacula/src/qt-console/run/run.ui +++ b/bacula/src/qt-console/run/run.ui @@ -12,72 +12,86 @@ Run Dialog - + 9 6 - - + + 0 6 - - - - - - - - 5 - 0 - 0 - 0 - + + + + Qt::Horizontal - + - 150 - 0 + 71 + 21 - + - - - - Bootstrap: - - - true - - - bootstrap + + + + + 16777215 + 30 + - - - - - Client: - - - clientCombo + <h3>Run a Job</h3> - - - - 10000 + + + + Qt::Horizontal - + + + 81 + 20 + + + + + + + + + Qt::Vertical + + + QSizePolicy::Maximum + + + + 572 + 16 + + + + + + + + 0 + + + 6 + @@ -88,34 +102,34 @@ - - + + - Storage: + Messages: - storageCombo + messagesCombo - - - - - + + + + Qt::Vertical + + + + 20 + 171 + + + - - - - Pool: - - - poolCombo - - + + @@ -127,75 +141,125 @@ - - + + - Level: + FileSet: - levelCombo + filesetCombo - - - - Qt::Vertical + + + + false - + - 20 - 171 + 200 + 0 - - - - - - FileSet: - - - filesetCombo + + false - + + + + 0 + 2 + 0 + 2000 + 1 + 1 + + + + yyyy-mm-dd hh:mm:ss + + + true + + + + + + + 10000 + + + 1 + + + 10 + + - - + + + + + + + + - Priority: + Pool: - prioritySpin + poolCombo - - + + - Messages: + Bootstrap: + + + true - messagesCombo + bootstrap - - + + + + + 5 + 0 + 0 + 0 + + - 200 + 150 0 + + + + Client: + + + clientCombo + + + @@ -206,6 +270,36 @@ + + + + Storage: + + + storageCombo + + + + + + + Level: + + + levelCombo + + + + + + + Priority: + + + prioritySpin + + + @@ -214,7 +308,7 @@ - + Qt::Vertical @@ -230,7 +324,7 @@ - + Qt::Horizontal @@ -240,71 +334,6 @@ - - - - Qt::Vertical - - - QSizePolicy::Maximum - - - - 572 - 16 - - - - - - - - 0 - - - 6 - - - - - Qt::Horizontal - - - - 71 - 21 - - - - - - - - - 16777215 - 30 - - - - <h3>Run a Job</h3> - - - - - - - Qt::Horizontal - - - - 81 - 20 - - - - - - diff --git a/bacula/src/stored/acquire.c b/bacula/src/stored/acquire.c index 69c8fa2049..5748591267 100644 --- a/bacula/src/stored/acquire.c +++ b/bacula/src/stored/acquire.c @@ -463,8 +463,8 @@ bool release_device(DCR *dcr) if (dev->can_read()) { dev->clear_read(); /* clear read bit */ - - /******FIXME**** send read volume usage statistics to director */ + Dmsg0(100, "dir_update_vol_info. Release0\n"); + dir_update_volume_info(dcr, false); /* send Volume info to Director */ } else if (dev->num_writers > 0) { /* diff --git a/bacula/src/stored/ansi_label.c b/bacula/src/stored/ansi_label.c index a686d75b3c..3e1489278b 100644 --- a/bacula/src/stored/ansi_label.c +++ b/bacula/src/stored/ansi_label.c @@ -87,7 +87,7 @@ int read_ansi_ibm_label(DCR *dcr) /* Read a maximum of 5 records VOL1, HDR1, ... 
HDR4 */ for (i=0; i < 6; i++) { do { - stat = tape_read(dev->fd, label, sizeof(label)); + stat = dev->read(label, sizeof(label)); } while (stat == -1 && errno == EINTR); if (stat < 0) { berrno be; @@ -309,7 +309,7 @@ bool write_ansi_ibm_labels(DCR *dcr, int type, const char *VolName) } else { label[79] = '3'; /* ANSI label flag */ } - stat = tape_write(dev->fd, label, sizeof(label)); + stat = dev->write(label, sizeof(label)); if (stat != sizeof(label)) { berrno be; Jmsg1(jcr, M_FATAL, 0, _("Could not write ANSI VOL1 label. ERR=%s\n"), @@ -341,7 +341,7 @@ bool write_ansi_ibm_labels(DCR *dcr, int type, const char *VolName) * This could come at the end of a tape, ignore * EOT errors. */ - stat = tape_write(dev->fd, label, sizeof(label)); + stat = dev->write(label, sizeof(label)); if (stat != sizeof(label)) { berrno be; if (stat == -1) { @@ -370,7 +370,7 @@ bool write_ansi_ibm_labels(DCR *dcr, int type, const char *VolName) label[4] = 'V'; ascii_to_ebcdic(label, label, sizeof(label)); } - stat = tape_write(dev->fd, label, sizeof(label)); + stat = dev->write(label, sizeof(label)); if (stat != sizeof(label)) { berrno be; if (stat == -1) { diff --git a/bacula/src/stored/askdir.c b/bacula/src/stored/askdir.c index 6525bfe7de..25a0e55c06 100644 --- a/bacula/src/stored/askdir.c +++ b/bacula/src/stored/askdir.c @@ -321,11 +321,6 @@ bool dir_update_volume_info(DCR *dcr, bool label) Pmsg0(000, _("NULL Volume name. This shouldn't happen!!!\n")); return false; } - if (dev->can_read()) { - Jmsg0(jcr, M_FATAL, 0, _("Attempt to update_volume_info in read mode!!!\n")); - Pmsg0(000, _("Attempt to update_volume_info in read mode!!!\n")); - return false; - } /* Lock during Volume update */ P(vol_info_mutex); diff --git a/bacula/src/stored/block.c b/bacula/src/stored/block.c index 2c3f08e56c..1e4bb96451 100644 --- a/bacula/src/stored/block.c +++ b/bacula/src/stored/block.c @@ -537,11 +537,8 @@ bool write_block_to_dev(DCR *dcr) bmicrosleep(5, 0); /* pause a bit if busy or lots of errors */ dev->clrerror(-1); } - if (dev->is_tape()) { - stat = tape_write(dev->fd, block->buf, (size_t)wlen); - } else { - stat = write(dev->fd, block->buf, (size_t)wlen); - } + stat = dev->write(block->buf, (size_t)wlen); + } while (stat == -1 && (errno == EBUSY || errno == EIO) && retry++ < 3); #ifdef DEBUG_BLOCK_ZEROING @@ -978,11 +975,8 @@ reread: bmicrosleep(10, 0); /* pause a bit if busy or lots of errors */ dev->clrerror(-1); } - if (dev->is_tape()) { - stat = tape_read(dev->fd, block->buf, (size_t)block->buf_len); - } else { - stat = read(dev->fd, block->buf, (size_t)block->buf_len); - } + stat = dev->read(block->buf, (size_t)block->buf_len); + } while (stat == -1 && (errno == EBUSY || errno == EINTR || errno == EIO) && retry++ < 3); if (stat < 0) { berrno be; diff --git a/bacula/src/stored/dev.c b/bacula/src/stored/dev.c index 491149be00..8ff9b6108c 100644 --- a/bacula/src/stored/dev.c +++ b/bacula/src/stored/dev.c @@ -31,7 +31,7 @@ /* Bacula® - The Network Backup Solution - Copyright (C) 2000-2006 Free Software Foundation Europe e.V. + Copyright (C) 2000-2007 Free Software Foundation Europe e.V. The main author of Bacula is Kern Sibbald, with contributions from many others, a complete list can be found in the file AUTHORS. 
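
Aside (illustration only, not part of the patch): the ansi_label.c and block.c hunks above stop calling tape_read()/tape_write() and the bare ::read()/::write() directly and instead go through new DEVICE::read()/DEVICE::write() methods, which are added further down in this diff of dev.c. A minimal sketch of the accounting those wrappers appear intended to perform, assuming the get_timer_count() calls (left commented out in the patch as applied) are enabled; note the timer granularity is microseconds, straight from gettimeofday():

   ssize_t DEVICE::read(void *buf, size_t len)
   {
      ssize_t n;

      get_timer_count();                    /* arm the usec timer */
      n = is_tape() ? tape_read(fd, buf, len) : ::read(fd, buf, len);
      last_tick = get_timer_count();        /* elapsed usec of this call */

      DevReadTime += last_tick;             /* per-device running total */
      VolCatInfo.VolReadTime += last_tick;  /* per-volume total reported to the DIR */
      if (n > 0) {                          /* skip errors */
         DevReadBytes += n;
         VolCatInfo.VolCatRBytes += n;
      }
      return n;
   }
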
@@ -1325,7 +1325,7 @@ bool DEVICE::fsf(int num) mt_com.mt_count = 1; while (num-- && !at_eot()) { Dmsg0(100, "Doing read before fsf\n"); - if ((stat = tape_read(fd, (char *)rbuf, rbuf_len)) < 0) { + if ((stat = this->read((char *)rbuf, rbuf_len)) < 0) { if (errno == ENOMEM) { /* tape record exceeds buf len */ stat = rbuf_len; /* This is OK */ /* @@ -2192,6 +2192,68 @@ void DEVICE::edit_mount_codes(POOL_MEM &omsg, const char *imsg) } } +/* return the last timer interval (ms) */ +int DEVICE::get_timer_count() +{ + uint64_t old = last_timer; + struct timeval tv; + gettimeofday(&tv, NULL); + last_timer = tv.tv_usec + tv.tv_sec * 1000000; + + return last_timer - old; +} + +/* read from fd */ +ssize_t DEVICE::read(void *buf, size_t len) +{ + ssize_t read_len ; + +// get_timer_count(); + + if (this->is_tape()) { + read_len = tape_read(fd, buf, len); + } else { + read_len = ::read(fd, buf, len); + } + +// last_tick = get_timer_count(); + + DevReadTime += last_tick; + VolCatInfo.VolReadTime += last_tick; + + if (read_len > 0) { /* skip error */ + DevReadBytes += read_len; + VolCatInfo.VolCatRBytes += read_len; + } + + return read_len; +} + +/* write to fd */ +ssize_t DEVICE::write(const void *buf, size_t len) +{ + ssize_t write_len ; + +// get_timer_count(); + + if (this->is_tape()) { + write_len = tape_write(fd, buf, len); + } else { + write_len = ::write(fd, buf, len); + } + +// last_tick = get_timer_count(); + + DevWriteTime += last_tick; + VolCatInfo.VolWriteTime += last_tick; + + if (write_len > 0) { /* skip error */ + DevWriteBytes += write_len; + VolCatInfo.VolCatBytes += write_len; + } + + return write_len; +} /* Return the resource name for the device */ const char *DEVICE::name() const diff --git a/bacula/src/stored/dev.h b/bacula/src/stored/dev.h index 0ff099dc4c..183ba942bf 100644 --- a/bacula/src/stored/dev.h +++ b/bacula/src/stored/dev.h @@ -280,7 +280,17 @@ public: int rem_wait_sec; int num_wait; + uint64_t last_timer; /* used by read/write/seek to get stats (usec) */ + int last_tick; /* contains last read/write time (usec) */ + + uint64_t DevReadTime; + uint64_t DevWriteTime; + uint64_t DevWriteBytes; + uint64_t DevReadBytes; + /* Methods */ + int get_timer_count(); /* return the last timer interval (ms) */ + int has_cap(int cap) const { return capabilities & cap; } void clear_cap(int cap) { capabilities &= ~cap; } void set_cap(int cap) { capabilities |= cap; } @@ -363,6 +373,8 @@ public: bool truncate(DCR *dcr); /* in dev.c */ int open(DCR *dcr, int mode); /* in dev.c */ void term(void); /* in dev.c */ + ssize_t read(void *buf, size_t len); /* in dev.c */ + ssize_t write(const void *buf, size_t len); /* in dev.c */ bool rewind(DCR *dcr); /* in dev.c */ bool mount(int timeout); /* in dev.c */ bool unmount(int timeout); /* in dev.c */ diff --git a/bacula/src/version.h b/bacula/src/version.h index 81acf9e55f..63c9173121 100644 --- a/bacula/src/version.h +++ b/bacula/src/version.h @@ -3,9 +3,9 @@ */ #undef VERSION -#define VERSION "2.1.3" -#define BDATE "09 February 2007" -#define LSMDATE "09Feb07" +#define VERSION "2.1.4" +#define BDATE "10 February 2007" +#define LSMDATE "10Feb07" #define PROG_COPYRIGHT "Copyright (C) %d-2007 Free Software Foundation Europe e.V.\n" #define BYEAR "2007" /* year for copyright messages in progs */ diff --git a/bacula/technotes-2.1 b/bacula/technotes-2.1 index 37b7758d43..a70ce27f2a 100644 --- a/bacula/technotes-2.1 +++ b/bacula/technotes-2.1 @@ -1,6 +1,16 @@ Technical notes on version 2.1 General: a +10Feb07 +kes Apply Eric's scratch patch that moves a 
purged Volume to + the RecyclePool. Question: how is RecyclePool set? what + happens to the ScratchPool? +kes Apply Eric's media patch that collects read/write media + times as well as byte counts. This patch requires a + simultaneous upgrade of the DIR and SD. Note, there + should be some way to turn of timing. I'm not sure + times are in Bacula units. +kes Apply Eric's batch-insert patch. 09Feb07 kes Update projects list. 08Feb07 -- 2.39.5
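
Editor's end-note (not part of the patch): two practical points for anyone applying this commit. First, the bulk File/Filename/Path insert path is compile-time only; per the comment added in sql_create.c it stays dormant until the symbol below is defined in baconfig.h, after which backup.c calls db_create_batch_file_record() once job termination has been picked up:

   /* in baconfig.h, as described in the sql_create.c comment above */
   #define HAVE_BATCH_FILE_INSERT 1

Second, the DIR and SD really must be upgraded together, as the technotes say: the Device_update format in dird/getmsg.c now expects nineteen sscanf fields, including the four new DevReadTime, DevWriteTime, DevReadBytes and DevWriteBytes counters, so a DevUpd message from an older SD fails the field count and is logged as a malformed message.
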