Priority:
+- Ensure that moving a purged Volume in ua_purge.c to the RecyclePool
+ does the right thing.
- Why doesn't @"xxx abc" work in a conf file?
- Figure out some way to "automatically" backup conf changes.
- Look at using posix_fadvise(2) for backups -- see bug #751.
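  A minimal sketch of that idea (illustrative only -- backup_read() and its use
  of pread() are assumed names, not existing Bacula code): after each chunk is
  read, posix_fadvise(POSIX_FADV_DONTNEED) hints that the kernel may drop those
  pages, so backup traffic does not flood the page cache.

    #define _XOPEN_SOURCE 600             /* for posix_fadvise() and pread() */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Read one chunk of a file being backed up and tell the kernel the
     * range will not be re-read. The advice is only a hint; the read
     * succeeds even if the kernel ignores it. */
    static ssize_t backup_read(int fd, void *buf, size_t len, off_t offset)
    {
       ssize_t n = pread(fd, buf, len, offset);
       if (n > 0) {
          int rc = posix_fadvise(fd, offset, n, POSIX_FADV_DONTNEED);
          if (rc != 0) {                   /* advisory only; just report it */
             fprintf(stderr, "posix_fadvise: %s\n", strerror(rc));
          }
       }
       return n;
    }
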
+++ /dev/null
-diff -Naur cvs/src/cats/cats.h my/src/cats/cats.h
---- cvs/src/cats/cats.h 2006-12-06 15:11:53.000000000 +0100
-+++ my/src/cats/cats.h 2007-01-10 19:21:42.000000000 +0100
-@@ -141,6 +141,7 @@
- POOLMEM *fname; /* Filename only */
- POOLMEM *path; /* Path only */
- POOLMEM *esc_name; /* Escaped file/path name */
-+ POOLMEM *esc_name2; /* Escaped path name */
- int fnl; /* file name length */
- int pnl; /* path name length */
- };
-@@ -170,8 +171,14 @@
- #define sql_fetch_field(x) my_sqlite_fetch_field(x)
- #define sql_num_fields(x) ((x)->ncolumn)
- #define SQL_ROW char**
--
--
-+#define sql_batch_start(x) db_batch_start(x)
-+#define sql_batch_end(x,y) db_batch_end(x,y)
-+#define sql_batch_insert(x,y) db_batch_insert(x,y)
-+#define sql_batch_lock_path_query my_sqlite_batch_lock_query
-+#define sql_batch_lock_filename_query my_sqlite_batch_lock_query
-+#define sql_batch_unlock_tables_query my_sqlite_batch_unlock_query
-+#define sql_batch_fill_filename_query my_sqlite_batch_fill_filename_query
-+#define sql_batch_fill_path_query my_sqlite_batch_fill_path_query
-
- /* In cats/sqlite.c */
- void my_sqlite_free_table(B_DB *mdb);
-@@ -179,6 +186,10 @@
- int my_sqlite_query(B_DB *mdb, const char *cmd);
- void my_sqlite_field_seek(B_DB *mdb, int field);
- SQL_FIELD *my_sqlite_fetch_field(B_DB *mdb);
-+extern char* my_sqlite_batch_lock_query;
-+extern char* my_sqlite_batch_unlock_query;
-+extern char* my_sqlite_batch_fill_filename_query;
-+extern char* my_sqlite_batch_fill_path_query;
-
-
- #else
-@@ -249,6 +260,7 @@
- POOLMEM *fname; /* Filename only */
- POOLMEM *path; /* Path only */
- POOLMEM *esc_name; /* Escaped file/path name */
-+ POOLMEM *esc_name2; /* Escaped path name */
- int fnl; /* file name length */
- int pnl; /* path name length */
- };
-@@ -289,8 +301,14 @@
- #define sql_fetch_field(x) my_sqlite_fetch_field(x)
- #define sql_num_fields(x) ((x)->ncolumn)
- #define SQL_ROW char**
--
--
-+#define sql_batch_start(x) db_batch_start(x)
-+#define sql_batch_end(x,y) db_batch_end(x,y)
-+#define sql_batch_insert(x,y) db_batch_insert(x,y)
-+#define sql_batch_lock_path_query my_sqlite_batch_lock_query
-+#define sql_batch_lock_filename_query my_sqlite_batch_lock_query
-+#define sql_batch_unlock_tables_query my_sqlite_batch_unlock_query
-+#define sql_batch_fill_filename_query my_sqlite_batch_fill_filename_query
-+#define sql_batch_fill_path_query my_sqlite_batch_fill_path_query
-
- /* In cats/sqlite.c */
- void my_sqlite_free_table(B_DB *mdb);
-@@ -298,6 +316,10 @@
- int my_sqlite_query(B_DB *mdb, const char *cmd);
- void my_sqlite_field_seek(B_DB *mdb, int field);
- SQL_FIELD *my_sqlite_fetch_field(B_DB *mdb);
-+extern char* my_sqlite_batch_lock_query;
-+extern char* my_sqlite_batch_unlock_query;
-+extern char* my_sqlite_batch_fill_filename_query;
-+extern char* my_sqlite_batch_fill_path_query;
-
-
- #else
-@@ -341,6 +363,7 @@
- POOLMEM *fname; /* Filename only */
- POOLMEM *path; /* Path only */
- POOLMEM *esc_name; /* Escaped file/path name */
-+ POOLMEM *esc_name2; /* Escaped path name */
- int fnl; /* file name length */
- int pnl; /* path name length */
- };
-@@ -362,9 +385,25 @@
- #define sql_field_seek(x, y) mysql_field_seek((x)->result, (y))
- #define sql_fetch_field(x) mysql_fetch_field((x)->result)
- #define sql_num_fields(x) (int)mysql_num_fields((x)->result)
-+#define sql_batch_start(x) db_batch_start(x)
-+#define sql_batch_end(x,y) db_batch_end(x,y)
-+#define sql_batch_insert(x,y) db_batch_insert(x,y)
-+#define sql_batch_lock_path_query my_mysql_batch_lock_path_query
-+#define sql_batch_lock_filename_query my_mysql_batch_lock_filename_query
-+#define sql_batch_unlock_tables_query my_mysql_batch_unlock_tables_query
-+#define sql_batch_fill_filename_query my_mysql_batch_fill_filename_query
-+#define sql_batch_fill_path_query my_mysql_batch_fill_path_query
- #define SQL_ROW MYSQL_ROW
- #define SQL_FIELD MYSQL_FIELD
-
-+
-+int my_mysql_batch_start(B_DB *mdb);
-+extern char* my_mysql_batch_lock_path_query;
-+extern char* my_mysql_batch_lock_filename_query;
-+extern char* my_mysql_batch_unlock_tables_query;
-+extern char* my_mysql_batch_fill_filename_query;
-+extern char* my_mysql_batch_fill_path_query;
-+
- #else
-
- #ifdef HAVE_POSTGRESQL
-@@ -425,6 +464,7 @@
- POOLMEM *fname; /* Filename only */
- POOLMEM *path; /* Path only */
- POOLMEM *esc_name; /* Escaped file/path name */
-+ POOLMEM *esc_name2; /* Escaped path name */
- int fnl; /* file name length */
- int pnl; /* path name length */
- };
-@@ -436,7 +476,19 @@
- int my_postgresql_currval (B_DB *mdb, char *table_name);
- void my_postgresql_field_seek (B_DB *mdb, int row);
- POSTGRESQL_FIELD * my_postgresql_fetch_field(B_DB *mdb);
--
-+int my_postgresql_lock_table(B_DB *mdb, const char *table);
-+int my_postgresql_unlock_table(B_DB *mdb);
-+int my_postgresql_batch_start(B_DB *mdb);
-+int my_postgresql_batch_end(B_DB *mdb, const char *error);
-+typedef struct ATTR_DBR ATTR_DBR;
-+int my_postgresql_batch_insert(B_DB *mdb, ATTR_DBR *ar);
-+char *my_postgresql_copy_escape(char *dest, char *src, size_t len);
-+
-+extern char* my_pg_batch_lock_path_query;
-+extern char* my_pg_batch_lock_filename_query;
-+extern char* my_pg_batch_unlock_tables_query;
-+extern char* my_pg_batch_fill_filename_query;
-+extern char* my_pg_batch_fill_path_query;
-
- /* "Generic" names for easier conversion */
- #define sql_store_result(x) ((x)->result)
-@@ -452,6 +504,17 @@
- #define sql_field_seek(x, y) my_postgresql_field_seek((x), (y))
- #define sql_fetch_field(x) my_postgresql_fetch_field(x)
- #define sql_num_fields(x) ((x)->num_fields)
-+#define sql_batch_start(x) my_postgresql_batch_start(x)
-+#define sql_batch_end(x,y) my_postgresql_batch_end(x,y)
-+#define sql_batch_insert(x,y) my_postgresql_batch_insert(x,y)
-+#define sql_lock_table(x,y) my_postgresql_lock_table(x, y)
-+#define sql_unlock_table(x,y) my_postgresql_unlock_table(x)
-+#define sql_batch_lock_path_query my_pg_batch_lock_path_query
-+#define sql_batch_lock_filename_query my_pg_batch_lock_filename_query
-+#define sql_batch_unlock_tables_query my_pg_batch_unlock_tables_query
-+#define sql_batch_fill_filename_query my_pg_batch_fill_filename_query
-+#define sql_batch_fill_path_query my_pg_batch_fill_path_query
-+
- #define SQL_ROW POSTGRESQL_ROW
- #define SQL_FIELD POSTGRESQL_FIELD
-
-diff -Naur cvs/src/cats/mysql.c my/src/cats/mysql.c
---- cvs/src/cats/mysql.c 2006-12-09 14:41:50.000000000 +0100
-+++ my/src/cats/mysql.c 2007-01-10 19:21:42.000000000 +0100
-@@ -121,6 +121,7 @@
- mdb->fname = get_pool_memory(PM_FNAME);
- mdb->path = get_pool_memory(PM_FNAME);
- mdb->esc_name = get_pool_memory(PM_FNAME);
-+ mdb->esc_name2 = get_pool_memory(PM_FNAME);
- qinsert(&db_list, &mdb->bq); /* put db in list */
- V(mutex);
- return mdb;
-@@ -231,6 +232,7 @@
- free_pool_memory(mdb->fname);
- free_pool_memory(mdb->path);
- free_pool_memory(mdb->esc_name);
-+ free_pool_memory(mdb->esc_name2);
- if (mdb->db_name) {
- free(mdb->db_name);
- }
-@@ -372,4 +374,34 @@
-
- }
-
-+char *my_mysql_batch_lock_path_query = "LOCK TABLES Path write, "
-+ " batch write, "
-+ " Path as p write ";
-+
-+
-+char *my_mysql_batch_lock_filename_query = "LOCK TABLES Filename write, "
-+ " batch write, "
-+ " Filename as f write ";
-+
-+char *my_mysql_batch_unlock_tables_query = "UNLOCK TABLES";
-+
-+char *my_mysql_batch_fill_path_query = "INSERT IGNORE INTO Path (Path) "
-+ " SELECT a.Path FROM "
-+ " (SELECT DISTINCT Path "
-+ " FROM batch) AS a "
-+ " WHERE NOT EXISTS "
-+ " (SELECT Path "
-+ " FROM Path AS p "
-+ " WHERE p.Path = a.Path) ";
-+
-+char *my_mysql_batch_fill_filename_query = "INSERT IGNORE INTO Filename (Name)"
-+ " SELECT a.Name FROM "
-+ " (SELECT DISTINCT Name "
-+ " FROM batch) AS a "
-+ " WHERE NOT EXISTS "
-+ " (SELECT Name "
-+ " FROM Filename AS f "
-+ " WHERE f.Name = a.Name) ";
-+
- #endif /* HAVE_MYSQL */
-+
-diff -Naur cvs/src/cats/postgresql.c my/src/cats/postgresql.c
---- cvs/src/cats/postgresql.c 2006-12-06 15:11:53.000000000 +0100
-+++ my/src/cats/postgresql.c 2007-01-10 19:25:47.000000000 +0100
-@@ -124,6 +124,7 @@
- mdb->fname = get_pool_memory(PM_FNAME);
- mdb->path = get_pool_memory(PM_FNAME);
- mdb->esc_name = get_pool_memory(PM_FNAME);
-+ mdb->esc_name2 = get_pool_memory(PM_FNAME);
- mdb->allow_transactions = mult_db_connections;
- qinsert(&db_list, &mdb->bq); /* put db in list */
- V(mutex);
-@@ -228,6 +229,7 @@
- free_pool_memory(mdb->fname);
- free_pool_memory(mdb->path);
- free_pool_memory(mdb->esc_name);
-+ free_pool_memory(mdb->esc_name2);
- if (mdb->db_name) {
- free(mdb->db_name);
- }
-@@ -538,5 +540,202 @@
- return id;
- }
-
-+int my_postgresql_lock_table(B_DB *mdb, const char *table)
-+{
-+ my_postgresql_query(mdb, "BEGIN");
-+ Mmsg(mdb->cmd, "LOCK TABLE %s IN SHARE ROW EXCLUSIVE MODE", table);
-+ return my_postgresql_query(mdb, mdb->cmd);
-+}
-+
-+int my_postgresql_unlock_table(B_DB *mdb)
-+{
-+ return my_postgresql_query(mdb, "COMMIT");
-+}
-+
-+int my_postgresql_batch_start(B_DB *mdb)
-+{
-+ Dmsg0(500, "my_postgresql_batch_start started\n");
-+
-+ if (my_postgresql_query(mdb,
-+ " CREATE TEMPORARY TABLE batch "
-+ " (fileindex int, "
-+ " jobid int, "
-+ " path varchar, "
-+ " name varchar, "
-+ " lstat varchar, "
-+ " md5 varchar)") == 1)
-+ {
-+ Dmsg0(500, "my_postgresql_batch_start failed\n");
-+ return 1;
-+ }
-+
-+ // We are starting a new query. reset everything.
-+ mdb->num_rows = -1;
-+ mdb->row_number = -1;
-+ mdb->field_number = -1;
-+
-+ if (mdb->result != NULL) {
-+ my_postgresql_free_result(mdb);
-+ }
-+
-+ mdb->result = PQexec(mdb->db, "COPY batch FROM STDIN");
-+ mdb->status = PQresultStatus(mdb->result);
-+ if (mdb->status == PGRES_COPY_IN) {
-+ // how many fields in the set?
-+ mdb->num_fields = (int) PQnfields(mdb->result);
-+ mdb->num_rows = 0;
-+ mdb->status = 0;
-+ } else {
-+ Dmsg0(500, "we failed\n");
-+ mdb->status = 1;
-+ }
-+
-+ Dmsg0(500, "my_postgresql_batch_start finishing\n");
-+
-+ return mdb->status;
-+}
-+
-+/* set error to something to abort operation */
-+int my_postgresql_batch_end(B_DB *mdb, const char *error)
-+{
-+ int res;
-+ int count=30;
-+ Dmsg0(500, "my_postgresql_batch_end started\n");
-+
-+ if (!mdb) { /* no files ? */
-+ return 0;
-+ }
-+
-+ do {
-+ res = PQputCopyEnd(mdb->db, error);
-+ } while (res == 0 && --count > 0);
-+
-+ if (res == 1) {
-+ Dmsg0(500, "ok\n");
-+ mdb->status = 0;
-+ }
-+
-+ if (res <= 0) {
-+ Dmsg0(500, "we failed\n");
-+ mdb->status = 1;
-+ Mmsg1(&mdb->errmsg, _("error ending batch mode: %s\n"), PQerrorMessage(mdb->db));
-+ }
-+
-+ Dmsg0(500, "my_postgresql_batch_end finishing\n");
-+
-+ return mdb->status;
-+}
-+
-+int my_postgresql_batch_insert(B_DB *mdb, ATTR_DBR *ar)
-+{
-+ int res;
-+ int count=30;
-+ size_t len;
-+ char *digest;
-+ char ed1[50];
-+
-+ mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1);
-+ my_postgresql_copy_escape(mdb->esc_name, mdb->fname, mdb->fnl);
-+
-+ mdb->esc_name2 = check_pool_memory_size(mdb->esc_name2, mdb->pnl*2+1);
-+ my_postgresql_copy_escape(mdb->esc_name2, mdb->path, mdb->pnl);
-+
-+ if (ar->Digest == NULL || ar->Digest[0] == 0) {
-+ digest = "0";
-+ } else {
-+ digest = ar->Digest;
-+ }
-+
-+ len = Mmsg(mdb->cmd, "%u\t%s\t%s\t%s\t%s\t%s\n",
-+ ar->FileIndex, edit_int64(ar->JobId, ed1), mdb->path,
-+ mdb->fname, ar->attr, digest);
-+
-+ do {
-+ res = PQputCopyData(mdb->db,
-+ mdb->cmd,
-+ len);
-+ } while (res == 0 && --count > 0);
-+
-+ if (res == 1) {
-+ Dmsg0(500, "ok\n");
-+ mdb->changes++;
-+ mdb->status = 0;
-+ }
-+
-+ if (res <= 0) {
-+ Dmsg0(500, "we failed\n");
-+ mdb->status = 1;
-+ Mmsg1(&mdb->errmsg, _("error copying data in batch mode: %s\n"), PQerrorMessage(mdb->db));
-+ }
-+
-+ Dmsg0(500, "my_postgresql_batch_insert finishing\n");
-+
-+ return mdb->status;
-+}
-+
-+/*
-+ * Escape strings so that PostgreSQL is happy on COPY
-+ *
-+ * NOTE! len is the length of the old string. Your new
-+ * string must be long enough (max 2*old+1) to hold
-+ * the escaped output.
-+ */
-+char *my_postgresql_copy_escape(char *dest, char *src, size_t len)
-+{
-+ /* we have to escape \t, \n, \r, \ */
-+ char c = '\0' ;
-+
-+ while (len > 0 && *src) {
-+ switch (*src) {
-+ case '\n':
-+ c = 'n';
-+ break;
-+ case '\\':
-+ c = '\\';
-+ break;
-+ case '\t':
-+ c = 't';
-+ break;
-+ case '\r':
-+ c = 'r';
-+ break;
-+ default:
-+ c = '\0' ;
-+ }
-+
-+ if (c) {
-+ *dest = '\\';
-+ dest++;
-+ *dest = c;
-+ } else {
-+ *dest = *src;
-+ }
-+
-+ len--;
-+ src++;
-+ dest++;
-+ }
-+
-+ *dest = '\0';
-+ return dest;
-+}
-+
-+char *my_pg_batch_lock_path_query = "BEGIN; LOCK TABLE Path IN SHARE ROW EXCLUSIVE MODE";
-+
-+
-+char *my_pg_batch_lock_filename_query = "BEGIN; LOCK TABLE Filename IN SHARE ROW EXCLUSIVE MODE";
-+
-+char *my_pg_batch_unlock_tables_query = "COMMIT";
-+
-+char *my_pg_batch_fill_path_query = "INSERT INTO Path (Path) "
-+ " SELECT a.Path FROM "
-+ " (SELECT DISTINCT Path FROM batch) AS a "
-+ " WHERE NOT EXISTS (SELECT Path FROM Path WHERE Path = a.Path) ";
-+
-
-+char *my_pg_batch_fill_filename_query = "INSERT INTO Filename (Name) "
-+ " SELECT a.Name FROM "
-+ " (SELECT DISTINCT Name FROM batch) as a "
-+ " WHERE NOT EXISTS "
-+ " (SELECT Name FROM Filename WHERE Name = a.Name)";
- #endif /* HAVE_POSTGRESQL */
-diff -Naur cvs/src/cats/protos.h my/src/cats/protos.h
---- cvs/src/cats/protos.h 2006-12-06 15:11:53.000000000 +0100
-+++ my/src/cats/protos.h 2007-01-10 19:21:42.000000000 +0100
-@@ -67,6 +67,10 @@
- bool db_create_device_record(JCR *jcr, B_DB *mdb, DEVICE_DBR *dr);
- bool db_create_storage_record(JCR *jcr, B_DB *mdb, STORAGE_DBR *sr);
- bool db_create_mediatype_record(JCR *jcr, B_DB *mdb, MEDIATYPE_DBR *mr);
-+int db_create_batch_file_record(JCR *jcr);
-+int db_batch_start(B_DB *mdb);
-+int db_batch_end(B_DB *mdb, const char *error);
-+int db_batch_insert(B_DB *mdb, ATTR_DBR *ar);
-
- /* delete.c */
- int db_delete_pool_record(JCR *jcr, B_DB *db, POOL_DBR *pool_dbr);
-diff -Naur cvs/src/cats/sql_create.c my/src/cats/sql_create.c
---- cvs/src/cats/sql_create.c 2006-12-06 15:11:53.000000000 +0100
-+++ my/src/cats/sql_create.c 2007-01-10 21:46:59.000000000 +0100
-@@ -664,9 +664,208 @@
- * };
- */
-
-+/* All db_batch_* functions are used to do bulk batch insert in File/Filename/Path
-+ * tables. This code can be activated by adding "#define HAVE_BATCH_FILE_INSERT 1"
-+ * in baconfig.h
-+ *
-+ * To sum up:
-+ * - bulk load a temp table
-+ * - insert missing filenames into Filename with a single query (lock the
-+ *   Filename table before that to avoid duplicate inserts from a concurrent update)
-+ * - insert missing paths into Path with another single query
-+ * - then insert the join between the temp, Filename and Path tables into File.
-+ */
-+
-+int db_batch_start(B_DB *mdb)
-+{
-+ return sql_query(mdb,
-+ " CREATE TEMPORARY TABLE batch "
-+ " (fileindex integer, "
-+ " jobid integer, "
-+ " path blob, "
-+ " name blob, "
-+ " lstat tinyblob, "
-+ " md5 tinyblob) ");
-+}
-+
-+int db_batch_insert(B_DB *mdb, ATTR_DBR *ar)
-+{
-+ size_t len;
-+ char *digest;
-+ char ed1[50];
-+
-+ mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1);
-+ db_escape_string(mdb->esc_name, mdb->fname, mdb->fnl);
-+
-+ mdb->esc_name2 = check_pool_memory_size(mdb->esc_name2, mdb->pnl*2+1);
-+ db_escape_string(mdb->esc_name2, mdb->path, mdb->pnl);
-+
-+ if (ar->Digest == NULL || ar->Digest[0] == 0) {
-+ digest = "0";
-+ } else {
-+ digest = ar->Digest;
-+ }
-+
-+ len = Mmsg(mdb->cmd, "INSERT INTO batch VALUES (%u,%s,'%s','%s','%s','%s')",
-+ ar->FileIndex, edit_int64(ar->JobId,ed1), mdb->path,
-+ mdb->fname, ar->attr, digest);
-+
-+ sql_query(mdb, mdb->cmd);
-+
-+ return mdb->status;
-+}
-+
-+/* set error to something to abort operation */
-+int db_batch_end(B_DB *mdb, const char *error)
-+{
-+
-+ Dmsg0(50, "db_batch_end started");
-+
-+ if (mdb) {
-+ mdb->status = 0;
-+ return mdb->status;
-+ }
-+ return 0;
-+}
-+
-+int db_create_batch_file_record(JCR *jcr)
-+{
-+ Dmsg0(50, "db_create_batch_file_record\n");
-+
-+ if (!jcr->db_batch) { /* no files to backup ? */
-+ Dmsg0(50,"db_create_file_record : no files\n");
-+ return 0;
-+ }
-+
-+ if (sql_batch_end(jcr->db_batch, NULL)) {
-+ Jmsg(jcr, M_FATAL, 0, "Bad batch end %s\n", jcr->db_batch->errmsg);
-+ return 1;
-+ }
-+
-+ /* we have to lock tables */
-+ if (sql_query(jcr->db_batch, sql_batch_lock_path_query))
-+ {
-+ Jmsg(jcr, M_FATAL, 0, "Can't lock Path table %s\n", jcr->db_batch->errmsg);
-+ return 1;
-+ }
-+
-+ if (sql_query(jcr->db_batch, sql_batch_fill_path_query))
-+ {
-+ Jmsg(jcr, M_FATAL, 0, "Can't fill Path table %s\n",jcr->db_batch->errmsg);
-+ sql_query(jcr->db_batch, sql_batch_unlock_tables_query);
-+ return 1;
-+ }
-+
-+ if (sql_query(jcr->db_batch, sql_batch_unlock_tables_query))
-+ {
-+ Jmsg(jcr, M_FATAL, 0, "Can't unlock Path table %s\n", jcr->db_batch->errmsg);
-+ return 1;
-+ }
-+
-+ /* we have to lock tables */
-+ if (sql_query(jcr->db_batch, sql_batch_lock_filename_query))
-+ {
-+ Jmsg(jcr, M_FATAL, 0, "Can't lock Filename table %s\n", jcr->db_batch->errmsg);
-+ return 1;
-+ }
-+
-+ if (sql_query(jcr->db_batch, sql_batch_fill_filename_query))
-+ {
-+ Jmsg(jcr,M_FATAL,0,"Can't fill Filename table %s\n",jcr->db_batch->errmsg);
-+ sql_query(jcr->db_batch, sql_batch_unlock_tables_query);
-+ return 1;
-+ }
-+
-+ if (sql_query(jcr->db_batch, sql_batch_unlock_tables_query)) {
-+ Jmsg(jcr, M_FATAL, 0, "Can't unlock Filename table %s\n", jcr->db_batch->errmsg);
-+ return 1;
-+ }
-+
-+ if (sql_query(jcr->db_batch,
-+ " INSERT INTO File (FileIndex, JobId, PathId, FilenameId, LStat, MD5)"
-+ " SELECT batch.FileIndex, batch.JobId, Path.PathId, "
-+ " Filename.FilenameId,batch.LStat, batch.MD5 "
-+ " FROM batch "
-+ " JOIN Path ON (batch.Path = Path.Path) "
-+ " JOIN Filename ON (batch.Name = Filename.Name) "))
-+ {
-+ Jmsg(jcr, M_FATAL, 0, "Can't fill File table %s\n", jcr->db_batch->errmsg);
-+ return 1;
-+ }
-+
-+ sql_query(jcr->db_batch, "DROP TABLE batch");
-+
-+ return 0;
-+}
-+
-+#ifdef HAVE_BATCH_FILE_INSERT
-+/*
-+ * Create File record in B_DB
-+ *
-+ * In order to reduce database size, we store the File attributes,
-+ * the FileName, and the Path separately. In principle, there
-+ * is a single FileName record and a single Path record, no matter
-+ * how many times it occurs. In this subroutine, we separate
-+ * the file name and the path and fill temporary tables with these three records.
-+ */
-+int db_create_file_attributes_record(JCR *jcr, B_DB *_mdb, ATTR_DBR *ar)
-+{
-+
-+ Dmsg1(dbglevel, "Fname=%s\n", ar->fname);
-+ Dmsg0(dbglevel, "put_file_into_catalog\n");
-+
-+ if (!jcr->db_batch) {
-+ jcr->db_batch = db_init_database(jcr,
-+ jcr->db->db_name,
-+ jcr->db->db_user,
-+ jcr->db->db_password,
-+ jcr->db->db_address,
-+ jcr->db->db_port,
-+ jcr->db->db_socket,
-+ 1 /* multi_db = true */);
-+
-+ if (!jcr->db_batch || !db_open_database(jcr, jcr->db_batch)) {
-+ Jmsg(jcr, M_FATAL, 0, _("Could not open database \"%s\".\n"),
-+ jcr->db->db_name);
-+ if (jcr->db_batch) {
-+ Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db_batch));
-+ }
-+ return 0;
-+ }
-+
-+ sql_batch_start(jcr->db_batch);
-+ }
-+
-+ B_DB *mdb = jcr->db_batch;
-+
-+ /*
-+ * Make sure we have an acceptable attributes record.
-+ */
-+ if (!(ar->Stream == STREAM_UNIX_ATTRIBUTES ||
-+ ar->Stream == STREAM_UNIX_ATTRIBUTES_EX)) {
-+ Mmsg1(&mdb->errmsg, _("Attempt to put non-attributes into catalog. Stream=%d\n"),
-+ ar->Stream);
-+ Jmsg(jcr, M_ERROR, 0, "%s", mdb->errmsg);
-+ return 0;
-+ }
-+
-+ split_path_and_file(jcr, mdb, ar->fname);
-
-
- /*
-+ if (jcr->changes > 100000) {
-+ sql_batch_end(mdb, NULL);
-+ sql_batch_start(mdb);
-+ jcr->changes = 0;
-+ }
-+*/
-+
-+ return (sql_batch_insert(mdb, ar) == 0);
-+}
-+
-+#else /* ! HAVE_BATCH_FILE_INSERT */
-+
-+/*
- * Create File record in B_DB
- *
- * In order to reduce database size, we store the File attributes,
-@@ -721,6 +920,8 @@
- return 0;
- }
-
-+#endif /* ! HAVE_BATCH_FILE_INSERT */
-+
- /*
- * This is the master File entry containing the attributes.
- * The filename and path records have already been created.
-diff -Naur cvs/src/cats/sqlite.c my/src/cats/sqlite.c
---- cvs/src/cats/sqlite.c 2006-12-06 15:11:53.000000000 +0100
-+++ my/src/cats/sqlite.c 2007-01-10 19:21:42.000000000 +0100
-@@ -108,6 +108,7 @@
- mdb->fname = get_pool_memory(PM_FNAME);
- mdb->path = get_pool_memory(PM_FNAME);
- mdb->esc_name = get_pool_memory(PM_FNAME);
-+ mdb->esc_name2 = get_pool_memory(PM_FNAME);
- mdb->allow_transactions = mult_db_connections;
- qinsert(&db_list, &mdb->bq); /* put db in list */
- V(mutex);
-@@ -213,6 +214,7 @@
- free_pool_memory(mdb->fname);
- free_pool_memory(mdb->path);
- free_pool_memory(mdb->esc_name);
-+ free_pool_memory(mdb->esc_name2);
- if (mdb->db_name) {
- free(mdb->db_name);
- }
-@@ -433,4 +435,16 @@
- return mdb->fields[mdb->field++];
- }
-
-+char *my_sqlite_batch_lock_query = "BEGIN";
-+char *my_sqlite_batch_unlock_query = "COMMIT";
-+char *my_sqlite_batch_fill_path_query = "INSERT INTO Path (Path) "
-+ " SELECT DISTINCT Path FROM batch "
-+ " EXCEPT SELECT Path FROM Path ";
-+
-+char *my_sqlite_batch_fill_filename_query = "INSERT INTO Filename (Name) "
-+ " SELECT DISTINCT Name FROM batch "
-+ " EXCEPT SELECT Name FROM Filename ";
-+
-+
-+
- #endif /* HAVE_SQLITE */
-diff -Naur cvs/src/dird/backup.c my/src/dird/backup.c
---- cvs/src/dird/backup.c 2006-12-13 11:57:52.000000000 +0100
-+++ my/src/dird/backup.c 2007-01-10 19:21:42.000000000 +0100
-@@ -233,6 +233,9 @@
-
- /* Pickup Job termination data */
- stat = wait_for_job_termination(jcr);
-+#ifdef HAVE_BATCH_FILE_INSERT
-+ db_create_batch_file_record(jcr); /* used by bulk batch file insert */
-+#endif
- if (stat == JS_Terminated) {
- backup_cleanup(jcr, stat);
- return true;
-diff -Naur cvs/src/dird/jobq.c my/src/dird/jobq.c
---- cvs/src/dird/jobq.c 2006-11-24 11:29:37.000000000 +0100
-+++ my/src/dird/jobq.c 2007-01-10 19:21:42.000000000 +0100
-@@ -563,6 +563,10 @@
- db_close_database(jcr, jcr->db);
- jcr->db = NULL;
- }
-+ if (jcr->db_batch) {
-+ db_close_database(jcr, jcr->db_batch);
-+ jcr->db_batch = NULL;
-+ }
- Dmsg2(2300, "====== Termination job=%d use_cnt=%d\n", jcr->JobId, jcr->use_count());
- jcr->SDJobStatus = 0;
- V(jq->mutex); /* release internal lock */
-diff -Naur cvs/src/jcr.h my/src/jcr.h
---- cvs/src/jcr.h 2006-12-19 21:57:38.000000000 +0100
-+++ my/src/jcr.h 2007-01-10 19:21:42.000000000 +0100
-@@ -184,6 +184,7 @@
- bool cached_attribute; /* set if attribute is cached */
- POOLMEM *attr; /* Attribute string from SD */
- B_DB *db; /* database pointer */
-+ B_DB *db_batch; /* database pointer for batch insert */
- ATTR_DBR *ar; /* DB attribute record */
-
- /* Daemon specific part of JCR */
+++ /dev/null
---- src/dird/msgchan.c 8 Dec 2006 14:27:10 -0000 1.66
-+++ src/dird/msgchan.c 5 Feb 2007 14:15:52 -0000
-@@ -316,7 +316,7 @@
- Jmsg1(jcr, M_ABORT, 0, _("Cannot create message thread: %s\n"), be.strerror(status));
- }
- /* Wait for thread to start */
-- while (jcr->SD_msg_chan == 0) {
-+ while (jcr->SD_msg_chan == 0 && jcr->sd_msg_thread_done == false) {
- bmicrosleep(0, 50);
- }
- Dmsg1(100, "SD msg_thread started. use=%d\n", jcr->use_count());
+++ /dev/null
-From: Eric Bollengier
-
-Sometimes, when my system is very busy, if I cancel a job
-before it is initialised, Bacula falls into a race condition
-and the job thread never exits from start_storage_daemon_message_thread().
-
-
-$Log$
-Revision 1.1 2007/02/05 14:19:22 ricozz
-ebl add
-
+++ /dev/null
-diff -Naur org/bacula-2.0.0/src/baconfig.h bacula-2.0.0/src/baconfig.h
---- org/bacula-2.0.0/src/baconfig.h 2006-12-17 14:36:35.000000000 +0100
-+++ bacula-2.0.0/src/baconfig.h 2007-01-09 16:10:02.000000000 +0100
-@@ -106,8 +106,8 @@
- #define OSDependentInit()
- #define tape_open open
- #define tape_ioctl ioctl
--#define tape_read read
--#define tape_write write
-+#define tape_read ::read
-+#define tape_write ::write
- #define tape_close ::close
-
- #endif
-diff -Naur org/bacula-2.0.0/src/cats/sql_find.c bacula-2.0.0/src/cats/sql_find.c
---- org/bacula-2.0.0/src/cats/sql_find.c 2006-11-27 11:02:59.000000000 +0100
-+++ bacula-2.0.0/src/cats/sql_find.c 2007-01-09 16:10:02.000000000 +0100
-@@ -283,7 +283,7 @@
- "VolBytes,VolMounts,VolErrors,VolWrites,MaxVolBytes,VolCapacityBytes,"
- "VolRetention,VolUseDuration,MaxVolJobs,MaxVolFiles,Recycle,Slot,"
- "FirstWritten,LastWritten,VolStatus,InChanger,VolParts,"
-- "LabelType "
-+ "LabelType,VolReadTime,VolWriteTime "
- "FROM Media WHERE PoolId=%s AND MediaType='%s' AND VolStatus IN ('Full',"
- "'Recycle','Purged','Used','Append') AND Enabled=1 "
- "ORDER BY LastWritten LIMIT 1",
-@@ -308,7 +308,7 @@
- "VolBytes,VolMounts,VolErrors,VolWrites,MaxVolBytes,VolCapacityBytes,"
- "VolRetention,VolUseDuration,MaxVolJobs,MaxVolFiles,Recycle,Slot,"
- "FirstWritten,LastWritten,VolStatus,InChanger,VolParts,"
-- "LabelType "
-+ "LabelType,VolReadTime,VolWriteTime "
- "FROM Media WHERE PoolId=%s AND MediaType='%s' AND Enabled=1 "
- "AND VolStatus='%s' "
- "%s "
-@@ -371,6 +371,8 @@
- mr->InChanger = str_to_int64(row[20]);
- mr->VolParts = str_to_int64(row[21]);
- mr->LabelType = str_to_int64(row[22]);
-+ mr->VolReadTime = str_to_uint64(row[23]);
-+ mr->VolWriteTime = str_to_uint64(row[24]);
- mr->Enabled = 1; /* ensured via query */
- sql_free_result(mdb);
-
-diff -Naur org/bacula-2.0.0/src/cats/sql_get.c bacula-2.0.0/src/cats/sql_get.c
---- org/bacula-2.0.0/src/cats/sql_get.c 2006-11-27 11:02:59.000000000 +0100
-+++ bacula-2.0.0/src/cats/sql_get.c 2007-01-09 16:10:02.000000000 +0100
-@@ -872,7 +872,7 @@
- "MaxVolFiles,Recycle,Slot,FirstWritten,LastWritten,InChanger,"
- "EndFile,EndBlock,VolParts,LabelType,LabelDate,StorageId,"
- "Enabled,LocationId,RecycleCount,InitialWrite,"
-- "ScratchPoolId,RecyclePoolId "
-+ "ScratchPoolId,RecyclePoolId,VolReadTime,VolWriteTime "
- "FROM Media WHERE MediaId=%s",
- edit_int64(mr->MediaId, ed1));
- } else { /* find by name */
-@@ -882,7 +882,7 @@
- "MaxVolFiles,Recycle,Slot,FirstWritten,LastWritten,InChanger,"
- "EndFile,EndBlock,VolParts,LabelType,LabelDate,StorageId,"
- "Enabled,LocationId,RecycleCount,InitialWrite,"
-- "ScratchPoolId,RecyclePoolId "
-+ "ScratchPoolId,RecyclePoolId,VolReadTime,VolWriteTime "
- "FROM Media WHERE VolumeName='%s'", mr->VolumeName);
- }
-
-@@ -938,6 +938,8 @@
- mr->InitialWrite = (time_t)str_to_utime(mr->cInitialWrite);
- mr->ScratchPoolId = str_to_int64(row[33]);
- mr->RecyclePoolId = str_to_int64(row[34]);
-+ mr->VolReadTime = str_to_int64(row[35]);
-+ mr->VolWriteTime = str_to_int64(row[36]);
-
- ok = true;
- }
-diff -Naur org/bacula-2.0.0/src/dird/catreq.c bacula-2.0.0/src/dird/catreq.c
---- org/bacula-2.0.0/src/dird/catreq.c 2006-12-23 17:33:52.000000000 +0100
-+++ bacula-2.0.0/src/dird/catreq.c 2007-01-09 16:10:02.000000000 +0100
-@@ -280,7 +280,7 @@
- mr.VolWriteTime = sdmr.VolWriteTime;
- mr.VolParts = sdmr.VolParts;
- bstrncpy(mr.VolStatus, sdmr.VolStatus, sizeof(mr.VolStatus));
-- if (jcr->wstore->StorageId) {
-+ if (jcr->wstore && jcr->wstore->StorageId) {
- mr.StorageId = jcr->wstore->StorageId;
- }
-
-diff -Naur org/bacula-2.0.0/src/dird/dird.c bacula-2.0.0/src/dird/dird.c
---- org/bacula-2.0.0/src/dird/dird.c 2006-12-22 16:01:05.000000000 +0100
-+++ bacula-2.0.0/src/dird/dird.c 2007-01-09 16:10:02.000000000 +0100
-@@ -269,6 +269,8 @@
-
- init_job_server(director->MaxConcurrentJobs);
-
-+// init_device_resources();
-+
- Dmsg0(200, "wait for next job\n");
- /* Main loop -- call scheduler to get next job to run */
- while ( (jcr = wait_for_next_job(runjob)) ) {
-diff -Naur org/bacula-2.0.0/src/dird/getmsg.c bacula-2.0.0/src/dird/getmsg.c
---- org/bacula-2.0.0/src/dird/getmsg.c 2006-11-21 14:20:09.000000000 +0100
-+++ bacula-2.0.0/src/dird/getmsg.c 2007-01-09 16:10:02.000000000 +0100
-@@ -62,7 +62,9 @@
- "open=%d labeled=%d offline=%d "
- "reserved=%d max_writers=%d "
- "autoselect=%d autochanger=%d "
-- "changer_name=%127s media_type=%127s volume_name=%127s\n";
-+ "changer_name=%127s media_type=%127s volume_name=%127s "
-+ "DevReadTime=%d DevWriteTime=%d DevReadBytes=%d "
-+ "DevWriteBytes=%d\n";
- #endif
-
-
-@@ -243,6 +245,7 @@
- int dev_open, dev_append, dev_read, dev_labeled;
- int dev_offline, dev_autochanger, dev_autoselect;
- int dev_num_writers, dev_max_writers, dev_reserved;
-+ uint64_t dev_read_time, dev_write_time, dev_write_bytes, dev_read_bytes;
- uint64_t dev_PoolId;
- Dmsg1(100, "<stored: %s", bs->msg);
- if (sscanf(bs->msg, Device_update,
-@@ -253,7 +256,9 @@
- &dev_max_writers, &dev_autoselect,
- &dev_autochanger,
- changer_name.c_str(), media_type.c_str(),
-- volume_name.c_str()) != 15) {
-+ volume_name.c_str(),
-+ &dev_read_time, &dev_write_time, &dev_read_bytes,
-+ &dev_write_bytes) != 19) {
- Emsg1(M_ERROR, 0, _("Malformed message: %s\n"), bs->msg);
- } else {
- unbash_spaces(dev_name);
-@@ -283,6 +288,10 @@
- dev->max_writers = dev_max_writers;
- dev->reserved = dev_reserved;
- dev->found = true;
-+ dev->DevReadTime = dev_read_time; /* TODO : have to update database */
-+ dev->DevWriteTime = dev_write_time;
-+ dev->DevReadBytes = dev_read_bytes;
-+ dev->DevWriteBytes = dev_write_bytes;
- }
- continue;
- }
-diff -Naur org/bacula-2.0.0/src/stored/acquire.c bacula-2.0.0/src/stored/acquire.c
---- org/bacula-2.0.0/src/stored/acquire.c 2006-12-16 16:30:22.000000000 +0100
-+++ bacula-2.0.0/src/stored/acquire.c 2007-01-09 16:10:02.000000000 +0100
-@@ -461,8 +461,8 @@
-
- if (dev->can_read()) {
- dev->clear_read(); /* clear read bit */
--
-- /******FIXME**** send read volume usage statistics to director */
-+ Dmsg0(100, "dir_update_vol_info. Release0\n");
-+ dir_update_volume_info(dcr, false); /* send Volume info to Director */
-
- } else if (dev->num_writers > 0) {
- /*
-diff -Naur org/bacula-2.0.0/src/stored/ansi_label.c bacula-2.0.0/src/stored/ansi_label.c
---- org/bacula-2.0.0/src/stored/ansi_label.c 2006-11-21 18:03:45.000000000 +0100
-+++ bacula-2.0.0/src/stored/ansi_label.c 2007-01-09 16:10:02.000000000 +0100
-@@ -87,7 +87,7 @@
- /* Read a maximum of 5 records VOL1, HDR1, ... HDR4 */
- for (i=0; i < 6; i++) {
- do {
-- stat = tape_read(dev->fd, label, sizeof(label));
-+ stat = dev->read(label, sizeof(label));
- } while (stat == -1 && errno == EINTR);
- if (stat < 0) {
- berrno be;
-@@ -309,7 +309,7 @@
- } else {
- label[79] = '3'; /* ANSI label flag */
- }
-- stat = tape_write(dev->fd, label, sizeof(label));
-+ stat = dev->write(label, sizeof(label));
- if (stat != sizeof(label)) {
- berrno be;
- Jmsg1(jcr, M_FATAL, 0, _("Could not write ANSI VOL1 label. ERR=%s\n"),
-@@ -341,7 +341,7 @@
- * This could come at the end of a tape, ignore
- * EOT errors.
- */
-- stat = tape_write(dev->fd, label, sizeof(label));
-+ stat = dev->write(label, sizeof(label));
- if (stat != sizeof(label)) {
- berrno be;
- if (stat == -1) {
-@@ -370,7 +370,7 @@
- label[4] = 'V';
- ascii_to_ebcdic(label, label, sizeof(label));
- }
-- stat = tape_write(dev->fd, label, sizeof(label));
-+ stat = dev->write(label, sizeof(label));
- if (stat != sizeof(label)) {
- berrno be;
- if (stat == -1) {
-diff -Naur org/bacula-2.0.0/src/stored/askdir.c bacula-2.0.0/src/stored/askdir.c
---- org/bacula-2.0.0/src/stored/askdir.c 2006-12-08 15:27:10.000000000 +0100
-+++ bacula-2.0.0/src/stored/askdir.c 2007-01-09 16:10:02.000000000 +0100
-@@ -308,11 +308,6 @@
- Pmsg0(000, _("NULL Volume name. This shouldn't happen!!!\n"));
- return false;
- }
-- if (dev->can_read()) {
-- Jmsg0(jcr, M_FATAL, 0, _("Attempt to update_volume_info in read mode!!!\n"));
-- Pmsg0(000, _("Attempt to update_volume_info in read mode!!!\n"));
-- return false;
-- }
-
- Dmsg1(100, "Update cat VolFiles=%d\n", dev->file);
- /* Just labeled or relabeled the tape */
-diff -Naur org/bacula-2.0.0/src/stored/block.c bacula-2.0.0/src/stored/block.c
---- org/bacula-2.0.0/src/stored/block.c 2006-12-16 12:10:17.000000000 +0100
-+++ bacula-2.0.0/src/stored/block.c 2007-01-09 16:10:02.000000000 +0100
-@@ -537,11 +537,8 @@
- bmicrosleep(5, 0); /* pause a bit if busy or lots of errors */
- dev->clrerror(-1);
- }
-- if (dev->is_tape()) {
-- stat = tape_write(dev->fd, block->buf, (size_t)wlen);
-- } else {
-- stat = write(dev->fd, block->buf, (size_t)wlen);
-- }
-+ stat = dev->write(block->buf, (size_t)wlen);
-+
- } while (stat == -1 && (errno == EBUSY || errno == EIO) && retry++ < 3);
-
- #ifdef DEBUG_BLOCK_ZEROING
-@@ -979,11 +976,8 @@
- bmicrosleep(10, 0); /* pause a bit if busy or lots of errors */
- dev->clrerror(-1);
- }
-- if (dev->is_tape()) {
-- stat = tape_read(dev->fd, block->buf, (size_t)block->buf_len);
-- } else {
-- stat = read(dev->fd, block->buf, (size_t)block->buf_len);
-- }
-+ stat = dev->read(block->buf, (size_t)block->buf_len);
-+
- } while (stat == -1 && (errno == EBUSY || errno == EINTR || errno == EIO) && retry++ < 3);
- if (stat < 0) {
- berrno be;
-diff -Naur org/bacula-2.0.0/src/stored/dev.c bacula-2.0.0/src/stored/dev.c
---- org/bacula-2.0.0/src/stored/dev.c 2006-12-22 16:01:05.000000000 +0100
-+++ bacula-2.0.0/src/stored/dev.c 2007-01-09 16:10:02.000000000 +0100
-@@ -1325,7 +1325,7 @@
- mt_com.mt_count = 1;
- while (num-- && !at_eot()) {
- Dmsg0(100, "Doing read before fsf\n");
-- if ((stat = tape_read(fd, (char *)rbuf, rbuf_len)) < 0) {
-+ if ((stat = this->read((char *)rbuf, rbuf_len)) < 0) {
- if (errno == ENOMEM) { /* tape record exceeds buf len */
- stat = rbuf_len; /* This is OK */
- /*
-@@ -2192,6 +2192,68 @@
- }
- }
-
-+/* return the last timer interval (microseconds) */
-+int DEVICE::get_timer_count()
-+{
-+ uint64_t old = last_timer;
-+ struct timeval tv;
-+ gettimeofday(&tv, NULL);
-+ last_timer = tv.tv_usec + tv.tv_sec * 1000000;
-+
-+ return last_timer - old;
-+}
-+
-+/* read from fd */
-+ssize_t DEVICE::read(void *buf, size_t len)
-+{
-+ ssize_t read_len ;
-+
-+ get_timer_count();
-+
-+ if (this->is_tape()) {
-+ read_len = tape_read(fd, buf, len);
-+ } else {
-+ read_len = ::read(fd, buf, len);
-+ }
-+
-+ last_tick = get_timer_count();
-+
-+ DevReadTime += last_tick;
-+ VolCatInfo.VolReadTime += last_tick;
-+
-+ if (read_len > 0) { /* skip error */
-+ DevReadBytes += read_len;
-+ VolCatInfo.VolCatRBytes += read_len;
-+ }
-+
-+ return read_len;
-+}
-+
-+/* write to fd */
-+ssize_t DEVICE::write(const void *buf, size_t len)
-+{
-+ ssize_t write_len ;
-+
-+ get_timer_count();
-+
-+ if (this->is_tape()) {
-+ write_len = tape_write(fd, buf, len);
-+ } else {
-+ write_len = ::write(fd, buf, len);
-+ }
-+
-+ last_tick = get_timer_count();
-+
-+ DevWriteTime += last_tick;
-+ VolCatInfo.VolWriteTime += last_tick;
-+
-+ if (write_len > 0) { /* skip error */
-+ DevWriteBytes += write_len;
-+ VolCatInfo.VolCatBytes += write_len;
-+ }
-+
-+ return write_len;
-+}
-
- /* Return the resource name for the device */
- const char *DEVICE::name() const
-diff -Naur org/bacula-2.0.0/src/stored/dev.h bacula-2.0.0/src/stored/dev.h
---- org/bacula-2.0.0/src/stored/dev.h 2006-12-14 12:41:01.000000000 +0100
-+++ bacula-2.0.0/src/stored/dev.h 2007-01-09 16:10:02.000000000 +0100
-@@ -280,7 +280,17 @@
- int rem_wait_sec;
- int num_wait;
-
-+ uint64_t last_timer; /* used by read/write/seek to get stats (usec) */
-+ int last_tick; /* contains last read/write time (usec) */
-+
-+ uint64_t DevReadTime;
-+ uint64_t DevWriteTime;
-+ uint64_t DevWriteBytes;
-+ uint64_t DevReadBytes;
-+
- /* Methods */
-+ int get_timer_count(); /* return the last timer interval (microseconds) */
-+
- int has_cap(int cap) const { return capabilities & cap; }
- void clear_cap(int cap) { capabilities &= ~cap; }
- void set_cap(int cap) { capabilities |= cap; }
-@@ -363,6 +373,8 @@
- bool truncate(DCR *dcr); /* in dev.c */
- int open(DCR *dcr, int mode); /* in dev.c */
- void term(void); /* in dev.c */
-+ ssize_t read(void *buf, size_t len); /* in dev.c */
-+ ssize_t write(const void *buf, size_t len); /* in dev.c */
- bool rewind(DCR *dcr); /* in dev.c */
- bool mount(int timeout); /* in dev.c */
- bool unmount(int timeout); /* in dev.c */
+++ /dev/null
-From: Eric Bollengier <eric AT eb.homelinux.org>
-
-This patch allows you to have media statistics:
- - VolReadTime
- - VolWriteTime
- - ...
-
-The next step is to have Device statistics.
-
-$Log$
-Revision 1.1 2006/12/20 18:47:42 ricozz
-ebl works with 1.39.30
-
-Revision 1.1 2006/12/19 21:33:06 ricozz
-ebl ok against 1.39.30
-
-
+++ /dev/null
-diff -Naur org/bacula-2.0.0/src/stored/spool.c bacula-2.0.0/src/stored/spool.c
---- org/bacula-2.0.0/src/stored/spool.c 2006-11-27 11:03:03.000000000 +0100
-+++ bacula-2.0.0/src/stored/spool.c 2007-01-09 16:15:02.000000000 +0100
-@@ -154,8 +154,8 @@
- } else {
- dir = working_directory;
- }
-- Mmsg(name, "%s/%s.data.%s.%s.spool", dir, my_name, dcr->jcr->Job,
-- dcr->device->hdr.name);
-+ Mmsg(name, "%s/%s.data.%u.%s.%s.spool", dir, my_name, dcr->jcr->JobId,
-+ dcr->jcr->Job, dcr->device->hdr.name);
- }
-
-
+++ /dev/null
-From: Eric Bollengier <eric at eb dot homelinux dot org >
-
-This patch implements the "include JobID in spool file name" project.
-
-$Log$
-Revision 1.2 2006/12/30 17:18:33 ricozz
-ebl Works with 1.39.35
-
-Revision 1.1 2006/12/30 09:30:03 ricozz
-ebl works against 1.39.34
-
+++ /dev/null
-diff -Naur org/bacula-2.0.0/src/dird/protos.h bacula-2.0.0/src/dird/protos.h
---- org/bacula-2.0.0/src/dird/protos.h 2006-12-23 17:33:52.000000000 +0100
-+++ bacula-2.0.0/src/dird/protos.h 2007-01-09 16:12:18.000000000 +0100
-@@ -195,6 +195,9 @@
- int get_num_drives_from_SD(UAContext *ua);
- void update_slots(UAContext *ua);
-
-+/* ua_update.c */
-+void update_vol_pool(UAContext *ua, char *val, MEDIA_DBR *mr, POOL_DBR *opr);
-+
- /* ua_output.c */
- void prtit(void *ctx, const char *msg);
- int complete_jcr_for_job(JCR *jcr, JOB *job, POOL *pool);
-diff -Naur org/bacula-2.0.0/src/dird/ua_purge.c bacula-2.0.0/src/dird/ua_purge.c
---- org/bacula-2.0.0/src/dird/ua_purge.c 2006-12-22 16:01:05.000000000 +0100
-+++ bacula-2.0.0/src/dird/ua_purge.c 2007-01-09 16:12:18.000000000 +0100
-@@ -605,6 +605,18 @@
- }
- pm_strcpy(jcr->VolumeName, mr->VolumeName);
- generate_job_event(jcr, "VolumePurged");
-+ if (mr->RecyclePoolId && mr->RecyclePoolId != mr->PoolId) {
-+ POOL_DBR oldpr, newpr;
-+ memset(&oldpr, 0, sizeof(POOL_DBR));
-+ memset(&newpr, 0, sizeof(POOL_DBR));
-+ newpr.PoolId = mr->RecyclePoolId;
-+ oldpr.PoolId = mr->PoolId;
-+ if (db_get_pool_record(jcr, ua->db, &oldpr) && db_get_pool_record(jcr, ua->db, &newpr)) {
-+ update_vol_pool(ua, newpr.Name, mr, &oldpr);
-+ } else {
-+ bsendmsg(ua, "%s", db_strerror(ua->db));
-+ }
-+ }
- /* Send message to Job report, if it is a *real* job */
- if (jcr && jcr->JobId > 0) {
- Jmsg1(jcr, M_INFO, 0, _("All records pruned from Volume \"%s\"; marking it \"Purged\"\n"),
-diff -Naur org/bacula-2.0.0/src/dird/ua_update.c bacula-2.0.0/src/dird/ua_update.c
---- org/bacula-2.0.0/src/dird/ua_update.c 2006-12-23 17:33:52.000000000 +0100
-+++ bacula-2.0.0/src/dird/ua_update.c 2007-01-09 16:12:18.000000000 +0100
-@@ -290,7 +290,7 @@
- }
-
- /* Modify the Pool in which this Volume is located */
--static void update_vol_pool(UAContext *ua, char *val, MEDIA_DBR *mr, POOL_DBR *opr)
-+void update_vol_pool(UAContext *ua, char *val, MEDIA_DBR *mr, POOL_DBR *opr)
- {
- POOL_DBR pr;
- POOLMEM *query;
+++ /dev/null
-From: Eric Bollengier <eric AT eb.homelinux.org>
-
-This patch allows you to:
- - move Purged media to their RecyclePool
-
-$Log$
-Revision 1.2 2007/01/09 15:18:20 ricozz
-ebl works with 2.0.0
-
-Revision 1.1 2006/12/20 18:47:42 ricozz
-ebl works with 1.39.30
-
Notes: If the file is already present, we skip the restore and only change its rights.
       If the file isn't present, we can either create an empty one and apply
       the rights, or do nothing.
+
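  A self-contained sketch of the behaviour described in the note above (the
  function name restore_rights_only() and the create_if_missing switch are
  illustrative assumptions, not part of Bacula):

    #include <fcntl.h>
    #include <sys/stat.h>
    #include <unistd.h>

    /* If the file already exists, skip the data restore and only re-apply
     * the saved rights; otherwise optionally create an empty placeholder
     * first, then apply the rights to it. */
    static int restore_rights_only(const char *path, mode_t saved_mode,
                                   int create_if_missing)
    {
       struct stat st;
       if (stat(path, &st) != 0) {         /* file is not there */
          if (!create_if_missing) {
             return 0;                     /* do nothing */
          }
          int fd = open(path, O_WRONLY | O_CREAT | O_EXCL, saved_mode);
          if (fd < 0) {
             return -1;
          }
          close(fd);
       }
       return chmod(path, saved_mode);     /* apply the saved rights */
    }
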
Item 18: Quick release of FD-SD connection after backup.
Origin: Frank Volf (frank at deze dot org)
Date: 17 November 2005
#define OSDependentInit()
#define tape_open open
#define tape_ioctl ioctl
-#define tape_read read
-#define tape_write write
+#define tape_read ::read
+#define tape_write ::write
#define tape_close ::close
#endif
int changes; /* changes during transaction */
POOLMEM *fname; /* Filename only */
POOLMEM *path; /* Path only */
- POOLMEM *esc_name; /* Escaped file/path name */
+ POOLMEM *esc_name; /* Escaped file name */
+ POOLMEM *esc_name2; /* Escaped path name */
int fnl; /* file name length */
int pnl; /* path name length */
};
#define sql_fetch_field(x) my_sqlite_fetch_field(x)
#define sql_num_fields(x) ((x)->ncolumn)
#define SQL_ROW char**
-
-
+#define sql_batch_start(x) db_batch_start(x)
+#define sql_batch_end(x,y) db_batch_end(x,y)
+#define sql_batch_insert(x,y) db_batch_insert(x,y)
+#define sql_batch_lock_path_query my_sqlite_batch_lock_query
+#define sql_batch_lock_filename_query my_sqlite_batch_lock_query
+#define sql_batch_unlock_tables_query my_sqlite_batch_unlock_query
+#define sql_batch_fill_filename_query my_sqlite_batch_fill_filename_query
+#define sql_batch_fill_path_query my_sqlite_batch_fill_path_query
/* In cats/sqlite.c */
void my_sqlite_free_table(B_DB *mdb);
int my_sqlite_query(B_DB *mdb, const char *cmd);
void my_sqlite_field_seek(B_DB *mdb, int field);
SQL_FIELD *my_sqlite_fetch_field(B_DB *mdb);
+extern char* my_sqlite_batch_lock_query;
+extern char* my_sqlite_batch_unlock_query;
+extern char* my_sqlite_batch_fill_filename_query;
+extern char* my_sqlite_batch_fill_path_query;
#else
int changes; /* changes during transaction */
POOLMEM *fname; /* Filename only */
POOLMEM *path; /* Path only */
- POOLMEM *esc_name; /* Escaped file/path name */
+ POOLMEM *esc_name; /* Escaped file name */
+ POOLMEM *esc_name2; /* Escaped path name */
int fnl; /* file name length */
int pnl; /* path name length */
};
#define sql_fetch_field(x) my_sqlite_fetch_field(x)
#define sql_num_fields(x) ((x)->ncolumn)
#define SQL_ROW char**
-
-
+#define sql_batch_start(x) db_batch_start(x)
+#define sql_batch_end(x,y) db_batch_end(x,y)
+#define sql_batch_insert(x,y) db_batch_insert(x,y)
+#define sql_batch_lock_path_query my_sqlite_batch_lock_query
+#define sql_batch_lock_filename_query my_sqlite_batch_lock_query
+#define sql_batch_unlock_tables_query my_sqlite_batch_unlock_query
+#define sql_batch_fill_filename_query my_sqlite_batch_fill_filename_query
+#define sql_batch_fill_path_query my_sqlite_batch_fill_path_query
/* In cats/sqlite.c */
void my_sqlite_free_table(B_DB *mdb);
int my_sqlite_query(B_DB *mdb, const char *cmd);
void my_sqlite_field_seek(B_DB *mdb, int field);
SQL_FIELD *my_sqlite_fetch_field(B_DB *mdb);
+extern char* my_sqlite_batch_lock_query;
+extern char* my_sqlite_batch_unlock_query;
+extern char* my_sqlite_batch_fill_filename_query;
+extern char* my_sqlite_batch_fill_path_query;
#else
int changes; /* changes made to db */
POOLMEM *fname; /* Filename only */
POOLMEM *path; /* Path only */
- POOLMEM *esc_name; /* Escaped file/path name */
+ POOLMEM *esc_name; /* Escaped file name */
+ POOLMEM *esc_name2; /* Escaped path name */
int fnl; /* file name length */
int pnl; /* path name length */
};
#define sql_field_seek(x, y) mysql_field_seek((x)->result, (y))
#define sql_fetch_field(x) mysql_fetch_field((x)->result)
#define sql_num_fields(x) (int)mysql_num_fields((x)->result)
+#define sql_batch_start(x) db_batch_start(x)
+#define sql_batch_end(x,y) db_batch_end(x,y)
+#define sql_batch_insert(x,y) db_batch_insert(x,y)
+#define sql_batch_lock_path_query my_mysql_batch_lock_path_query
+#define sql_batch_lock_filename_query my_mysql_batch_lock_filename_query
+#define sql_batch_unlock_tables_query my_mysql_batch_unlock_tables_query
+#define sql_batch_fill_filename_query my_mysql_batch_fill_filename_query
+#define sql_batch_fill_path_query my_mysql_batch_fill_path_query
#define SQL_ROW MYSQL_ROW
#define SQL_FIELD MYSQL_FIELD
+
+int my_mysql_batch_start(B_DB *mdb);
+extern char* my_mysql_batch_lock_path_query;
+extern char* my_mysql_batch_lock_filename_query;
+extern char* my_mysql_batch_unlock_tables_query;
+extern char* my_mysql_batch_fill_filename_query;
+extern char* my_mysql_batch_fill_path_query;
+
#else
#ifdef HAVE_POSTGRESQL
int changes; /* changes made to db */
POOLMEM *fname; /* Filename only */
POOLMEM *path; /* Path only */
- POOLMEM *esc_name; /* Escaped file/path name */
+ POOLMEM *esc_name; /* Escaped file name */
+ POOLMEM *esc_name2; /* Escaped path name */
int fnl; /* file name length */
int pnl; /* path name length */
};
int my_postgresql_currval (B_DB *mdb, char *table_name);
void my_postgresql_field_seek (B_DB *mdb, int row);
POSTGRESQL_FIELD * my_postgresql_fetch_field(B_DB *mdb);
-
+int my_postgresql_lock_table(B_DB *mdb, const char *table);
+int my_postgresql_unlock_table(B_DB *mdb);
+int my_postgresql_batch_start(B_DB *mdb);
+int my_postgresql_batch_end(B_DB *mdb, const char *error);
+typedef struct ATTR_DBR ATTR_DBR;
+int my_postgresql_batch_insert(B_DB *mdb, ATTR_DBR *ar);
+char *my_postgresql_copy_escape(char *dest, char *src, size_t len);
+
+extern char* my_pg_batch_lock_path_query;
+extern char* my_pg_batch_lock_filename_query;
+extern char* my_pg_batch_unlock_tables_query;
+extern char* my_pg_batch_fill_filename_query;
+extern char* my_pg_batch_fill_path_query;
/* "Generic" names for easier conversion */
#define sql_store_result(x) ((x)->result)
#define sql_field_seek(x, y) my_postgresql_field_seek((x), (y))
#define sql_fetch_field(x) my_postgresql_fetch_field(x)
#define sql_num_fields(x) ((x)->num_fields)
+#define sql_batch_start(x) my_postgresql_batch_start(x)
+#define sql_batch_end(x,y) my_postgresql_batch_end(x,y)
+#define sql_batch_insert(x,y) my_postgresql_batch_insert(x,y)
+#define sql_lock_table(x,y) my_postgresql_lock_table(x, y)
+#define sql_unlock_table(x,y) my_postgresql_unlock_table(x)
+#define sql_batch_lock_path_query my_pg_batch_lock_path_query
+#define sql_batch_lock_filename_query my_pg_batch_lock_filename_query
+#define sql_batch_unlock_tables_query my_pg_batch_unlock_tables_query
+#define sql_batch_fill_filename_query my_pg_batch_fill_filename_query
+#define sql_batch_fill_path_query my_pg_batch_fill_path_query
+
#define SQL_ROW POSTGRESQL_ROW
#define SQL_FIELD POSTGRESQL_FIELD
mdb->fname = get_pool_memory(PM_FNAME);
mdb->path = get_pool_memory(PM_FNAME);
mdb->esc_name = get_pool_memory(PM_FNAME);
+ mdb->esc_name2 = get_pool_memory(PM_FNAME);
qinsert(&db_list, &mdb->bq); /* put db in list */
V(mutex);
return mdb;
free_pool_memory(mdb->fname);
free_pool_memory(mdb->path);
free_pool_memory(mdb->esc_name);
+ free_pool_memory(mdb->esc_name2);
if (mdb->db_name) {
free(mdb->db_name);
}
}
+char *my_mysql_batch_lock_path_query = "LOCK TABLES Path write, "
+ " batch write, "
+ " Path as p write ";
+
+
+char *my_mysql_batch_lock_filename_query = "LOCK TABLES Filename write, "
+ " batch write, "
+ " Filename as f write ";
+
+char *my_mysql_batch_unlock_tables_query = "UNLOCK TABLES";
+
+char *my_mysql_batch_fill_path_query = "INSERT IGNORE INTO Path (Path) "
+ " SELECT a.Path FROM "
+ " (SELECT DISTINCT Path "
+ " FROM batch) AS a "
+ " WHERE NOT EXISTS "
+ " (SELECT Path "
+ " FROM Path AS p "
+ " WHERE p.Path = a.Path) ";
+
+char *my_mysql_batch_fill_filename_query = "INSERT IGNORE INTO Filename (Name)"
+ " SELECT a.Name FROM "
+ " (SELECT DISTINCT Name "
+ " FROM batch) AS a "
+ " WHERE NOT EXISTS "
+ " (SELECT Name "
+ " FROM Filename AS f "
+ " WHERE f.Name = a.Name) ";
+
#endif /* HAVE_MYSQL */
+
mdb->fname = get_pool_memory(PM_FNAME);
mdb->path = get_pool_memory(PM_FNAME);
mdb->esc_name = get_pool_memory(PM_FNAME);
+ mdb->esc_name2 = get_pool_memory(PM_FNAME);
mdb->allow_transactions = mult_db_connections;
qinsert(&db_list, &mdb->bq); /* put db in list */
V(mutex);
free_pool_memory(mdb->fname);
free_pool_memory(mdb->path);
free_pool_memory(mdb->esc_name);
+ free_pool_memory(mdb->esc_name2);
if (mdb->db_name) {
free(mdb->db_name);
}
return id;
}
+int my_postgresql_lock_table(B_DB *mdb, const char *table)
+{
+ my_postgresql_query(mdb, "BEGIN");
+ Mmsg(mdb->cmd, "LOCK TABLE %s IN SHARE ROW EXCLUSIVE MODE", table);
+ return my_postgresql_query(mdb, mdb->cmd);
+}
+
+int my_postgresql_unlock_table(B_DB *mdb)
+{
+ return my_postgresql_query(mdb, "COMMIT");
+}
+
+int my_postgresql_batch_start(B_DB *mdb)
+{
+ Dmsg0(500, "my_postgresql_batch_start started\n");
+
+ if (my_postgresql_query(mdb,
+ " CREATE TEMPORARY TABLE batch "
+ " (fileindex int, "
+ " jobid int, "
+ " path varchar, "
+ " name varchar, "
+ " lstat varchar, "
+ " md5 varchar)") == 1)
+ {
+ Dmsg0(500, "my_postgresql_batch_start failed\n");
+ return 1;
+ }
+
+ // We are starting a new query. reset everything.
+ mdb->num_rows = -1;
+ mdb->row_number = -1;
+ mdb->field_number = -1;
+
+ if (mdb->result != NULL) {
+ my_postgresql_free_result(mdb);
+ }
+
+ mdb->result = PQexec(mdb->db, "COPY batch FROM STDIN");
+ mdb->status = PQresultStatus(mdb->result);
+ if (mdb->status == PGRES_COPY_IN) {
+ // how many fields in the set?
+ mdb->num_fields = (int) PQnfields(mdb->result);
+ mdb->num_rows = 0;
+ mdb->status = 0;
+ } else {
+ Dmsg0(500, "we failed\n");
+ mdb->status = 1;
+ }
+
+ Dmsg0(500, "my_postgresql_batch_start finishing\n");
+
+ return mdb->status;
+}
+
+/* set error to something to abort operation */
+int my_postgresql_batch_end(B_DB *mdb, const char *error)
+{
+ int res;
+ int count=30;
+ Dmsg0(500, "my_postgresql_batch_end started\n");
+
+ if (!mdb) { /* no files ? */
+ return 0;
+ }
+
+ do {
+ res = PQputCopyEnd(mdb->db, error);
+ } while (res == 0 && --count > 0);
+
+ if (res == 1) {
+ Dmsg0(500, "ok\n");
+ mdb->status = 0;
+ }
+
+ if (res <= 0) {
+ Dmsg0(500, "we failed\n");
+ mdb->status = 1;
+ Mmsg1(&mdb->errmsg, _("error ending batch mode: %s\n"), PQerrorMessage(mdb->db));
+ }
+
+ Dmsg0(500, "my_postgresql_batch_end finishing\n");
+
+ return mdb->status;
+}
+
+int my_postgresql_batch_insert(B_DB *mdb, ATTR_DBR *ar)
+{
+ int res;
+ int count=30;
+ size_t len;
+ char *digest;
+ char ed1[50];
+
+ mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1);
+ my_postgresql_copy_escape(mdb->esc_name, mdb->fname, mdb->fnl);
+
+ mdb->esc_name2 = check_pool_memory_size(mdb->esc_name2, mdb->pnl*2+1);
+ my_postgresql_copy_escape(mdb->esc_name2, mdb->path, mdb->pnl);
+
+ if (ar->Digest == NULL || ar->Digest[0] == 0) {
+ digest = "0";
+ } else {
+ digest = ar->Digest;
+ }
+
+ len = Mmsg(mdb->cmd, "%u\t%s\t%s\t%s\t%s\t%s\n",
+ ar->FileIndex, edit_int64(ar->JobId, ed1), mdb->path,
+ mdb->fname, ar->attr, digest);
+
+ do {
+ res = PQputCopyData(mdb->db,
+ mdb->cmd,
+ len);
+ } while (res == 0 && --count > 0);
+
+ if (res == 1) {
+ Dmsg0(500, "ok\n");
+ mdb->changes++;
+ mdb->status = 0;
+ }
+
+ if (res <= 0) {
+ Dmsg0(500, "we failed\n");
+ mdb->status = 1;
+ Mmsg1(&mdb->errmsg, _("error copying data in batch mode: %s\n"), PQerrorMessage(mdb->db));
+ }
+
+ Dmsg0(500, "my_postgresql_batch_insert finishing\n");
+
+ return mdb->status;
+}
+
+/*
+ * Escape strings so that PostgreSQL is happy on COPY
+ *
+ * NOTE! len is the length of the old string. Your new
+ * string must be long enough (max 2*old+1) to hold
+ * the escaped output.
+ */
+char *my_postgresql_copy_escape(char *dest, char *src, size_t len)
+{
+ /* we have to escape \t, \n, \r, \ */
+ char c = '\0' ;
+
+ while (len > 0 && *src) {
+ switch (*src) {
+ case '\n':
+ c = 'n';
+ break;
+ case '\\':
+ c = '\\';
+ break;
+ case '\t':
+ c = 't';
+ break;
+ case '\r':
+ c = 'r';
+ break;
+ default:
+ c = '\0' ;
+ }
+
+ if (c) {
+ *dest = '\\';
+ dest++;
+ *dest = c;
+ } else {
+ *dest = *src;
+ }
+
+ len--;
+ src++;
+ dest++;
+ }
+
+ *dest = '\0';
+ return dest;
+}
+
+char *my_pg_batch_lock_path_query = "BEGIN; LOCK TABLE Path IN SHARE ROW EXCLUSIVE MODE";
+
+
+char *my_pg_batch_lock_filename_query = "BEGIN; LOCK TABLE Filename IN SHARE ROW EXCLUSIVE MODE";
+
+char *my_pg_batch_unlock_tables_query = "COMMIT";
+
+char *my_pg_batch_fill_path_query = "INSERT INTO Path (Path) "
+ " SELECT a.Path FROM "
+ " (SELECT DISTINCT Path FROM batch) AS a "
+ " WHERE NOT EXISTS (SELECT Path FROM Path WHERE Path = a.Path) ";
+
+char *my_pg_batch_fill_filename_query = "INSERT INTO Filename (Name) "
+ " SELECT a.Name FROM "
+ " (SELECT DISTINCT Name FROM batch) as a "
+ " WHERE NOT EXISTS "
+ " (SELECT Name FROM Filename WHERE Name = a.Name)";
#endif /* HAVE_POSTGRESQL */
bool db_create_device_record(JCR *jcr, B_DB *mdb, DEVICE_DBR *dr);
bool db_create_storage_record(JCR *jcr, B_DB *mdb, STORAGE_DBR *sr);
bool db_create_mediatype_record(JCR *jcr, B_DB *mdb, MEDIATYPE_DBR *mr);
+int db_create_batch_file_record(JCR *jcr);
+int db_batch_start(B_DB *mdb);
+int db_batch_end(B_DB *mdb, const char *error);
+int db_batch_insert(B_DB *mdb, ATTR_DBR *ar);
/* delete.c */
int db_delete_pool_record(JCR *jcr, B_DB *db, POOL_DBR *pool_dbr);
* };
*/
+/* All db_batch_* functions are used to do bulk batch insert in File/Filename/Path
+ * tables. This code can be activated by adding "#define HAVE_BATCH_FILE_INSERT 1"
+ * in baconfig.h
+ *
+ * To sum up:
+ * - bulk load a temp table
+ * - insert missing filenames into Filename with a single query (lock the
+ *   Filename table before that to avoid duplicate inserts from a concurrent update)
+ * - insert missing paths into Path with another single query
+ * - then insert the join between the temp, Filename and Path tables into File.
+ */
+
+int db_batch_start(B_DB *mdb)
+{
+ return sql_query(mdb,
+ " CREATE TEMPORARY TABLE batch "
+ " (fileindex integer, "
+ " jobid integer, "
+ " path blob, "
+ " name blob, "
+ " lstat tinyblob, "
+ " md5 tinyblob) ");
+}
+
+int db_batch_insert(B_DB *mdb, ATTR_DBR *ar)
+{
+ size_t len;
+ char *digest;
+ char ed1[50];
+
+ mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1);
+ db_escape_string(mdb->esc_name, mdb->fname, mdb->fnl);
+
+ mdb->esc_name2 = check_pool_memory_size(mdb->esc_name2, mdb->pnl*2+1);
+ db_escape_string(mdb->esc_name2, mdb->path, mdb->pnl);
+
+ if (ar->Digest == NULL || ar->Digest[0] == 0) {
+ digest = "0";
+ } else {
+ digest = ar->Digest;
+ }
+
+ len = Mmsg(mdb->cmd, "INSERT INTO batch VALUES (%u,%s,'%s','%s','%s','%s')",
+ ar->FileIndex, edit_int64(ar->JobId,ed1), mdb->path,
+ mdb->fname, ar->attr, digest);
+
+ sql_query(mdb, mdb->cmd);
+
+ return mdb->status;
+}
+
+/* set error to something to abort operation */
+int db_batch_end(B_DB *mdb, const char *error)
+{
+
+ Dmsg0(50, "db_batch_end started");
+
+ if (mdb) {
+ mdb->status = 0;
+ return mdb->status;
+ }
+ return 0;
+}
+
+int db_create_batch_file_record(JCR *jcr)
+{
+ Dmsg0(50, "db_create_batch_file_record\n");
+
+ if (!jcr->db_batch) { /* no files to backup ? */
+ Dmsg0(50,"db_create_file_record : no files\n");
+ return 0;
+ }
+
+ if (sql_batch_end(jcr->db_batch, NULL)) {
+ Jmsg(jcr, M_FATAL, 0, "Bad batch end %s\n", jcr->db_batch->errmsg);
+ return 1;
+ }
+
+ /* we have to lock tables */
+ if (sql_query(jcr->db_batch, sql_batch_lock_path_query))
+ {
+ Jmsg(jcr, M_FATAL, 0, "Can't lock Path table %s\n", jcr->db_batch->errmsg);
+ return 1;
+ }
+
+ if (sql_query(jcr->db_batch, sql_batch_fill_path_query))
+ {
+ Jmsg(jcr, M_FATAL, 0, "Can't fill Path table %s\n",jcr->db_batch->errmsg);
+ sql_query(jcr->db_batch, sql_batch_unlock_tables_query);
+ return 1;
+ }
+
+ if (sql_query(jcr->db_batch, sql_batch_unlock_tables_query))
+ {
+ Jmsg(jcr, M_FATAL, 0, "Can't unlock Path table %s\n", jcr->db_batch->errmsg);
+ return 1;
+ }
+
+ /* we have to lock tables */
+ if (sql_query(jcr->db_batch, sql_batch_lock_filename_query))
+ {
+ Jmsg(jcr, M_FATAL, 0, "Can't lock Filename table %s\n", jcr->db_batch->errmsg);
+ return 1;
+ }
+
+ if (sql_query(jcr->db_batch, sql_batch_fill_filename_query))
+ {
+ Jmsg(jcr,M_FATAL,0,"Can't fill Filename table %s\n",jcr->db_batch->errmsg);
+ sql_query(jcr->db_batch, sql_batch_unlock_tables_query);
+ return 1;
+ }
+
+ if (sql_query(jcr->db_batch, sql_batch_unlock_tables_query)) {
+ Jmsg(jcr, M_FATAL, 0, "Can't unlock Filename table %s\n", jcr->db_batch->errmsg);
+ return 1;
+ }
+
+ if (sql_query(jcr->db_batch,
+ " INSERT INTO File (FileIndex, JobId, PathId, FilenameId, LStat, MD5)"
+ " SELECT batch.FileIndex, batch.JobId, Path.PathId, "
+ " Filename.FilenameId,batch.LStat, batch.MD5 "
+ " FROM batch "
+ " JOIN Path ON (batch.Path = Path.Path) "
+ " JOIN Filename ON (batch.Name = Filename.Name) "))
+ {
+ Jmsg(jcr, M_FATAL, 0, "Can't fill File table %s\n", jcr->db_batch->errmsg);
+ return 1;
+ }
+ sql_query(jcr->db_batch, "DROP TABLE batch");
+
+ return 0;
+}
+
+#ifdef HAVE_BATCH_FILE_INSERT
+/*
+ * Create File record in B_DB
+ *
+ * In order to reduce database size, we store the File attributes,
+ * the FileName, and the Path separately. In principle, there
+ * is a single FileName record and a single Path record, no matter
+ * how many times it occurs. In this subroutine, we separate
+ * the file name and the path and fill temporary tables with these three records.
+ */
+int db_create_file_attributes_record(JCR *jcr, B_DB *_mdb, ATTR_DBR *ar)
+{
+
+ Dmsg1(dbglevel, "Fname=%s\n", ar->fname);
+ Dmsg0(dbglevel, "put_file_into_catalog\n");
+
+ if (!jcr->db_batch) {
+ jcr->db_batch = db_init_database(jcr,
+ jcr->db->db_name,
+ jcr->db->db_user,
+ jcr->db->db_password,
+ jcr->db->db_address,
+ jcr->db->db_port,
+ jcr->db->db_socket,
+ 1 /* multi_db = true */);
+
+ if (!jcr->db_batch || !db_open_database(jcr, jcr->db_batch)) {
+ Jmsg(jcr, M_FATAL, 0, _("Could not open database \"%s\".\n"),
+ jcr->db->db_name);
+ if (jcr->db_batch) {
+ Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db_batch));
+ }
+ return 0;
+ }
+
+ sql_batch_start(jcr->db_batch);
+ }
+
+ B_DB *mdb = jcr->db_batch;
+
+ /*
+ * Make sure we have an acceptable attributes record.
+ */
+ if (!(ar->Stream == STREAM_UNIX_ATTRIBUTES ||
+ ar->Stream == STREAM_UNIX_ATTRIBUTES_EX)) {
+ Mmsg1(&mdb->errmsg, _("Attempt to put non-attributes into catalog. Stream=%d\n"),
+ ar->Stream);
+ Jmsg(jcr, M_ERROR, 0, "%s", mdb->errmsg);
+ return 0;
+ }
+
+ split_path_and_file(jcr, mdb, ar->fname);
+
+
+/*
+ if (jcr->changes > 100000) {
+ sql_batch_end(mdb, NULL);
+ sql_batch_start(mdb);
+ jcr->changes = 0;
+ }
+*/
+
+ return (sql_batch_insert(mdb, ar) == 0);
+}
+
+#else /* ! HAVE_BATCH_FILE_INSERT */
/*
* Create File record in B_DB
return 0;
}
+#endif /* ! HAVE_BATCH_FILE_INSERT */
+
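For orientation, a rough sketch of how the batch pieces above fit together for
one job when HAVE_BATCH_FILE_INSERT is defined. The two wrapper functions below
are hypothetical and not part of the patch; only the db_* calls (and the types
they take) come from the code above.

/* Assumes the catalog declarations above (JCR, ATTR_DBR, B_DB). */

/* Per file: lazily opens jcr->db_batch, creates the batch work table via
 * sql_batch_start(), and inserts one attributes row.
 * Returns 0 on error, non-zero on success. */
static int example_store_attributes(JCR *jcr, ATTR_DBR *ar)
{
   return db_create_file_attributes_record(jcr, jcr->db, ar);
}

/* At job end: flushes the batch table into Path, Filename and File
 * (lock, fill, unlock, then the INSERT ... SELECT join) and drops it.
 * Returns 0 on success or when there is nothing to flush, 1 on error. */
static int example_flush_batch(JCR *jcr)
{
   return db_create_batch_file_record(jcr);
}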
/*
* This is the master File entry containing the attributes.
* The filename and path records have already been created.
"VolBytes,VolMounts,VolErrors,VolWrites,MaxVolBytes,VolCapacityBytes,"
"VolRetention,VolUseDuration,MaxVolJobs,MaxVolFiles,Recycle,Slot,"
"FirstWritten,LastWritten,VolStatus,InChanger,VolParts,"
- "LabelType "
+ "LabelType,VolReadTime,VolWriteTime "
"FROM Media WHERE PoolId=%s AND MediaType='%s' AND VolStatus IN ('Full',"
"'Recycle','Purged','Used','Append') AND Enabled=1 "
"ORDER BY LastWritten LIMIT 1",
"VolBytes,VolMounts,VolErrors,VolWrites,MaxVolBytes,VolCapacityBytes,"
"VolRetention,VolUseDuration,MaxVolJobs,MaxVolFiles,Recycle,Slot,"
"FirstWritten,LastWritten,VolStatus,InChanger,VolParts,"
- "LabelType "
+ "LabelType,VolReadTime,VolWriteTime "
"FROM Media WHERE PoolId=%s AND MediaType='%s' AND Enabled=1 "
"AND VolStatus='%s' "
"%s "
mr->InChanger = str_to_int64(row[20]);
mr->VolParts = str_to_int64(row[21]);
mr->LabelType = str_to_int64(row[22]);
+ mr->VolReadTime = str_to_uint64(row[23]);
+ mr->VolWriteTime = str_to_uint64(row[24]);
mr->Enabled = 1; /* ensured via query */
sql_free_result(mdb);
"MaxVolFiles,Recycle,Slot,FirstWritten,LastWritten,InChanger,"
"EndFile,EndBlock,VolParts,LabelType,LabelDate,StorageId,"
"Enabled,LocationId,RecycleCount,InitialWrite,"
- "ScratchPoolId,RecyclePoolId "
+ "ScratchPoolId,RecyclePoolId,VolReadTime,VolWriteTime "
"FROM Media WHERE MediaId=%s",
edit_int64(mr->MediaId, ed1));
} else { /* find by name */
"MaxVolFiles,Recycle,Slot,FirstWritten,LastWritten,InChanger,"
"EndFile,EndBlock,VolParts,LabelType,LabelDate,StorageId,"
"Enabled,LocationId,RecycleCount,InitialWrite,"
- "ScratchPoolId,RecyclePoolId "
+ "ScratchPoolId,RecyclePoolId,VolReadTime,VolWriteTime "
"FROM Media WHERE VolumeName='%s'", mr->VolumeName);
}
mr->InitialWrite = (time_t)str_to_utime(mr->cInitialWrite);
mr->ScratchPoolId = str_to_int64(row[33]);
mr->RecyclePoolId = str_to_int64(row[34]);
+ mr->VolReadTime = str_to_int64(row[35]);
+ mr->VolWriteTime = str_to_int64(row[36]);
ok = true;
}
mdb->fname = get_pool_memory(PM_FNAME);
mdb->path = get_pool_memory(PM_FNAME);
mdb->esc_name = get_pool_memory(PM_FNAME);
+ mdb->esc_name2 = get_pool_memory(PM_FNAME);
mdb->allow_transactions = mult_db_connections;
qinsert(&db_list, &mdb->bq); /* put db in list */
V(mutex);
free_pool_memory(mdb->fname);
free_pool_memory(mdb->path);
free_pool_memory(mdb->esc_name);
+ free_pool_memory(mdb->esc_name2);
if (mdb->db_name) {
free(mdb->db_name);
}
return mdb->fields[mdb->field++];
}
+char *my_sqlite_batch_lock_query = "BEGIN";
+char *my_sqlite_batch_unlock_query = "COMMIT";
+char *my_sqlite_batch_fill_path_query = "INSERT INTO Path (Path) "
+ " SELECT DISTINCT Path FROM batch "
+ " EXCEPT SELECT Path FROM Path ";
+
+char *my_sqlite_batch_fill_filename_query = "INSERT INTO Filename (Name) "
+ " SELECT DISTINCT Name FROM batch "
+ " EXCEPT SELECT Name FROM Filename ";
+
+
+
#endif /* HAVE_SQLITE */
/* Pickup Job termination data */
stat = wait_for_job_termination(jcr);
+#ifdef HAVE_BATCH_FILE_INSERT
+ db_create_batch_file_record(jcr); /* used by bulk batch file insert */
+#endif
if (stat == JS_Terminated) {
backup_cleanup(jcr, stat);
return true;
mr.VolWriteTime = sdmr.VolWriteTime;
mr.VolParts = sdmr.VolParts;
bstrncpy(mr.VolStatus, sdmr.VolStatus, sizeof(mr.VolStatus));
- if (jcr->wstore->StorageId) {
+ if (jcr->wstore && jcr->wstore->StorageId) {
mr.StorageId = jcr->wstore->StorageId;
}
init_job_server(director->MaxConcurrentJobs);
+// init_device_resources();
+
Dmsg0(200, "wait for next job\n");
/* Main loop -- call scheduler to get next job to run */
while ( (jcr = wait_for_next_job(runjob)) ) {
"open=%d labeled=%d offline=%d "
"reserved=%d max_writers=%d "
"autoselect=%d autochanger=%d "
- "changer_name=%127s media_type=%127s volume_name=%127s\n";
+ "changer_name=%127s media_type=%127s volume_name=%127s "
+ "DevReadTime=%d DevWriteTime=%d DevReadBytes=%d "
+ "DevWriteBytes=%d\n";
#endif
int dev_open, dev_append, dev_read, dev_labeled;
int dev_offline, dev_autochanger, dev_autoselect;
int dev_num_writers, dev_max_writers, dev_reserved;
+ int64_t dev_read_time, dev_write_time, dev_write_bytes, dev_read_bytes;
uint64_t dev_PoolId;
Dmsg1(100, "<stored: %s", bs->msg);
if (sscanf(bs->msg, Device_update,
&dev_max_writers, &dev_autoselect,
&dev_autochanger,
changer_name.c_str(), media_type.c_str(),
- volume_name.c_str()) != 15) {
+ volume_name.c_str(),
+ &dev_read_time, &dev_write_time, &dev_read_bytes,
+ &dev_write_bytes) != 19) {
Emsg1(M_ERROR, 0, _("Malformed message: %s\n"), bs->msg);
} else {
unbash_spaces(dev_name);
dev->max_writers = dev_max_writers;
dev->reserved = dev_reserved;
dev->found = true;
+ dev->DevReadTime = dev_read_time; /* TODO : have to update database */
+ dev->DevWriteTime = dev_write_time;
+ dev->DevReadBytes = dev_read_bytes;
+ dev->DevWriteBytes = dev_write_bytes;
}
continue;
}
db_close_database(jcr, jcr->db);
jcr->db = NULL;
}
+ if (jcr->db_batch) {
+ db_close_database(jcr, jcr->db_batch);
+ jcr->db_batch = NULL;
+ }
if (jcr->stime) {
Dmsg0(200, "Free JCR stime\n");
free_pool_memory(jcr->stime);
int get_num_drives_from_SD(UAContext *ua);
void update_slots(UAContext *ua);
+/* ua_update.c */
+void update_vol_pool(UAContext *ua, char *val, MEDIA_DBR *mr, POOL_DBR *opr);
+
/* ua_output.c */
void prtit(void *ctx, const char *msg);
int complete_jcr_for_job(JCR *jcr, JOB *job, POOL *pool);
JCR *jcr = NULL;
char JobName[MAX_NAME_LENGTH];
- if (!open_client_db(ua)) {
- return 1;
- }
-
for (i=1; i<ua->argc; i++) {
if (strcasecmp(ua->argk[i], NT_("jobid")) == 0) {
uint32_t JobId;
* throw up a list and ask the user to select one.
*/
char buf[1000];
+ int tjobs = 0; /* total number of jobs */
/* Count Jobs running */
foreach_jcr(jcr) {
if (jcr->JobId == 0) { /* this is us */
continue;
}
+ tjobs++; /* count of all jobs */
if (!acl_access_ok(ua, Job_ACL, jcr->job->name())) {
continue; /* skip not authorized */
}
- njobs++;
+ njobs++; /* count of authorized jobs */
}
endeach_jcr(jcr);
- if (njobs == 0) {
- bsendmsg(ua, _("No Jobs running.\n"));
+ if (njobs == 0) { /* none authorized */
+ if (tjobs == 0) {
+ bsendmsg(ua, _("No Jobs running.\n"));
+ } else {
+ bsendmsg(ua, _("None of your jobs are running.\n"));
+ }
return 1;
}
+
start_prompt(ua, _("Select Job:\n"));
foreach_jcr(jcr) {
char ed1[50];
if (jcr->JobId == 0) { /* this is us */
continue;
}
+ if (!acl_access_ok(ua, Job_ACL, jcr->job->name())) {
+ continue; /* skip not authorized */
+ }
bsnprintf(buf, sizeof(buf), _("JobId=%s Job=%s"), edit_int64(jcr->JobId, ed1), jcr->Job);
add_prompt(ua, buf);
}
sscanf(buf, "JobId=%d Job=%127s", &njobs, JobName);
jcr = get_jcr_by_full_name(JobName);
if (!jcr) {
- bsendmsg(ua, _("Job %s not found.\n"), JobName);
+ bsendmsg(ua, _("Job \"%s\" not found.\n"), JobName);
return 1;
}
}
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2002-2006 Free Software Foundation Europe e.V.
+ Copyright (C) 2002-2007 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
}
pm_strcpy(jcr->VolumeName, mr->VolumeName);
generate_job_event(jcr, "VolumePurged");
+ /*
+ * If the RecyclePool is defined, move the volume there
+ */
+ if (mr->RecyclePoolId && mr->RecyclePoolId != mr->PoolId) {
+ POOL_DBR oldpr, newpr;
+ memset(&oldpr, 0, sizeof(POOL_DBR));
+ memset(&newpr, 0, sizeof(POOL_DBR));
+ newpr.PoolId = mr->RecyclePoolId;
+ oldpr.PoolId = mr->PoolId;
+ if (db_get_pool_record(jcr, ua->db, &oldpr) && db_get_pool_record(jcr, ua->db, &newpr)) {
+ update_vol_pool(ua, newpr.Name, mr, &oldpr);
+ } else {
+ bsendmsg(ua, "%s", db_strerror(ua->db));
+ }
+ }
/* Send message to Job report, if it is a *real* job */
if (jcr && jcr->JobId > 0) {
Jmsg1(jcr, M_INFO, 0, _("All records pruned from Volume \"%s\"; marking it \"Purged\"\n"),
}
/* Modify the Pool in which this Volume is located */
-static void update_vol_pool(UAContext *ua, char *val, MEDIA_DBR *mr, POOL_DBR *opr)
+void update_vol_pool(UAContext *ua, char *val, MEDIA_DBR *mr, POOL_DBR *opr)
{
POOL_DBR pr;
POOLMEM *query;
}
if (!jcr->fn_printed) {
Jmsg(jcr, M_INFO, 0, "\n");
- Jmsg(jcr, M_INFO, 0, _("The following files are missing:\n"));
+ Jmsg(jcr, M_INFO, 0, _("The following files are in the Catalog but not on disk:\n"));
jcr->fn_printed = true;
}
Jmsg(jcr, M_INFO, 0, " %s%s\n", row[0]?row[0]:"", row[1]?row[1]:"");
-/*
- * Bacula File Daemon verify.c Verify files.
- *
- * Kern Sibbald, October MM
- *
- * Version $Id$
- *
- */
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2006 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2007 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
(FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
Switzerland, email:ftf@fsfeurope.org.
*/
+/*
+ * Bacula File Daemon verify.c Verify files.
+ *
+ * Kern Sibbald, October MM
+ *
+ * Version $Id$
+ *
+ */
#include "bacula.h"
#include "filed.h"
Dmsg2(30, "FT_LNK saving: %s -> %s\n", ff_pkt->fname, ff_pkt->link);
break;
case FT_DIRBEGIN:
+ jcr->num_files_examined--; /* correct file count */
return 1; /* ignored */
case FT_DIREND:
Dmsg1(30, "FT_DIR saving: %s\n", ff_pkt->fname);
return 1;
case FT_NORECURSE:
Jmsg(jcr, M_SKIPPED, 1, _(" Recursion turned off. Directory skipped: %s\n"), ff_pkt->fname);
- return 1;
+ ff_pkt->type = FT_DIREND; /* back up only the directory entry */
+ break;
case FT_NOFSCHG:
Jmsg(jcr, M_SKIPPED, 1, _(" File system change prohibited. Directory skipped: %s\n"), ff_pkt->fname);
return 1;
bool cached_attribute; /* set if attribute is cached */
POOLMEM *attr; /* Attribute string from SD */
B_DB *db; /* database pointer */
+ B_DB *db_batch; /* database pointer for batch insert */
ATTR_DBR *ar; /* DB attribute record */
/* Daemon specific part of JCR */
void dlist::insert_before(void *item, void *where)
{
- dlink *where_link = (dlink *)((char *)where+loffset);
+ dlink *where_link = get_link(where);
set_next(item, where);
set_prev(item, where_link->prev);
void dlist::insert_after(void *item, void *where)
{
- dlink *where_link = (dlink *)((char *)where+loffset);
+ dlink *where_link = get_link(where);
set_next(item, where_link->next);
set_prev(item, where);
void dlist::remove(void *item)
{
void *xitem;
- dlink *ilink = (dlink *)(((char *)item)+loffset); /* item's link */
+ dlink *ilink = get_link(item); /* item's link */
if (item == head) {
head = ilink->next;
if (head) {
}
}
-void * dlist::next(const void *item) const
+void *dlist::next(void *item)
{
if (item == NULL) {
return head;
}
- return ((dlink *)(((char *)item)+loffset))->next;
+ return get_next(item);
}
-void * dlist::prev(const void *item) const
+void *dlist::prev(void *item)
{
if (item == NULL) {
return tail;
}
- return ((dlink *)(((char *)item)+loffset))->prev;
+ return get_prev(item);
}
void dlist::destroy()
{
for (void *n=head; n; ) {
- void *ni = ((dlink *)(((char *)n)+loffset))->next;
+ void *ni = get_next(n);
free(n);
n = ni;
}
* it.
*/
dlist chain;
- dlistString *node;
+ chain.append(new_dlistString("This is a long test line"));
#define CNT 26
printf("append %d dlistString items\n", CNT*CNT*CNT);
strcpy(buf, "ZZZ");
if ((count & 0x3FF) == 0) {
Dmsg1(000, "At %d\n", count);
}
- node = new_dlistString(buf);
- chain.append(node);
+ chain.append(new_dlistString(buf));
buf[1]--;
}
buf[1] = 'Z';
buf[0]--;
}
printf("dlistString items appended, walking chain\n");
+ dlistString *node;
foreach_dlist(node, &chain) {
printf("%s\n", node->c_str());
}
void remove(void *item);
bool empty() const;
int size() const;
- void *next(const void *item) const;
- void *prev(const void *item) const;
+ void *next(void *item);
+ void *prev(void *item);
void destroy();
void *first() const;
void *last() const;
inline dlink *dlist::get_link(void *item)
{
- return (dlink *)((dlink *)(((char *)item)+loffset));
+ return (dlink *)(((char *)item)+loffset);
}
<height>16777215</height>
</size>
</property>
+ <property name="focusPolicy" >
+ <enum>Qt::ClickFocus</enum>
+ </property>
<property name="windowTitle" >
<string>Page Selector</string>
</property>
<height>0</height>
</size>
</property>
+ <property name="focusPolicy" >
+ <enum>Qt::ClickFocus</enum>
+ </property>
<property name="currentIndex" >
<number>-1</number>
</property>
runDialog::runDialog(Console *console)
{
+ QDateTime dt;
+ m_console = console;
setupUi(this);
- storageCombo->addItems(console->storage_list);
+ jobCombo->addItems(console->job_list);
+ filesetCombo->addItems(console->fileset_list);
+ levelCombo->addItems(console->level_list);
+ clientCombo->addItems(console->client_list);
poolCombo->addItems(console->pool_list);
+ storageCombo->addItems(console->storage_list);
+ dateTimeEdit->setDateTime(dt.currentDateTime());
this->show();
}
void runDialog::accept()
{
- printf("Storage=%s\n"
- "Pool=%s\n",
- storageCombo->currentText().toUtf8().data(),
- poolCombo->currentText().toUtf8().data());
- this->hide();
- delete this;
-
-#ifdef xxx
- volume = get_entry_text(label_dialog, "label_entry_volume");
-
- slot = get_spin_text(label_dialog, "label_slot");
-
- if (!pool || !storage || !volume || !(*volume)) {
- set_status_ready();
- return;
- }
+ char cmd[1000];
+ this->hide();
+
bsnprintf(cmd, sizeof(cmd),
- "label volume=\"%s\" pool=\"%s\" storage=\"%s\" slot=%s\n",
- volume, pool, storage, slot);
- write_director(cmd);
- set_text(cmd, strlen(cmd));
-#endif
+ "run job=\"%s\" fileset=\"%s\" level=%s client=\"%s\" pool=\"%s\" "
+ "when=\"%s\" storage=\"%s\" priority=\"%d\" yes\n",
+ jobCombo->currentText().toUtf8().data(),
+ filesetCombo->currentText().toUtf8().data(),
+ levelCombo->currentText().toUtf8().data(),
+ clientCombo->currentText().toUtf8().data(),
+ poolCombo->currentText().toUtf8().data(),
+// dateTimeEdit->textFromDateTime(dateTimeEdit->dateTime()).toUtf8().data(),
+ "",
+ storageCombo->currentText().toUtf8().data(),
+ prioritySpin->value());
+
+// m_console->write(cmd);
+ m_console->set_text(cmd);
+ delete this;
}
+
void runDialog::reject()
{
- printf("Rejected\n");
+ mainWin->set_status(" Canceled");
this->hide();
delete this;
}
void reject();
private:
+ Console *m_console;
};
<property name="windowTitle" >
<string>Run Dialog</string>
</property>
- <layout class="QGridLayout" >
+ <layout class="QVBoxLayout" >
<property name="margin" >
<number>9</number>
</property>
<property name="spacing" >
<number>6</number>
</property>
- <item row="2" column="0" >
- <layout class="QGridLayout" >
+ <item>
+ <layout class="QHBoxLayout" >
<property name="margin" >
<number>0</number>
</property>
<property name="spacing" >
<number>6</number>
</property>
- <item row="3" column="1" >
- <widget class="QComboBox" name="filesetCombo" />
- </item>
- <item row="0" column="3" >
- <widget class="QComboBox" name="typeCombo" >
- <property name="sizePolicy" >
- <sizepolicy>
- <hsizetype>5</hsizetype>
- <vsizetype>0</vsizetype>
- <horstretch>0</horstretch>
- <verstretch>0</verstretch>
- </sizepolicy>
+ <item>
+ <spacer>
+ <property name="orientation" >
+ <enum>Qt::Horizontal</enum>
</property>
- <property name="minimumSize" >
+ <property name="sizeHint" >
<size>
- <width>150</width>
- <height>0</height>
+ <width>71</width>
+ <height>21</height>
</size>
</property>
- </widget>
+ </spacer>
</item>
- <item row="8" column="0" >
- <widget class="QLabel" name="label_5" >
- <property name="text" >
- <string>Bootstrap:</string>
- </property>
- <property name="openExternalLinks" >
- <bool>true</bool>
- </property>
- <property name="buddy" >
- <cstring>bootstrap</cstring>
+ <item>
+ <widget class="QLabel" name="run" >
+ <property name="maximumSize" >
+ <size>
+ <width>16777215</width>
+ <height>30</height>
+ </size>
</property>
- </widget>
- </item>
- <item row="1" column="0" >
- <widget class="QLabel" name="label_8" >
<property name="text" >
- <string>Client:</string>
- </property>
- <property name="buddy" >
- <cstring>clientCombo</cstring>
+ <string><h3>Run a Job</h3></string>
</property>
</widget>
</item>
- <item row="1" column="3" >
- <widget class="QSpinBox" name="prioritySpin" >
- <property name="maximum" >
- <number>10000</number>
+ <item>
+ <spacer>
+ <property name="orientation" >
+ <enum>Qt::Horizontal</enum>
</property>
- </widget>
+ <property name="sizeHint" >
+ <size>
+ <width>81</width>
+ <height>20</height>
+ </size>
+ </property>
+ </spacer>
</item>
+ </layout>
+ </item>
+ <item>
+ <spacer>
+ <property name="orientation" >
+ <enum>Qt::Vertical</enum>
+ </property>
+ <property name="sizeType" >
+ <enum>QSizePolicy::Maximum</enum>
+ </property>
+ <property name="sizeHint" >
+ <size>
+ <width>572</width>
+ <height>16</height>
+ </size>
+ </property>
+ </spacer>
+ </item>
+ <item>
+ <layout class="QGridLayout" >
+ <property name="margin" >
+ <number>0</number>
+ </property>
+ <property name="spacing" >
+ <number>6</number>
+ </property>
<item row="0" column="2" >
<widget class="QLabel" name="label_7" >
<property name="text" >
</property>
</widget>
</item>
- <item row="5" column="0" >
- <widget class="QLabel" name="label_2" >
+ <item row="6" column="0" >
+ <widget class="QLabel" name="label_10" >
<property name="text" >
- <string>Storage:</string>
+ <string>Messages:</string>
</property>
<property name="buddy" >
- <cstring>storageCombo</cstring>
+ <cstring>messagesCombo</cstring>
</property>
</widget>
</item>
- <item row="2" column="1" >
- <widget class="QComboBox" name="levelCombo" />
- </item>
- <item row="4" column="1" >
- <widget class="QComboBox" name="poolCombo" />
+ <item rowspan="6" row="2" column="3" >
+ <spacer>
+ <property name="orientation" >
+ <enum>Qt::Vertical</enum>
+ </property>
+ <property name="sizeHint" >
+ <size>
+ <width>20</width>
+ <height>171</height>
+ </size>
+ </property>
+ </spacer>
</item>
<item row="5" column="1" >
<widget class="QComboBox" name="storageCombo" />
</item>
- <item row="4" column="0" >
- <widget class="QLabel" name="label_3" >
- <property name="text" >
- <string>Pool:</string>
- </property>
- <property name="buddy" >
- <cstring>poolCombo</cstring>
- </property>
- </widget>
+ <item row="4" column="1" >
+ <widget class="QComboBox" name="poolCombo" />
</item>
<item row="7" column="0" >
<widget class="QLabel" name="label" >
</property>
</widget>
</item>
- <item row="2" column="0" >
- <widget class="QLabel" name="label_11" >
+ <item row="3" column="0" >
+ <widget class="QLabel" name="label_9" >
<property name="text" >
- <string>Level:</string>
+ <string>FileSet:</string>
</property>
<property name="buddy" >
- <cstring>levelCombo</cstring>
+ <cstring>filesetCombo</cstring>
</property>
</widget>
</item>
- <item rowspan="6" row="2" column="3" >
- <spacer>
- <property name="orientation" >
- <enum>Qt::Vertical</enum>
+ <item row="8" column="1" >
+ <widget class="QLineEdit" name="bootstrap" >
+ <property name="enabled" >
+ <bool>false</bool>
</property>
- <property name="sizeHint" >
+ <property name="minimumSize" >
<size>
- <width>20</width>
- <height>171</height>
+ <width>200</width>
+ <height>0</height>
</size>
</property>
- </spacer>
- </item>
- <item row="3" column="0" >
- <widget class="QLabel" name="label_9" >
- <property name="text" >
- <string>FileSet:</string>
- </property>
- <property name="buddy" >
- <cstring>filesetCombo</cstring>
+ <property name="readOnly" >
+ <bool>false</bool>
</property>
</widget>
</item>
<item row="7" column="1" >
- <widget class="QDateTimeEdit" name="dateTimeEdit" />
+ <widget class="QDateTimeEdit" name="dateTimeEdit" >
+ <property name="dateTime" >
+ <datetime>
+ <hour>0</hour>
+ <minute>2</minute>
+ <second>0</second>
+ <year>2000</year>
+ <month>1</month>
+ <day>1</day>
+ </datetime>
+ </property>
+ <property name="displayFormat" >
+ <string>yyyy-MM-dd hh:mm:ss</string>
+ </property>
+ <property name="calendarPopup" >
+ <bool>true</bool>
+ </property>
+ </widget>
+ </item>
+ <item row="1" column="3" >
+ <widget class="QSpinBox" name="prioritySpin" >
+ <property name="maximum" >
+ <number>10000</number>
+ </property>
+ <property name="minimum" >
+ <number>1</number>
+ </property>
+ <property name="value" >
+ <number>10</number>
+ </property>
+ </widget>
</item>
<item row="6" column="1" >
<widget class="QComboBox" name="messagesCombo" />
</item>
- <item row="1" column="2" >
- <widget class="QLabel" name="label_4" >
+ <item row="2" column="1" >
+ <widget class="QComboBox" name="levelCombo" />
+ </item>
+ <item row="3" column="1" >
+ <widget class="QComboBox" name="filesetCombo" />
+ </item>
+ <item row="4" column="0" >
+ <widget class="QLabel" name="label_3" >
<property name="text" >
- <string>Priority:</string>
+ <string>Pool:</string>
</property>
<property name="buddy" >
- <cstring>prioritySpin</cstring>
+ <cstring>poolCombo</cstring>
</property>
</widget>
</item>
- <item row="6" column="0" >
- <widget class="QLabel" name="label_10" >
+ <item row="8" column="0" >
+ <widget class="QLabel" name="label_5" >
<property name="text" >
- <string>Messages:</string>
+ <string>Bootstrap:</string>
+ </property>
+ <property name="openExternalLinks" >
+ <bool>true</bool>
</property>
<property name="buddy" >
- <cstring>messagesCombo</cstring>
+ <cstring>bootstrap</cstring>
</property>
</widget>
</item>
- <item row="8" column="1" >
- <widget class="QLineEdit" name="bootstrap" >
+ <item row="0" column="3" >
+ <widget class="QComboBox" name="typeCombo" >
+ <property name="sizePolicy" >
+ <sizepolicy>
+ <hsizetype>5</hsizetype>
+ <vsizetype>0</vsizetype>
+ <horstretch>0</horstretch>
+ <verstretch>0</verstretch>
+ </sizepolicy>
+ </property>
<property name="minimumSize" >
<size>
- <width>200</width>
+ <width>150</width>
<height>0</height>
</size>
</property>
</widget>
</item>
+ <item row="1" column="0" >
+ <widget class="QLabel" name="label_8" >
+ <property name="text" >
+ <string>Client:</string>
+ </property>
+ <property name="buddy" >
+ <cstring>clientCombo</cstring>
+ </property>
+ </widget>
+ </item>
<item row="0" column="0" >
<widget class="QLabel" name="label_6" >
<property name="text" >
</property>
</widget>
</item>
+ <item row="5" column="0" >
+ <widget class="QLabel" name="label_2" >
+ <property name="text" >
+ <string>Storage:</string>
+ </property>
+ <property name="buddy" >
+ <cstring>storageCombo</cstring>
+ </property>
+ </widget>
+ </item>
+ <item row="2" column="0" >
+ <widget class="QLabel" name="label_11" >
+ <property name="text" >
+ <string>Level:</string>
+ </property>
+ <property name="buddy" >
+ <cstring>levelCombo</cstring>
+ </property>
+ </widget>
+ </item>
+ <item row="1" column="2" >
+ <widget class="QLabel" name="label_4" >
+ <property name="text" >
+ <string>Priority:</string>
+ </property>
+ <property name="buddy" >
+ <cstring>prioritySpin</cstring>
+ </property>
+ </widget>
+ </item>
<item row="1" column="1" >
<widget class="QComboBox" name="clientCombo" />
</item>
</item>
</layout>
</item>
- <item row="3" column="0" >
+ <item>
<spacer>
<property name="orientation" >
<enum>Qt::Vertical</enum>
</property>
</spacer>
</item>
- <item row="4" column="0" >
+ <item>
<widget class="QDialogButtonBox" name="buttonBox" >
<property name="orientation" >
<enum>Qt::Horizontal</enum>
</property>
</widget>
</item>
- <item row="1" column="0" >
- <spacer>
- <property name="orientation" >
- <enum>Qt::Vertical</enum>
- </property>
- <property name="sizeType" >
- <enum>QSizePolicy::Maximum</enum>
- </property>
- <property name="sizeHint" >
- <size>
- <width>572</width>
- <height>16</height>
- </size>
- </property>
- </spacer>
- </item>
- <item row="0" column="0" >
- <layout class="QHBoxLayout" >
- <property name="margin" >
- <number>0</number>
- </property>
- <property name="spacing" >
- <number>6</number>
- </property>
- <item>
- <spacer>
- <property name="orientation" >
- <enum>Qt::Horizontal</enum>
- </property>
- <property name="sizeHint" >
- <size>
- <width>71</width>
- <height>21</height>
- </size>
- </property>
- </spacer>
- </item>
- <item>
- <widget class="QLabel" name="run" >
- <property name="maximumSize" >
- <size>
- <width>16777215</width>
- <height>30</height>
- </size>
- </property>
- <property name="text" >
- <string><h3>Run a Job</h3></string>
- </property>
- </widget>
- </item>
- <item>
- <spacer>
- <property name="orientation" >
- <enum>Qt::Horizontal</enum>
- </property>
- <property name="sizeHint" >
- <size>
- <width>81</width>
- <height>20</height>
- </size>
- </property>
- </spacer>
- </item>
- </layout>
- </item>
</layout>
</widget>
<resources/>
if (dev->can_read()) {
dev->clear_read(); /* clear read bit */
-
- /******FIXME**** send read volume usage statistics to director */
+ Dmsg0(100, "dir_update_vol_info. Release0\n");
+ dir_update_volume_info(dcr, false); /* send Volume info to Director */
} else if (dev->num_writers > 0) {
/*
/* Read a maximum of 5 records VOL1, HDR1, ... HDR4 */
for (i=0; i < 6; i++) {
do {
- stat = tape_read(dev->fd, label, sizeof(label));
+ stat = dev->read(label, sizeof(label));
} while (stat == -1 && errno == EINTR);
if (stat < 0) {
berrno be;
} else {
label[79] = '3'; /* ANSI label flag */
}
- stat = tape_write(dev->fd, label, sizeof(label));
+ stat = dev->write(label, sizeof(label));
if (stat != sizeof(label)) {
berrno be;
Jmsg1(jcr, M_FATAL, 0, _("Could not write ANSI VOL1 label. ERR=%s\n"),
* This could come at the end of a tape, ignore
* EOT errors.
*/
- stat = tape_write(dev->fd, label, sizeof(label));
+ stat = dev->write(label, sizeof(label));
if (stat != sizeof(label)) {
berrno be;
if (stat == -1) {
label[4] = 'V';
ascii_to_ebcdic(label, label, sizeof(label));
}
- stat = tape_write(dev->fd, label, sizeof(label));
+ stat = dev->write(label, sizeof(label));
if (stat != sizeof(label)) {
berrno be;
if (stat == -1) {
Pmsg0(000, _("NULL Volume name. This shouldn't happen!!!\n"));
return false;
}
- if (dev->can_read()) {
- Jmsg0(jcr, M_FATAL, 0, _("Attempt to update_volume_info in read mode!!!\n"));
- Pmsg0(000, _("Attempt to update_volume_info in read mode!!!\n"));
- return false;
- }
/* Lock during Volume update */
P(vol_info_mutex);
bmicrosleep(5, 0); /* pause a bit if busy or lots of errors */
dev->clrerror(-1);
}
- if (dev->is_tape()) {
- stat = tape_write(dev->fd, block->buf, (size_t)wlen);
- } else {
- stat = write(dev->fd, block->buf, (size_t)wlen);
- }
+ stat = dev->write(block->buf, (size_t)wlen);
+
} while (stat == -1 && (errno == EBUSY || errno == EIO) && retry++ < 3);
#ifdef DEBUG_BLOCK_ZEROING
bmicrosleep(10, 0); /* pause a bit if busy or lots of errors */
dev->clrerror(-1);
}
- if (dev->is_tape()) {
- stat = tape_read(dev->fd, block->buf, (size_t)block->buf_len);
- } else {
- stat = read(dev->fd, block->buf, (size_t)block->buf_len);
- }
+ stat = dev->read(block->buf, (size_t)block->buf_len);
+
} while (stat == -1 && (errno == EBUSY || errno == EINTR || errno == EIO) && retry++ < 3);
if (stat < 0) {
berrno be;
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2006 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2007 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
mt_com.mt_count = 1;
while (num-- && !at_eot()) {
Dmsg0(100, "Doing read before fsf\n");
- if ((stat = tape_read(fd, (char *)rbuf, rbuf_len)) < 0) {
+ if ((stat = this->read((char *)rbuf, rbuf_len)) < 0) {
if (errno == ENOMEM) { /* tape record exceeds buf len */
stat = rbuf_len; /* This is OK */
/*
}
}
+/* return the last timer interval (usec) */
+int DEVICE::get_timer_count()
+{
+ uint64_t old = last_timer;
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ last_timer = tv.tv_usec + tv.tv_sec * 1000000;
+
+ return last_timer - old;
+}
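The read() and write() methods below accumulate DevReadTime/DevWriteTime from
last_tick, but the get_timer_count() calls that would set last_tick are shipped
commented out. For reference, a small self-contained sketch of the same
bracket-the-I/O-call timing pattern; the names here are illustrative and not
from the patch.

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/time.h>

static uint64_t last_timer_us;

/* Microseconds elapsed since the previous call (same idea as
 * DEVICE::get_timer_count() above). */
static int timer_interval_us(void)
{
   struct timeval tv;
   gettimeofday(&tv, NULL);
   uint64_t now = (uint64_t)tv.tv_sec * 1000000 + tv.tv_usec;
   uint64_t old = last_timer_us;
   last_timer_us = now;
   return (int)(now - old);
}

int main(void)
{
   char buf[4096];
   timer_interval_us();                    /* arm the timer */
   ssize_t n = read(0, buf, sizeof(buf));  /* the I/O call being measured */
   int elapsed = timer_interval_us();      /* usec spent in read(2) */
   printf("read %zd bytes in %d usec\n", n, elapsed);
   return 0;
}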
+
+/* read from fd */
+ssize_t DEVICE::read(void *buf, size_t len)
+{
+ ssize_t read_len ;
+
+// get_timer_count();
+
+ if (this->is_tape()) {
+ read_len = tape_read(fd, buf, len);
+ } else {
+ read_len = ::read(fd, buf, len);
+ }
+
+// last_tick = get_timer_count();
+
+ DevReadTime += last_tick;
+ VolCatInfo.VolReadTime += last_tick;
+
+ if (read_len > 0) { /* skip error */
+ DevReadBytes += read_len;
+ VolCatInfo.VolCatRBytes += read_len;
+ }
+
+ return read_len;
+}
+
+/* write to fd */
+ssize_t DEVICE::write(const void *buf, size_t len)
+{
+ ssize_t write_len ;
+
+// get_timer_count();
+
+ if (this->is_tape()) {
+ write_len = tape_write(fd, buf, len);
+ } else {
+ write_len = ::write(fd, buf, len);
+ }
+
+// last_tick = get_timer_count();
+
+ DevWriteTime += last_tick;
+ VolCatInfo.VolWriteTime += last_tick;
+
+ if (write_len > 0) { /* skip error */
+ DevWriteBytes += write_len;
+ VolCatInfo.VolCatBytes += write_len;
+ }
+
+ return write_len;
+}
/* Return the resource name for the device */
const char *DEVICE::name() const
int rem_wait_sec;
int num_wait;
+ uint64_t last_timer; /* used by read/write/seek to get stats (usec) */
+ int last_tick; /* contains last read/write time (usec) */
+
+ uint64_t DevReadTime;
+ uint64_t DevWriteTime;
+ uint64_t DevWriteBytes;
+ uint64_t DevReadBytes;
+
/* Methods */
+ int get_timer_count(); /* return the last timer interval (usec) */
+
int has_cap(int cap) const { return capabilities & cap; }
void clear_cap(int cap) { capabilities &= ~cap; }
void set_cap(int cap) { capabilities |= cap; }
bool truncate(DCR *dcr); /* in dev.c */
int open(DCR *dcr, int mode); /* in dev.c */
void term(void); /* in dev.c */
+ ssize_t read(void *buf, size_t len); /* in dev.c */
+ ssize_t write(const void *buf, size_t len); /* in dev.c */
bool rewind(DCR *dcr); /* in dev.c */
bool mount(int timeout); /* in dev.c */
bool unmount(int timeout); /* in dev.c */
*/
#undef VERSION
-#define VERSION "2.1.3"
-#define BDATE "09 February 2007"
-#define LSMDATE "09Feb07"
+#define VERSION "2.1.4"
+#define BDATE "10 February 2007"
+#define LSMDATE "10Feb07"
#define PROG_COPYRIGHT "Copyright (C) %d-2007 Free Software Foundation Europe e.V.\n"
#define BYEAR "2007" /* year for copyright messages in progs */
Technical notes on version 2.1
General:
+10Feb07
+kes Apply Eric's scratch patch that moves a purged Volume to
+ the RecyclePool. Question: how is RecyclePool set? what
+ happens to the ScratchPool?
+kes Apply Eric's media patch that collects read/write media
+ times as well as byte counts. This patch requires a
+ simultaneous upgrade of the DIR and SD. Note, there
+ should be some way to turn off timing. I'm not sure
+ the times are in Bacula units.
+kes Apply Eric's batch-insert patch.
09Feb07
kes Update projects list.
08Feb07