--- /dev/null
+diff -Naur cvs/src/cats/cats.h my/src/cats/cats.h
+--- cvs/src/cats/cats.h 2006-12-06 15:11:53.000000000 +0100
++++ my/src/cats/cats.h 2006-12-14 21:11:40.000000000 +0100
+@@ -141,6 +141,7 @@
+ POOLMEM *fname; /* Filename only */
+ POOLMEM *path; /* Path only */
+ POOLMEM *esc_name; /* Escaped file/path name */
++ POOLMEM *esc_name2; /* Escaped file/path name */
+ int fnl; /* file name length */
+ int pnl; /* path name length */
+ };
+@@ -170,8 +171,14 @@
+ #define sql_fetch_field(x) my_sqlite_fetch_field(x)
+ #define sql_num_fields(x) ((x)->ncolumn)
+ #define SQL_ROW char**
+-
+-
++#define sql_batch_start(x) db_batch_start(x)
++#define sql_batch_end(x,y) db_batch_end(x,y)
++#define sql_batch_insert(x,y) db_batch_insert(x,y)
++#define sql_batch_lock_path_query my_sqlite_batch_lock_query
++#define sql_batch_lock_filename_query my_sqlite_batch_lock_query
++#define sql_batch_unlock_tables_query my_sqlite_batch_unlock_query
++#define sql_batch_fill_filename_query my_sqlite_batch_fill_filename_query
++#define sql_batch_fill_path_query my_sqlite_batch_fill_path_query
+
+ /* In cats/sqlite.c */
+ void my_sqlite_free_table(B_DB *mdb);
+@@ -179,6 +186,10 @@
+ int my_sqlite_query(B_DB *mdb, const char *cmd);
+ void my_sqlite_field_seek(B_DB *mdb, int field);
+ SQL_FIELD *my_sqlite_fetch_field(B_DB *mdb);
++extern char* my_sqlite_batch_lock_query;
++extern char* my_sqlite_batch_unlock_query;
++extern char* my_sqlite_batch_fill_filename_query;
++extern char* my_sqlite_batch_fill_path_query;
+
+
+ #else
+@@ -249,6 +260,7 @@
+ POOLMEM *fname; /* Filename only */
+ POOLMEM *path; /* Path only */
+ POOLMEM *esc_name; /* Escaped file/path name */
++ POOLMEM *esc_name2; /* Escaped file/path name */
+ int fnl; /* file name length */
+ int pnl; /* path name length */
+ };
+@@ -289,8 +301,14 @@
+ #define sql_fetch_field(x) my_sqlite_fetch_field(x)
+ #define sql_num_fields(x) ((x)->ncolumn)
+ #define SQL_ROW char**
+-
+-
++#define sql_batch_start(x) db_batch_start(x)
++#define sql_batch_end(x,y) db_batch_end(x,y)
++#define sql_batch_insert(x,y) db_batch_insert(x,y)
++#define sql_batch_lock_path_query my_sqlite_batch_lock_query
++#define sql_batch_lock_filename_query my_sqlite_batch_lock_query
++#define sql_batch_unlock_tables_query my_sqlite_batch_unlock_query
++#define sql_batch_fill_filename_query my_sqlite_batch_fill_filename_query
++#define sql_batch_fill_path_query my_sqlite_batch_fill_path_query
+
+ /* In cats/sqlite.c */
+ void my_sqlite_free_table(B_DB *mdb);
+@@ -298,6 +316,10 @@
+ int my_sqlite_query(B_DB *mdb, const char *cmd);
+ void my_sqlite_field_seek(B_DB *mdb, int field);
+ SQL_FIELD *my_sqlite_fetch_field(B_DB *mdb);
++extern char* my_sqlite_batch_lock_query;
++extern char* my_sqlite_batch_unlock_query;
++extern char* my_sqlite_batch_fill_filename_query;
++extern char* my_sqlite_batch_fill_path_query;
+
+
+ #else
+@@ -341,6 +363,7 @@
+ POOLMEM *fname; /* Filename only */
+ POOLMEM *path; /* Path only */
+ POOLMEM *esc_name; /* Escaped file/path name */
++ POOLMEM *esc_name2; /* Escaped file/path name */
+ int fnl; /* file name length */
+ int pnl; /* path name length */
+ };
+@@ -362,9 +385,25 @@
+ #define sql_field_seek(x, y) mysql_field_seek((x)->result, (y))
+ #define sql_fetch_field(x) mysql_fetch_field((x)->result)
+ #define sql_num_fields(x) (int)mysql_num_fields((x)->result)
++#define sql_batch_start(x) db_batch_start(x)
++#define sql_batch_end(x,y) db_batch_end(x,y)
++#define sql_batch_insert(x,y) db_batch_insert(x,y)
++#define sql_batch_lock_path_query my_mysql_batch_lock_path_query
++#define sql_batch_lock_filename_query my_mysql_batch_lock_filename_query
++#define sql_batch_unlock_tables_query my_mysql_batch_unlock_tables_query
++#define sql_batch_fill_filename_query my_mysql_batch_fill_filename_query
++#define sql_batch_fill_path_query my_mysql_batch_fill_path_query
+ #define SQL_ROW MYSQL_ROW
+ #define SQL_FIELD MYSQL_FIELD
+
++
++int my_mysql_batch_start(B_DB *mdb);
++extern char* my_mysql_batch_lock_path_query;
++extern char* my_mysql_batch_lock_filename_query;
++extern char* my_mysql_batch_unlock_tables_query;
++extern char* my_mysql_batch_fill_filename_query;
++extern char* my_mysql_batch_fill_path_query;
++
+ #else
+
+ #ifdef HAVE_POSTGRESQL
+@@ -425,6 +464,7 @@
+ POOLMEM *fname; /* Filename only */
+ POOLMEM *path; /* Path only */
+ POOLMEM *esc_name; /* Escaped file/path name */
++ POOLMEM *esc_name2; /* Escaped file/path name */
+ int fnl; /* file name length */
+ int pnl; /* path name length */
+ };
+@@ -436,7 +476,19 @@
+ int my_postgresql_currval (B_DB *mdb, char *table_name);
+ void my_postgresql_field_seek (B_DB *mdb, int row);
+ POSTGRESQL_FIELD * my_postgresql_fetch_field(B_DB *mdb);
+-
++int my_postgresql_lock_table(B_DB *mdb, const char *table);
++int my_postgresql_unlock_table(B_DB *mdb);
++int my_postgresql_batch_start(B_DB *mdb);
++int my_postgresql_batch_end(B_DB *mdb, const char *error);
++typedef struct ATTR_DBR ATTR_DBR;
++int my_postgresql_batch_insert(B_DB *mdb, ATTR_DBR *ar);
++char *my_postgresql_copy_escape(char *dest, char *src, size_t len);
++
++extern char* my_pg_batch_lock_path_query;
++extern char* my_pg_batch_lock_filename_query;
++extern char* my_pg_batch_unlock_tables_query;
++extern char* my_pg_batch_fill_filename_query;
++extern char* my_pg_batch_fill_path_query;
+
+ /* "Generic" names for easier conversion */
+ #define sql_store_result(x) ((x)->result)
+@@ -452,6 +504,17 @@
+ #define sql_field_seek(x, y) my_postgresql_field_seek((x), (y))
+ #define sql_fetch_field(x) my_postgresql_fetch_field(x)
+ #define sql_num_fields(x) ((x)->num_fields)
++#define sql_batch_start(x) my_postgresql_batch_start(x)
++#define sql_batch_end(x,y) my_postgresql_batch_end(x,y)
++#define sql_batch_insert(x,y) my_postgresql_batch_insert(x,y)
++#define sql_lock_table(x,y) my_postgresql_lock_table(x, y)
++#define sql_unlock_table(x,y) my_postgresql_unlock_table(x)
++#define sql_batch_lock_path_query my_pg_batch_lock_path_query
++#define sql_batch_lock_filename_query my_pg_batch_lock_filename_query
++#define sql_batch_unlock_tables_query my_pg_batch_unlock_tables_query
++#define sql_batch_fill_filename_query my_pg_batch_fill_filename_query
++#define sql_batch_fill_path_query my_pg_batch_fill_path_query
++
+ #define SQL_ROW POSTGRESQL_ROW
+ #define SQL_FIELD POSTGRESQL_FIELD
+
+diff -Naur cvs/src/cats/mysql.c my/src/cats/mysql.c
+--- cvs/src/cats/mysql.c 2006-12-09 14:41:50.000000000 +0100
++++ my/src/cats/mysql.c 2006-12-16 19:18:17.000000000 +0100
+@@ -121,6 +121,7 @@
+ mdb->fname = get_pool_memory(PM_FNAME);
+ mdb->path = get_pool_memory(PM_FNAME);
+ mdb->esc_name = get_pool_memory(PM_FNAME);
++ mdb->esc_name2 = get_pool_memory(PM_FNAME);
+ qinsert(&db_list, &mdb->bq); /* put db in list */
+ V(mutex);
+ return mdb;
+@@ -231,6 +232,7 @@
+ free_pool_memory(mdb->fname);
+ free_pool_memory(mdb->path);
+ free_pool_memory(mdb->esc_name);
++ free_pool_memory(mdb->esc_name2);
+ if (mdb->db_name) {
+ free(mdb->db_name);
+ }
+@@ -372,4 +374,34 @@
+
+ }
+
++char *my_mysql_batch_lock_path_query = "LOCK TABLES Path write, "
++ " batch write, "
++ " Path as p write ";
++
++
++char *my_mysql_batch_lock_filename_query = "LOCK TABLES Filename write, "
++ " batch write, "
++ " Filename as f write ";
++
++char *my_mysql_batch_unlock_tables_query = "UNLOCK TABLES";
++
++char *my_mysql_batch_fill_path_query = "INSERT IGNORE INTO Path (Path) "
++ " SELECT a.Path FROM "
++ " (SELECT DISTINCT Path "
++ " FROM batch) AS a "
++ " WHERE NOT EXISTS "
++ " (SELECT Path "
++ " FROM Path AS p "
++ " WHERE p.Path = a.Path) ";
++
++char *my_mysql_batch_fill_filename_query = "INSERT IGNORE INTO Filename (Name)"
++ " SELECT a.Name FROM "
++ " (SELECT DISTINCT Name "
++ " FROM batch) AS a "
++ " WHERE NOT EXISTS "
++ " (SELECT Name "
++ " FROM Filename AS f "
++ " WHERE f.Name = a.Name) ";
++
+ #endif /* HAVE_MYSQL */
++
+diff -Naur cvs/src/cats/postgresql.c my/src/cats/postgresql.c
+--- cvs/src/cats/postgresql.c 2006-12-06 15:11:53.000000000 +0100
++++ my/src/cats/postgresql.c 2006-12-14 20:28:28.000000000 +0100
+@@ -124,6 +124,7 @@
+ mdb->fname = get_pool_memory(PM_FNAME);
+ mdb->path = get_pool_memory(PM_FNAME);
+ mdb->esc_name = get_pool_memory(PM_FNAME);
++ mdb->esc_name2 = get_pool_memory(PM_FNAME);
+ mdb->allow_transactions = mult_db_connections;
+ qinsert(&db_list, &mdb->bq); /* put db in list */
+ V(mutex);
+@@ -228,6 +229,7 @@
+ free_pool_memory(mdb->fname);
+ free_pool_memory(mdb->path);
+ free_pool_memory(mdb->esc_name);
++ free_pool_memory(mdb->esc_name2);
+ if (mdb->db_name) {
+ free(mdb->db_name);
+ }
+@@ -538,5 +540,201 @@
+ return id;
+ }
+
++int my_postgresql_lock_table(B_DB *mdb, const char *table)
++{
++ my_postgresql_query(mdb, "BEGIN");
++ Mmsg(mdb->cmd, "LOCK TABLE %s IN SHARE ROW EXCLUSIVE MODE", table);
++ return my_postgresql_query(mdb, mdb->cmd);
++}
++
++int my_postgresql_unlock_table(B_DB *mdb)
++{
++ return my_postgresql_query(mdb, "COMMIT");
++}
++
++int my_postgresql_batch_start(B_DB *mdb)
++{
++ Dmsg0(500, "my_postgresql_batch_start started\n");
++
++ if (my_postgresql_query(mdb,
++ " CREATE TEMPORARY TABLE batch "
++ " (fileindex int, "
++ " jobid int, "
++ " path varchar, "
++ " name varchar, "
++ " lstat varchar, "
++ " md5 varchar)") == 1)
++ {
++ Dmsg0(500, "my_postgresql_batch_start failed\n");
++ return 1;
++ }
++
++ // We are starting a new query. reset everything.
++ mdb->num_rows = -1;
++ mdb->row_number = -1;
++ mdb->field_number = -1;
++
++ if (mdb->result != NULL) {
++ my_postgresql_free_result(mdb);
++ }
++
++ mdb->result = PQexec(mdb->db, "COPY batch FROM STDIN");
++ mdb->status = PQresultStatus(mdb->result);
++ if (mdb->status == PGRES_COPY_IN) {
++ // how many fields in the set?
++ mdb->num_fields = (int) PQnfields(mdb->result);
++ mdb->num_rows = 0;
++ mdb->status = 0;
++ } else {
++ Dmsg0(500, "we failed\n");
++ mdb->status = 1;
++ }
++
++ Dmsg0(500, "my_postgresql_batch_start finishing\n");
++
++ return mdb->status;
++}
++
++/* set error to something to abort operation */
++int my_postgresql_batch_end(B_DB *mdb, const char *error)
++{
++ int res;
++ int count=30;
++ Dmsg0(500, "my_postgresql_batch_end started\n");
++
++ if (!mdb) { /* no files ? */
++ return 0;
++ }
++
++ do {
++ res = PQputCopyEnd(mdb->db, error);
++ } while (res == 0 && --count > 0);
++
++ if (res == 1) {
++ Dmsg0(500, "ok\n");
++ mdb->status = 0;
++ }
++
++ if (res <= 0) {
++ Dmsg0(500, "we failed\n");
++ mdb->status = 1;
++ Mmsg1(&mdb->errmsg, _("error ending batch mode: %s\n"), PQerrorMessage(mdb->db));
++ }
++
++ Dmsg0(500, "my_postgresql_batch_end finishing\n");
++
++ return mdb->status;
++}
++
++int my_postgresql_batch_insert(B_DB *mdb, ATTR_DBR *ar)
++{
++ int res;
++ int count=30;
++ size_t len;
++ char *digest;
++
++ mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1);
++ my_postgresql_copy_escape(mdb->esc_name, mdb->fname, mdb->fnl);
++
++ mdb->esc_name2 = check_pool_memory_size(mdb->esc_name2, mdb->pnl*2+1);
++ my_postgresql_copy_escape(mdb->esc_name2, mdb->path, mdb->pnl);
++
++ if (ar->Digest == NULL || ar->Digest[0] == 0) {
++ digest = "0";
++ } else {
++ digest = ar->Digest;
++ }
++
++ len = Mmsg(mdb->cmd, "%u\t%u\t%s\t%s\t%s\t%s\n",
++ ar->FileIndex, ar->JobId, mdb->esc_name2,
++ mdb->esc_name, ar->attr, digest);
++
++ do {
++ res = PQputCopyData(mdb->db,
++ mdb->cmd,
++ len);
++ } while (res == 0 && --count > 0);
++
++ if (res == 1) {
++ Dmsg0(500, "ok\n");
++ mdb->changes++;
++ mdb->status = 0;
++ }
++
++ if (res <= 0) {
++ Dmsg0(500, "we failed\n");
++ mdb->status = 1;
++ Mmsg1(&mdb->errmsg, _("error copying in batch mode: %s\n"), PQerrorMessage(mdb->db));
++ }
++
++ Dmsg0(500, "my_postgresql_batch_insert finishing\n");
++
++ return mdb->status;
++}
++
++/*
++ * Escape strings so that PostgreSQL is happy on COPY
++ *
++ * NOTE! len is the length of the old string. Your new
++ * string must be long enough (max 2*old+1) to hold
++ * the escaped output.
++ */
++char *my_postgresql_copy_escape(char *dest, char *src, size_t len)
++{
++ /* we have to escape \t, \n, \r, \ */
++ char c = '\0' ;
++
++ while (len > 0 && *src) {
++ switch (*src) {
++ case '\n':
++ c = 'n';
++ break;
++ case '\\':
++ c = '\\';
++ break;
++ case '\t':
++ c = 't';
++ break;
++ case '\r':
++ c = 'r';
++ break;
++ default:
++ c = '\0' ;
++ }
++
++ if (c) {
++ *dest = '\\';
++ dest++;
++ *dest = c;
++ } else {
++ *dest = *src;
++ }
++
++ len--;
++ src++;
++ dest++;
++ }
++
++ *dest = '\0';
++ return dest;
++}
++
++char *my_pg_batch_lock_path_query = "BEGIN; LOCK TABLE Path IN SHARE ROW EXCLUSIVE MODE";
++
++
++char *my_pg_batch_lock_filename_query = "BEGIN; LOCK TABLE Filename IN SHARE ROW EXCLUSIVE MODE";
++
++char *my_pg_batch_unlock_tables_query = "COMMIT";
++
++char *my_pg_batch_fill_path_query = "INSERT INTO Path (Path) "
++ " SELECT a.Path FROM "
++ " (SELECT DISTINCT Path FROM batch) AS a "
++ " WHERE NOT EXISTS (SELECT Path FROM Path WHERE Path = a.Path) ";
++
+
++char *my_pg_batch_fill_filename_query = "INSERT INTO Filename (Name) "
++ " SELECT a.Name FROM "
++ " (SELECT DISTINCT Name FROM batch) as a "
++ " WHERE NOT EXISTS "
++ " (SELECT Name FROM Filename WHERE Name = a.Name)";
+ #endif /* HAVE_POSTGRESQL */
+diff -Naur cvs/src/cats/protos.h my/src/cats/protos.h
+--- cvs/src/cats/protos.h 2006-12-06 15:11:53.000000000 +0100
++++ my/src/cats/protos.h 2006-12-13 19:03:46.000000000 +0100
+@@ -67,6 +67,10 @@
+ bool db_create_device_record(JCR *jcr, B_DB *mdb, DEVICE_DBR *dr);
+ bool db_create_storage_record(JCR *jcr, B_DB *mdb, STORAGE_DBR *sr);
+ bool db_create_mediatype_record(JCR *jcr, B_DB *mdb, MEDIATYPE_DBR *mr);
++int db_create_batch_file_record(JCR *jcr);
++int db_batch_start(B_DB *mdb);
++int db_batch_end(B_DB *mdb, const char *error);
++int db_batch_insert(B_DB *mdb, ATTR_DBR *ar);
+
+ /* delete.c */
+ int db_delete_pool_record(JCR *jcr, B_DB *db, POOL_DBR *pool_dbr);
+diff -Naur cvs/src/cats/sql_create.c my/src/cats/sql_create.c
+--- cvs/src/cats/sql_create.c 2006-12-06 15:11:53.000000000 +0100
++++ my/src/cats/sql_create.c 2006-12-14 22:06:41.000000000 +0100
+@@ -664,9 +664,207 @@
+ * };
+ */
+
++/* All db_batch_* functions are used to do bulk batch insert in File/Filename/Path
++ * tables. This code can be activated by adding "#define HAVE_BATCH_FILE_INSERT 1"
++ * in baconfig.h
++ *
++ * To sum up :
++ * - bulk load a temp table
++ * - insert missing filenames into filename with a single query (lock filenames
++ * - table before that to avoid possible duplicate inserts with concurrent update)
++ * - insert missing paths into path with another single query
++ * - then insert the join between the temp, filename and path tables into file.
++ */
++
++int db_batch_start(B_DB *mdb)
++{
++ return sql_query(mdb,
++ " CREATE TEMPORARY TABLE batch "
++ " (fileindex integer, "
++ " jobid integer, "
++ " path blob, "
++ " name blob, "
++ " lstat tinyblob, "
++ " md5 tinyblob) ");
++}
++
++int db_batch_insert(B_DB *mdb, ATTR_DBR *ar)
++{
++ size_t len;
++ char *digest;
++
++ mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1);
++ db_escape_string(mdb->esc_name, mdb->fname, mdb->fnl);
++
++ mdb->esc_name2 = check_pool_memory_size(mdb->esc_name2, mdb->pnl*2+1);
++ db_escape_string(mdb->esc_name2, mdb->path, mdb->pnl);
++
++ if (ar->Digest == NULL || ar->Digest[0] == 0) {
++ digest = "0";
++ } else {
++ digest = ar->Digest;
++ }
++
++ len = Mmsg(mdb->cmd, "INSERT INTO batch VALUES (%u,%u,'%s','%s','%s','%s')",
++ ar->FileIndex, ar->JobId, mdb->esc_name2,
++ mdb->esc_name, ar->attr, digest);
++
++ sql_query(mdb, mdb->cmd);
++
++ return mdb->status;
++}
++
++/* set error to something to abort operation */
++int db_batch_end(B_DB *mdb, const char *error)
++{
++
++ Dmsg0(50, "db_batch_end started");
++
++ if (mdb) {
++ mdb->status = 0;
++ return mdb->status;
++ }
++ return 0;
++}
++
++int db_create_batch_file_record(JCR *jcr)
++{
++ Dmsg0(50,"db_create_batch_file_record\n");
++
++ if (!jcr->db_batch) { /* no files to backup ? */
++ Dmsg0(50,"db_create_file_record : no files\n");
++ return 0;
++ }
++
++ if (sql_batch_end(jcr->db_batch, NULL)) {
++ Jmsg(jcr, M_FATAL, 0, "Bad batch end %s\n", jcr->db_batch->errmsg);
++ return 1;
++ }
++
++ /* we have to lock tables */
++ if (sql_query(jcr->db_batch, sql_batch_lock_path_query))
++ {
++ Jmsg(jcr, M_FATAL, 0, "Can't lock Path table %s\n", jcr->db_batch->errmsg);
++ return 1;
++ }
++
++ if (sql_query(jcr->db_batch, sql_batch_fill_path_query))
++ {
++ Jmsg(jcr, M_FATAL, 0, "Can't fill Path table %s\n",jcr->db_batch->errmsg);
++ sql_query(jcr->db_batch, sql_batch_unlock_tables_query);
++ return 1;
++ }
++
++ if (sql_query(jcr->db_batch, sql_batch_unlock_tables_query))
++ {
++ Jmsg(jcr, M_FATAL, 0, "Can't unlock Path table %s\n", jcr->db_batch->errmsg);
++ return 1;
++ }
++
++ /* we have to lock tables */
++ if (sql_query(jcr->db_batch, sql_batch_lock_filename_query))
++ {
++ Jmsg(jcr, M_FATAL, 0, "Can't lock Filename table %s\n", jcr->db_batch->errmsg);
++ return 1;
++ }
++
++ if (sql_query(jcr->db_batch, sql_batch_fill_filename_query))
++ {
++ Jmsg(jcr,M_FATAL,0,"Can't fill Filename table %s\n",jcr->db_batch->errmsg);
++ sql_query(jcr->db_batch, sql_batch_unlock_tables_query);
++ return 1;
++ }
++
++ if (sql_query(jcr->db_batch, sql_batch_unlock_tables_query)) {
++ Jmsg(jcr, M_FATAL, 0, "Can't unlock Filename table %s\n", jcr->db_batch->errmsg);
++ return 1;
++ }
++
++ if (sql_query(jcr->db_batch,
++ " INSERT INTO File (FileIndex, JobId, PathId, FilenameId, LStat, MD5)"
++ " SELECT batch.FileIndex, batch.JobId, Path.PathId, "
++ " Filename.FilenameId,batch.LStat, batch.MD5 "
++ " FROM batch "
++ " JOIN Path ON (batch.Path = Path.Path) "
++ " JOIN Filename ON (batch.Name = Filename.Name) "))
++ {
++ Jmsg(jcr, M_FATAL, 0, "Can't fill File table %s\n", jcr->db_batch->errmsg);
++ return 1;
++ }
++
++ sql_query(jcr->db_batch, "DROP TABLE batch");
++
++ return 0;
++}
++
++#ifdef HAVE_BATCH_FILE_INSERT
++/*
++ * Create File record in B_DB
++ *
++ * In order to reduce database size, we store the File attributes,
++ * the FileName, and the Path separately. In principle, there
++ * is a single FileName record and a single Path record, no matter
++ * how many times it occurs. This is this subroutine, we separate
++ * the file and the path and fill temporary tables with this three records.
++ */
++int db_create_file_attributes_record(JCR *jcr, B_DB *_mdb, ATTR_DBR *ar)
++{
++
++ Dmsg1(dbglevel, "Fname=%s\n", ar->fname);
++ Dmsg0(dbglevel, "put_file_into_catalog\n");
++
++ if (!jcr->db_batch) {
++ jcr->db_batch = db_init_database(jcr,
++ jcr->db->db_name,
++ jcr->db->db_user,
++ jcr->db->db_password,
++ jcr->db->db_address,
++ jcr->db->db_port,
++ jcr->db->db_socket,
++ 1 /* multi_db = true */);
++
++ if (!jcr->db_batch || !db_open_database(jcr, jcr->db_batch)) {
++ Jmsg(jcr, M_FATAL, 0, _("Could not open database \"%s\".\n"),
++ jcr->db->db_name);
++ if (jcr->db_batch) {
++ Jmsg(jcr, M_FATAL, 0, "%s", db_strerror(jcr->db_batch));
++ }
++ return 0;
++ }
++
++ sql_batch_start(jcr->db_batch);
++ }
++
++ B_DB *mdb = jcr->db_batch;
++
++ /*
++ * Make sure we have an acceptable attributes record.
++ */
++ if (!(ar->Stream == STREAM_UNIX_ATTRIBUTES ||
++ ar->Stream == STREAM_UNIX_ATTRIBUTES_EX)) {
++ Mmsg1(&mdb->errmsg, _("Attempt to put non-attributes into catalog. Stream=%d\n"),
++ ar->Stream);
++ Jmsg(jcr, M_ERROR, 0, "%s", mdb->errmsg);
++ return 0;
++ }
++
++ split_path_and_file(jcr, mdb, ar->fname);
+
+
+ /*
++ if (jcr->changes > 100000) {
++ sql_batch_end(mdb, NULL);
++ sql_batch_start(mdb);
++ jcr->changes = 0;
++ }
++*/
++
++ return (sql_batch_insert(mdb, ar) == 0);
++}
++
++#else /* ! HAVE_BATCH_FILE_INSERT */
++
++/*
+ * Create File record in B_DB
+ *
+ * In order to reduce database size, we store the File attributes,
+@@ -721,6 +919,8 @@
+ return 0;
+ }
+
++#endif /* ! HAVE_BATCH_FILE_INSERT */
++
+ /*
+ * This is the master File entry containing the attributes.
+ * The filename and path records have already been created.
+diff -Naur cvs/src/cats/sqlite.c my/src/cats/sqlite.c
+--- cvs/src/cats/sqlite.c 2006-12-06 15:11:53.000000000 +0100
++++ my/src/cats/sqlite.c 2006-12-14 22:30:35.000000000 +0100
+@@ -108,6 +108,7 @@
+ mdb->fname = get_pool_memory(PM_FNAME);
+ mdb->path = get_pool_memory(PM_FNAME);
+ mdb->esc_name = get_pool_memory(PM_FNAME);
++ mdb->esc_name2 = get_pool_memory(PM_FNAME);
+ mdb->allow_transactions = mult_db_connections;
+ qinsert(&db_list, &mdb->bq); /* put db in list */
+ V(mutex);
+@@ -213,6 +214,7 @@
+ free_pool_memory(mdb->fname);
+ free_pool_memory(mdb->path);
+ free_pool_memory(mdb->esc_name);
++ free_pool_memory(mdb->esc_name2);
+ if (mdb->db_name) {
+ free(mdb->db_name);
+ }
+@@ -433,4 +435,16 @@
+ return mdb->fields[mdb->field++];
+ }
+
++char *my_sqlite_batch_lock_query = "BEGIN";
++char *my_sqlite_batch_unlock_query = "COMMIT";
++char *my_sqlite_batch_fill_path_query = "INSERT INTO Path (Path) "
++ " SELECT DISTINCT Path FROM batch "
++ " EXCEPT SELECT Path FROM Path ";
++
++char *my_sqlite_batch_fill_filename_query = "INSERT INTO Filename (Name) "
++ " SELECT DISTINCT Name FROM batch "
++ " EXCEPT SELECT Name FROM Filename ";
++
++
++
+ #endif /* HAVE_SQLITE */
+diff -Naur cvs/src/dird/backup.c my/src/dird/backup.c
+--- cvs/src/dird/backup.c 2006-12-13 11:57:52.000000000 +0100
++++ my/src/dird/backup.c 2006-12-13 19:03:46.000000000 +0100
+@@ -233,6 +233,9 @@
+
+ /* Pickup Job termination data */
+ stat = wait_for_job_termination(jcr);
++#ifdef HAVE_BATCH_FILE_INSERT
++ db_create_batch_file_record(jcr); /* used by bulk batch file insert */
++#endif
+ if (stat == JS_Terminated) {
+ backup_cleanup(jcr, stat);
+ return true;
+diff -Naur cvs/src/dird/jobq.c my/src/dird/jobq.c
+--- cvs/src/dird/jobq.c 2006-11-24 11:29:37.000000000 +0100
++++ my/src/dird/jobq.c 2006-12-13 19:03:46.000000000 +0100
+@@ -563,6 +563,10 @@
+ db_close_database(jcr, jcr->db);
+ jcr->db = NULL;
+ }
++ if (jcr->db_batch) {
++ db_close_database(jcr, jcr->db_batch);
++ jcr->db_batch = NULL;
++ }
+ Dmsg2(2300, "====== Termination job=%d use_cnt=%d\n", jcr->JobId, jcr->use_count());
+ jcr->SDJobStatus = 0;
+ V(jq->mutex); /* release internal lock */
+diff -Naur cvs/src/jcr.h my/src/jcr.h
+--- cvs/src/jcr.h 2006-12-12 21:03:36.000000000 +0100
++++ my/src/jcr.h 2006-12-13 19:03:46.000000000 +0100
+@@ -184,6 +184,7 @@
+ bool cached_attribute; /* set if attribute is cached */
+ POOLMEM *attr; /* Attribute string from SD */
+ B_DB *db; /* database pointer */
++ B_DB *db_batch; /* database pointer for batch insert */
+ ATTR_DBR *ar; /* DB attribute record */
+
+ /* Daemon specific part of JCR */
--- /dev/null
+diff -Naur new/bacula-1.39.30/src/baconfig.h bacula-1.39.30/src/baconfig.h
+--- new/bacula-1.39.30/src/baconfig.h 2006-12-08 15:27:09.000000000 +0100
++++ bacula-1.39.30/src/baconfig.h 2006-12-19 22:27:27.000000000 +0100
+@@ -107,8 +107,8 @@
+ #define OSDependentInit()
+ #define tape_open open
+ #define tape_ioctl ioctl
+-#define tape_read read
+-#define tape_write write
++#define tape_read ::read
++#define tape_write ::write
+ #define tape_close ::close
+
+ #endif
+diff -Naur new/bacula-1.39.30/src/cats/sql_find.c bacula-1.39.30/src/cats/sql_find.c
+--- new/bacula-1.39.30/src/cats/sql_find.c 2006-11-27 11:02:59.000000000 +0100
++++ bacula-1.39.30/src/cats/sql_find.c 2006-12-19 22:27:27.000000000 +0100
+@@ -283,7 +283,7 @@
+ "VolBytes,VolMounts,VolErrors,VolWrites,MaxVolBytes,VolCapacityBytes,"
+ "VolRetention,VolUseDuration,MaxVolJobs,MaxVolFiles,Recycle,Slot,"
+ "FirstWritten,LastWritten,VolStatus,InChanger,VolParts,"
+- "LabelType "
++ "LabelType,VolReadTime,VolWriteTime "
+ "FROM Media WHERE PoolId=%s AND MediaType='%s' AND VolStatus IN ('Full',"
+ "'Recycle','Purged','Used','Append') AND Enabled=1 "
+ "ORDER BY LastWritten LIMIT 1",
+@@ -308,7 +308,7 @@
+ "VolBytes,VolMounts,VolErrors,VolWrites,MaxVolBytes,VolCapacityBytes,"
+ "VolRetention,VolUseDuration,MaxVolJobs,MaxVolFiles,Recycle,Slot,"
+ "FirstWritten,LastWritten,VolStatus,InChanger,VolParts,"
+- "LabelType "
++ "LabelType,VolReadTime,VolWriteTime "
+ "FROM Media WHERE PoolId=%s AND MediaType='%s' AND Enabled=1 "
+ "AND VolStatus='%s' "
+ "%s "
+@@ -371,6 +371,8 @@
+ mr->InChanger = str_to_int64(row[20]);
+ mr->VolParts = str_to_int64(row[21]);
+ mr->LabelType = str_to_int64(row[22]);
++ mr->VolReadTime = str_to_uint64(row[23]);
++ mr->VolWriteTime = str_to_uint64(row[24]);
+ mr->Enabled = 1; /* ensured via query */
+ sql_free_result(mdb);
+
+diff -Naur new/bacula-1.39.30/src/cats/sql_get.c bacula-1.39.30/src/cats/sql_get.c
+--- new/bacula-1.39.30/src/cats/sql_get.c 2006-11-27 11:02:59.000000000 +0100
++++ bacula-1.39.30/src/cats/sql_get.c 2006-12-19 22:27:27.000000000 +0100
+@@ -872,7 +872,7 @@
+ "MaxVolFiles,Recycle,Slot,FirstWritten,LastWritten,InChanger,"
+ "EndFile,EndBlock,VolParts,LabelType,LabelDate,StorageId,"
+ "Enabled,LocationId,RecycleCount,InitialWrite,"
+- "ScratchPoolId,RecyclePoolId "
++ "ScratchPoolId,RecyclePoolId,VolReadTime,VolWriteTime "
+ "FROM Media WHERE MediaId=%s",
+ edit_int64(mr->MediaId, ed1));
+ } else { /* find by name */
+@@ -882,7 +882,7 @@
+ "MaxVolFiles,Recycle,Slot,FirstWritten,LastWritten,InChanger,"
+ "EndFile,EndBlock,VolParts,LabelType,LabelDate,StorageId,"
+ "Enabled,LocationId,RecycleCount,InitialWrite,"
+- "ScratchPoolId,RecyclePoolId "
++ "ScratchPoolId,RecyclePoolId,VolReadTime,VolWriteTime "
+ "FROM Media WHERE VolumeName='%s'", mr->VolumeName);
+ }
+
+@@ -938,6 +938,8 @@
+ mr->InitialWrite = (time_t)str_to_utime(mr->cInitialWrite);
+ mr->ScratchPoolId = str_to_int64(row[33]);
+ mr->RecyclePoolId = str_to_int64(row[34]);
++ mr->VolReadTime = str_to_int64(row[35]);
++ mr->VolWriteTime = str_to_int64(row[36]);
+
+ ok = true;
+ }
+diff -Naur new/bacula-1.39.30/src/dird/catreq.c bacula-1.39.30/src/dird/catreq.c
+--- new/bacula-1.39.30/src/dird/catreq.c 2006-11-21 14:20:08.000000000 +0100
++++ bacula-1.39.30/src/dird/catreq.c 2006-12-19 22:27:27.000000000 +0100
+@@ -277,7 +277,7 @@
+ mr.VolWriteTime = sdmr.VolWriteTime;
+ mr.VolParts = sdmr.VolParts;
+ bstrncpy(mr.VolStatus, sdmr.VolStatus, sizeof(mr.VolStatus));
+- if (jcr->wstore->StorageId) {
++ if (jcr->wstore && jcr->wstore->StorageId) {
+ mr.StorageId = jcr->wstore->StorageId;
+ }
+
+diff -Naur new/bacula-1.39.30/src/dird/dird.c bacula-1.39.30/src/dird/dird.c
+--- new/bacula-1.39.30/src/dird/dird.c 2006-11-27 11:02:59.000000000 +0100
++++ bacula-1.39.30/src/dird/dird.c 2006-12-19 22:27:27.000000000 +0100
+@@ -269,6 +269,8 @@
+
+ init_job_server(director->MaxConcurrentJobs);
+
++// init_device_resources();
++
+ Dmsg0(200, "wait for next job\n");
+ /* Main loop -- call scheduler to get next job to run */
+ while ( (jcr = wait_for_next_job(runjob)) ) {
+diff -Naur new/bacula-1.39.30/src/dird/getmsg.c bacula-1.39.30/src/dird/getmsg.c
+--- new/bacula-1.39.30/src/dird/getmsg.c 2006-11-21 14:20:09.000000000 +0100
++++ bacula-1.39.30/src/dird/getmsg.c 2006-12-19 22:27:27.000000000 +0100
+@@ -62,7 +62,9 @@
+ "open=%d labeled=%d offline=%d "
+ "reserved=%d max_writers=%d "
+ "autoselect=%d autochanger=%d "
+- "changer_name=%127s media_type=%127s volume_name=%127s\n";
++ "changer_name=%127s media_type=%127s volume_name=%127s "
++ "DevReadTime=%d DevWriteTime=%d DevReadBytes=%d "
++ "DevWriteBytes=%d\n";
+ #endif
+
+
+@@ -243,6 +245,7 @@
+ int dev_open, dev_append, dev_read, dev_labeled;
+ int dev_offline, dev_autochanger, dev_autoselect;
+ int dev_num_writers, dev_max_writers, dev_reserved;
++ uint64_t dev_read_time, dev_write_time, dev_write_bytes, dev_read_bytes;
+ uint64_t dev_PoolId;
+ Dmsg1(100, "<stored: %s", bs->msg);
+ if (sscanf(bs->msg, Device_update,
+@@ -253,7 +256,9 @@
+ &dev_max_writers, &dev_autoselect,
+ &dev_autochanger,
+ changer_name.c_str(), media_type.c_str(),
+- volume_name.c_str()) != 15) {
++ volume_name.c_str(),
++ &dev_read_time, &dev_write_time, &dev_read_bytes,
++ &dev_write_bytes) != 19) {
+ Emsg1(M_ERROR, 0, _("Malformed message: %s\n"), bs->msg);
+ } else {
+ unbash_spaces(dev_name);
+@@ -283,6 +288,10 @@
+ dev->max_writers = dev_max_writers;
+ dev->reserved = dev_reserved;
+ dev->found = true;
++ dev->DevReadTime = dev_read_time; /* TODO : have to update database */
++ dev->DevWriteTime = dev_write_time;
++ dev->DevReadBytes = dev_read_bytes;
++ dev->DevWriteBytes = dev_write_bytes;
+ }
+ continue;
+ }
+diff -Naur new/bacula-1.39.30/src/stored/acquire.c bacula-1.39.30/src/stored/acquire.c
+--- new/bacula-1.39.30/src/stored/acquire.c 2006-11-27 11:03:01.000000000 +0100
++++ bacula-1.39.30/src/stored/acquire.c 2006-12-19 22:27:27.000000000 +0100
+@@ -461,8 +461,8 @@
+
+ if (dev->can_read()) {
+ dev->clear_read(); /* clear read bit */
+-
+- /******FIXME**** send read volume usage statistics to director */
++ Dmsg0(100, "dir_update_vol_info. Release0\n");
++ dir_update_volume_info(dcr, false); /* send Volume info to Director */
+
+ } else if (dev->num_writers > 0) {
+ /*
+diff -Naur new/bacula-1.39.30/src/stored/ansi_label.c bacula-1.39.30/src/stored/ansi_label.c
+--- new/bacula-1.39.30/src/stored/ansi_label.c 2006-11-21 18:03:45.000000000 +0100
++++ bacula-1.39.30/src/stored/ansi_label.c 2006-12-19 22:27:27.000000000 +0100
+@@ -87,7 +87,7 @@
+ /* Read a maximum of 5 records VOL1, HDR1, ... HDR4 */
+ for (i=0; i < 6; i++) {
+ do {
+- stat = tape_read(dev->fd, label, sizeof(label));
++ stat = dev->read(label, sizeof(label));
+ } while (stat == -1 && errno == EINTR);
+ if (stat < 0) {
+ berrno be;
+@@ -309,7 +309,7 @@
+ } else {
+ label[79] = '3'; /* ANSI label flag */
+ }
+- stat = tape_write(dev->fd, label, sizeof(label));
++ stat = dev->write(label, sizeof(label));
+ if (stat != sizeof(label)) {
+ berrno be;
+ Jmsg1(jcr, M_FATAL, 0, _("Could not write ANSI VOL1 label. ERR=%s\n"),
+@@ -341,7 +341,7 @@
+ * This could come at the end of a tape, ignore
+ * EOT errors.
+ */
+- stat = tape_write(dev->fd, label, sizeof(label));
++ stat = dev->write(label, sizeof(label));
+ if (stat != sizeof(label)) {
+ berrno be;
+ if (stat == -1) {
+@@ -370,7 +370,7 @@
+ label[4] = 'V';
+ ascii_to_ebcdic(label, label, sizeof(label));
+ }
+- stat = tape_write(dev->fd, label, sizeof(label));
++ stat = dev->write(label, sizeof(label));
+ if (stat != sizeof(label)) {
+ berrno be;
+ if (stat == -1) {
+diff -Naur new/bacula-1.39.30/src/stored/askdir.c bacula-1.39.30/src/stored/askdir.c
+--- new/bacula-1.39.30/src/stored/askdir.c 2006-12-08 15:27:10.000000000 +0100
++++ bacula-1.39.30/src/stored/askdir.c 2006-12-19 22:27:27.000000000 +0100
+@@ -308,11 +308,6 @@
+ Pmsg0(000, _("NULL Volume name. This shouldn't happen!!!\n"));
+ return false;
+ }
+- if (dev->can_read()) {
+- Jmsg0(jcr, M_FATAL, 0, _("Attempt to update_volume_info in read mode!!!\n"));
+- Pmsg0(000, _("Attempt to update_volume_info in read mode!!!\n"));
+- return false;
+- }
+
+ Dmsg1(100, "Update cat VolFiles=%d\n", dev->file);
+ /* Just labeled or relabeled the tape */
+diff -Naur new/bacula-1.39.30/src/stored/block.c bacula-1.39.30/src/stored/block.c
+--- new/bacula-1.39.30/src/stored/block.c 2006-12-08 15:27:10.000000000 +0100
++++ bacula-1.39.30/src/stored/block.c 2006-12-19 22:27:27.000000000 +0100
+@@ -537,11 +537,8 @@
+ bmicrosleep(5, 0); /* pause a bit if busy or lots of errors */
+ dev->clrerror(-1);
+ }
+- if (dev->is_tape()) {
+- stat = tape_write(dev->fd, block->buf, (size_t)wlen);
+- } else {
+- stat = write(dev->fd, block->buf, (size_t)wlen);
+- }
++ stat = dev->write(block->buf, (size_t)wlen);
++
+ } while (stat == -1 && (errno == EBUSY || errno == EIO) && retry++ < 3);
+
+ #ifdef DEBUG_BLOCK_ZEROING
+@@ -979,11 +976,8 @@
+ bmicrosleep(10, 0); /* pause a bit if busy or lots of errors */
+ dev->clrerror(-1);
+ }
+- if (dev->is_tape()) {
+- stat = tape_read(dev->fd, block->buf, (size_t)block->buf_len);
+- } else {
+- stat = read(dev->fd, block->buf, (size_t)block->buf_len);
+- }
++ stat = dev->read(block->buf, (size_t)block->buf_len);
++
+ } while (stat == -1 && (errno == EBUSY || errno == EINTR || errno == EIO) && retry++ < 3);
+ if (stat < 0) {
+ berrno be;
+diff -Naur new/bacula-1.39.30/src/stored/dev.c bacula-1.39.30/src/stored/dev.c
+--- new/bacula-1.39.30/src/stored/dev.c 2006-11-22 15:48:29.000000000 +0100
++++ bacula-1.39.30/src/stored/dev.c 2006-12-19 22:27:27.000000000 +0100
+@@ -1326,7 +1326,7 @@
+ mt_com.mt_count = 1;
+ while (num-- && !at_eot()) {
+ Dmsg0(100, "Doing read before fsf\n");
+- if ((stat = tape_read(fd, (char *)rbuf, rbuf_len)) < 0) {
++ if ((stat = this->read((char *)rbuf, rbuf_len)) < 0) {
+ if (errno == ENOMEM) { /* tape record exceeds buf len */
+ stat = rbuf_len; /* This is OK */
+ /*
+@@ -2193,6 +2193,68 @@
+ }
+ }
+
++/* return the last timer interval (usec) */
++int DEVICE::get_timer_count()
++{
++ uint64_t old = last_timer;
++ struct timeval tv;
++ gettimeofday(&tv, NULL);
++ last_timer = tv.tv_usec + tv.tv_sec * 1000000;
++
++ return last_timer - old;
++}
++
++/* read from fd */
++ssize_t DEVICE::read(void *buf, size_t len)
++{
++ ssize_t read_len ;
++
++ get_timer_count();
++
++ if (this->is_tape()) {
++ read_len = tape_read(fd, buf, len);
++ } else {
++ read_len = ::read(fd, buf, len);
++ }
++
++ last_tick = get_timer_count();
++
++ DevReadTime += last_tick;
++ VolCatInfo.VolReadTime += last_tick;
++
++ if (read_len > 0) { /* skip error */
++ DevReadBytes += read_len;
++ VolCatInfo.VolCatRBytes += read_len;
++ }
++
++ return read_len;
++}
++
++/* write to fd */
++ssize_t DEVICE::write(const void *buf, size_t len)
++{
++ ssize_t write_len ;
++
++ get_timer_count();
++
++ if (this->is_tape()) {
++ write_len = tape_write(fd, buf, len);
++ } else {
++ write_len = ::write(fd, buf, len);
++ }
++
++ last_tick = get_timer_count();
++
++ DevWriteTime += last_tick;
++ VolCatInfo.VolWriteTime += last_tick;
++
++ if (write_len > 0) { /* skip error */
++ DevWriteBytes += write_len;
++ VolCatInfo.VolCatBytes += write_len;
++ }
++
++ return write_len;
++}
+
+ /* Return the resource name for the device */
+ const char *DEVICE::name() const
+diff -Naur new/bacula-1.39.30/src/stored/dev.h bacula-1.39.30/src/stored/dev.h
+--- new/bacula-1.39.30/src/stored/dev.h 2006-11-21 18:03:46.000000000 +0100
++++ bacula-1.39.30/src/stored/dev.h 2006-12-19 22:27:27.000000000 +0100
+@@ -283,7 +283,17 @@
+ int rem_wait_sec;
+ int num_wait;
+
++ uint64_t last_timer; /* used by read/write/seek to get stats (usec) */
++ int last_tick; /* contains last read/write time (usec) */
++
++ uint64_t DevReadTime;
++ uint64_t DevWriteTime;
++ uint64_t DevWriteBytes;
++ uint64_t DevReadBytes;
++
+ /* Methods */
++ int get_timer_count(); /* return the last timer interval (usec) */
++
+ int has_cap(int cap) const { return capabilities & cap; }
+ int is_autochanger() const { return capabilities & CAP_AUTOCHANGER; }
+ int requires_mount() const { return capabilities & CAP_REQMOUNT; }
+@@ -364,6 +374,8 @@
+ bool truncate(DCR *dcr); /* in dev.c */
+ int open(DCR *dcr, int mode); /* in dev.c */
+ void term(void); /* in dev.c */
++ ssize_t read(void *buf, size_t len); /* in dev.c */
++ ssize_t write(const void *buf, size_t len); /* in dev.c */
+ bool rewind(DCR *dcr); /* in dev.c */
+ bool mount(int timeout); /* in dev.c */
+ bool unmount(int timeout); /* in dev.c */