*/
/*
- Here is how database versions work.
+ Here is how database versions work.
While I am working on a new release with database changes, the
update scripts are in the src/cats directory under the names
will be copied to the updatedb directory with the correct name
(in the present case 8 to 9).
- Now, in principle, each of the different DB implementations
+ Now, in principle, each of the different DB implementations
can have a different version, but in practice they are all
the same (simplifies things). The exception is the internal
database, which is no longer used, and hence, no longer changes.
enum {
SQL_TYPE_MYSQL = 0,
SQL_TYPE_POSTGRESQL = 1,
- SQL_TYPE_SQLITE = 2
+ SQL_TYPE_SQLITE = 2,
+ SQL_TYPE_SQLITE3
};
#define sql_num_fields(x) ((x)->ncolumn)
#define SQL_ROW char**
-#define sql_batch_start(x,y) my_batch_start(x,y)
-#define sql_batch_end(x,y,z) my_batch_end(x,y,z)
+#define sql_batch_start(x,y) my_batch_start(x,y)
+#define sql_batch_end(x,y,z) my_batch_end(x,y,z)
#define sql_batch_insert(x,y,z) my_batch_insert(x,y,z)
#define sql_batch_lock_path_query my_sqlite_batch_lock_query
#define sql_batch_lock_filename_query my_sqlite_batch_lock_query
#define sql_batch_unlock_tables_query my_sqlite_batch_unlock_query
#define sql_batch_fill_filename_query my_sqlite_batch_fill_filename_query
-#define sql_batch_fill_path_query my_sqlite_batch_fill_path_query
+#define sql_batch_fill_path_query my_sqlite_batch_fill_path_query
/* In cats/sqlite.c */
void my_sqlite_free_table(B_DB *mdb);
#else
/* S Q L I T E 3 */
-
+
#ifdef HAVE_SQLITE3
#define sql_field_seek(x, y) my_sqlite_field_seek((x), (y))
#define sql_fetch_field(x) my_sqlite_fetch_field(x)
#define sql_num_fields(x) ((x)->ncolumn)
-#define sql_batch_start(x,y) my_batch_start(x,y)
-#define sql_batch_end(x,y,z) my_batch_end(x,y,z)
+#define sql_batch_start(x,y) my_batch_start(x,y)
+#define sql_batch_end(x,y,z) my_batch_end(x,y,z)
#define sql_batch_insert(x,y,z) my_batch_insert(x,y,z)
#define SQL_ROW char**
#define sql_batch_lock_path_query my_sqlite_batch_lock_query
#define SQL_FIELD MYSQL_FIELD
#define sql_batch_start(x,y) my_batch_start(x,y)
-#define sql_batch_end(x,y,z) my_batch_end(x,y,z)
-#define sql_batch_insert(x,y,z) my_batch_insert(x,y,z)
+#define sql_batch_end(x,y,z) my_batch_end(x,y,z)
+#define sql_batch_insert(x,y,z) my_batch_insert(x,y,z)
#define sql_batch_lock_path_query my_mysql_batch_lock_path_query
#define sql_batch_lock_filename_query my_mysql_batch_lock_filename_query
#define sql_batch_unlock_tables_query my_mysql_batch_unlock_tables_query
POOLMEM *esc_path; /* Escaped path name */
int fnl; /* file name length */
int pnl; /* path name length */
-};
+};
void my_postgresql_free_result(B_DB *mdb);
POSTGRESQL_ROW my_postgresql_fetch_row (B_DB *mdb);
#define sql_fetch_field(x) my_postgresql_fetch_field(x)
#define sql_num_fields(x) ((x)->num_fields)
-#define sql_batch_start(x,y) my_postgresql_batch_start(x,y)
-#define sql_batch_end(x,y,z) my_postgresql_batch_end(x,y,z)
+#define sql_batch_start(x,y) my_postgresql_batch_start(x,y)
+#define sql_batch_end(x,y,z) my_postgresql_batch_end(x,y,z)
#define sql_batch_insert(x,y,z) my_postgresql_batch_insert(x,y,z)
#define sql_batch_lock_path_query my_pg_batch_lock_path_query
#define sql_batch_lock_filename_query my_pg_batch_lock_filename_query
POOLMEM *esc_path; /* Escaped path name */
int fnl; /* file name length */
int pnl; /* path name length */
-};
+};
void my_dbi_free_result(B_DB *mdb);
DBI_ROW my_dbi_fetch_row (B_DB *mdb);
int my_dbi_batch_insert(JCR *jcr, B_DB *mdb, ATTR_DBR *ar);
char *my_postgresql_copy_escape(char *dest, char *src, size_t len);
// typedefs for libdbi work with postgresql copy insert
-typedef int (*custom_function_insert_t)(void*, const char*, int);
+typedef int (*custom_function_insert_t)(void*, const char*, int);
typedef char* (*custom_function_error_t)(void*);
typedef int (*custom_function_end_t)(void*, const char*);
-extern const char* my_dbi_batch_lock_path_query[3];
-extern const char* my_dbi_batch_lock_filename_query[3];
-extern const char* my_dbi_batch_unlock_tables_query[3];
-extern const char* my_dbi_batch_fill_filename_query[3];
-extern const char* my_dbi_batch_fill_path_query[3];
+extern const char* my_dbi_batch_lock_path_query[4];
+extern const char* my_dbi_batch_lock_filename_query[4];
+extern const char* my_dbi_batch_unlock_tables_query[4];
+extern const char* my_dbi_batch_fill_filename_query[4];
+extern const char* my_dbi_batch_fill_path_query[4];
/* "Generic" names for easier conversion */
#define sql_store_result(x) (x)->result
#define sql_field_seek(x, y) my_dbi_field_seek((x), (y))
#define sql_fetch_field(x) my_dbi_fetch_field(x)
#define sql_num_fields(x) ((x)->num_fields)
-#define sql_batch_start(x,y) my_dbi_batch_start(x,y)
-#define sql_batch_end(x,y,z) my_dbi_batch_end(x,y,z)
+#define sql_batch_start(x,y) my_dbi_batch_start(x,y)
+#define sql_batch_end(x,y,z) my_dbi_batch_end(x,y,z)
#define sql_batch_insert(x,y,z) my_dbi_batch_insert(x,y,z)
#define sql_batch_lock_path_query my_dbi_batch_lock_path_query[db_type]
#define sql_batch_lock_filename_query my_dbi_batch_lock_filename_query[db_type]
*/
struct B_DB {
int dummy; /* for SunOS compiler */
-};
+};
#endif /* __SQL_C */
-/* ==============================================================
+/* ==============================================================
*
- * What follows are definitions that are used "globally" for all
+ * What follows are definitions that are used "globally" for all
* the different SQL engines and both inside and external to the
* cats directory.
*/
char cLastWritten[MAX_TIME_LENGTH]; /* LastWritten returned from DB */
char cLabelDate[MAX_TIME_LENGTH]; /* LabelData returned from DB */
char cInitialWrite[MAX_TIME_LENGTH]; /* InitialWrite returned from DB */
- bool set_first_written;
+ bool set_first_written;
bool set_label_date;
};
#include "sql_cmds.h"
/*
- * Exported globals from sql.c
+ * Exported globals from sql.c
*/
extern int DLL_IMP_EXP db_type; /* SQL engine type index */
*/
B_DB *
db_init_database(JCR *jcr, const char *db_name, const char *db_user, const char *db_password,
- const char *db_address, int db_port, const char *db_socket,
+ const char *db_address, int db_port, const char *db_socket,
int mult_db_connections)
{
B_DB *mdb;
Jmsg(jcr, M_FATAL, 0, _("A dbi driver for DBI must be supplied.\n"));
return NULL;
}
-
- /* Do the correct selection of driver.
- * Can be one of the varius supported by libdbi
+
+ /* Do the correct selection of driver.
+ * Can be one of the various drivers supported by libdbi
*/
switch (db_type) {
case SQL_TYPE_MYSQL:
bstrncpy(db_driver,"pgsql", sizeof(db_driver));
break;
case SQL_TYPE_SQLITE:
- bstrncpy(db_driver,"pgsql", sizeof(db_driver));
+ bstrncpy(db_driver,"sqlite", sizeof(db_driver));
+ break;
+ case SQL_TYPE_SQLITE3:
+ bstrncpy(db_driver,"sqlite3", sizeof(db_driver));
break;
}
-
+
/* Set db_driverdir to the directory where the libdbi drivers are */
bstrncpy(db_driverdir, DBI_DRIVER_DIR, 255);
-
+
if (!db_user) {
Jmsg(jcr, M_FATAL, 0, _("A user name for DBI must be supplied.\n"));
return NULL;
if (bstrcmp(mdb->db_name, db_name) &&
bstrcmp(mdb->db_address, db_address) &&
bstrcmp(mdb->db_driver, db_driver) &&
- mdb->db_port == db_port) {
- Dmsg4(100, "DB REopen %d %s %s erro: %d\n", mdb->ref_count, db_driver, db_name,
+ mdb->db_port == db_port) {
+ Dmsg4(100, "DB REopen %d %s %s erro: %d\n", mdb->ref_count, db_driver, db_name,
dbi_conn_error(mdb->db, NULL));
mdb->ref_count++;
V(mutex);
*/
int
db_open_database(JCR *jcr, B_DB *mdb)
-{
+{
int errstat;
int dbstat;
+ uint8_t len;
const char *errmsg;
char buf[10], *port;
- int numdrivers;
-
+ int numdrivers;
+ char *db_name = NULL;
+ char *db_dir = NULL;
+
P(mutex);
if (mdb->connected) {
V(mutex);
} else {
port = NULL;
}
-
+
numdrivers = dbi_initialize_r(mdb->db_driverdir, &(mdb->instance));
if (numdrivers < 0) {
Mmsg2(&mdb->errmsg, _("Unable to locate the DBD drivers to DBI interface in: \n"
return 0;
}
mdb->db = (void **)dbi_conn_new_r(mdb->db_driver, mdb->instance);
- dbi_conn_set_option(mdb->db, "host", mdb->db_address); /* default = localhost */
- dbi_conn_set_option(mdb->db, "port", port); /* default port */
- dbi_conn_set_option(mdb->db, "username", mdb->db_user); /* login name */
- dbi_conn_set_option(mdb->db, "password", mdb->db_password); /* password */
- dbi_conn_set_option(mdb->db, "dbname", mdb->db_name); /* database name */
-
+ /* Can be many types of databases */
+ switch (mdb->db_type) {
+ case SQL_TYPE_MYSQL:
+ dbi_conn_set_option(mdb->db, "host", mdb->db_address); /* default = localhost */
+ dbi_conn_set_option(mdb->db, "port", port); /* default port */
+ dbi_conn_set_option(mdb->db, "username", mdb->db_user); /* login name */
+ dbi_conn_set_option(mdb->db, "password", mdb->db_password); /* password */
+ dbi_conn_set_option(mdb->db, "dbname", mdb->db_name); /* database name */
+ break;
+ case SQL_TYPE_POSTGRESQL:
+ dbi_conn_set_option(mdb->db, "host", mdb->db_address);
+ dbi_conn_set_option(mdb->db, "port", port);
+ dbi_conn_set_option(mdb->db, "username", mdb->db_user);
+ dbi_conn_set_option(mdb->db, "password", mdb->db_password);
+ dbi_conn_set_option(mdb->db, "dbname", mdb->db_name);
+ break;
+ case SQL_TYPE_SQLITE:
+ len = strlen(working_directory) + 5;
+ db_dir = (char *)malloc(len);
+ strcpy(db_dir, working_directory);
+ strcat(db_dir, "/");
+ len = strlen(mdb->db_name) + 5;
+ db_name = (char *)malloc(len);
+ strcpy(db_name, mdb->db_name);
+ strcat(db_name, ".db");
+ dbi_conn_set_option(mdb->db, "sqlite_dbdir", db_dir);
+ dbi_conn_set_option(mdb->db, "dbname", db_name);
+ break;
+ case SQL_TYPE_SQLITE3:
+ len = strlen(working_directory) + 5;
+ db_dir = (char *)malloc(len);
+ strcpy(db_dir, working_directory);
+ strcat(db_dir, "/");
+ len = strlen(mdb->db_name) + 5;
+ db_name = (char *)malloc(len);
+ strcpy(db_name, mdb->db_name);
+ strcat(db_name, ".db");
+ dbi_conn_set_option(mdb->db, "sqlite3_dbdir", db_dir);
+ dbi_conn_set_option(mdb->db, "dbname", db_name);
+ Dmsg2(500, "SQLITE: %s %s\n", db_dir, db_name);
+ break;
+ }
+
/* If connection fails, try at 5 sec intervals for 30 seconds. */
for (int retry=0; retry < 6; retry++) {
-
- dbstat = dbi_conn_connect(mdb->db);
+
+ dbstat = dbi_conn_connect(mdb->db);
if ( dbstat == 0) {
break;
}
-
+
dbi_conn_error(mdb->db, &errmsg);
Dmsg1(50, "dbi error: %s\n", errmsg);
-
+
bmicrosleep(5, 0);
-
+
}
-
+
if ( dbstat != 0 ) {
Mmsg3(&mdb->errmsg, _("Unable to connect to DBI interface.\n"
"Type=%s Database=%s User=%s\n"
"It is probably not running or your password is incorrect.\n"),
mdb->db_driver, mdb->db_name, mdb->db_user);
V(mutex);
- return 0;
- }
-
+ return 0;
+ }
+
Dmsg0(50, "dbi_real_connect done\n");
Dmsg3(50, "db_user=%s db_name=%s db_password=%s\n",
mdb->db_user, mdb->db_name,
V(mutex);
return 0;
}
-
+
switch (mdb->db_type) {
case SQL_TYPE_MYSQL:
/* Set connection timeout to 8 days specialy for batch mode */
sql_query(mdb, "SET datestyle TO 'ISO, YMD'");
sql_query(mdb, "set standard_conforming_strings=on");
break;
- case SQL_TYPE_SQLITE:
- break;
}
-
+
+ if(db_dir) {
+ free(db_dir);
+ }
+ if(db_name) {
+ free(db_name);
+ }
+
V(mutex);
return 1;
}
if (mdb->db_driver) {
free(mdb->db_driver);
}
- free(mdb);
- }
+ free(mdb);
+ }
V(mutex);
}
* NOTE! len is the length of the old string. Your new
* string must be long enough (max 2*old+1) to hold
* the escaped output.
- *
+ *
* dbi_conn_quote_string_copy receives a pointer to pointer.
- * We need copy the value of pointer to snew because libdbi change the
+ * We need copy the value of pointer to snew because libdbi change the
* pointer
*/
void
{
char *inew;
char *pnew;
-
+
if (len == 0) {
- snew[0] = 0;
+ snew[0] = 0;
} else {
/* correct the size of old based on len
* and copy new string to inew
dbi_conn_escape_string_copy(mdb->db, inew, &pnew);
free(inew);
/* copy the escaped string to snew */
- bstrncpy(snew, pnew, 2 * len + 1);
+ bstrncpy(snew, pnew, 2 * len + 1);
}
-
+
Dmsg2(500, "dbi_conn_escape_string_copy %p %s\n",snew,snew);
-
+
}
/*
DBI_ROW row = NULL; // by default, return NULL
Dmsg0(500, "my_dbi_fetch_row start\n");
-
-
- if (!mdb->row || mdb->row_size < mdb->num_fields) {
+ if ((!mdb->row || mdb->row_size < mdb->num_fields) && mdb->num_rows > 0) {
int num_fields = mdb->num_fields;
Dmsg1(500, "we have need space of %d bytes\n", sizeof(char *) * mdb->num_fields);
Dmsg2(500, "my_dbi_free_row row: '%p' num_fields: '%d'\n", mdb->row, mdb->num_fields);
if (mdb->num_rows != 0) {
for(j = 0; j < mdb->num_fields; j++) {
- Dmsg2(500, "my_dbi_free_row row '%p' '%d'\n", mdb->row[j], j);
+ Dmsg2(500, "my_dbi_free_row row '%p' '%d'\n", mdb->row[j], j);
if(mdb->row[j]) {
free(mdb->row[j]);
- }
- }
+ }
+ }
}
free(mdb->row);
}
}
// if still within the result set
- if (mdb->row_number <= mdb->num_rows) {
+ if (mdb->row_number <= mdb->num_rows && mdb->row_number != DBI_ERROR_BADPTR) {
Dmsg2(500, "my_dbi_fetch_row row number '%d' is acceptable (1..%d)\n", mdb->row_number, mdb->num_rows);
// get each value from this row
for (j = 0; j < mdb->num_fields; j++) {
- mdb->row[j] = my_dbi_getvalue(mdb->result, mdb->row_number, j);
- // allocate space to queue row
+ mdb->row[j] = my_dbi_getvalue(mdb->result, mdb->row_number, j);
+ // allocate space to queue row
mdb->field_get = (DBI_FIELD_GET *)malloc(sizeof(DBI_FIELD_GET));
// store the pointer in queue
- mdb->field_get->value = mdb->row[j];
+ mdb->field_get->value = mdb->row[j];
Dmsg4(500, "my_dbi_fetch_row row[%d] field: '%p' in queue: '%p' has value: '%s'\n",
j, mdb->row[j], mdb->field_get->value, mdb->row[j]);
// insert in queue to future free
- qinsert(&dbi_getvalue_list, &mdb->field_get->bq);
+ qinsert(&dbi_getvalue_list, &mdb->field_get->bq);
}
// increment the row number for the next call
mdb->row_number++;
Dmsg1(500, "allocating space for %d fields\n", mdb->num_fields);
mdb->fields = (DBI_FIELD *)malloc(sizeof(DBI_FIELD) * mdb->num_fields);
mdb->fields_size = mdb->num_fields;
-
+
for (i = 0; i < mdb->num_fields; i++) {
// num_fields starts at 1, so increment i by 1
dbi_index = i + 1;
dbi_result_free(mdb->result); /* hmm, someone forgot to free?? */
mdb->result = NULL;
}
-
+
mdb->result = (void **)dbi_conn_query(mdb->db, query);
-
+
if (!mdb->result) {
- Dmsg2(50, "Query failed: %s %p\n", query, mdb->result);
+ Dmsg2(50, "Query failed: %s %p\n", query, mdb->result);
goto bail_out;
}
-
+
mdb->status = (dbi_error_flag) dbi_conn_error(mdb->db, &errmsg);
-
+
if (mdb->status == DBI_ERROR_NONE) {
Dmsg1(500, "we have a result\n", query);
}
void my_dbi_free_result(B_DB *mdb)
-{
-
+{
+
DBI_FIELD_GET *f;
- db_lock(mdb);
- int i = 0;
+ db_lock(mdb);
if (mdb->result) {
Dmsg1(500, "my_dbi_free_result result '%p'\n", mdb->result);
dbi_result_free(mdb->result);
}
mdb->result = NULL;
-
+
if (mdb->row) {
free(mdb->row);
}
-
+
/* now it is time to free every value returned by my_dbi_getvalue
* this is necessary because libdbi does not free the memory returned by its results
* and Bacula has some routines which call my_dbi_fetch_row more than once
- *
+ *
* Using a queue to store all pointer allocate is a good way to free all things
* when necessary
*/
free(f->value);
free(f);
}
-
+
mdb->row = NULL;
-
+
if (mdb->fields) {
free(mdb->fields);
mdb->fields = NULL;
}
db_unlock(mdb);
Dmsg0(500, "my_dbi_free_result finish\n");
-
+
}
-const char *my_dbi_strerror(B_DB *mdb)
+const char *my_dbi_strerror(B_DB *mdb)
{
const char *errmsg;
-
+
dbi_conn_error(mdb->db, &errmsg);
-
+
return errmsg;
}
/*
* This can be a bit strange but it is the only way to do it
- *
+ *
* Returns 1 if OK
* 0 if failed
*/
int my_dbi_batch_start(JCR *jcr, B_DB *mdb)
{
char *query = "COPY batch FROM STDIN";
-
+
Dmsg0(500, "my_dbi_batch_start started\n");
-
+
switch (mdb->db_type) {
case SQL_TYPE_MYSQL:
db_lock(mdb);
return 1;
break;
case SQL_TYPE_POSTGRESQL:
-
- //query = "COPY batch FROM STDIN";
- if (my_dbi_query(mdb,
- "CREATE TEMPORARY TABLE batch ("
+ if (my_dbi_query(mdb, "CREATE TEMPORARY TABLE batch ("
"fileindex int,"
"jobid int,"
"path varchar,"
Dmsg0(500, "my_dbi_batch_start failed\n");
return 1;
}
-
+
// We are starting a new query. reset everything.
mdb->num_rows = -1;
mdb->row_number = -1;
mdb->status = (dbi_error_flag)dbi_conn_error(mdb->db, NULL);
//mdb->status = DBI_ERROR_NONE;
-
+
if (mdb->status == DBI_ERROR_NONE) {
// how many fields in the set?
mdb->num_fields = dbi_result_get_numfields(mdb->result);
break;
case SQL_TYPE_SQLITE:
db_lock(mdb);
- if (my_dbi_query(mdb,
- "CREATE TEMPORARY TABLE batch ("
+ if (my_dbi_query(mdb, "CREATE TEMPORARY TABLE batch ("
+ "FileIndex integer,"
+ "JobId integer,"
+ "Path blob,"
+ "Name blob,"
+ "LStat tinyblob,"
+ "MD5 tinyblob)") == 1)
+ {
+ Dmsg0(500, "my_dbi_batch_start failed\n");
+ goto bail_out;
+ }
+ db_unlock(mdb);
+ Dmsg0(500, "my_dbi_batch_start finishing\n");
+ return 1;
+ break;
+ case SQL_TYPE_SQLITE3:
+ db_lock(mdb);
+ if (my_dbi_query(mdb, "CREATE TEMPORARY TABLE batch ("
"FileIndex integer,"
"JobId integer,"
"Path blob,"
return 1;
break;
}
-
+
bail_out:
Mmsg1(&mdb->errmsg, _("error starting batch mode: %s"), my_dbi_strerror(mdb));
mdb->status = (dbi_error_flag) 0;
{
int res = 0;
int count = 30;
- int (*custom_function)(void*, const char*) = NULL;
+ int (*custom_function)(void*, const char*) = NULL;
dbi_conn_t *myconn = (dbi_conn_t *)(mdb->db);
-
+
Dmsg0(500, "my_dbi_batch_end started\n");
if (!mdb) { /* no files ? */
case SQL_TYPE_POSTGRESQL:
custom_function = (custom_function_end_t)dbi_driver_specific_function(dbi_conn_get_driver(mdb->db), "PQputCopyEnd");
-
- do {
- res = (*custom_function)(myconn->connection, error);
+
+ do {
+ res = (*custom_function)(myconn->connection, error);
} while (res == 0 && --count > 0);
if (res == 1) {
Dmsg0(500, "ok\n");
mdb->status = (dbi_error_flag) 1;
}
-
+
if (res <= 0) {
Dmsg0(500, "we failed\n");
mdb->status = (dbi_error_flag) 0;
mdb->status = (dbi_error_flag) 0;
}
break;
+ case SQL_TYPE_SQLITE3:
+ if(mdb) {
+ mdb->status = (dbi_error_flag) 0;
+ }
+ break;
}
Dmsg0(500, "my_dbi_batch_end finishing\n");
- return true;
+ return true;
}
-/*
- * This function is big and use a big switch.
+/*
+ * This function is big and use a big switch.
* In near future is better split in small functions
* and refactory.
- *
+ *
*/
int my_dbi_batch_insert(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
{
int res;
- int count=30;
+ int count=30;
dbi_conn_t *myconn = (dbi_conn_t *)(mdb->db);
int (*custom_function)(void*, const char*, int) = NULL;
char* (*custom_function_error)(void*) = NULL;
Dmsg0(500, "my_dbi_batch_insert started \n");
- mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1);
+ mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1);
mdb->esc_path = check_pool_memory_size(mdb->esc_path, mdb->pnl*2+1);
if (ar->Digest == NULL || ar->Digest[0] == 0) {
db_escape_string(jcr, mdb, mdb->esc_name, mdb->fname, mdb->fnl);
db_escape_string(jcr, mdb, mdb->esc_path, mdb->path, mdb->pnl);
len = Mmsg(mdb->cmd, "INSERT INTO batch VALUES (%u,%s,'%s','%s','%s','%s')",
- ar->FileIndex, edit_int64(ar->JobId,ed1), mdb->esc_path,
+ ar->FileIndex, edit_int64(ar->JobId,ed1), mdb->esc_path,
mdb->esc_name, ar->attr, digest);
-
+
if (my_dbi_query(mdb,mdb->cmd) == 1)
{
Dmsg0(500, "my_dbi_batch_insert failed\n");
goto bail_out;
}
-
+
Dmsg0(500, "my_dbi_batch_insert finishing\n");
-
+
return 1;
break;
case SQL_TYPE_POSTGRESQL:
my_postgresql_copy_escape(mdb->esc_name, mdb->fname, mdb->fnl);
my_postgresql_copy_escape(mdb->esc_path, mdb->path, mdb->pnl);
- len = Mmsg(mdb->cmd, "%u\t%s\t%s\t%s\t%s\t%s\n",
- ar->FileIndex, edit_int64(ar->JobId, ed1), mdb->esc_path,
+ len = Mmsg(mdb->cmd, "%u\t%s\t%s\t%s\t%s\t%s\n",
+ ar->FileIndex, edit_int64(ar->JobId, ed1), mdb->esc_path,
mdb->esc_name, ar->attr, digest);
-
+
/* libdbi doesn't support CopyData and we need to call a postgresql
* specific function to do this work
*/
Dmsg2(500, "my_dbi_batch_insert :\n %s \ncmd_size: %d",mdb->cmd, len);
if ((custom_function = (custom_function_insert_t)dbi_driver_specific_function(dbi_conn_get_driver(mdb->db),
"PQputCopyData")) != NULL) {
- do {
+ do {
res = (*custom_function)(myconn->connection, mdb->cmd, len);
} while (res == 0 && --count > 0);
Dmsg0(500, "my_dbi_batch_insert finishing\n");
return mdb->status;
} else {
- // ensure to detect a PQerror
+ // ensure to detect a PQerror
custom_function_error = (custom_function_error_t)dbi_driver_specific_function(dbi_conn_get_driver(mdb->db), "PQerrorMessage");
Dmsg1(500, "my_dbi_batch_insert failed\n PQerrorMessage: %s", (*custom_function_error)(myconn->connection));
goto bail_out;
db_escape_string(jcr, mdb, mdb->esc_name, mdb->fname, mdb->fnl);
db_escape_string(jcr, mdb, mdb->esc_path, mdb->path, mdb->pnl);
len = Mmsg(mdb->cmd, "INSERT INTO batch VALUES (%u,%s,'%s','%s','%s','%s')",
- ar->FileIndex, edit_int64(ar->JobId,ed1), mdb->esc_path,
+ ar->FileIndex, edit_int64(ar->JobId,ed1), mdb->esc_path,
mdb->esc_name, ar->attr, digest);
if (my_dbi_query(mdb,mdb->cmd) == 1)
{
Dmsg0(500, "my_dbi_batch_insert failed\n");
goto bail_out;
}
-
+
Dmsg0(500, "my_dbi_batch_insert finishing\n");
-
+
+ return 1;
+ break;
+ case SQL_TYPE_SQLITE3:
+ db_escape_string(jcr, mdb, mdb->esc_name, mdb->fname, mdb->fnl);
+ db_escape_string(jcr, mdb, mdb->esc_path, mdb->path, mdb->pnl);
+ len = Mmsg(mdb->cmd, "INSERT INTO batch VALUES (%u,%s,'%s','%s','%s','%s')",
+ ar->FileIndex, edit_int64(ar->JobId,ed1), mdb->esc_path,
+ mdb->esc_name, ar->attr, digest);
+ if (my_dbi_query(mdb,mdb->cmd) == 1)
+ {
+ Dmsg0(500, "my_dbi_batch_insert failed\n");
+ goto bail_out;
+ }
+
+ Dmsg0(500, "my_dbi_batch_insert finishing\n");
+
return 1;
break;
}
-
+
bail_out:
Mmsg1(&mdb->errmsg, _("error inserting batch mode: %s"), my_dbi_strerror(mdb));
mdb->status = (dbi_error_flag) 0;
- my_dbi_free_result(mdb);
+ my_dbi_free_result(mdb);
return mdb->status;
}
* int PQgetisnull(const PGresult *res,
* int row_number,
* int column_number);
- *
+ *
* use dbi_result_seek_row to search in result set
*/
int my_dbi_getisnull(dbi_result *result, int row_number, int column_number) {
if(row_number == 0) {
row_number++;
}
-
+
column_number++;
-
+
if(dbi_result_seek_row(result, row_number)) {
i = dbi_result_field_is_null_idx(result,column_number);
return i;
} else {
-
+
return 0;
}
-
+
}
-/* my_dbi_getvalue
+/* my_dbi_getvalue
* like PQgetvalue;
* char *PQgetvalue(const PGresult *res,
* int row_number,
char *buf = NULL;
const char *errmsg;
- const char *field_name;
+ const char *field_name;
unsigned short dbitype;
size_t field_length;
int64_t num;
-
+
/* correct the index for dbi interface
* dbi index begins 1
* I prefer do not change others functions
*/
- Dmsg3(600, "my_dbi_getvalue pre-starting result '%p' row number '%d' column number '%d'\n",
+ Dmsg3(600, "my_dbi_getvalue pre-starting result '%p' row number '%d' column number '%d'\n",
result, row_number, column_number);
-
+
column_number++;
if(row_number == 0) {
row_number++;
}
-
- Dmsg3(600, "my_dbi_getvalue starting result '%p' row number '%d' column number '%d'\n",
+
+ Dmsg3(600, "my_dbi_getvalue starting result '%p' row number '%d' column number '%d'\n",
result, row_number, column_number);
-
+
if(dbi_result_seek_row(result, row_number)) {
field_name = dbi_result_get_field_name(result, column_number);
field_length = dbi_result_get_field_length(result, field_name);
dbitype = dbi_result_get_field_type_idx(result,column_number);
-
+
Dmsg3(500, "my_dbi_getvalue start: type: '%d' "
- "field_length bytes: '%d' fieldname: '%s'\n",
+ "field_length bytes: '%d' fieldname: '%s'\n",
dbitype, field_length, field_name);
-
+
if(field_length) {
//buf = (char *)malloc(sizeof(char *) * field_length + 1);
buf = (char *)malloc(field_length + 1);
} else {
/* if numbers */
buf = (char *)malloc(sizeof(char *) * 50);
- }
-
+ }
+
switch (dbitype) {
case DBI_TYPE_INTEGER:
- num = dbi_result_get_longlong(result, field_name);
+ num = dbi_result_get_longlong(result, field_name);
edit_int64(num, buf);
field_length = strlen(buf);
break;
case DBI_TYPE_STRING:
if(field_length) {
- field_length = bsnprintf(buf, field_length + 1, "%s",
+ field_length = bsnprintf(buf, field_length + 1, "%s",
dbi_result_get_string(result, field_name));
} else {
buf[0] = 0;
* following, change this to what Bacula expected
*/
if(field_length) {
- field_length = bsnprintf(buf, field_length + 1, "%s",
+ field_length = bsnprintf(buf, field_length + 1, "%s",
dbi_result_get_binary(result, field_name));
} else {
buf[0] = 0;
case DBI_TYPE_DATETIME:
time_t last;
struct tm tm;
-
+
last = dbi_result_get_datetime(result, field_name);
-
+
if(last == -1) {
- field_length = bsnprintf(buf, 20, "0000-00-00 00:00:00");
+ field_length = bsnprintf(buf, 20, "0000-00-00 00:00:00");
} else {
(void)localtime_r(&last, &tm);
field_length = bsnprintf(buf, 20, "%04d-%02d-%02d %02d:%02d:%02d",
dbi_conn_error(dbi_result_get_conn(result), &errmsg);
Dmsg1(500, "my_dbi_getvalue error: %s\n", errmsg);
}
-
- Dmsg3(500, "my_dbi_getvalue finish buffer: '%p' num bytes: '%d' data: '%s'\n",
+
+ Dmsg3(500, "my_dbi_getvalue finish buffer: '%p' num bytes: '%d' data: '%s'\n",
buf, field_length, buf);
-
+
// don't worry about this buf
return buf;
}
everything else can use the PostgreSQL formula.
*/
-
- char sequence[30];
+
+ char sequence[30];
uint64_t id = 0;
if (mdb->db_type == SQL_TYPE_POSTGRESQL) {
-
+
if (strcasecmp(table_name, "basefiles") == 0) {
bstrncpy(sequence, "basefiles_baseid", sizeof(sequence));
} else {
} else {
id = dbi_conn_sequence_last(mdb->db, NT_(table_name));
}
-
+
return id;
}
#ifdef HAVE_BATCH_FILE_INSERT
-const char *my_dbi_batch_lock_path_query[3] = {
+const char *my_dbi_batch_lock_path_query[4] = {
/* Mysql */
"LOCK TABLES Path write, batch write, Path as p write",
/* Postgresql */
"BEGIN; LOCK TABLE Path IN SHARE ROW EXCLUSIVE MODE",
/* SQLite */
- "BEGIN"};
+ "BEGIN",
+ /* SQLite3 */
+ "BEGIN"};
-const char *my_dbi_batch_lock_filename_query[3] = {
+const char *my_dbi_batch_lock_filename_query[4] = {
/* Mysql */
"LOCK TABLES Filename write, batch write, Filename as f write",
/* Postgresql */
"BEGIN; LOCK TABLE Filename IN SHARE ROW EXCLUSIVE MODE",
/* SQLite */
+ "BEGIN",
+ /* SQLite3 */
"BEGIN"};
-const char *my_dbi_batch_unlock_tables_query[3] = {
+const char *my_dbi_batch_unlock_tables_query[4] = {
/* Mysql */
"UNLOCK TABLES",
/* Postgresql */
"COMMIT",
/* SQLite */
+ "COMMIT",
+ /* SQLite3 */
"COMMIT"};
-const char *my_dbi_batch_fill_path_query[3] = {
+const char *my_dbi_batch_fill_path_query[4] = {
/* Mysql */
"INSERT INTO Path (Path) "
- "SELECT a.Path FROM "
+ "SELECT a.Path FROM "
"(SELECT DISTINCT Path FROM batch) AS a WHERE NOT EXISTS "
"(SELECT Path FROM Path AS p WHERE p.Path = a.Path)",
/* Postgresql */
"(SELECT DISTINCT Path FROM batch) AS a "
"WHERE NOT EXISTS (SELECT Path FROM Path WHERE Path = a.Path) ",
/* SQLite */
- "INSERT INTO Path (Path)"
+ "INSERT INTO Path (Path)"
+ " SELECT DISTINCT Path FROM batch"
+ " EXCEPT SELECT Path FROM Path",
+ /* SQLite3 */
+ "INSERT INTO Path (Path)"
" SELECT DISTINCT Path FROM batch"
" EXCEPT SELECT Path FROM Path"};
-const char *my_dbi_batch_fill_filename_query[3] = {
+const char *my_dbi_batch_fill_filename_query[4] = {
/* Mysql */
"INSERT INTO Filename (Name) "
- "SELECT a.Name FROM "
+ "SELECT a.Name FROM "
"(SELECT DISTINCT Name FROM batch) AS a WHERE NOT EXISTS "
"(SELECT Name FROM Filename AS f WHERE f.Name = a.Name)",
/* Postgresql */
/* SQLite */
"INSERT INTO Filename (Name)"
" SELECT DISTINCT Name FROM batch "
+ " EXCEPT SELECT Name FROM Filename",
+ /* SQLite3 */
+ "INSERT INTO Filename (Name)"
+ " SELECT DISTINCT Name FROM batch "
" EXCEPT SELECT Name FROM Filename"};
+
#endif /* HAVE_BATCH_FILE_INSERT */
#endif /* HAVE_DBI */