/*
Bacula® - The Network Backup Solution
- Copyright (C) 2003-2008 Free Software Foundation Europe e.V.
+ Copyright (C) 2003-2010 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
- Bacula® is a registered trademark of John Walker.
+ Bacula® is a registered trademark of Kern Sibbald.
The licensor of Bacula is the Free Software Foundation Europe
(FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
Switzerland, email:ftf@fsfeurope.org.
* based upon work done by Dan Langille, December 2003 and
* by Kern Sibbald, March 2000
*
- * Version $Id$
+ */
+/*
+ * This code only compiles against a recent version of libdbi. The current
+ * release found on the libdbi website (0.8.3) won't work for this code.
+ *
+ * You can find the libdbi library at http://sourceforge.net/projects/libdbi
+ *
+ * A fairly recent version of libdbi from CVS works, so either make sure
+ * your distribution has a fairly recent version of libdbi installed or
+ * clone the CVS repositories from sourceforge and compile that code and
+ * install it.
+ *
+ * You need:
+ * cvs co :pserver:anonymous@libdbi.cvs.sourceforge.net:/cvsroot/libdbi
+ * cvs co :pserver:anonymous@libdbi-drivers.cvs.sourceforge.net:/cvsroot/libdbi-drivers
*/
*/
/* List of open databases */
-static BQUEUE db_list = {&db_list, &db_list};
+static dlist *db_list = NULL;
+
+/* Control allocated fields by my_dbi_getvalue */
+static dlist *dbi_getvalue_list = NULL;
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
*/
B_DB *
db_init_database(JCR *jcr, const char *db_name, const char *db_user, const char *db_password,
- const char *db_address, int db_port, const char *db_socket,
+ const char *db_address, int db_port, const char *db_socket,
int mult_db_connections)
{
- B_DB *mdb;
+ B_DB *mdb = NULL;
+ DBI_FIELD_GET *field;
char db_driver[10];
char db_driverdir[256];
Jmsg(jcr, M_FATAL, 0, _("A dbi driver for DBI must be supplied.\n"));
return NULL;
}
-
- /* Do the correct selection of driver.
- * Can be one of the varius supported by libdbi
+
+ /* Do the correct selection of driver.
+ * Can be one of the various drivers supported by libdbi
*/
switch (db_type) {
case SQL_TYPE_MYSQL:
bstrncpy(db_driver,"pgsql", sizeof(db_driver));
break;
case SQL_TYPE_SQLITE:
- bstrncpy(db_driver,"pgsql", sizeof(db_driver));
+ bstrncpy(db_driver,"sqlite", sizeof(db_driver));
+ break;
+ case SQL_TYPE_SQLITE3:
+ bstrncpy(db_driver,"sqlite3", sizeof(db_driver));
break;
}
-
+
/* Set db_driverdir whereis is the libdbi drivers */
bstrncpy(db_driverdir, DBI_DRIVER_DIR, 255);
-
+
if (!db_user) {
Jmsg(jcr, M_FATAL, 0, _("A user name for DBI must be supplied.\n"));
return NULL;
}
P(mutex); /* lock DB queue */
+ if (db_list == NULL) {
+ db_list = New(dlist(mdb, &mdb->link));
+ dbi_getvalue_list = New(dlist(field, &field->link));
+ }
if (!mult_db_connections) {
/* Look to see if DB already open */
- for (mdb=NULL; (mdb=(B_DB *)qnext(&db_list, &mdb->bq)); ) {
+ foreach_dlist(mdb, db_list) {
if (bstrcmp(mdb->db_name, db_name) &&
bstrcmp(mdb->db_address, db_address) &&
bstrcmp(mdb->db_driver, db_driver) &&
- mdb->db_port == db_port) {
- Dmsg3(100, "DB REopen %d %s %s\n", mdb->ref_count, db_driver, db_name);
+ mdb->db_port == db_port) {
+ Dmsg4(100, "DB REopen %d %s %s erro: %d\n", mdb->ref_count, db_driver, db_name,
+ dbi_conn_error(mdb->db, NULL));
mdb->ref_count++;
V(mutex);
return mdb; /* already open */
}
mdb->db_type = db_type;
mdb->db_port = db_port;
- mdb->have_insert_id = TRUE;
mdb->errmsg = get_pool_memory(PM_EMSG); /* get error message buffer */
*mdb->errmsg = 0;
mdb->cmd = get_pool_memory(PM_EMSG); /* get command buffer */
mdb->esc_name = get_pool_memory(PM_FNAME);
mdb->esc_path = get_pool_memory(PM_FNAME);
mdb->allow_transactions = mult_db_connections;
- qinsert(&db_list, &mdb->bq); /* put db in list */
+ db_list->append(mdb); /* put db in list */
V(mutex);
return mdb;
}
*/
int
db_open_database(JCR *jcr, B_DB *mdb)
-{
+{
int errstat;
int dbstat;
+ uint8_t len;
const char *errmsg;
char buf[10], *port;
- int numdrivers;
-
+ int numdrivers;
+ char *db_name = NULL;
+ char *db_dir = NULL;
+
P(mutex);
if (mdb->connected) {
V(mutex);
} else {
port = NULL;
}
-
- numdrivers = dbi_initialize(mdb->db_driverdir);
+
+ numdrivers = dbi_initialize_r(mdb->db_driverdir, &(mdb->instance));
if (numdrivers < 0) {
- dbi_shutdown();
Mmsg2(&mdb->errmsg, _("Unable to locate the DBD drivers to DBI interface in: \n"
"db_driverdir=%s. It is probaly not found any drivers\n"),
mdb->db_driverdir,numdrivers);
V(mutex);
return 0;
}
- mdb->db = (void **)dbi_conn_new(mdb->db_driver);
- dbi_conn_set_option(mdb->db, "host", mdb->db_address); /* default = localhost */
- dbi_conn_set_option(mdb->db, "port", port); /* default port */
- dbi_conn_set_option(mdb->db, "username", mdb->db_user); /* login name */
- dbi_conn_set_option(mdb->db, "password", mdb->db_password); /* password */
- dbi_conn_set_option(mdb->db, "dbname", mdb->db_name); /* database name */
-
+ mdb->db = (void **)dbi_conn_new_r(mdb->db_driver, mdb->instance);
+ /* Can be many types of databases */
+ switch (mdb->db_type) {
+ case SQL_TYPE_MYSQL:
+ dbi_conn_set_option(mdb->db, "host", mdb->db_address); /* default = localhost */
+ dbi_conn_set_option(mdb->db, "port", port); /* default port */
+ dbi_conn_set_option(mdb->db, "username", mdb->db_user); /* login name */
+ dbi_conn_set_option(mdb->db, "password", mdb->db_password); /* password */
+ dbi_conn_set_option(mdb->db, "dbname", mdb->db_name); /* database name */
+ break;
+ case SQL_TYPE_POSTGRESQL:
+ dbi_conn_set_option(mdb->db, "host", mdb->db_address);
+ dbi_conn_set_option(mdb->db, "port", port);
+ dbi_conn_set_option(mdb->db, "username", mdb->db_user);
+ dbi_conn_set_option(mdb->db, "password", mdb->db_password);
+ dbi_conn_set_option(mdb->db, "dbname", mdb->db_name);
+ break;
+ case SQL_TYPE_SQLITE:
+ len = strlen(working_directory) + 5;
+ db_dir = (char *)malloc(len);
+ strcpy(db_dir, working_directory);
+ strcat(db_dir, "/");
+ len = strlen(mdb->db_name) + 5;
+ db_name = (char *)malloc(len);
+ strcpy(db_name, mdb->db_name);
+ strcat(db_name, ".db");
+ dbi_conn_set_option(mdb->db, "sqlite_dbdir", db_dir);
+ dbi_conn_set_option(mdb->db, "dbname", db_name);
+ break;
+ case SQL_TYPE_SQLITE3:
+ len = strlen(working_directory) + 5;
+ db_dir = (char *)malloc(len);
+ strcpy(db_dir, working_directory);
+ strcat(db_dir, "/");
+ len = strlen(mdb->db_name) + 5;
+ db_name = (char *)malloc(len);
+ strcpy(db_name, mdb->db_name);
+ strcat(db_name, ".db");
+ dbi_conn_set_option(mdb->db, "sqlite3_dbdir", db_dir);
+ dbi_conn_set_option(mdb->db, "dbname", db_name);
+ Dmsg2(500, "SQLITE: %s %s\n", db_dir, db_name);
+ break;
+ }
+
/* If connection fails, try at 5 sec intervals for 30 seconds. */
for (int retry=0; retry < 6; retry++) {
-
- dbstat = dbi_conn_connect(mdb->db);
+
+ dbstat = dbi_conn_connect(mdb->db);
if ( dbstat == 0) {
break;
}
-
+
dbi_conn_error(mdb->db, &errmsg);
Dmsg1(50, "dbi error: %s\n", errmsg);
-
+
bmicrosleep(5, 0);
-
+
}
-
+
if ( dbstat != 0 ) {
- Mmsg3(&mdb->errmsg, _("Unable to connect to DBI interface.\n"
- "Type=%s Database=%s User=%s\n"
- "It is probably not running or your password is incorrect.\n"),
- mdb->db_driver, mdb->db_name, mdb->db_user);
+ Mmsg3(&mdb->errmsg, _("Unable to connect to DBI interface. Type=%s Database=%s User=%s\n"
+ "Possible causes: SQL server not running; password incorrect; max_connections exceeded.\n"),
+ mdb->db_driver, mdb->db_name, mdb->db_user);
V(mutex);
- return 0;
- }
-
+ return 0;
+ }
+
Dmsg0(50, "dbi_real_connect done\n");
Dmsg3(50, "db_user=%s db_name=%s db_password=%s\n",
mdb->db_user, mdb->db_name,
V(mutex);
return 0;
}
-
+
switch (mdb->db_type) {
case SQL_TYPE_MYSQL:
/* Set connection timeout to 8 days specialy for batch mode */
sql_query(mdb, "SET datestyle TO 'ISO, YMD'");
sql_query(mdb, "set standard_conforming_strings=on");
break;
- case SQL_TYPE_SQLITE:
- break;
}
-
+
+ if(db_dir) {
+ free(db_dir);
+ }
+ if(db_name) {
+ free(db_name);
+ }
+
V(mutex);
return 1;
}
sql_free_result(mdb);
mdb->ref_count--;
if (mdb->ref_count == 0) {
- qdchain(&mdb->bq);
+ db_list->remove(mdb);
if (mdb->connected && mdb->db) {
- sql_close(mdb);
+ //sql_close(mdb);
+ dbi_shutdown_r(mdb->instance);
mdb->db = NULL;
+ mdb->instance = NULL;
}
rwl_destroy(&mdb->lock);
free_pool_memory(mdb->errmsg);
if (mdb->db_socket) {
free(mdb->db_socket);
}
- dbi_shutdown();
+ if (mdb->db_driverdir) {
+ free(mdb->db_driverdir);
+ }
if (mdb->db_driver) {
free(mdb->db_driver);
}
free(mdb);
-
-
- }
+ if (db_list->size() == 0) {
+ delete db_list;
+ db_list = NULL;
+ }
+ }
V(mutex);
}
+/* No-op for the libdbi backend: no backend thread-safety check is performed here. */
+void db_check_backend_thread_safe()
+{ }
+
void db_thread_cleanup()
{ }
* NOTE! len is the length of the old string. Your new
* string must be long enough (max 2*old+1) to hold
* the escaped output.
- *
+ *
* dbi_conn_quote_string_copy receives a pointer to pointer.
- * We need copy the value of pointer to snew. Because libdbi change the
+ * We need to copy the value of the pointer to snew because libdbi changes the
* pointer
*/
void
{
char *inew;
char *pnew;
-
+
if (len == 0) {
- snew[0] = 0;
+ snew[0] = 0;
} else {
- /* correct the size of old basead in len and copy new string to inew */
- inew = (char *)malloc(sizeof(char) * len + 1);
- bstrncpy(inew,old,len + 1);
- /* escape the correct size of old */
- dbi_conn_escape_string_copy(mdb->db, inew, &pnew);
- /* copy the escaped string to snew */
- bstrncpy(snew, pnew, 2 * len + 1);
- }
-
+      /* correct the size of old based on len
+       * and copy the new string to inew
+ */
+ inew = (char *)malloc(sizeof(char) * len + 1);
+ bstrncpy(inew,old,len + 1);
+ /* escape the correct size of old */
+ dbi_conn_escape_string_copy(mdb->db, inew, &pnew);
+ free(inew);
+ /* copy the escaped string to snew */
+ bstrncpy(snew, pnew, 2 * len + 1);
+ }
+
Dmsg2(500, "dbi_conn_escape_string_copy %p %s\n",snew,snew);
-
+
}
/*
DBI_ROW row = NULL; // by default, return NULL
Dmsg0(500, "my_dbi_fetch_row start\n");
-
- if (!mdb->row || mdb->row_size < mdb->num_fields) {
+ if ((!mdb->row || mdb->row_size < mdb->num_fields) && mdb->num_rows > 0) {
int num_fields = mdb->num_fields;
Dmsg1(500, "we have need space of %d bytes\n", sizeof(char *) * mdb->num_fields);
if (mdb->row) {
Dmsg0(500, "my_dbi_fetch_row freeing space\n");
+ Dmsg2(500, "my_dbi_free_row row: '%p' num_fields: '%d'\n", mdb->row, mdb->num_fields);
+ if (mdb->num_rows != 0) {
+ for(j = 0; j < mdb->num_fields; j++) {
+ Dmsg2(500, "my_dbi_free_row row '%p' '%d'\n", mdb->row[j], j);
+ if(mdb->row[j]) {
+ free(mdb->row[j]);
+ }
+ }
+ }
free(mdb->row);
}
- num_fields += 20; /* add a bit extra */
+ //num_fields += 20; /* add a bit extra */
mdb->row = (DBI_ROW)malloc(sizeof(char *) * num_fields);
mdb->row_size = num_fields;
}
// if still within the result set
- if (mdb->row_number <= mdb->num_rows) {
- Dmsg2(500, "my_dbi_fetch_row row number '%d' is acceptable (0..%d)\n", mdb->row_number, mdb->num_rows);
+ if (mdb->row_number <= mdb->num_rows && mdb->row_number != DBI_ERROR_BADPTR) {
+ Dmsg2(500, "my_dbi_fetch_row row number '%d' is acceptable (1..%d)\n", mdb->row_number, mdb->num_rows);
// get each value from this row
for (j = 0; j < mdb->num_fields; j++) {
mdb->row[j] = my_dbi_getvalue(mdb->result, mdb->row_number, j);
- Dmsg2(500, "my_dbi_fetch_row field '%d' has value '%s'\n", j, mdb->row[j]);
+ // allocate space to queue row
+ mdb->field_get = (DBI_FIELD_GET *)malloc(sizeof(DBI_FIELD_GET));
+ // store the pointer in queue
+ mdb->field_get->value = mdb->row[j];
+ Dmsg4(500, "my_dbi_fetch_row row[%d] field: '%p' in queue: '%p' has value: '%s'\n",
+ j, mdb->row[j], mdb->field_get->value, mdb->row[j]);
+ // insert in queue to future free
+ dbi_getvalue_list->append(mdb->field_get);
}
// increment the row number for the next call
mdb->row_number++;
row = mdb->row;
} else {
- Dmsg2(500, "my_dbi_fetch_row row number '%d' is NOT acceptable (0..%d)\n", mdb->row_number, mdb->num_rows);
+ Dmsg2(500, "my_dbi_fetch_row row number '%d' is NOT acceptable (1..%d)\n", mdb->row_number, mdb->num_rows);
}
Dmsg1(500, "my_dbi_fetch_row finishes returning %p\n", row);
int max_length;
int i;
int this_length;
+ char *cbuf = NULL;
max_length = 0;
for (i = 0; i < mdb->num_rows; i++) {
if (my_dbi_getisnull(mdb->result, i, field_num)) {
this_length = 4; // "NULL"
} else {
- // TODO: error
- this_length = cstrlen(my_dbi_getvalue(mdb->result, i, field_num));
+ cbuf = my_dbi_getvalue(mdb->result, i, field_num);
+ this_length = cstrlen(cbuf);
+ // cbuf is always free
+ free(cbuf);
}
if (max_length < this_length) {
mdb->fields_size = mdb->num_fields;
for (i = 0; i < mdb->num_fields; i++) {
+      // dbi field numbering starts at 1, so increment i by 1
dbi_index = i + 1;
Dmsg1(500, "filling field %d\n", i);
mdb->fields[i].name = (char *)dbi_result_get_field_name(mdb->result, dbi_index);
mdb->result = NULL;
}
- //for (int i=0; i < 10; i++) {
-
- mdb->result = (void **)dbi_conn_query(mdb->db, query);
-
- // if (mdb->result) {
- // break;
- // }
- // bmicrosleep(5, 0);
- //}
- if (mdb->result == NULL) {
- Dmsg2(50, "Query failed: %s %p\n", query, mdb->result);
+ mdb->result = (void **)dbi_conn_query(mdb->db, query);
+
+ if (!mdb->result) {
+ Dmsg2(50, "Query failed: %s %p\n", query, mdb->result);
goto bail_out;
}
- //mdb->status = (dbi_error_flag)dbi_conn_error_flag(mdb->db);
- mdb->status = DBI_ERROR_NONE;
-
+ mdb->status = (dbi_error_flag) dbi_conn_error(mdb->db, &errmsg);
+
if (mdb->status == DBI_ERROR_NONE) {
Dmsg1(500, "we have a result\n", query);
// how many fields in the set?
+ // num_fields starting at 1
mdb->num_fields = dbi_result_get_numfields(mdb->result);
Dmsg1(500, "we have %d fields\n", mdb->num_fields);
-
+ // if no result num_rows is 0
mdb->num_rows = dbi_result_get_numrows(mdb->result);
Dmsg1(500, "we have %d rows\n", mdb->num_rows);
return mdb->status;
bail_out:
- mdb->status = dbi_conn_error_flag(mdb->db);
- dbi_conn_error(mdb->db, &errmsg);
- Dmsg4(500, "my_dbi_query we failed dbi error "
+ mdb->status = (dbi_error_flag) dbi_conn_error(mdb->db,&errmsg);
+ //dbi_conn_error(mdb->db, &errmsg);
+ Dmsg4(500, "my_dbi_query we failed dbi error: "
"'%s' '%p' '%d' flag '%d''\n", errmsg, mdb->result, mdb->result, mdb->status);
dbi_result_free(mdb->result);
mdb->result = NULL;
void my_dbi_free_result(B_DB *mdb)
{
- int i;
-
+
+ DBI_FIELD_GET *f;
db_lock(mdb);
- //Dmsg2(500, "my_dbi_free_result started result '%p' '%p'\n", mdb->result, mdb->result);
- if (mdb->result != NULL) {
- i = dbi_result_free(mdb->result);
- if(i == 0) {
- mdb->result = NULL;
- //Dmsg2(500, "my_dbi_free_result result '%p' '%d'\n", mdb->result, mdb->result);
- }
-
+ if (mdb->result) {
+ Dmsg1(500, "my_dbi_free_result result '%p'\n", mdb->result);
+ dbi_result_free(mdb->result);
}
+ mdb->result = NULL;
+
if (mdb->row) {
free(mdb->row);
- mdb->row = NULL;
}
+   /* Now it is time to free all values returned by my_dbi_getvalue.
+    * This is necessary because libdbi does not free the memory it
+    * returns in its results, and some Bacula routines call
+    * my_dbi_fetch_row more than once.
+    *
+    * Storing every allocated pointer in a queue is a good way to free
+    * them all when necessary.
+    */
+ foreach_dlist(f, dbi_getvalue_list) {
+ Dmsg2(500, "my_dbi_free_result field value: '%p' in queue: '%p'\n", f->value, f);
+ free(f->value);
+ free(f);
+ }
+
+ mdb->row = NULL;
+
if (mdb->fields) {
free(mdb->fields);
mdb->fields = NULL;
}
db_unlock(mdb);
- //Dmsg0(500, "my_dbi_free_result finish\n");
-
+ Dmsg0(500, "my_dbi_free_result finish\n");
+
}
-const char *my_dbi_strerror(B_DB *mdb)
+const char *my_dbi_strerror(B_DB *mdb)
{
const char *errmsg;
-
+
dbi_conn_error(mdb->db, &errmsg);
-
+
return errmsg;
}
-// TODO: make batch insert work with libdbi
#ifdef HAVE_BATCH_FILE_INSERT
+/*
+ * This may look a bit strange, but it is the only way to do it.
+ *
+ * Returns 1 if OK
+ *         0 if failed
+ */
int my_dbi_batch_start(JCR *jcr, B_DB *mdb)
{
- char *query = "COPY batch FROM STDIN";
-
- Dmsg0(500, "my_postgresql_batch_start started\n");
-
- if (my_postgresql_query(mdb,
- "CREATE TEMPORARY TABLE batch ("
- "fileindex int,"
- "jobid int,"
- "path varchar,"
- "name varchar,"
- "lstat varchar,"
- "md5 varchar)") == 1)
- {
- Dmsg0(500, "my_postgresql_batch_start failed\n");
+ const char *query = "COPY batch FROM STDIN";
+
+ Dmsg0(500, "my_dbi_batch_start started\n");
+
+ switch (mdb->db_type) {
+ case SQL_TYPE_MYSQL:
+ db_lock(mdb);
+ if (my_dbi_query(mdb,
+ "CREATE TEMPORARY TABLE batch ("
+ "FileIndex integer,"
+ "JobId integer,"
+ "Path blob,"
+ "Name blob,"
+ "LStat tinyblob,"
+ "MD5 tinyblob)") == 1)
+ {
+ Dmsg0(500, "my_dbi_batch_start failed\n");
+ return 1;
+ }
+ db_unlock(mdb);
+ Dmsg0(500, "my_dbi_batch_start finishing\n");
return 1;
- }
-
- // We are starting a new query. reset everything.
- mdb->num_rows = -1;
- mdb->row_number = -1;
- mdb->field_number = -1;
+ break;
+ case SQL_TYPE_POSTGRESQL:
+
+ if (my_dbi_query(mdb, "CREATE TEMPORARY TABLE batch ("
+ "fileindex int,"
+ "jobid int,"
+ "path varchar,"
+ "name varchar,"
+ "lstat varchar,"
+ "md5 varchar)") == 1)
+ {
+ Dmsg0(500, "my_dbi_batch_start failed\n");
+ return 1;
+ }
- my_postgresql_free_result(mdb);
+ // We are starting a new query. reset everything.
+ mdb->num_rows = -1;
+ mdb->row_number = -1;
+ mdb->field_number = -1;
- for (int i=0; i < 10; i++) {
- mdb->result = PQexec(mdb->db, query);
- if (mdb->result) {
- break;
+ my_dbi_free_result(mdb);
+
+ for (int i=0; i < 10; i++) {
+ my_dbi_query(mdb, query);
+ if (mdb->result) {
+ break;
+ }
+ bmicrosleep(5, 0);
+ }
+ if (!mdb->result) {
+ Dmsg1(50, "Query failed: %s\n", query);
+ goto bail_out;
}
- bmicrosleep(5, 0);
- }
- if (!mdb->result) {
- Dmsg1(50, "Query failed: %s\n", query);
- goto bail_out;
- }
- mdb->status = PQresultStatus(mdb->result);
- if (mdb->status == PGRES_COPY_IN) {
- // how many fields in the set?
- mdb->num_fields = (int) PQnfields(mdb->result);
- mdb->num_rows = 0;
- mdb->status = 1;
- } else {
- Dmsg1(50, "Result status failed: %s\n", query);
- goto bail_out;
- }
+ mdb->status = (dbi_error_flag)dbi_conn_error(mdb->db, NULL);
+ //mdb->status = DBI_ERROR_NONE;
- Dmsg0(500, "my_postgresql_batch_start finishing\n");
+ if (mdb->status == DBI_ERROR_NONE) {
+ // how many fields in the set?
+ mdb->num_fields = dbi_result_get_numfields(mdb->result);
+ mdb->num_rows = dbi_result_get_numrows(mdb->result);
+ mdb->status = (dbi_error_flag) 1;
+ } else {
+ Dmsg1(50, "Result status failed: %s\n", query);
+ goto bail_out;
+ }
- return mdb->status;
+ Dmsg0(500, "my_postgresql_batch_start finishing\n");
+
+ return mdb->status;
+ break;
+ case SQL_TYPE_SQLITE:
+ db_lock(mdb);
+ if (my_dbi_query(mdb, "CREATE TEMPORARY TABLE batch ("
+ "FileIndex integer,"
+ "JobId integer,"
+ "Path blob,"
+ "Name blob,"
+ "LStat tinyblob,"
+ "MD5 tinyblob)") == 1)
+ {
+ Dmsg0(500, "my_dbi_batch_start failed\n");
+ goto bail_out;
+ }
+ db_unlock(mdb);
+ Dmsg0(500, "my_dbi_batch_start finishing\n");
+ return 1;
+ break;
+ case SQL_TYPE_SQLITE3:
+ db_lock(mdb);
+ if (my_dbi_query(mdb, "CREATE TEMPORARY TABLE batch ("
+ "FileIndex integer,"
+ "JobId integer,"
+ "Path blob,"
+ "Name blob,"
+ "LStat tinyblob,"
+ "MD5 tinyblob)") == 1)
+ {
+ Dmsg0(500, "my_dbi_batch_start failed\n");
+ goto bail_out;
+ }
+ db_unlock(mdb);
+ Dmsg0(500, "my_dbi_batch_start finishing\n");
+ return 1;
+ break;
+ }
bail_out:
- Mmsg1(&mdb->errmsg, _("error starting batch mode: %s"), PQerrorMessage(mdb->db));
- mdb->status = 0;
- PQclear(mdb->result);
+ Mmsg1(&mdb->errmsg, _("error starting batch mode: %s"), my_dbi_strerror(mdb));
+ mdb->status = (dbi_error_flag) 0;
+ my_dbi_free_result(mdb);
mdb->result = NULL;
return mdb->status;
}
/* set error to something to abort operation */
int my_dbi_batch_end(JCR *jcr, B_DB *mdb, const char *error)
{
- int res;
- int count=30;
- Dmsg0(500, "my_postgresql_batch_end started\n");
+ int res = 0;
+ int count = 30;
+ int (*custom_function)(void*, const char*) = NULL;
+ dbi_conn_t *myconn = (dbi_conn_t *)(mdb->db);
+
+ Dmsg0(500, "my_dbi_batch_end started\n");
if (!mdb) { /* no files ? */
return 0;
}
- do {
- res = PQputCopyEnd(mdb->db, error);
- } while (res == 0 && --count > 0);
+ switch (mdb->db_type) {
+ case SQL_TYPE_MYSQL:
+ if(mdb) {
+ mdb->status = (dbi_error_flag) 0;
+ }
+ break;
+ case SQL_TYPE_POSTGRESQL:
+ custom_function = (custom_function_end_t)dbi_driver_specific_function(dbi_conn_get_driver(mdb->db), "PQputCopyEnd");
- if (res == 1) {
- Dmsg0(500, "ok\n");
- mdb->status = 1;
- }
-
- if (res <= 0) {
- Dmsg0(500, "we failed\n");
- mdb->status = 0;
- Mmsg1(&mdb->errmsg, _("error ending batch mode: %s"), PQerrorMessage(mdb->db));
+
+ do {
+ res = (*custom_function)(myconn->connection, error);
+ } while (res == 0 && --count > 0);
+
+ if (res == 1) {
+ Dmsg0(500, "ok\n");
+ mdb->status = (dbi_error_flag) 1;
+ }
+
+ if (res <= 0) {
+ Dmsg0(500, "we failed\n");
+ mdb->status = (dbi_error_flag) 0;
+ //Mmsg1(&mdb->errmsg, _("error ending batch mode: %s"), PQerrorMessage(mdb->db));
+ }
+ break;
+ case SQL_TYPE_SQLITE:
+ if(mdb) {
+ mdb->status = (dbi_error_flag) 0;
+ }
+ break;
+ case SQL_TYPE_SQLITE3:
+ if(mdb) {
+ mdb->status = (dbi_error_flag) 0;
+ }
+ break;
}
-
- Dmsg0(500, "my_postgresql_batch_end finishing\n");
- return mdb->status;
+ Dmsg0(500, "my_dbi_batch_end finishing\n");
+
+ return true;
}
+/*
+ * This function is big and uses a big switch.
+ * In the near future it would be better to split it into
+ * smaller functions and refactor it.
+ */
int my_dbi_batch_insert(JCR *jcr, B_DB *mdb, ATTR_DBR *ar)
{
int res;
int count=30;
+ dbi_conn_t *myconn = (dbi_conn_t *)(mdb->db);
+ int (*custom_function)(void*, const char*, int) = NULL;
+ char* (*custom_function_error)(void*) = NULL;
size_t len;
char *digest;
char ed1[50];
- mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1);
- my_postgresql_copy_escape(mdb->esc_name, mdb->fname, mdb->fnl);
+ Dmsg0(500, "my_dbi_batch_insert started \n");
+ mdb->esc_name = check_pool_memory_size(mdb->esc_name, mdb->fnl*2+1);
mdb->esc_path = check_pool_memory_size(mdb->esc_path, mdb->pnl*2+1);
- my_postgresql_copy_escape(mdb->esc_path, mdb->path, mdb->pnl);
if (ar->Digest == NULL || ar->Digest[0] == 0) {
- digest = "0";
+ *digest = '\0';
} else {
digest = ar->Digest;
}
- len = Mmsg(mdb->cmd, "%u\t%s\t%s\t%s\t%s\t%s\n",
- ar->FileIndex, edit_int64(ar->JobId, ed1), mdb->esc_path,
- mdb->esc_name, ar->attr, digest);
+ switch (mdb->db_type) {
+ case SQL_TYPE_MYSQL:
+ db_escape_string(jcr, mdb, mdb->esc_name, mdb->fname, mdb->fnl);
+ db_escape_string(jcr, mdb, mdb->esc_path, mdb->path, mdb->pnl);
+ len = Mmsg(mdb->cmd, "INSERT INTO batch VALUES (%u,%s,'%s','%s','%s','%s')",
+ ar->FileIndex, edit_int64(ar->JobId,ed1), mdb->esc_path,
+ mdb->esc_name, ar->attr, digest);
+
+ if (my_dbi_query(mdb,mdb->cmd) == 1)
+ {
+ Dmsg0(500, "my_dbi_batch_insert failed\n");
+ goto bail_out;
+ }
- do {
- res = PQputCopyData(mdb->db,
- mdb->cmd,
- len);
- } while (res == 0 && --count > 0);
+ Dmsg0(500, "my_dbi_batch_insert finishing\n");
- if (res == 1) {
- Dmsg0(500, "ok\n");
- mdb->changes++;
- mdb->status = 1;
- }
+ return 1;
+ break;
+ case SQL_TYPE_POSTGRESQL:
+ my_postgresql_copy_escape(mdb->esc_name, mdb->fname, mdb->fnl);
+ my_postgresql_copy_escape(mdb->esc_path, mdb->path, mdb->pnl);
+ len = Mmsg(mdb->cmd, "%u\t%s\t%s\t%s\t%s\t%s\n",
+ ar->FileIndex, edit_int64(ar->JobId, ed1), mdb->esc_path,
+ mdb->esc_name, ar->attr, digest);
+
+      /* libdbi doesn't support CopyData, so we need to call a
+       * postgresql-specific function to do this work
+       */
+ Dmsg2(500, "my_dbi_batch_insert :\n %s \ncmd_size: %d",mdb->cmd, len);
+ if ((custom_function = (custom_function_insert_t)dbi_driver_specific_function(dbi_conn_get_driver(mdb->db),
+ "PQputCopyData")) != NULL) {
+ do {
+ res = (*custom_function)(myconn->connection, mdb->cmd, len);
+ } while (res == 0 && --count > 0);
+
+ if (res == 1) {
+ Dmsg0(500, "ok\n");
+ mdb->changes++;
+ mdb->status = (dbi_error_flag) 1;
+ }
+
+ if (res <= 0) {
+ Dmsg0(500, "my_dbi_batch_insert failed\n");
+ goto bail_out;
+ }
+
+ Dmsg0(500, "my_dbi_batch_insert finishing\n");
+ return mdb->status;
+ } else {
+ // ensure to detect a PQerror
+ custom_function_error = (custom_function_error_t)dbi_driver_specific_function(dbi_conn_get_driver(mdb->db), "PQerrorMessage");
+ Dmsg1(500, "my_dbi_batch_insert failed\n PQerrorMessage: %s", (*custom_function_error)(myconn->connection));
+ goto bail_out;
+ }
+ break;
+ case SQL_TYPE_SQLITE:
+ db_escape_string(jcr, mdb, mdb->esc_name, mdb->fname, mdb->fnl);
+ db_escape_string(jcr, mdb, mdb->esc_path, mdb->path, mdb->pnl);
+ len = Mmsg(mdb->cmd, "INSERT INTO batch VALUES (%u,%s,'%s','%s','%s','%s')",
+ ar->FileIndex, edit_int64(ar->JobId,ed1), mdb->esc_path,
+ mdb->esc_name, ar->attr, digest);
+ if (my_dbi_query(mdb,mdb->cmd) == 1)
+ {
+ Dmsg0(500, "my_dbi_batch_insert failed\n");
+ goto bail_out;
+ }
+
+ Dmsg0(500, "my_dbi_batch_insert finishing\n");
+
+ return 1;
+ break;
+ case SQL_TYPE_SQLITE3:
+ db_escape_string(jcr, mdb, mdb->esc_name, mdb->fname, mdb->fnl);
+ db_escape_string(jcr, mdb, mdb->esc_path, mdb->path, mdb->pnl);
+ len = Mmsg(mdb->cmd, "INSERT INTO batch VALUES (%u,%s,'%s','%s','%s','%s')",
+ ar->FileIndex, edit_int64(ar->JobId,ed1), mdb->esc_path,
+ mdb->esc_name, ar->attr, digest);
+ if (my_dbi_query(mdb,mdb->cmd) == 1)
+ {
+ Dmsg0(500, "my_dbi_batch_insert failed\n");
+ goto bail_out;
+ }
- if (res <= 0) {
- Dmsg0(500, "we failed\n");
- mdb->status = 0;
- Mmsg1(&mdb->errmsg, _("error ending batch mode: %s"), PQerrorMessage(mdb->db));
+ Dmsg0(500, "my_dbi_batch_insert finishing\n");
+
+ return 1;
+ break;
}
- Dmsg0(500, "my_postgresql_batch_insert finishing\n");
+bail_out:
+ Mmsg1(&mdb->errmsg, _("error inserting batch mode: %s"), my_dbi_strerror(mdb));
+ mdb->status = (dbi_error_flag) 0;
+ my_dbi_free_result(mdb);
+ return mdb->status;
+}
- return mdb->status;
+/*
+ * Escape strings so that PostgreSQL is happy on COPY
+ *
+ * NOTE! len is the length of the old string. Your new
+ * string must be long enough (max 2*old+1) to hold
+ * the escaped output.
+ */
+char *my_postgresql_copy_escape(char *dest, char *src, size_t len)
+{
+   /* we have to escape \t, \n, \r, \ */
+   char c = '\0' ;
+
+   /* Process at most len bytes of src, stopping early at a NUL terminator. */
+   while (len > 0 && *src) {
+      switch (*src) {
+      case '\n':
+         c = 'n';
+         break;
+      case '\\':
+         c = '\\';
+         break;
+      case '\t':
+         c = 't';
+         break;
+      case '\r':
+         c = 'r';
+         break;
+      default:
+         /* no escaping needed for this byte */
+         c = '\0' ;
+      }
+
+      if (c) {
+         /* emit a backslash followed by the escape letter */
+         *dest = '\\';
+         dest++;
+         *dest = c;
+      } else {
+         /* copy the byte through unchanged */
+         *dest = *src;
+      }
+
+      len--;
+      src++;
+      dest++;
+   }
+
+   *dest = '\0';
+   /* NOTE: returns a pointer to the terminating NUL, not to the start of dest */
+   return dest;
+}
#endif /* HAVE_BATCH_FILE_INSERT */
* int PQgetisnull(const PGresult *res,
* int row_number,
* int column_number);
- *
+ *
* use dbi_result_seek_row to search in result set
*/
int my_dbi_getisnull(dbi_result *result, int row_number, int column_number) {
if(row_number == 0) {
row_number++;
}
-
+
column_number++;
-
+
if(dbi_result_seek_row(result, row_number)) {
i = dbi_result_field_is_null_idx(result,column_number);
return i;
} else {
-
+
return 0;
}
-
+
}
-/* my_dbi_getvalue
+/* my_dbi_getvalue
* like PQgetvalue;
* char *PQgetvalue(const PGresult *res,
* int row_number,
*/
char *my_dbi_getvalue(dbi_result *result, int row_number, unsigned int column_number) {
- /* TODO: This is very bad, need refactoring */
- POOLMEM *buf = get_pool_memory(PM_FNAME);
- //const unsigned char *bufb = (unsigned char *)malloc(sizeof(unsigned char) * 300);
- //const unsigned char *bufb;
+ char *buf = NULL;
const char *errmsg;
- const char *field_name;
+ const char *field_name;
unsigned short dbitype;
- int32_t field_length = 0;
+ size_t field_length;
int64_t num;
-
+
/* correct the index for dbi interface
* dbi index begins 1
* I prefer do not change others functions
*/
- Dmsg3(600, "my_dbi_getvalue pre-starting result '%p' row number '%d' column number '%d'\n",
+ Dmsg3(600, "my_dbi_getvalue pre-starting result '%p' row number '%d' column number '%d'\n",
result, row_number, column_number);
-
+
column_number++;
if(row_number == 0) {
row_number++;
}
-
- Dmsg3(600, "my_dbi_getvalue starting result '%p' row number '%d' column number '%d'\n",
+
+ Dmsg3(600, "my_dbi_getvalue starting result '%p' row number '%d' column number '%d'\n",
result, row_number, column_number);
-
+
if(dbi_result_seek_row(result, row_number)) {
field_name = dbi_result_get_field_name(result, column_number);
field_length = dbi_result_get_field_length(result, field_name);
dbitype = dbi_result_get_field_type_idx(result,column_number);
-
+
+ Dmsg3(500, "my_dbi_getvalue start: type: '%d' "
+ "field_length bytes: '%d' fieldname: '%s'\n",
+ dbitype, field_length, field_name);
+
if(field_length) {
- buf = check_pool_memory_size(buf, field_length + 1);
+ //buf = (char *)malloc(sizeof(char *) * field_length + 1);
+ buf = (char *)malloc(field_length + 1);
} else {
- buf = check_pool_memory_size(buf, 50);
+ /* if numbers */
+ buf = (char *)malloc(sizeof(char *) * 50);
}
-
- Dmsg5(500, "my_dbi_getvalue result '%p' type '%d' \n field name '%s' "
- "field_length '%d' field_length size '%d'\n",
- result, dbitype, field_name, field_length, sizeof_pool_memory(buf));
-
+
switch (dbitype) {
case DBI_TYPE_INTEGER:
- num = dbi_result_get_longlong(result, field_name);
+ num = dbi_result_get_longlong(result, field_name);
edit_int64(num, buf);
field_length = strlen(buf);
break;
case DBI_TYPE_STRING:
if(field_length) {
- field_length = bsnprintf(buf, field_length + 1, "%s",
- dbi_result_get_string(result, field_name));
+ field_length = bsnprintf(buf, field_length + 1, "%s",
+ dbi_result_get_string(result, field_name));
} else {
buf[0] = 0;
}
* following, change this to what Bacula espected
*/
if(field_length) {
- field_length = bsnprintf(buf, field_length + 1, "%s",
+ field_length = bsnprintf(buf, field_length + 1, "%s",
dbi_result_get_binary(result, field_name));
} else {
buf[0] = 0;
case DBI_TYPE_DATETIME:
time_t last;
struct tm tm;
-
+
last = dbi_result_get_datetime(result, field_name);
-
+
if(last == -1) {
- field_length = bsnprintf(buf, 20, "0000-00-00 00:00:00");
+ field_length = bsnprintf(buf, 20, "0000-00-00 00:00:00");
} else {
(void)localtime_r(&last, &tm);
field_length = bsnprintf(buf, 20, "%04d-%02d-%02d %02d:%02d:%02d",
dbi_conn_error(dbi_result_get_conn(result), &errmsg);
Dmsg1(500, "my_dbi_getvalue error: %s\n", errmsg);
}
-
- Dmsg3(500, "my_dbi_getvalue finish result '%p' num bytes '%d' data '%s'\n",
- result, field_length, buf);
+
+ Dmsg3(500, "my_dbi_getvalue finish buffer: '%p' num bytes: '%d' data: '%s'\n",
+ buf, field_length, buf);
+
+   // buf is intentionally not freed here: callers free it (directly, or via the dbi_getvalue_list queue)
return buf;
}
-int my_dbi_sql_insert_id(B_DB *mdb, char *table_name)
+static int my_dbi_sequence_last(B_DB *mdb, const char *table_name)
{
/*
Obtain the current value of the sequence that
everything else can use the PostgreSQL formula.
*/
-
- char sequence[30];
- uint64_t id = 0;
+
+ char sequence[30];
+ uint64_t id = 0;
if (mdb->db_type == SQL_TYPE_POSTGRESQL) {
-
+
if (strcasecmp(table_name, "basefiles") == 0) {
bstrncpy(sequence, "basefiles_baseid", sizeof(sequence));
} else {
} else {
id = dbi_conn_sequence_last(mdb->db, NT_(table_name));
}
-
+
return id;
}
-#ifdef HAVE_BATCH_FILE_INSERT
-const char *my_dbi_batch_lock_path_query =
- "BEGIN; LOCK TABLE Path IN SHARE ROW EXCLUSIVE MODE";
+int my_dbi_insert_autokey_record(B_DB *mdb, const char *query, const char *table_name)
+{
+ /*
+ * First execute the insert query and then retrieve the currval.
+ */
+ if (my_dbi_query(mdb, query)) {
+ return 0;
+ }
+ mdb->num_rows = sql_affected_rows(mdb);
+ if (mdb->num_rows != 1) {
+ return 0;
+ }
-const char *my_dbi_batch_lock_filename_query =
- "BEGIN; LOCK TABLE Filename IN SHARE ROW EXCLUSIVE MODE";
+ mdb->changes++;
-const char *my_dbi_batch_unlock_tables_query = "COMMIT";
+ return my_dbi_sequence_last(mdb, table_name);
+}
-const char *my_dbi_batch_fill_path_query =
+#ifdef HAVE_BATCH_FILE_INSERT
+/* Path-table lock query per backend, indexed by db_type
+ * (assumes SQL_TYPE_* values are 0..4 in the order listed -- TODO confirm
+ * against the enum definition) */
+const char *my_dbi_batch_lock_path_query[5] = {
+   /* Mysql */
+   "LOCK TABLES Path write, batch write, Path as p write",
+   /* Postgresql */
+   "BEGIN; LOCK TABLE Path IN SHARE ROW EXCLUSIVE MODE",
+   /* SQLite */
+   "BEGIN",
+   /* SQLite3 */
+   "BEGIN",
+   /* Ingres */
+   "BEGIN"
+};
+
+/* Filename-table lock query per backend, indexed by db_type
+ * (same index order as my_dbi_batch_lock_path_query) */
+const char *my_dbi_batch_lock_filename_query[5] = {
+   /* Mysql */
+   "LOCK TABLES Filename write, batch write, Filename as f write",
+   /* Postgresql */
+   "BEGIN; LOCK TABLE Filename IN SHARE ROW EXCLUSIVE MODE",
+   /* SQLite */
+   "BEGIN",
+   /* SQLite3 */
+   "BEGIN",
+   /* Ingres */
+   "BEGIN"
+};
+
+/* Query that releases the batch-mode table locks, per backend, indexed by
+ * db_type (same index order as the lock-query tables above) */
+const char *my_dbi_batch_unlock_tables_query[5] = {
+   /* Mysql */
+   "UNLOCK TABLES",
+   /* Postgresql */
+   "COMMIT",
+   /* SQLite */
+   "COMMIT",
+   /* SQLite3 */
+   "COMMIT",
+   /* Ingres */
+   "COMMIT"
+};
+
+const char *my_dbi_batch_fill_path_query[5] = {
+ /* Mysql */
"INSERT INTO Path (Path) "
- "SELECT a.Path FROM "
- "(SELECT DISTINCT Path FROM batch) AS a "
- "WHERE NOT EXISTS (SELECT Path FROM Path WHERE Path = a.Path) ";
-
+ "SELECT a.Path FROM "
+ "(SELECT DISTINCT Path FROM batch) AS a WHERE NOT EXISTS "
+ "(SELECT Path FROM Path AS p WHERE p.Path = a.Path)",
+ /* Postgresql */
+ "INSERT INTO Path (Path) "
+ "SELECT a.Path FROM "
+ "(SELECT DISTINCT Path FROM batch) AS a "
+ "WHERE NOT EXISTS (SELECT Path FROM Path WHERE Path = a.Path) ",
+ /* SQLite */
+ "INSERT INTO Path (Path)"
+ " SELECT DISTINCT Path FROM batch"
+ " EXCEPT SELECT Path FROM Path",
+ /* SQLite3 */
+ "INSERT INTO Path (Path)"
+ " SELECT DISTINCT Path FROM batch"
+ " EXCEPT SELECT Path FROM Path",
+ /* Ingres */
+ "INSERT INTO Path (Path) "
+ "SELECT a.Path FROM "
+ "(SELECT DISTINCT Path FROM batch) AS a "
+ "WHERE NOT EXISTS (SELECT Path FROM Path WHERE Path = a.Path) "
+};
-const char *my_dbi_batch_fill_filename_query =
+const char *my_dbi_batch_fill_filename_query[5] = {
+ /* Mysql */
+ "INSERT INTO Filename (Name) "
+ "SELECT a.Name FROM "
+ "(SELECT DISTINCT Name FROM batch) AS a WHERE NOT EXISTS "
+ "(SELECT Name FROM Filename AS f WHERE f.Name = a.Name)",
+ /* Postgresql */
"INSERT INTO Filename (Name) "
- "SELECT a.Name FROM "
- "(SELECT DISTINCT Name FROM batch) as a "
- "WHERE NOT EXISTS "
- "(SELECT Name FROM Filename WHERE Name = a.Name)";
+ "SELECT a.Name FROM "
+ "(SELECT DISTINCT Name FROM batch) as a "
+ "WHERE NOT EXISTS "
+ "(SELECT Name FROM Filename WHERE Name = a.Name)",
+ /* SQLite */
+ "INSERT INTO Filename (Name)"
+ " SELECT DISTINCT Name FROM batch "
+ " EXCEPT SELECT Name FROM Filename",
+ /* SQLite3 */
+ "INSERT INTO Filename (Name)"
+ " SELECT DISTINCT Name FROM batch "
+ " EXCEPT SELECT Name FROM Filename",
+ /* Ingres */
+ "INSERT INTO Filename (Name) "
+ "SELECT a.Name FROM "
+ "(SELECT DISTINCT Name FROM batch) as a "
+ "WHERE NOT EXISTS "
+ "(SELECT Name FROM Filename WHERE Name = a.Name)"
+};
+
#endif /* HAVE_BATCH_FILE_INSERT */
+/* SQL pattern-match operator for each backend, indexed by db_type
+ * (same index order as the batch query tables above) */
+const char *my_dbi_match[5] = {
+   /* Mysql */
+   "MATCH",
+   /* Postgresql */
+   "~",
+   /* SQLite */
+   "MATCH",
+   /* SQLite3 */
+   "MATCH",
+   /* Ingres */
+   "~"
+};
+
#endif /* HAVE_DBI */