jcr = j;
jcr->inc_use_count();
db = mdb; /* need to inc ref count */
+ prev_dir = get_pool_memory(PM_NAME);
jobids = get_pool_memory(PM_NAME);
pattern = get_pool_memory(PM_NAME);
- *pattern = *jobids = 0;
+ *prev_dir = *pattern = *jobids = 0;
dir_filenameid = pwd_id = offset = 0;
see_copies = see_all_version = false;
limit = 1000;
Bvfs::~Bvfs() {
free_pool_memory(jobids);
free_pool_memory(pattern);
+ free_pool_memory(prev_dir);
free_attr(attr);
jcr->dec_use_count();
}
while (p > path && !IsPathSeparator(*p)) {
p--;
}
- p = p+1; /* skip first / */
+ if (*p == '/') {
+ p++; /* skip first / */
+ }
}
return p;
}
{
pm_strcpy(db->path, path);
db->pnl = strlen(db->path);
- pwd_id = db_get_path_record(jcr, db);
+ ch_dir(db_get_path_record(jcr, db));
return pwd_id != 0;
}
POOL_MEM query;
- Mmsg(query,
-"SELECT File.JobId, File.FileId, File.LStat, "
- "File.Md5, Media.VolumeName, Media.InChanger "
+ Mmsg(query,//0 1 2 3
+"SELECT File.FileId, File.Md5, File.JobId, File.LStat, "
+// 4 5
+ "Media.VolumeName, Media.InChanger "
"FROM File, Job, Client, JobMedia, Media "
"WHERE File.FilenameId = %s "
"AND File.PathId=%s "
"%s ORDER BY FileId LIMIT %d OFFSET %d"
,edit_uint64(fnid, ed1), edit_uint64(pathid, ed2), client, q.c_str(),
limit, offset);
-
+ Dmsg1(dbglevel_sql, "q=%s\n", query.c_str());
db_sql_query(db, query.c_str(), list_entries, user_data);
}
return db_get_path_record(jcr, db);
}
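+
+/*
+ * db_sql_query() expects a plain C callback, so this static trampoline
+ * forwards each row to the Bvfs instance passed through ctx.
+ */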
+static int path_handler(void *ctx, int fields, char **row)
+{
+ Bvfs *fs = (Bvfs *) ctx;
+ return fs->_handle_path(ctx, fields, row);
+}
+
+int Bvfs::_handle_path(void *ctx, int fields, char **row)
+{
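+   /* The number of returned columns identifies the record type */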
+ if (fields == BVFS_DIR_RECORD) {
+      /* The same path can appear more than once; only forward the first */
+ if (strcmp(row[BVFS_Name], prev_dir)) {
+ pm_strcpy(prev_dir, row[BVFS_Name]);
+ return list_entries(user_data, fields, row);
+ }
+ }
+ return 0;
+}
+
/*
* Retrieve . and .. information
*/
get_dir_filenameid();
}
+   /* Will fetch directories; reset the duplicate filter */
+ *prev_dir = 0;
+
POOL_MEM query;
Mmsg(query,
"((SELECT PPathId AS PathId, '..' AS Path "
"ORDER BY tmp.Path, JobId DESC ",
query.c_str(), edit_uint64(dir_filenameid, ed2), jobids);
- Dmsg1(dbglevel_sql, "q=%s\n", query.c_str());
- db_sql_query(db, query2.c_str(), list_entries, user_data);
+ Dmsg1(dbglevel_sql, "q=%s\n", query2.c_str());
+ db_sql_query(db, query2.c_str(), path_handler, this);
}
-void Bvfs::ls_dirs()
+/* Returns true if there may be more directories to read */
+bool Bvfs::ls_dirs()
{
Dmsg1(dbglevel, "ls_dirs(%lld)\n", (uint64_t)pwd_id);
char ed1[50], ed2[50];
if (!*jobids) {
- return;
+ return false;
}
POOL_MEM filter;
get_dir_filenameid();
}
+   /* The SQL query can return the same directory several times; keep only the first */
+ *prev_dir = 0;
+
/* Let's retrieve the list of the visible dirs in this dir ...
* First, we need the empty FilenameId to locate the dirs
* efficiently in the File table
limit, offset);
Dmsg1(dbglevel_sql, "q=%s\n", query.c_str());
- db_sql_query(db, query.c_str(), list_entries, user_data);
+
+ db_lock(db);
+ db_sql_query(db, query.c_str(), path_handler, this);
+ nb_record = db->num_rows;
+ db_unlock(db);
+
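+   /* A full page suggests more records remain */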
+ return nb_record == limit;
}
-void Bvfs::ls_files()
+/* Returns true if there may be more files to read */
+bool Bvfs::ls_files()
{
Dmsg1(dbglevel, "ls_files(%lld)\n", (uint64_t)pwd_id);
char ed1[50];
if (!*jobids) {
- return ;
+ return false;
}
if (!pwd_id) {
limit,
offset);
Dmsg1(dbglevel_sql, "q=%s\n", query.c_str());
+
+ db_lock(db);
db_sql_query(db, query.c_str(), list_entries, user_data);
+ nb_record = db->num_rows;
+ db_unlock(db);
+
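+   /* A full page suggests more records remain */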
+ return nb_record == limit;
}
BVFS_JobId = 2,
BVFS_LStat = 3,
- BVFS_FileId = 4, /* Only if File record */
+ /* Only if File record */
+ BVFS_FileId = 4,
+
+ /* Only if File Version record */
+ BVFS_Md5 = 1,
+ BVFS_VolName = 4,
+ BVFS_VolInchanger = 5
} bvfs_row_index;
class Bvfs {
* avoids mistakes with string encoding
*/
void ch_dir(DBId_t pathid) {
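+      /* Changing directory resets the pagination offset */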
+ reset_offset();
pwd_id = pathid;
}
*/
bool ch_dir(char *path);
- void ls_files();
- void ls_dirs();
+   bool ls_files();             /* Returns true if there may be more files to read */
+   bool ls_dirs();              /* Returns true if there may be more dirs to read */
void ls_special_dirs(); /* get . and .. */
void get_all_file_versions(DBId_t pathid, DBId_t fnid, char *client);
user_data = ctx;
}
+ DBId_t get_pwd() {
+ return pwd_id;
+ }
+
ATTR *get_attr() {
return attr;
}
return jcr;
}
+   void reset_offset() {
+      offset = 0;
+   }
+
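+   /* Move to the next page of results between two ls_files()/ls_dirs() passes */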
+   void next_offset() {
+      offset += limit;
+   }
+
+ /* for internal use */
+ int _handle_path(void *, int, char **);
+
private:
JCR *jcr;
B_DB *db;
POOLMEM *jobids;
uint32_t limit;
uint32_t offset;
+ uint32_t nb_record; /* number of records of the last query */
POOLMEM *pattern;
- DBId_t pwd_id;
- DBId_t dir_filenameid;
- ATTR *attr;
+ DBId_t pwd_id; /* Current pathid */
+ DBId_t dir_filenameid; /* special FilenameId where Name='' */
+   POOLMEM *prev_dir;           /* ls_dirs query can return the same dir several times; keep the 1st */
+   ATTR *attr;                  /* Can be used by a handler to call decode_stat() */
bool see_all_version;
bool see_copies;
DBId_t get_dir_filenameid();
-
+
DB_RESULT_HANDLER *list_entries;
void *user_data;
};
/* Local variables */
static B_DB *db;
-
+static char *file="COPYRIGHT";
+static DBId_t fnid=0;
static const char *db_name = "regress";
static const char *db_user = "regress";
static const char *db_password = "";
" -w <working> specify working directory\n"
" -j <jobids> specify jobids\n"
" -p <path> specify path\n"
-//" -f <file> specify file\n"
+" -f <file> specify file\n"
+" -l <limit> maximum tuple to fetch\n"
" -T truncate cache table before starting\n"
" -v verbose\n"
" -? print this message\n\n"), 2001, VERSION, BDATE);
{
Bvfs *vfs = (Bvfs *)ctx;
ATTR *attr = vfs->get_attr();
- char *empty = "";
+ char *empty = "A A A A A A A A A A A A A A";
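+   /* dummy lstat: "A" decodes to 0 in Bacula's base64 encoding, so this
+    * stands for an all-zero stat when the LStat column is empty */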
+
+ memset(&attr->statp, 0, sizeof(struct stat));
+ decode_stat((row[BVFS_LStat] && row[BVFS_LStat][0])?row[BVFS_LStat]:empty,
+ &attr->statp, &attr->LinkFI);
if (fields == BVFS_DIR_RECORD || fields == BVFS_FILE_RECORD) {
- decode_stat((row[BVFS_LStat])?row[BVFS_LStat]:empty,
- &attr->statp, &attr->LinkFI);
+      /* display a clean ls-style listing */
+
if (fields == BVFS_DIR_RECORD) {
pm_strcpy(attr->ofname, bvfs_basename_dir(row[BVFS_Name]));
} else {
+      /* if we see the requested file, remember its FilenameId */
+ if (bstrcmp(row[BVFS_Name], file)) {
+ fnid = str_to_int64(row[BVFS_Id]);
+ }
pm_strcpy(attr->ofname, row[BVFS_Name]);
}
print_ls_output(vfs->get_jcr(), attr);
} else {
- Pmsg6(0, "%s\t%s\t%s\t%s\t%s\t%s",
- row[0], row[1], row[2], row[3], row[4], row[5]);
+ Pmsg5(0, "JobId=%s FileId=%s\tMd5=%s\tVolName=%s\tVolInChanger=%s\n",
+ row[BVFS_JobId], row[BVFS_Id], row[BVFS_Md5], row[BVFS_VolName],
+ row[BVFS_VolInchanger]);
+
+ pm_strcpy(attr->ofname, file);
+ print_ls_output(vfs->get_jcr(), attr);
}
return 0;
}
int main (int argc, char *argv[])
{
int ch;
- char *jobids="1", *path=NULL, *file=NULL;
+ char *jobids="1", *path=NULL, *client=NULL;
+ uint64_t limit=0;
bool clean=false;
setlocale(LC_ALL, "");
bindtextdomain("bacula", LOCALEDIR);
OSDependentInit();
- while ((ch = getopt(argc, argv, "h:c:d:n:P:Su:vf:w:?j:p:f:T")) != -1) {
+   while ((ch = getopt(argc, argv, "h:c:l:d:n:P:Su:vw:?j:p:f:T")) != -1) {
switch (ch) {
case 'd': /* debug level */
if (*optarg == 't') {
}
}
break;
+ case 'l':
+ limit = str_to_int64(optarg);
+ break;
+
+ case 'c':
+ client = optarg;
+ break;
case 'h':
db_host = optarg;
break;
case 'f':
- path = optarg;
+ file = optarg;
break;
case 'j':
fs.set_jobids(jobids);
fs.update_cache();
+   if (limit) {
+      fs.set_limit(limit);
+   }
if (path) {
fs.ch_dir(path);
fs.ls_special_dirs();
fs.ls_dirs();
- fs.ls_files();
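+      /* Page through the whole file list, limit entries at a time */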
+ while (fs.ls_files()) {
+ fs.next_offset();
+ }
+
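+      /* When -f matched a file and -c supplied a client, list all its versions */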
+ if (fnid && client) {
+ Pmsg0(0, "---------------------------------------------\n");
+ Pmsg1(0, "Getting file version for %s\n", file);
+ fs.get_all_file_versions(fs.get_pwd(), fnid, client);
+ }
+
exit (0);
}
General:
07Aug09
-ebl Create cache tables on the fly when using Bvfs object (for testing)
+ebl bvfs: Add example to list file versions
+ebl bvfs: Fix directory listing
+ebl bvfs: Add limit/offset implementation to save resources on the director
+ebl bvfs: Create cache tables on the fly when using Bvfs object (for testing)
06Aug09
ebl Document FT_DELETED FileIndex=0 special value in database Schema
ebl Add a new Bvfs class that implements brestore instant navigation
--- /dev/null
+#!/bin/sh
+#
+# Run a simple backup of the Bacula build directory, then create some
+#   new files, run Incremental jobs and restore those two files.
+# Finally, test the bvfs interface.
+#
+TestName="bvfs-test"
+JobName=Incremental
+. scripts/functions
+
+${rscripts}/cleanup
+${rscripts}/copy-test-confs
+echo "${tmpsrc}" >${tmp}/file-list
+echo "${cwd}/build" >> ${tmp}/file-list
+
+mkdir -p ${tmpsrc}
+cp -p ${src}/src/dird/*.c ${tmpsrc}
+cd ${tmp}
+echo "${tmpsrc}/ficheriro1.txt" >restore-list
+echo "${tmpsrc}/ficheriro2.txt" >>restore-list
+cd ${cwd}
+
+change_jobname CompressedTest $JobName
+start_test
+
+cat <<END_OF_DATA >${tmp}/bconcmds
+@output /dev/null
+messages
+@$out ${tmp}/log1.out
+label storage=File volume=TestVolume001
+label storage=File volume=TestVolume002
+run job=$JobName yes
+wait
+messages
+quit
+END_OF_DATA
+
+run_bacula
+check_for_zombie_jobs storage=File
+#
+# Now create two new files to be restored later
+#
+sleep 1
+echo "ficheriro1.txt" >${tmpsrc}/ficheriro1.txt
+cp -f ${tmpsrc}/dird.c ${tmpsrc}/ficheriro2.txt
+
+cat <<END_OF_DATA >${tmp}/bconcmds
+@output /dev/null
+messages
+@$out ${tmp}/log1.out
+@# Force the next job onto the second Volume
+update volume=TestVolume001 VolStatus=Used
+run level=Differential job=$JobName yes
+wait
+messages
+quit
+END_OF_DATA
+
+run_bconsole
+
+sleep 1
+touch ${tmpsrc}/ficheriro1.txt
+touch ${tmpsrc}/ficheriro2.txt
+
+cat <<END_OF_DATA >${tmp}/bconcmds
+@output /dev/null
+messages
+@$out ${tmp}/log1.out
+run level=Incremental job=$JobName yes
+wait
+messages
+quit
+END_OF_DATA
+
+run_bconsole
+
+sleep 1
+cd ${tmpsrc}
+cp -f ficheriro2.txt 1
+sed "s%a%b%g" 1 >ficheriro2.txt
+rm -f 1
+cd ${cwd}
+cat <<END_OF_DATA >${tmp}/bconcmds
+@output /dev/null
+messages
+@$out ${tmp}/log1.out
+run level=Differential job=$JobName yes
+wait
+messages
+quit
+END_OF_DATA
+
+run_bconsole
+
+sleep 1
+touch ${tmpsrc}/ficheriro1.txt
+touch ${tmpsrc}/ficheriro2.txt
+cat <<END_OF_DATA >${tmp}/bconcmds
+@output /dev/null
+messages
+@$out ${tmp}/log1.out
+run level=Incremental job=$JobName yes
+wait
+messages
+quit
+END_OF_DATA
+
+run_bconsole
+
+sleep 1
+touch ${tmpsrc}/ficheriro1.txt
+touch ${tmpsrc}/ficheriro2.txt
+cat <<END_OF_DATA >${tmp}/bconcmds
+@output /dev/null
+messages
+@$out ${tmp}/log1.out
+run level=Incremental job=$JobName yes
+wait
+messages
+quit
+END_OF_DATA
+
+run_bconsole
+
+sleep 1
+touch ${tmpsrc}/ficheriro1.txt
+touch ${tmpsrc}/ficheriro2.txt
+cat <<END_OF_DATA >${tmp}/bconcmds
+@output /dev/null
+messages
+@$out ${tmp}/log1.out
+run level=Incremental job=$JobName yes
+wait
+messages
+quit
+END_OF_DATA
+
+run_bconsole
+sleep 1
+touch ${tmpsrc}/ficheriro1.txt
+touch ${tmpsrc}/ficheriro2.txt
+
+cat <<END_OF_DATA >${tmp}/bconcmds
+@output /dev/null
+messages
+@$out ${tmp}/log1.out
+run level=Incremental job=$JobName yes
+wait
+messages
+@#
+@# now do a restore
+@#
+@$out ${tmp}/log2.out
+setdebug level=10 storage=File
+restore where=${tmp}/bacula-restores storage=File file=<${tmp}/restore-list
+yes
+wait
+status storage=File
+messages
+@output
+quit
+END_OF_DATA
+
+run_bconsole
+check_for_zombie_jobs storage=File
+stop_bacula
+
+check_two_logs
+#
+# Delete .c files because we restored only the txt files
+#
+rm -f ${tmpsrc}/*.c
+check_restore_tmp_build_diff
+
+
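+# Rebuild the bvfs cache (-T) and list the ${tmpsrc} directory for all jobs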
+${cwd}/build/src/tools/bvfs_test -T -p ${tmpsrc} -j 1,2,3,4,5,6,7,8 \
+ -w "$working" -n "$db_name" -u "$db_user" -P "$db_password"
+
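+# List the build directory contents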
+${cwd}/build/src/tools/bvfs_test -p ${cwd}/build/ -j 1,2,3,4,5,6,7,8 \
+ -w "$working" -n "$db_name" -u "$db_user" -P "$db_password"
+
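+# List ${tmpsrc} and show every version of ficheriro1.txt for this client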
+${cwd}/build/src/tools/bvfs_test -p ${tmpsrc} -j 1,2,3,4,5,6,7,8 \
+ -w "$working" -n "$db_name" -u "$db_user" -P "$db_password" \
+ -f ficheriro1.txt -c ${HOST}-fd
+
+end_test