-/*
- * Bacula hash table routines
- *
- * htable is a hash table of items (pointers). This code is
- * adapted and enhanced from code I wrote in 1982 for a
- * relocatable linker. At that time, the hash table size
- * was fixed and a primary number, which essentially provides
- * the randomness. In this program, the hash table can grow when
- * it gets too full, so the table size here is a binary number. The
- * hashing is provided using an idea from Tcl where the initial
- * hash code is "randomized" using a simple calculation from
- * a random number generator that multiplies by a big number
- * (I multiply by a prime number, while Tcl did not)
- * then shifts the result down and does the binary division
- * by masking. Increasing the size of the hash table is simple.
- * Just create a new larger table, walk the old table and
- * re-hash insert each entry into the new table.
- *
- *
- * Kern Sibbald, July MMIII
- *
- * Version $Id$
- *
- */
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2003-2006 Free Software Foundation Europe e.V.
+ Copyright (C) 2003-2008 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
(FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
Switzerland, email:ftf@fsfeurope.org.
*/
+/*
+ * Bacula hash table routines
+ *
+ * htable is a hash table of items (pointers). This code is
+ * adapted and enhanced from code I wrote in 1982 for a
+ * relocatable linker. At that time, the hash table size was
+ * fixed and was a prime number, which essentially provided
+ * the randomness. In this program, the hash table can grow when
+ * it gets too full, so the table size here is a power of two. The
+ * hashing uses an idea from Tcl, where the initial hash code is
+ * "randomized" using a simple calculation borrowed from random
+ * number generators that multiplies by a big number (I multiply
+ * by a prime number, while Tcl did not), then shifts the result
+ * down and does the "binary division" by masking. Increasing the
+ * size of the hash table is simple: just create a new, larger
+ * table, walk the old table, and re-hash and insert each entry
+ * into the new table.
+ *
+ *
+ * Kern Sibbald, July MMIII
+ *
+ * Version $Id$
+ *
+ */
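+/*
+ * Illustrative sketch only (not the actual hash_index() code): the
+ * bucket index described above is computed roughly like this, where
+ * PRIME is a stand-in name for the prime multiplier, and the table
+ * size is a power of two so that mask can be buckets-1:
+ *
+ *    uint32_t hash = 0;
+ *    for (const char *p = key; *p; p++) {
+ *       hash += (hash << 3) + (uint32_t)*p;   // accumulate key bytes
+ *    }
+ *    hash *= PRIME;                     // "randomize" with a big prime
+ *    index = (hash >> rshift) & mask;   // shift down, mask = binary mod
+ */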
#include "bacula.h"
* htable
*/
+#ifdef BIG_MALLOC
+/*
+ * This subroutine gets a big buffer and chains it into the buffer list.
+ */
+void htable::malloc_buf(int size)
+{
+ struct h_mem *hmem;
+
+ hmem = (struct h_mem *)malloc(size);
+ total_size += size;
+ blocks++;
+ hmem->next = this->mem;
+ this->mem = hmem;
+ hmem->mem = mem->first;
+ hmem->rem = (char *)hmem + size - hmem->mem;
+ Dmsg2(200, "malloc buf size=%d rem=%d\n", size, hmem->rem);
+}
+
+char *htable::hash_alloc(int size)
+{
+ char *buf;
+ int asize = BALIGN(size);
+
+ if (mem->rem < asize) {
+ uint32_t mb_size;
+ if (total_size >= 1000000) {
+ mb_size = 1000000;
+ } else {
+ mb_size = 100000;
+ }
+ malloc_buf(mb_size);
+ }
+ mem->rem -= asize;
+ buf = mem->mem;
+ mem->mem += asize;
+ return buf;
+}
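+/*
+ * Worked example with made-up numbers: if the current block has
+ * rem=100000 and a 37 byte entry is requested, BALIGN rounds the
+ * request up (say to 40), rem drops to 99960, the caller receives
+ * the old bump pointer, and mem->mem advances 40 bytes to the next
+ * free spot.  Only when rem cannot cover the aligned request is a
+ * fresh 100KB (or, once total_size passes 1MB, 1MB) block malloced.
+ */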
+
+
+/* This routine frees all the big memory buffers */
+void htable::hash_free()
+{
+ struct h_mem *hmem, *rel;
+
+ for (hmem=mem; hmem; ) {
+ rel = hmem;
+ hmem = hmem->next;
+ free(rel);
+ }
+}
+#endif
+
+
/*
* Create hash of key, stored in hash then
* create and return the pseudo random bucket index
memset(table, 0, buckets * sizeof(hlink *));
walkptr = NULL;
walk_index = 0;
+#ifdef BIG_MALLOC
+ mem = NULL;
+ malloc_buf(1000000); /* ***FIXME*** base off of size */
+#endif
}
uint32_t htable::size()
hlink link;
};
-#define NITEMS 10000
+#define NITEMS 1000000
int main()
{
Dmsg1(000, "Inserting %d items\n", NITEMS);
for (int i=0; i<NITEMS; i++) {
- sprintf(mkey, "This is htable item %d", i);
+ int len;
+ len = sprintf(mkey, "This is htable item %d", i) + 1;
+
+#ifdef BIG_MALLOC
+ jcr = (MYJCR *)jcrtbl->hash_alloc(sizeof(MYJCR));
+ jcr->key = (char *)jcrtbl->hash_alloc(len);
+#else
jcr = (MYJCR *)malloc(sizeof(MYJCR));
+ jcr->key = (char *)malloc(len);
+#endif
+ memcpy(jcr->key, mkey, len);
Dmsg2(100, "link=0x%x jcr=0x%x\n", (unsigned)&jcr->link, (unsigned)jcr);
- jcr->key = bstrdup(mkey);
jcrtbl->insert(jcr->key, jcr);
if (i == 10) {
printf("Walk the hash table:\n");
foreach_htable (jcr, jcrtbl) {
// printf("htable item = %s\n", jcr->key);
+#ifndef BIG_MALLOC
free(jcr->key);
+#endif
count++;
}
printf("Got %d items -- %s\n", count, count==NITEMS?"OK":"***ERROR***");
-/*
- *
- * Written by Kern Sibbald, MMIV
- *
- * Version $Id$
- */
/*
Bacula® - The Network Backup Solution
- Copyright (C) 2004-2006 Free Software Foundation Europe e.V.
+ Copyright (C) 2004-2008 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
(FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
Switzerland, email:ftf@fsfeurope.org.
*/
+/*
+ *
+ * Written by Kern Sibbald, MMIV
+ *
+ * Version $Id$
+ */
/* ========================================================================
*
*
*/
+/*
+ * BIG_MALLOC provides a large malloc service to htable.
+ * It is not yet fully implemented and not yet working, so it
+ * remains disabled here.
+ */
+//#define BIG_MALLOC
+
/*
* Loop var through each member of table
*/
uint32_t hash; /* hash for this key */
};
+struct h_mem {
+ struct h_mem *next; /* next buffer */
+ int rem; /* remaining bytes */
+ char *mem; /* memory pointer */
+ char first[1]; /* first byte */
+};
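+/*
+ * Layout sketch (inferred from the fields above): each h_mem header
+ * sits at the front of the big malloced chunk and hands out the bytes
+ * that follow it; "mem" bumps forward through that space, "rem" counts
+ * what is left, and "next" chains the chunks so hash_free() can walk
+ * and release them all:
+ *
+ *    [h_mem hdr | entries... ] -> [h_mem hdr | entries... ] -> NULL
+ */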
+
class htable : public SMARTALLOC {
hlink **table; /* hash table */
int loffset; /* link offset in item */
uint32_t rshift; /* amount to shift down */
hlink *walkptr; /* table walk pointer */
uint32_t walk_index; /* table walk index */
+ uint32_t total_size; /* total bytes malloced */
+ uint32_t blocks; /* blocks malloced */
+#ifdef BIG_MALLOC
+ struct h_mem *mem; /* malloced memory blocks */
+ void malloc_buf(int size); /* Get a big buffer */
+#endif
void hash_index(char *key); /* produce hash key,index */
void grow_table(); /* grow the table */
public:
void destroy();
void stats(); /* print stats about the table */
uint32_t size(); /* return size of table */
+#ifdef BIG_MALLOC
+ char *hash_alloc(int size); /* malloc bytes for a hash entry */
+ void hash_free(); /* free all hash allocated bytes */
+#endif
};
Switzerland, email:ftf@fsfeurope.org.
*/
/*
- * A simple pipe plugin for Bacula
+ * A simple pipe plugin for the Bacula File Daemon
*
* Kern Sibbald, October 2007
*
extern "C" {
#endif
-#define PLUGIN_LICENSE "GPL"
+#define PLUGIN_LICENSE "GPLv2"
#define PLUGIN_AUTHOR "Kern Sibbald"
#define PLUGIN_DATE "January 2008"
#define PLUGIN_VERSION "1"
-#define PLUGIN_DESCRIPTION "Test File Daemon Plugin"
+#define PLUGIN_DESCRIPTION "Pipe File Daemon Plugin"
/* Forward referenced functions */
static bRC newPlugin(bpContext *ctx);
static bFuncs *bfuncs = NULL;
static bInfo *binfo = NULL;
+/* Plugin Information block */
static pInfo pluginInfo = {
sizeof(pluginInfo),
PLUGIN_INTERFACE_VERSION,
PLUGIN_DESCRIPTION,
};
+/* Plugin entry points for Bacula */
static pFuncs pluginFuncs = {
sizeof(pluginFuncs),
PLUGIN_INTERFACE_VERSION,
setFileAttributes
};
+/*
+ * Plugin private context
+ */
struct plugin_ctx {
boffset_t offset;
- FILE *fd;
- bool backup;
- char *cmd;
- char *fname;
- char *reader;
- char *writer;
+ FILE *fd; /* pipe file descriptor */
+ bool backup; /* set for backup (not needed) */
+ char *cmd; /* plugin command line */
+ char *fname; /* filename to "backup/restore" */
+ char *reader; /* reader program for backup */
+ char *writer; /* writer program for restore */
};
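+/*
+ * These fields are presumably filled in from the plugin command line,
+ * which follows the <plugin>:<name-space>:<read command>:<write command>
+ * form noted in handlePluginEvent() below.  A purely hypothetical
+ * example:
+ *
+ *    Plugin = "bpipe:/MYSQL/dump.sql:mysqldump mydb:mysql mydb"
+ *
+ * where fname would be /MYSQL/dump.sql, reader the command run at
+ * backup time, and writer the command run at restore time.
+ */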
+/*
+ * loadPlugin() and unloadPlugin() are entry points that are
+ * exported, so that Bacula can call these two entry points
+ * directly; they are common to all Bacula plugins.
+ */
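+/*
+ * Conceptual sketch (an assumption, not Bacula daemon source): the
+ * File Daemon typically locates these two exported symbols with
+ * dlopen()/dlsym(), roughly
+ *
+ *    void *pl = dlopen("bpipe-fd.so", RTLD_NOW);
+ *    load_fn load = (load_fn)dlsym(pl, "loadPlugin");
+ *    load(&binfo, &bfuncs, &pinfo, &pfuncs);
+ *
+ * where load_fn is a hypothetical typedef matching the loadPlugin()
+ * signature below.
+ */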
+/*
+ * External entry point called by Bacula to "load" the plugin
+ */
bRC loadPlugin(bInfo *lbinfo, bFuncs *lbfuncs, pInfo **pinfo, pFuncs **pfuncs)
{
bfuncs = lbfuncs; /* set Bacula funct pointers */
binfo = lbinfo;
-// printf("bpipe-fd: Loaded: size=%d version=%d\n", bfuncs->size, bfuncs->version);
-
*pinfo = &pluginInfo; /* return pointer to our info */
*pfuncs = &pluginFuncs; /* return pointer to our functions */
return bRC_OK;
}
+/*
+ * External entry point to unload the plugin
+ */
bRC unloadPlugin()
{
// printf("bpipe-fd: Unloaded\n");
return bRC_OK;
}
+/*
+ * The following entry points are accessed through the function
+ * pointers we supplied to Bacula. Each plugin type (dir, fd, sd)
+ * has its own set of entry points that the plugin must define.
+ */
+/*
+ * Create a new instance of the plugin, i.e. allocate our private storage
+ */
static bRC newPlugin(bpContext *ctx)
{
struct plugin_ctx *p_ctx = (struct plugin_ctx *)malloc(sizeof(struct plugin_ctx));
return bRC_OK;
}
+/*
+ * Free a plugin instance, i.e. release our private storage
+ */
static bRC freePlugin(bpContext *ctx)
{
struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext;
if (p_ctx->cmd) {
- free(p_ctx->cmd);
+ free(p_ctx->cmd); /* free any allocated command string */
}
- free(p_ctx);
+ free(p_ctx); /* free our private context */
return bRC_OK;
}
+/*
+ * Return some plugin value (none defined)
+ */
static bRC getPluginValue(bpContext *ctx, pVariable var, void *value)
{
return bRC_OK;
}
+/*
+ * Set a plugin value (none defined)
+ */
static bRC setPluginValue(bpContext *ctx, pVariable var, void *value)
{
return bRC_OK;
}
+/*
+ * Handle an event that was generated in Bacula
+ */
static bRC handlePluginEvent(bpContext *ctx, bEvent *event, void *value)
{
struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext;
break;
case bEventStartRestoreJob:
- printf("bpipe-fd: StartRestoreJob\n");
break;
case bEventEndRestoreJob:
- printf("bpipe-fd: EndRestoreJob\n");
break;
/* Plugin command e.g. plugin = <plugin-name>:<name-space>:read command:write command */
default:
printf("bpipe-fd: unknown event=%d\n", event->eventType);
}
-// bfuncs->getBaculaValue(ctx, bVarFDName, (void *)&name);
-// printf("FD Name=%s\n", name);
-// bfuncs->JobMessage(ctx, __FILE__, __LINE__, 1, 0, "JobMesssage message");
-// bfuncs->DebugMessage(ctx, __FILE__, __LINE__, 1, "DebugMesssage message");
return bRC_OK;
}
+/*
+ * Start the backup of a specific file
+ */
static bRC startBackupFile(bpContext *ctx, struct save_pkt *sp)
{
struct plugin_ctx *p_ctx = (struct plugin_ctx *)ctx->pContext;
return bRC_OK;
}
+/*
+ * Done with backup of this file
+ */
static bRC endBackupFile(bpContext *ctx)
{
/*
* We would return bRC_More if we wanted startBackupFile to be
* called again to backup another file
*/
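+ /*
+  * Hypothetical sketch: a plugin that backs up several virtual
+  * files could keep a counter in its private context (an assumed
+  * "files_left" field) and do
+  *
+  *    if (p_ctx->files_left > 0) {
+  *       return bRC_More;   // Bacula calls startBackupFile() again
+  *    }
+  */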
-// printf("bpipe-fd: endBackupFile\n");
return bRC_OK;
}
/*
- * Do actual I/O
+ * Bacula is calling us to do the actual I/O
*/
static bRC pluginIO(bpContext *ctx, struct io_pkt *io)
{
io->io_errno = 0;
switch(io->func) {
case IO_OPEN:
- printf("bpipe-fd: IO_OPEN\n");
+// printf("bpipe-fd: IO_OPEN\n");
if (io->flags & (O_CREAT | O_WRONLY)) {
p_ctx->fd = popen(p_ctx->writer, "w");
printf("bpipe-fd: IO_OPEN writer=%s\n", p_ctx->writer);
if (!p_ctx->fd) {
io->io_errno = errno;
bfuncs->JobMessage(ctx, __FILE__, __LINE__, M_FATAL, 0,
- "bpipe-fd: writer=%s failed: ERR=%d\n", p_ctx->writer, errno);
+ "Open pipe writer=%s failed: ERR=%s\n", p_ctx->writer, strerror(errno));
return bRC_Error;
}
} else {
p_ctx->fd = popen(p_ctx->reader, "r");
- printf("bpipe-fd: IO_OPEN reader=%s\n", p_ctx->reader);
+// printf("bpipe-fd: IO_OPEN reader=%s\n", p_ctx->reader);
if (!p_ctx->fd) {
io->io_errno = errno;
bfuncs->JobMessage(ctx, __FILE__, __LINE__, M_FATAL, 0,
- "bpipe-fd: reader=%s failed: ERR=%d\n", p_ctx->reader, errno);
+ "Open pipe reader=%s failed: ERR=%s\n", p_ctx->reader, strerror(errno));
return bRC_Error;
}
}
case IO_READ:
if (!p_ctx->fd) {
- bfuncs->JobMessage(ctx, __FILE__, __LINE__, M_FATAL, 0, "NULL read FD\n");
+ bfuncs->JobMessage(ctx, __FILE__, __LINE__, M_FATAL, 0, "Logic error: NULL read FD\n");
return bRC_Error;
}
io->status = fread(io->buf, 1, io->count, p_ctx->fd);
- printf("bpipe-fd: IO_READ buf=%p len=%d\n", io->buf, io->status);
+// printf("bpipe-fd: IO_READ buf=%p len=%d\n", io->buf, io->status);
if (io->status == 0 && ferror(p_ctx->fd)) {
- bfuncs->JobMessage(ctx, __FILE__, __LINE__, M_FATAL, 0, "Pipe read error\n");
- printf("Error reading pipe\n");
+ bfuncs->JobMessage(ctx, __FILE__, __LINE__, M_FATAL, 0,
+ "Pipe read error: ERR=%s\n", strerror(errno));
+// printf("Error reading pipe\n");
return bRC_Error;
}
-// printf("status=%d\n", io->status);
break;
case IO_WRITE:
if (!p_ctx->fd) {
- bfuncs->JobMessage(ctx, __FILE__, __LINE__, M_FATAL, 0, "NULL write FD\n");
+ bfuncs->JobMessage(ctx, __FILE__, __LINE__, M_FATAL, 0, "Logic error: NULL write FD\n");
return bRC_Error;
}
- printf("bpipe-fd: IO_WRITE fd=%p buf=%p len=%d\n", p_ctx->fd, io->buf, io->count);
+// printf("bpipe-fd: IO_WRITE fd=%p buf=%p len=%d\n", p_ctx->fd, io->buf, io->count);
io->status = fwrite(io->buf, 1, io->count, p_ctx->fd);
- printf("bpipe-fd: IO_WRITE buf=%p len=%d\n", io->buf, io->status);
+// printf("bpipe-fd: IO_WRITE buf=%p len=%d\n", io->buf, io->status);
if (io->status == 0 && ferror(p_ctx->fd)) {
- bfuncs->JobMessage(ctx, __FILE__, __LINE__, M_FATAL, 0, "Pipe write error\n");
- printf("Error writing pipe\n");
+ bfuncs->JobMessage(ctx, __FILE__, __LINE__, M_FATAL, 0,
+ "Pipe write error: ERR=%s\n", strerror(errno));
+// printf("Error writing pipe\n");
return bRC_Error;
}
-// printf("status=%d\n", io->status);
break;
case IO_CLOSE:
if (!p_ctx->fd) {
- bfuncs->JobMessage(ctx, __FILE__, __LINE__, M_FATAL, 0, "NULL FD\n");
+ bfuncs->JobMessage(ctx, __FILE__, __LINE__, M_FATAL, 0, "Logic error: NULL FD\n");
return bRC_Error;
}
io->status = pclose(p_ctx->fd);
- printf("bpipe-fd: IO_CLOSE\n");
break;
case IO_SEEK:
static bRC startRestoreFile(bpContext *ctx, const char *cmd)
{
- printf("bpipe-fd: startRestoreFile cmd=%s\n", cmd);
+// printf("bpipe-fd: startRestoreFile cmd=%s\n", cmd);
return bRC_OK;
}
static bRC endRestoreFile(bpContext *ctx)
{
- printf("bpipe-fd: endRestoreFile\n");
+// printf("bpipe-fd: endRestoreFile\n");
return bRC_OK;
}
static bRC createFile(bpContext *ctx, struct restore_pkt *rp)
{
- printf("bpipe-fd: createFile\n");
+// printf("bpipe-fd: createFile\n");
return bRC_OK;
}
static bRC setFileAttributes(bpContext *ctx, struct restore_pkt *rp)
{
- printf("bpipe-fd: setFileAttributes\n");
+// printf("bpipe-fd: setFileAttributes\n");
return bRC_OK;
}