}
/*
 * TODO: use bigbuffer from htable
 */
int accurate_cmd(JCR *jcr)
int len;
struct stat statp;
int32_t LinkFIc;
- uint64_t nb;
+ int32_t nb;
CurFile *elt=NULL;
char *lstat;
- if (jcr->accurate==false || job_canceled(jcr) || jcr->JobLevel==L_FULL) {
+ if (!jcr->accurate || job_canceled(jcr) || jcr->JobLevel==L_FULL) {
return true;
}
dir->fsend(_("2991 Bad accurate command\n"));
return false;
}
+ Dmsg2(200, "nb=%d msg=%s\n", nb, dir->msg);
jcr->file_list = (htable *)malloc(sizeof(htable));
jcr->file_list->init(elt, &elt->link, nb);
*/
/* get current files */
while (dir->recv() >= 0) {
- len = strlen(dir->msg);
- if ((len+1) < dir->msglen) {
-// elt = (CurFile *)malloc(sizeof(CurFile));
-// elt->fname = (char *) malloc(dir->msglen+1);
-
+ len = strlen(dir->msg) + 1;
+ if (len < dir->msglen) {
/* we store CurFile, fname and ctime/mtime in the same chunk */
- elt = (CurFile *)malloc(sizeof(CurFile)+len+1);
- elt->fname = (char *) elt+sizeof(CurFile);
+ elt = (CurFile *)jcr->file_list->hash_malloc(sizeof(CurFile)+len);
+ elt->fname = (char *)elt+sizeof(CurFile);
strcpy(elt->fname, dir->msg);
- lstat = dir->msg + len + 1;
+ lstat = dir->msg + len;
decode_stat(lstat, &statp, &LinkFIc); /* decode catalog stat */
elt->ctime = statp.st_ctime;
elt->mtime = statp.st_mtime;
Dmsg2(500, "add fname=%s lstat=%s\n", elt->fname, lstat);
}
}
+
+#ifdef DEBUG
extern void *start_heap;
char b1[50], b2[50], b3[50], b4[50], b5[50];
Dmsg5(1," Heap: heap=%s smbytes=%s max_bytes=%s bufs=%s max_bufs=%s\n",
- edit_uint64_with_commas((char *)sbrk(0)-(char *)start_heap, b1),
- edit_uint64_with_commas(sm_bytes, b2),
- edit_uint64_with_commas(sm_max_bytes, b3),
- edit_uint64_with_commas(sm_buffers, b4),
- edit_uint64_with_commas(sm_max_buffers, b5));
+ edit_uint64_with_commas((char *)sbrk(0)-(char *)start_heap, b1),
+ edit_uint64_with_commas(sm_bytes, b2),
+ edit_uint64_with_commas(sm_max_bytes, b3),
+ edit_uint64_with_commas(sm_buffers, b4),
+ edit_uint64_with_commas(sm_max_buffers, b5));
// jcr->file_list->stats();
+#endif
return true;
}
int stream = STREAM_UNIX_ATTRIBUTES;
- if (jcr->accurate == false || jcr->JobLevel == L_FULL) {
+ if (!jcr->accurate || jcr->JobLevel == L_FULL) {
goto bail_out;
}
} else {
buf_size = 0; /* use default */
}
- if (!bnet_set_buffer_size(sd, buf_size, BNET_SETBUF_WRITE)) {
+ if (!sd->set_buffer_size(buf_size, BNET_SETBUF_WRITE)) {
set_jcr_job_status(jcr, JS_ErrorTerminated);
Jmsg(jcr, M_FATAL, 0, _("Cannot set buffer size FD->SD.\n"));
return false;
jcr->buf_size = sd->msglen;
/* Adjust for compression so that output buffer is
 * 12 bytes + 0.1% larger than input buffer plus 18 bytes.
 * This gives a bit extra plus room for the sparse addr if any.
 * Note, we adjust the read size to be smaller so that the
 * same output buffer can be used without growing it.
 *
 * The zlib compression workset is initialized here to minimize
 * the "per file" load. The jcr member is only set, if the init
 * was successful.
 */
jcr->compress_buf_size = jcr->buf_size + ((jcr->buf_size+999) / 1000) + 30;
jcr->compress_buf = get_memory(jcr->compress_buf_size);
break;
case FT_NOACCESS: {
   berrno be;
   /* File could not be accessed; quote the name so embedded blanks
    * in the filename are visible in the job report. */
   Jmsg(jcr, M_NOTSAVED, 0, _(" Could not access \"%s\": ERR=%s\n"), ff_pkt->fname,
      be.bstrerror(ff_pkt->ff_errno));
   jcr->Errors++;
   return 1;
}
case FT_NOFOLLOW: {
   berrno be;
   /* Symlink could not be followed; quote the name so embedded blanks
    * in the filename are visible in the job report. */
   Jmsg(jcr, M_NOTSAVED, 0, _(" Could not follow link \"%s\": ERR=%s\n"),
      ff_pkt->fname, be.bstrerror(ff_pkt->ff_errno));
   jcr->Errors++;
   return 1;
}
case FT_NOSTAT: {
berrno be;
- Jmsg(jcr, M_NOTSAVED, 0, _(" Could not stat %s: ERR=%s\n"), ff_pkt->fname,
+ Jmsg(jcr, M_NOTSAVED, 0, _(" Could not stat \"%s\": ERR=%s\n"), ff_pkt->fname,
be.bstrerror(ff_pkt->ff_errno));
jcr->Errors++;
return 1;
return 1;
case FT_NOOPEN: {
berrno be;
- Jmsg(jcr, M_NOTSAVED, 0, _(" Could not open directory %s: ERR=%s\n"),
+ Jmsg(jcr, M_NOTSAVED, 0, _(" Could not open directory \"%s\": ERR=%s\n"),
ff_pkt->fname, be.bstrerror(ff_pkt->ff_errno));
jcr->Errors++;
return 1;
if (bopen(&ff_pkt->bfd, ff_pkt->fname, O_RDONLY | O_BINARY | noatime, 0) < 0) {
ff_pkt->ff_errno = errno;
berrno be;
- Jmsg(jcr, M_NOTSAVED, 0, _(" Cannot open %s: ERR=%s.\n"), ff_pkt->fname,
+ Jmsg(jcr, M_NOTSAVED, 0, _(" Cannot open \"%s\": ERR=%s.\n"), ff_pkt->fname,
be.bstrerror());
jcr->Errors++;
if (tid) {
if (!bopen_rsrc(&ff_pkt->bfd, ff_pkt->fname, O_RDONLY | O_BINARY, 0) < 0) {
ff_pkt->ff_errno = errno;
berrno be;
- Jmsg(jcr, M_NOTSAVED, -1, _(" Cannot open resource fork for %s: ERR=%s.\n"),
+ Jmsg(jcr, M_NOTSAVED, -1, _(" Cannot open resource fork for \"%s\": ERR=%s.\n"),
ff_pkt->fname, be.bstrerror());
jcr->Errors++;
if (is_bopen(&ff_pkt->bfd)) {
Dmsg2(200, "malloc buf size=%d rem=%d\n", size, hmem->rem);
}
-char *htable::hash_alloc(int size)
+/* This routine frees the whole tree */
+void htable::hash_free()
{
+ struct h_mem *hmem, *rel;
+
+ for (hmem=mem; hmem; ) {
+ rel = hmem;
+ hmem = hmem->next;
+ free(rel);
+ }
+}
+
+#endif
+
+char *htable::hash_malloc(int size)
+{
+#ifdef BIG_MALLOC
char *buf;
int asize = BALIGN(size);
buf = mem->mem;
mem->mem += asize;
return buf;
+#else
+ total_size += size;
+ blocks++;
+ return (char *)malloc(size);
+#endif
}
-/* This routine frees the whole tree */
-void htable::hash_free()
-{
- struct h_mem *hmem, *rel;
-
- for (hmem=mem; hmem; ) {
- rel = hmem;
- hmem = hmem->next;
- free(rel);
- }
-}
-#endif
/*
Dmsg2(100, "Leave hash_index hash=0x%x index=%d\n", hash, index);
}
/*
 * tsize is the estimated number of entries in the hash table
 */
htable::htable(void *item, void *link, int tsize)
{
init(item, link, tsize);
void htable::init(void *item, void *link, int tsize)
{
int pwr;
+
+ if (tsize < 31) {
+ tsize = 31;
+ }
tsize >>= 2;
for (pwr=0; tsize; pwr++) {
tsize >>= 1;
memset(table, 0, buckets * sizeof(hlink *));
walkptr = NULL;
walk_index = 0;
+ total_size = 0;
+ blocks = 0;
#ifdef BIG_MALLOC
mem = NULL;
- malloc_buf(1000000); /* ***FIXME*** base off of size */
+ malloc_buf(1000000); /* ***FIXME*** need variable or some estimate */
#endif
}
* Take each hash link and walk down the chain of items
* that hash there counting them (i.e. the hits),
* then report that number.
 * Obviously, the more hits in a chain, the more time
* it takes to reference them. Empty chains are not so
* hot either -- as it means unused or wasted space.
*/
for (i=0; i < MAX_COUNT; i++) {
printf("%2d: %d\n",i, hits[i]);
}
+ printf("buckets=%d num_items=%d max_items=%d\n", buckets, num_items, max_items);
printf("max hits in a bucket = %d\n", max);
+#ifdef BIG_MALLOC
+ printf("total bytes malloced = %d\n", total_size);
+ printf("total blocks malloced = %d\n", blocks);
+#endif
}
void htable::grow_table()
/* Destroy the table and its contents */
void htable::destroy()
{
+#ifdef BIG_MALLOC
+ hash_free();
+#else
void *ni;
void *li = first();
free(li);
li=ni;
}
+#endif
free(table);
table = NULL;
hlink link;
};
/* Number of items the test driver inserts into the hash table */
#define NITEMS 5000000
int main()
{
int len;
len = sprintf(mkey, "This is htable item %d", i) + 1;
-#ifdef BIG_MALLOC
- jcr = (MYJCR *)jcrtbl->hash_alloc(sizeof(MYJCR));
- jcr->key = (char *)jcrtbl->hash_alloc(len);
-#else
- jcr = (MYJCR *)malloc(sizeof(MYJCR));
- jcr->key = (char *)malloc(len);
-#endif
+ jcr = (MYJCR *)jcrtbl->hash_malloc(sizeof(MYJCR));
+ jcr->key = (char *)jcrtbl->hash_malloc(len);
memcpy(jcr->key, mkey, len);
Dmsg2(100, "link=%p jcr=%p\n", jcr->link, jcr);