2003-07-xx Version 1.31 Beta xxJul03
+- In restore print only volumes that will actually be used.
+- Fix bextract -- add appropriate breaks in new case code.
- Add a new test -- bsr-opt-test for testing bsr optimization. As usual,
it pointed out a bug where the directory tree handling code destroyed
the restore arg list.
Kern's ToDo List
- 27 June 2003
+ 6 July 2003
Documentation to do: (any release a little bit at a time)
- Document running a test version.
ERR=The system cannot find the path specified.
- Finish Windows implementation (add setting of correct type on restore,
add Portable Data Format flag).
-- Remove multiple simultaneous devices code in SD.
-- Check that Block number in JobMedia are correct.
+- Maybe remove multiple simultaneous devices code in SD.
- Increment DB version prior to releasing.
- Turn off FULL_DEBUG prior to releasing.
- On Windows with very long path names, it may be impossible to create
  the file. We must cd into the directory then create the file without the
  full path name.
- Move JobFiles and JobBytes to SD rather than FD -- more correct.
-- Add client name to cram-md5 challenge so Director can immediately
- verify if it is the correct client.
- lstat() is not going to work on Win32 for testing date.
- Implement a Recycle command
- Something is not right in last block of fill command.
-- Implement List Volume Job=xxx or List scheduled volumes or
- Status Director
+- Implement List Volume Job=xxx or List scheduled volumes or Status Director
- Check if Incremental is working correctly when it looks for the previous Job
(Phil's problem).
-- Add next Volume to be used to status output.
-- The bsr for Dan's job has file indexes covering the whole range rather
- than only the range contained on the volume.
- Constrain FileIndex to be within range for Volume.
-- Pass prefix_links to FD.
For 1.32:
+- Add client name to cram-md5 challenge so Director can immediately
+ verify if it is the correct client.
- Implement ClientRunBeforeJob and ClientRunAfterJob.
- Implement new alist in FileSet scanning.
- Add JobLevel in FD status (but make sure it is defined).
- Linking with mysqlclient_r may require -lssl -lcrypto
- Document Heart beat code
- Non-fatal errors are not counted correctly (attribs.c:277).
-
+- Check that Block number in JobMedia are correct.
+- The bsr for Dan's job has file indexes covering the whole range rather
+ than only the range contained on the volume.
+ Constrain FileIndex to be within range for Volume.
+- Pass prefix_links to FD.
+- Fix restore list of volumes if Volume not selected.
+
start_prompt(ua, "");
for (RBSR *nbsr=bsr; nbsr; nbsr=nbsr->next) {
for (int i=0; i < nbsr->VolCount; i++) {
- add_prompt(ua, nbsr->VolParams[i].VolumeName);
+ if (nbsr->VolParams[i].VolumeName[0]) {
+ add_prompt(ua, nbsr->VolParams[i].VolumeName);
+ }
}
}
for (int i=0; i < ua->num_prompts; i++) {
for (int i=0; i < bsr->VolCount; i++) {
if (!is_volume_selected(bsr->fi, bsr->VolParams[i].FirstIndex,
bsr->VolParams[i].LastIndex)) {
+ bsr->VolParams[i].VolumeName[0] = 0; /* zap VolumeName */
continue;
}
fprintf(fd, "Volume=\"%s\"\n", bsr->VolParams[i].VolumeName);
case STREAM_WIN32_GZIP_DATA:
#ifdef HAVE_LIBZ
if (extract) {
- ser_declare;
uLong compress_len;
- uint64_t faddr;
- char ec1[50];
int stat;
if (stream == STREAM_SPARSE_GZIP_DATA) {
+ ser_declare;
+ uint64_t faddr;
+ char ec1[50];
wbuf = sd->msg + SPARSE_FADDR_SIZE;
wsize = sd->msglen - SPARSE_FADDR_SIZE;
ser_begin(sd->msg, SPARSE_FADDR_SIZE);
if (!is_bopen(&bfd)) {
Emsg0(M_ERROR, 0, _("Logic error output file should be open but is not.\n"));
}
- extract = FALSE;
set_attributes(jcr, attr, &bfd);
+ extract = FALSE;
}
if (!unpack_attributes_record(jcr, rec->Stream, rec->data, attr)) {
Jmsg(jcr, M_ERROR, 0, _("%s stream not supported on this Client.\n"),
stream_to_ascii(attr->data_stream));
}
+ extract = FALSE;
return;
}
break;
}
}
+ break;
/* Data stream and extracting */
case STREAM_FILE_DATA:
}
fileAddr += wsize;
}
-
+ break;
/* GZIP data stream */
case STREAM_GZIP_DATA:
case STREAM_WIN32_GZIP_DATA:
#ifdef HAVE_LIBZ
if (extract) {
- uLongf compress_len;
+ uLong compress_len;
int stat;
if (rec->Stream == STREAM_SPARSE_GZIP_DATA) {
ser_declare;
uint64_t faddr;
+ char ec1[50];
wbuf = rec->data + SPARSE_FADDR_SIZE;
wsize = rec->data_len - SPARSE_FADDR_SIZE;
ser_begin(rec->data, SPARSE_FADDR_SIZE);
if (fileAddr != faddr) {
fileAddr = faddr;
if (blseek(&bfd, (off_t)fileAddr, SEEK_SET) < 0) {
- Emsg2(M_ERROR, 0, _("Seek error on %s: %s\n"),
- attr->ofname, strerror(errno));
+ Emsg3(M_ERROR, 0, _("Seek to %s error on %s: ERR=%s\n"),
+ edit_uint64(fileAddr, ec1), attr->ofname, berror(&bfd));
+ extract = FALSE;
+ return;
}
}
} else {
compress_len = compress_buf_size;
if ((stat=uncompress((Bytef *)compress_buf, &compress_len,
(const Bytef *)wbuf, (uLong)wsize) != Z_OK)) {
- Emsg1(M_ERROR_TERM, 0, _("Uncompression error. ERR=%d\n"), stat);
+ Emsg1(M_ERROR, 0, _("Uncompression error. ERR=%d\n"), stat);
+ extract = FALSE;
+ return;
}
Dmsg2(100, "Write uncompressed %d bytes, total before write=%d\n", compress_len, total);
if ((uLongf)bwrite(&bfd, compress_buf, (size_t)compress_len) != compress_len) {
Pmsg0(0, "===Write error===\n");
- Emsg2(M_ERROR_TERM, 0, _("Write error on %s: %s\n"),
+ Emsg2(M_ERROR, 0, _("Write error on %s: %s\n"),
attr->ofname, strerror(errno));
+ extract = FALSE;
+ return;
}
total += compress_len;
fileAddr += compress_len;
return;
}
#endif
+ break;
case STREAM_MD5_SIGNATURE:
case STREAM_SHA1_SIGNATURE:
if (!is_bopen(&bfd)) {
Emsg0(M_ERROR, 0, "Logic error output file should be open but is not.\n");
}
- extract = FALSE;
set_attributes(jcr, attr, &bfd);
+ extract = FALSE;
}
Jmsg(jcr, M_ERROR, 0, _("Unknown stream=%d ignored. This shouldn't happen!\n"),
rec->Stream);