Kern's ToDo List
- 20 June 2006
+ 16 July 2006
Major development:
Project Developer
Volume marked as purged.
- Print warning message if LANG environment variable does not specify
UTF-8.
+- Add LocationId to update volume
+- Add LocationLog
+ LogId
+ Date
+ User text
+ MediaId
+ LocationId
+ NewState???
+- Add Comment to Media record
- New dot commands from Arno.
.update volume [enabled|disabled|*see below]
+ > However, I could easily imagine an option to "update slots" that says
+ > "enable=yes|no" that would automatically enable or disable all the Volumes
+ > found in the autochanger. This will permit the user to optionally mark all
+ > the Volumes in the magazine disabled prior to taking them offsite, and mark
+ > them all enabled when bringing them back on site. Coupled with the options
+ > to the slots keyword, you can apply the enable/disable to any or all volumes.
.show device=xxx lists information from one storage device, including
devices (I'm not even sure that information exists in the DIR...)
.move eject device=xxx mostly the same as 'unmount xxx' but perhaps with
target slot. The catalog should be updated accordingly.
.move transfer device=xxx fromslot=yyy toslot=zzz
+
- Given all the problems with FIFOs, I think the solution is to do something a
little different, though I will look at the code and see if there is not some
simple solution (i.e. some bug that was introduced). What might be a better
Kern Sibbald
General:
+19July06
+- Add additional fields as specified by Arno to LocationLog.
+- Add comment field to the Media record.
+- Add Time field to the Log record.
+- Correct migration SQL (thanks to Bill Moran) so that it
+ runs with PostgreSQL also.
+- Add spooling/despooling info in status output of SD.
+17July06
+- Spend a lot of time integrating mkcdrec with the rescue disk.
+- Add VOLMGMT message class for volume management messages (none yet).
+- Add CATALOG as a destination. It goes into the LOG table.
+- Implement the Log table in the DB.
+- Implement the Location Log table in the DB (for user use).
+- At Eric's request add a run_scripts() just after blast_data in
+ src/filed/job.c
10Jul06
- Add Enabled column to Location, correct some typos in DB schemas.
- Correct bug I introduced into RunScripts enum.
PRIMARY KEY(LocationId)
);
+-- LocationLog: audit trail of Volume location/status changes (one row per change)
+CREATE TABLE LocationLog (
+ LocLogId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
+ Date DATETIME DEFAULT 0,
+ Comment BLOB NOT NULL,
+ MediaId INTEGER UNSIGNED DEFAULT 0 REFERENCES Media,
+ LocationId INTEGER UNSIGNED DEFAULT 0 REFERENCES Location,
+ NewVolStatus ENUM('Full', 'Archive', 'Append', 'Recycle', 'Purged',
+ 'Read-Only', 'Disabled', 'Error', 'Busy', 'Used', 'Cleaning', 'Scratch') NOT NULL,
+ NewEnabled TINYINT,
+ PRIMARY KEY(LocLogId)
+);
+
+
#
CREATE TABLE FileSet (
FileSetId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
InitialWrite DATETIME DEFAULT 0,
ScratchPoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool,
RecyclePoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool,
+ Comment BLOB,
PRIMARY KEY(MediaId),
INDEX (PoolId)
);
);
CREATE TABLE Log (
- JobId INTEGER UNSIGNED DEFAULT 0 REFERENCES JobId,
+ LogId INTEGER UNSIGNED AUTO_INCREMENT,
+ JobId INTEGER UNSIGNED DEFAULT 0 REFERENCES Job,
+ Time DATETIME DEFAULT 0,
LogText BLOB NOT NULL,
+ PRIMARY KEY(LogId),
INDEX (JobId)
);
initialwrite timestamp without time zone,
scratchpoolid integer default 0,
recyclepoolid integer default 0,
+ comment text,
primary key (mediaid)
);
CREATE TABLE Log
(
- JobId serial not null,
+ LogId serial not null,
+ JobId integer not null,
+ Time timestamp without time zone,
LogText text not null,
+ primary key (LogId)
);
-
create index log_name_idx on Log (JobId);
+-- LocationLog: audit trail of Volume location/status changes (one row per change)
+CREATE TABLE LocationLog (
+ LocLogId SERIAL NOT NULL,
+ Date timestamp without time zone,
+ Comment TEXT NOT NULL,
+ MediaId INTEGER DEFAULT 0,
+ LocationId INTEGER DEFAULT 0,
+ newvolstatus text not null
+ check (newvolstatus in ('Full','Archive','Append',
+ 'Recycle','Purged','Read-Only','Disabled',
+ 'Error','Busy','Used','Cleaning','Scratch')),
+ newenabled smallint,
+ PRIMARY KEY(LocLogId)
+);
+
CREATE TABLE counters
HasBase TINYINT DEFAULT 0,
PRIMARY KEY(JobId)
);
-
CREATE INDEX inx6 ON Job (Name);
CREATE TABLE Location (
PRIMARY KEY(LocationId)
);
+-- LocationLog: audit trail of Volume location/status changes (one row per change)
+CREATE TABLE LocationLog (
+ LocLogId INTEGER,
+ Date DATETIME NOT NULL,
+ Comment TEXT NOT NULL,
+ MediaId INTEGER UNSIGNED REFERENCES Media DEFAULT 0,
+ LocationId INTEGER UNSIGNED REFERENCES Location DEFAULT 0,
+ NewVolStatus VARCHAR(20) NOT NULL,
+ NewEnabled TINYINT NOT NULL,
+ PRIMARY KEY(LocLogId)
+);
+
+
+CREATE TABLE Log (
+ LogId INTEGER,
+ JobId INTEGER UNSIGNED REFERENCES Job NOT NULL,
+ Time DATETIME NOT NULL,
+ LogText TEXT NOT NULL,
+ PRIMARY KEY(LogId)
+ );
+CREATE INDEX LogInx1 ON Log (JobId);
+
CREATE TABLE FileSet (
FileSetId INTEGER,
InitialWrite DATETIME DEFAULT 0,
ScratchPoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
RecyclePoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
+ Comment TEXT,
PRIMARY KEY(MediaId)
);
PRIMARY KEY (TableName)
);
-CREATE TABLE Log
- JobId INTEGER UNSIGNED REFERENCES Job NOT NULL,
- LogText TEXT NOT NULL,
- KEY (JobId)
- );
-- Initialize JobId to start at 1
HasBase TINYINT DEFAULT 0,
PRIMARY KEY(JobId)
);
-
CREATE INDEX inx6 ON Job (Name);
CREATE TABLE Location (
PRIMARY KEY(LocationId)
);
+-- LocationLog: audit trail of Volume location/status changes (one row per change)
+CREATE TABLE LocationLog (
+ LocLogId INTEGER,
+ Date DATETIME NOT NULL,
+ Comment TEXT NOT NULL,
+ MediaId INTEGER UNSIGNED REFERENCES Media DEFAULT 0,
+ LocationId INTEGER UNSIGNED REFERENCES Location DEFAULT 0,
+ NewVolStatus VARCHAR(20) NOT NULL,
+ NewEnabled TINYINT NOT NULL,
+ PRIMARY KEY(LocLogId)
+);
+
+
+CREATE TABLE Log (
+ LogId INTEGER,
+ JobId INTEGER UNSIGNED REFERENCES Job NOT NULL,
+ Time DATETIME NOT NULL,
+ LogText TEXT NOT NULL,
+ PRIMARY KEY(LogId)
+ );
+CREATE INDEX LogInx1 ON Log (JobId);
+
CREATE TABLE FileSet (
FileSetId INTEGER,
InitialWrite DATETIME DEFAULT 0,
ScratchPoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
RecyclePoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
+ Comment TEXT,
PRIMARY KEY(MediaId)
);
PRIMARY KEY (TableName)
);
-CREATE TABLE Log
- JobId INTEGER UNSIGNED REFERENCES Job NOT NULL,
- LogText TEXT NOT NULL,
- KEY (JobId)
- );
-- Initialize JobId to start at 1
ALTER TABLE Media ADD COLUMN ScratchPoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool;
ALTER TABLE Media ADD COLUMN RecyclePoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool;
ALTER TABLE Media ADD COLUMN Enabled TINYINT DEFAULT 1;
+ALTER TABLE Media ADD COLUMN Comment BLOB;
ALTER TABLE JobMedia DROP ADD COLUMN Stripe;
ALTER TABLE Job ADD COLUMN RealEndTime DATETIME DEFAULT 0;
CREATE TABLE Log (
+ LogId INTEGER UNSIGNED AUTO_INCREMENT,
JobId INTEGER INTEGER UNSIGNED DEFAULT 0 REFERENCES JobId,
+ Time DATETIME DEFAULT 0,
LogText BLOB NOT NULL,
+ PRIMARY KEY(LogId),
INDEX (JobId)
);
+-- LocationLog: audit trail of Volume location/status changes (one row per change)
+CREATE TABLE LocationLog (
+ LocLogId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
+ Date DATETIME DEFAULT 0,
+ Comment BLOB NOT NULL,
+ MediaId INTEGER UNSIGNED DEFAULT 0 REFERENCES Media,
+ LocationId INTEGER UNSIGNED DEFAULT 0 REFERENCES Location,
+ NewVolStatus ENUM('Full', 'Archive', 'Append', 'Recycle', 'Purged',
+ 'Read-Only', 'Disabled', 'Error', 'Busy', 'Used', 'Cleaning', 'Scratch') NOT NULL,
+ NewEnabled TINYINT,
+ PRIMARY KEY(LocLogId)
+);
+
+
CREATE TABLE Location (
LocationId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
- Location TINYBLOB NOT NULL,
+ Location BLOB NOT NULL,
Cost INTEGER DEFAULT 0,
Enabled TINYINT,
PRIMARY KEY(LocationId)
UPDATE media SET recyclepoolid=0;
ALTER TABLE media ADD COLUMN enabled integer;
UPDATE media SET enabled=1;
+ALTER TABLE media ADD COLUMN Comment TEXT;
ALTER TABLE job ADD COLUMN RealEndTime timestamp without time zone;
UPDATE job SET RealEndTime=0;
PRIMARY KEY (LocationId)
);
+-- LocationLog: audit trail of Volume location/status changes (one row per change)
+CREATE TABLE LocationLog (
+ LocLogId SERIAL NOT NULL,
+ Date timestamp without time zone,
+ Comment TEXT NOT NULL,
+ MediaId INTEGER DEFAULT 0,
+ LocationId INTEGER DEFAULT 0,
+ newvolstatus text not null
+ check (newvolstatus in ('Full','Archive','Append',
+ 'Recycle','Purged','Read-Only','Disabled',
+ 'Error','Busy','Used','Cleaning','Scratch')),
+ newenabled smallint,
+ PRIMARY KEY(LocLogId)
+);
+
+
CREATE TABLE Log
(
- JobId serial not null,
+ LogId serial not null,
+ JobId integer not null,
+ Time timestamp without time zone,
LogText text not null,
+ primary key (LogId)
);
-
create index log_name_idx on Log (JobId);
InitialWrite DATETIME DEFAULT 0,
ScratchPoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
RecyclePoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
+ Comment TEXT,
PRIMARY KEY(MediaId)
);
DROP TABLE Job_backup;
-CREATE TABLE Log
+-- LocationLog: audit trail of Volume location/status changes (one row per change)
+CREATE TABLE LocationLog (
+ LocLogId INTEGER,
+ Date DATETIME NOT NULL,
+ Comment TEXT NOT NULL,
+ MediaId INTEGER UNSIGNED REFERENCES Media DEFAULT 0,
+ LocationId INTEGER UNSIGNED REFERENCES Location DEFAULT 0,
+ NewVolStatus VARCHAR(20) NOT NULL,
+ NewEnabled TINYINT NOT NULL,
+ PRIMARY KEY(LocLogId)
+);
+
+CREATE TABLE Log (
+ LogId INTEGER,
JobId INTEGER UNSIGNED REFERENCES Job NOT NULL,
+ Time DATETIME NOT NULL,
LogText TEXT NOT NULL,
- KEY (JobId)
+ PRIMARY KEY(LogId)
);
+CREATE INDEX LogInx1 ON Log (JobId);
CREATE TABLE Location (
LocationId INTEGER,
InitialWrite DATETIME DEFAULT 0,
ScratchPoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
RecyclePoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
+ Comment TEXT,
PRIMARY KEY(MediaId)
);
DROP TABLE Job_backup;
-CREATE TABLE Log
+-- LocationLog: audit trail of Volume location/status changes (one row per change)
+CREATE TABLE LocationLog (
+ LocLogId INTEGER,
+ Date DATETIME NOT NULL,
+ Comment TEXT NOT NULL,
+ MediaId INTEGER UNSIGNED REFERENCES Media DEFAULT 0,
+ LocationId INTEGER UNSIGNED REFERENCES Location DEFAULT 0,
+ NewVolStatus VARCHAR(20) NOT NULL,
+ NewEnabled TINYINT NOT NULL,
+ PRIMARY KEY(LocLogId)
+);
+
+CREATE TABLE Log (
+ LogId INTEGER,
JobId INTEGER UNSIGNED REFERENCES Job NOT NULL,
+ Time DATETIME NOT NULL,
LogText TEXT NOT NULL,
- KEY (JobId)
+ PRIMARY KEY(LogId)
);
+CREATE INDEX LogInx1 ON Log (JobId);
CREATE TABLE Location (
LocationId INTEGER,
/* Get JobIds from regex'ed Job names */
const char *sql_jobids_from_job =
- "SELECT DISTINCT Job.JobId FROM Job,Pool"
+ "SELECT DISTINCT Job.JobId,Job.StartTime FROM Job,Pool"
" WHERE Job.Name='%s' AND Pool.Name='%s' AND Job.PoolId=Pool.PoolId"
" ORDER by Job.StartTime";
/* Get JobIds from regex'ed Client names */
const char *sql_jobids_from_client =
- "SELECT DISTINCT Job.JobId FROM Job,Pool"
+ "SELECT DISTINCT Job.JobId,Job.StartTime FROM Job,Pool"
" WHERE Client.Name='%s' AND Pool.Name='%s' AND Job.PoolId=Pool.PoolId"
" AND Job.ClientId=Client.ClientId "
" ORDER by Job.StartTime";
/* Get JobIds from regex'ed Volume names */
const char *sql_jobids_from_vol =
- "SELECT DISTINCT Job.JobId FROM Media,JobMedia,Job"
+ "SELECT DISTINCT Job.JobId,Job.StartTime FROM Media,JobMedia,Job"
" WHERE Media.VolumeName='%s' AND Media.MediaId=JobMedia.MediaId"
" AND JobMedia.JobId=Job.JobId"
" ORDER by Job.StartTime";
" ORDER BY LastWritten ASC LIMIT 1";
const char *sql_jobids_from_mediaid =
- "SELECT DISTINCT Job.JobId FROM JobMedia,Job"
+ "SELECT DISTINCT Job.JobId,Job.StartTime FROM JobMedia,Job"
" WHERE JobMedia.JobId=Job.JobId AND JobMedia.MediaId=%s"
" ORDER by Job.StartTime";
bnet_suppress_error_messages(sd, 1);
bget_msg(sd); /* Read final response from append_data */
Dmsg0(110, "Error in blast_data.\n");
+ /* run shortly after end of data transmission */
+ run_scripts(jcr, jcr->RunScripts, "ClientAfterJobShort");
} else {
set_jcr_job_status(jcr, JS_Terminated);
- /* run shortly after end of data transmission */
- run_scripts(jcr, jcr->RunScripts, "ClientAfterJobShort");
if (jcr->JobStatus != JS_Terminated) {
bnet_suppress_error_messages(sd, 1);
}
if (p_sql_query) {
POOL_MEM cmd(PM_MESSAGE);
- Mmsg(cmd, "INSERT INTO Log (JobId, LogText) VALUES (%s, '%s')",
- edit_int64(jcr->JobId, ed1), msg);
+ bstrftimes(dt, sizeof(dt), mtime);
+ Mmsg(cmd, "INSERT INTO Log (JobId, Time, LogText) VALUES (%s,'%s','%s')",
+ edit_int64(jcr->JobId, ed1), dt, msg);
p_sql_query(jcr, cmd.c_str());
}
break;
{"director", store_msgs, ITEM(res_msgs), MD_DIRECTOR, 0, 0},
{"console", store_msgs, ITEM(res_msgs), MD_CONSOLE, 0, 0},
{"operator", store_msgs, ITEM(res_msgs), MD_OPERATOR, 0, 0},
+ {"catalog", store_msgs, ITEM(res_msgs), MD_CATALOG, 0, 0},
{NULL, NULL, {0}, 0, 0, 0}
};
{"restored", M_RESTORED},
{"security", M_SECURITY},
{"alert", M_ALERT},
+ {"volmgmt", M_VOLMGMT},
{"all", M_MAX+1},
{NULL, 0}
};
case MD_STDERR:
case MD_SYSLOG: /* syslog */
case MD_CONSOLE:
+ case MD_CATALOG:
scan_types(lc, (MSGS *)(item->value), item->code, NULL, NULL);
break;
case MD_OPERATOR: /* send to operator */
int spool_fd; /* fd if spooling */
bool spool_data; /* set to spool data */
bool spooling; /* set when actually spooling */
+ bool despooling; /* set when despooling */
bool dev_locked; /* set if dev already locked */
bool NewVol; /* set if new Volume mounted */
bool WroteVol; /* set if Volume written */
* Version $Id$
*/
/*
- Copyright (C) 2004-2005 Kern Sibbald
+ Copyright (C) 2004-2006 Kern Sibbald
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
char ec1[50];
Dmsg0(100, "Despooling data\n");
+ /* Commit means that the job is done, so we commit, otherwise, we
+ * are despooling because of user spool size max or some error
+ * (e.g. filesystem full).
+ */
if (commit) {
Jmsg(jcr, M_INFO, 0, _("Committing spooled data to Volume \"%s\". Despooling %s bytes ...\n"),
jcr->dcr->VolumeName,
edit_uint64_with_commas(jcr->dcr->job_spool_size, ec1));
- }
- else {
+ } else {
Jmsg(jcr, M_INFO, 0, _("Writing spooled data to Volume. Despooling %s bytes ...\n"),
edit_uint64_with_commas(jcr->dcr->job_spool_size, ec1));
}
dcr->spooling = false;
+ dcr->despooling = true;
lock_device(dcr->dev);
dcr->dev_locked = true;
unlock_device(dcr->dev);
dcr->dev_locked = false;
dcr->spooling = true; /* turn on spooling again */
+ dcr->despooling = false;
return ok;
}
dcr->pool_name,
dcr->dev?dcr->dev->print_name():
dcr->device->device_name);
+ bnet_fsend(user, _(" spooling=%d despooling=%d devblocked=%d\n"),
+ dcr->spooling, dcr->despooling, dcr->dev->dev_blocked);
}
sec = time(NULL) - jcr->run_time;
if (sec <= 0) {
#undef VERSION
#define VERSION "1.39.16"
-#define BDATE "9 July 2006"
-#define LSMDATE "09Jul06"
+#define BDATE "19 July 2006"
+#define LSMDATE "19Jul06"
/* Debug flags */
#undef DEBUG