LStat TINYBLOB NOT NULL,
MD5 TINYBLOB NOT NULL,
PRIMARY KEY(FileId),
+ INDEX (JobId),
INDEX (FilenameId, PathId)
);
# to the above File table if your Verifies are
# too slow.
#
-# INDEX (JobId),
# INDEX (PathId),
# INDEX (FilenameId),
# INDEX (JobId, PathId, FilenameId)
#
-# Adding an index on JobId can speed up pruning
-#
CREATE TABLE Job (
JobId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
MaxVolFiles INTEGER UNSIGNED NOT NULL DEFAULT 0,
MaxVolBytes BIGINT UNSIGNED NOT NULL DEFAULT 0,
InChanger TINYINT NOT NULL DEFAULT 0,
+ StorageId INTEGER UNSIGNED DEFAULT 0 REFERENCES Storage,
MediaAddressing TINYINT NOT NULL DEFAULT 0,
VolReadTime BIGINT UNSIGNED NOT NULL DEFAULT 0,
VolWriteTime BIGINT UNSIGNED NOT NULL DEFAULT 0,
Enabled TINYINT DEFAULT 1,
ScratchPoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool,
RecyclePoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool,
+ NextPoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool,
+ MigrationHighBytes BIGINT UNSIGNED DEFAULT 0,
+ MigrationLowBytes BIGINT UNSIGNED DEFAULT 0,
+ MigrationTime BIGINT UNSIGNED DEFAULT 0,
UNIQUE (Name(128)),
PRIMARY KEY (PoolId)
);
maxvolfiles integer not null default 0,
maxvolbytes bigint not null default 0,
inchanger smallint not null default 0,
+ StorageId integer default 0,
mediaaddressing smallint not null default 0,
volreadtime bigint not null default 0,
volwritetime bigint not null default 0,
labeltype integer not null default 0,
labelformat text not null,
enabled smallint not null default 1,
- scratchpoolid integer,
- recyclepoolid integer,
+ scratchpoolid integer default 0,
+ recyclepoolid integer default 0,
+ NextPoolId integer default 0,
+ MigrationHighBytes BIGINT DEFAULT 0,
+ MigrationLowBytes BIGINT DEFAULT 0,
+ MigrationTime BIGINT DEFAULT 0,
primary key (poolid)
);
Enabled TINYINT DEFAULT 1,
ScratchPoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
RecyclePoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
+ NextPoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
+ MigrationHighBytes BIGINT UNSIGNED DEFAULT 0,
+ MigrationLowBytes BIGINT UNSIGNED DEFAULT 0,
+ MigrationTime BIGINT UNSIGNED DEFAULT 0,
UNIQUE (Name),
PRIMARY KEY (PoolId)
);
Enabled TINYINT DEFAULT 1,
ScratchPoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
RecyclePoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
+ NextPoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
+ MigrationHighBytes BIGINT UNSIGNED DEFAULT 0,
+ MigrationLowBytes BIGINT UNSIGNED DEFAULT 0,
+ MigrationTime BIGINT UNSIGNED DEFAULT 0,
UNIQUE (Name),
PRIMARY KEY (PoolId)
);
USE bacula;
ALTER TABLE Media ADD COLUMN LabelType INTEGER UNSIGNED NOT NULL DEFAULT 0;
-ALTER TABLE Pool ADD COLUMN LabelType INTEGER UNSIGNED NOT NULL DEFAULT 0;
+ALTER TABLE Media ADD COLUMN StorageId INTEGER UNSIGNED DEFAULT 0 REFERENCES Storage;
ALTER TABLE Media ADD COLUMN VolParts INTEGER UNSIGNED NOT NULL DEFAULT 0;
+ALTER TABLE Pool ADD COLUMN LabelType INTEGER UNSIGNED NOT NULL DEFAULT 0;
+ALTER TABLE Pool ADD COLUMN NextPoolId INTEGER UNSIGNED DEFAULT 0 REFERENCES Pool;
+ALTER TABLE Pool ADD COLUMN MigrationHighBytes BIGINT UNSIGNED DEFAULT 0;
+ALTER TABLE Pool ADD COLUMN MigrationLowBytes BIGINT UNSIGNED DEFAULT 0;
+ALTER TABLE Pool ADD COLUMN MigrationTime BIGINT UNSIGNED DEFAULT 0;
+
CREATE TABLE MediaType (
MediaTypeId INTEGER UNSIGNED NOT NULL AUTO_INCREMENT,
MediaType VARCHAR(128) NOT NULL,
ALTER TABLE media ADD COLUMN labeltype integer;
UPDATE media SET labeltype=0;
ALTER TABLE media ALTER COLUMN labeltype SET NOT NULL;
+ALTER TABLE media ADD COLUMN StorageId integer;
+UPDATE media SET StorageId=0;
+
ALTER TABLE pool ADD COLUMN labeltype integer;
UPDATE pool set labeltype=0;
ALTER TABLE pool ALTER COLUMN labeltype SET NOT NULL;
+ALTER TABLE pool ADD COLUMN NextPoolId integer;
+-- Backfill must use UPDATE; ALTER TABLE ... SET <col>=<val> is not valid SQL
+UPDATE pool SET NextPoolId=0;
+ALTER TABLE pool ADD COLUMN MigrationHighBytes BIGINT;
+UPDATE pool SET MigrationHighBytes=0;
+ALTER TABLE pool ADD COLUMN MigrationLowBytes BIGINT;
+UPDATE pool SET MigrationLowBytes=0;
+ALTER TABLE pool ADD COLUMN MigrationTime BIGINT;
+UPDATE pool SET MigrationTime=0;
+
ALTER TABLE media ADD COLUMN volparts integer;
UPDATE media SET volparts=0;
#!/bin/sh
#
-# shell script to update SQLite from version 1.34 to 1.35.5
+# shell script to update SQLite from version 1.36 to 1.37.3
#
echo " "
-echo "This script will update a Bacula SQLite database from version 7 to 8"
+echo "This script will update a Bacula SQLite database from version 8 to 9"
echo "Depending on the size of your database,"
echo "this script may take several minutes to run."
echo " "
MaxVolFiles INTEGER UNSIGNED DEFAULT 0,
MaxVolBytes BIGINT UNSIGNED DEFAULT 0,
InChanger TINYINT DEFAULT 0,
+ StorageId INTEGER UNSIGNED REFERENCES Storage,
MediaAddressing TINYINT DEFAULT 0,
VolReadTime BIGINT UNSIGNED DEFAULT 0,
VolWriteTime BIGINT UNSIGNED DEFAULT 0,
VolMounts, VolBytes, 0, VolErrors, VolWrites,
VolCapacityBytes, VolStatus, Recycle,
VolRetention, VolUseDuration, MaxVolJobs,
- MaxVolFiles, MaxVolBytes, InChanger, MediaAddressing,
+ MaxVolFiles, MaxVolBytes, InChanger, 0, MediaAddressing,
VolReadTime, VolWriteTime, EndFile, EndBlock
FROM Media;
MaxVolFiles INTEGER UNSIGNED DEFAULT 0,
MaxVolBytes BIGINT UNSIGNED DEFAULT 0,
InChanger TINYINT DEFAULT 0,
+ StorageId INTEGER UNSIGNED REFERENCES Storage,
MediaAddressing TINYINT DEFAULT 0,
VolReadTime BIGINT UNSIGNED DEFAULT 0,
VolWriteTime BIGINT UNSIGNED DEFAULT 0,
VolCapacityBytes, VolStatus, Recycle,
VolRetention, VolUseDuration, MaxVolJobs,
MaxVolFiles, MaxVolBytes,
- InChanger, MediaAddressing,
+ InChanger, StorageId, MediaAddressing,
VolReadTime, VolWriteTime,
EndFile, EndBlock)
SELECT * FROM Media_backup;
Enabled TINYINT DEFAULT 1,
ScratchPoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
RecyclePoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
+ NextPoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
+ MigrationHighBytes BIGINT UNSIGNED DEFAULT 0,
+ MigrationLowBytes BIGINT UNSIGNED DEFAULT 0,
+ MigrationTime BIGINT UNSIGNED DEFAULT 0,
UNIQUE (Name),
PRIMARY KEY (PoolId)
);
VolRetention, VolUseDuration, MaxVolJobs,
MaxVolFiles, MaxVolBytes, AutoPrune,
Recycle, PoolType, 0, LabelFormat,
- Enabled, ScratchPoolId, RecyclePoolId
+ Enabled, ScratchPoolId, RecyclePoolId,
+ 0, 0, 0, 0
FROM Pool;
DROP TABLE Pool;
Enabled TINYINT DEFAULT 1,
ScratchPoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
RecyclePoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
+ NextPoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
+ MigrationHighBytes BIGINT UNSIGNED DEFAULT 0,
+ MigrationLowBytes BIGINT UNSIGNED DEFAULT 0,
+ MigrationTime BIGINT UNSIGNED DEFAULT 0,
UNIQUE (Name),
PRIMARY KEY (PoolId)
);
VolRetention, VolUseDuration, MaxVolJobs,
MaxVolFiles, MaxVolBytes, AutoPrune,
Recycle, PoolType, LabelType, LabelFormat,
- Enabled, ScratchPoolId, RecyclePoolId )
+ Enabled, ScratchPoolId, RecyclePoolId,
+ NextPoolId, MigrationHighBytes,
+ MigrationLowBytes, MigrationTime )
SELECT * FROM Pool_backup;
DROP TABLE Pool_backup;
+-- Per-media-type catalog entry (SQLite); MediaTypeId is the rowid-alias PK
+CREATE TABLE MediaType (
+   MediaTypeId INTEGER,
+   MediaType VARCHAR(128) NOT NULL,
+   ReadOnly TINYINT DEFAULT 0,
+   PRIMARY KEY(MediaTypeId)
+   );
+
+CREATE TABLE Device (
+ DeviceId INTEGER,
+ Name VARCHAR(128) NOT NULL,
+ MediaTypeId INTEGER UNSIGNED REFERENCES MediaType NOT NULL,
+ StorageId INTEGER UNSIGNED REFERENCES Storage,
+ DevMounts INTEGER UNSIGNED DEFAULT 0,
+ DevReadBytes BIGINT UNSIGNED DEFAULT 0,
+ DevWriteBytes BIGINT UNSIGNED DEFAULT 0,
+ DevReadBytesSinceCleaning BIGINT UNSIGNED DEFAULT 0,
+ DevWriteBytesSinceCleaning BIGINT UNSIGNED DEFAULT 0,
+ DevReadTime BIGINT UNSIGNED DEFAULT 0,
+ DevWriteTime BIGINT UNSIGNED DEFAULT 0,
+ DevReadTimeSinceCleaning BIGINT UNSIGNED DEFAULT 0,
+ DevWriteTimeSinceCleaning BIGINT UNSIGNED DEFAULT 0,
+ CleaningDate DATETIME DEFAULT 0,
+ CleaningPeriod BIGINT UNSIGNED DEFAULT 0,
+ PRIMARY KEY(DeviceId)
+ );
+
+CREATE TABLE Storage (
+ StorageId INTEGER,
+ Name VARCHAR(128) NOT NULL,
+ AutoChanger TINYINT DEFAULT 0,
+ PRIMARY KEY(StorageId)
+ );
+
COMMIT;
END-OF-DATA
MaxVolFiles INTEGER UNSIGNED DEFAULT 0,
MaxVolBytes BIGINT UNSIGNED DEFAULT 0,
InChanger TINYINT DEFAULT 0,
+ StorageId INTEGER UNSIGNED REFERENCES Storage,
MediaAddressing TINYINT DEFAULT 0,
VolReadTime BIGINT UNSIGNED DEFAULT 0,
VolWriteTime BIGINT UNSIGNED DEFAULT 0,
VolMounts, VolBytes, 0, VolErrors, VolWrites,
VolCapacityBytes, VolStatus, Recycle,
VolRetention, VolUseDuration, MaxVolJobs,
- MaxVolFiles, MaxVolBytes, InChanger, MediaAddressing,
+ MaxVolFiles, MaxVolBytes, InChanger, 0, MediaAddressing,
VolReadTime, VolWriteTime, EndFile, EndBlock
FROM Media;
MaxVolFiles INTEGER UNSIGNED DEFAULT 0,
MaxVolBytes BIGINT UNSIGNED DEFAULT 0,
InChanger TINYINT DEFAULT 0,
+ StorageId INTEGER UNSIGNED REFERENCES Storage,
MediaAddressing TINYINT DEFAULT 0,
VolReadTime BIGINT UNSIGNED DEFAULT 0,
VolWriteTime BIGINT UNSIGNED DEFAULT 0,
VolCapacityBytes, VolStatus, Recycle,
VolRetention, VolUseDuration, MaxVolJobs,
MaxVolFiles, MaxVolBytes,
- InChanger, MediaAddressing,
+ InChanger, StorageId, MediaAddressing,
VolReadTime, VolWriteTime,
EndFile, EndBlock)
SELECT * FROM Media_backup;
Enabled TINYINT DEFAULT 1,
ScratchPoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
RecyclePoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
+ NextPoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
+ MigrationHighBytes BIGINT UNSIGNED DEFAULT 0,
+ MigrationLowBytes BIGINT UNSIGNED DEFAULT 0,
+ MigrationTime BIGINT UNSIGNED DEFAULT 0,
UNIQUE (Name),
PRIMARY KEY (PoolId)
);
VolRetention, VolUseDuration, MaxVolJobs,
MaxVolFiles, MaxVolBytes, AutoPrune,
Recycle, PoolType, 0, LabelFormat,
- Enabled, ScratchPoolId, RecyclePoolId
+ Enabled, ScratchPoolId, RecyclePoolId,
+ 0, 0, 0, 0
FROM Pool;
DROP TABLE Pool;
Enabled TINYINT DEFAULT 1,
ScratchPoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
RecyclePoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
+ NextPoolId INTEGER UNSIGNED REFERENCES Pool DEFAULT 0,
+ MigrationHighBytes BIGINT UNSIGNED DEFAULT 0,
+ MigrationLowBytes BIGINT UNSIGNED DEFAULT 0,
+ MigrationTime BIGINT UNSIGNED DEFAULT 0,
UNIQUE (Name),
PRIMARY KEY (PoolId)
);
VolRetention, VolUseDuration, MaxVolJobs,
MaxVolFiles, MaxVolBytes, AutoPrune,
Recycle, PoolType, LabelType, LabelFormat,
- Enabled, ScratchPoolId, RecyclePoolId )
+ Enabled, ScratchPoolId, RecyclePoolId,
+ NextPoolId, MigrationHighBytes,
+ MigrationLowBytes, MigrationTime )
SELECT * FROM Pool_backup;
DROP TABLE Pool_backup;
static char OKbackup[] = "2000 OK backup\n";
static char OKstore[] = "2000 OK storage\n";
static char EndJob[] = "2800 End Job TermCode=%d JobFiles=%u "
- "ReadBytes=%" lld " JobBytes=%" lld " Errors=%u\n";
+ "ReadBytes=%lld JobBytes=%lld Errors=%u\n";
/* Forward referenced functions */
Dmsg2(100, "Level=%c last start time=%s\n", jcr->JobLevel, jcr->stime);
}
+static void send_since_time(JCR *jcr)
+{
+ BSOCK *fd = jcr->file_bsock;
+ utime_t stime;
+ char ed1[50];
+
+ stime = str_to_utime(jcr->stime);
+ bnet_fsend(fd, levelcmd, "since_utime ", edit_uint64(stime, ed1), 0);
+ while (bget_dirmsg(fd) >= 0) { /* allow him to poll us to sync clocks */
+ Jmsg(jcr, M_INFO, 0, "%s\n", fd->msg);
+ }
+}
+
/*
* Send level command to FD.
int send_level_command(JCR *jcr)
{
BSOCK *fd = jcr->file_bsock;
- utime_t stime;
- char ed1[50];
/*
* Send Level command to File daemon
*/
bnet_fsend(fd, levelcmd, "full", " ", 0);
break;
case L_DIFFERENTIAL:
+ bnet_fsend(fd, levelcmd, "differential", " ", 0);
+ send_since_time(jcr);
+ break;
case L_INCREMENTAL:
- stime = str_to_utime(jcr->stime);
- bnet_fsend(fd, levelcmd, "since_utime ", edit_uint64(stime, ed1), 0);
- while (bget_dirmsg(fd) >= 0) { /* allow him to poll us to sync clocks */
- Jmsg(jcr, M_INFO, 0, "%s\n", fd->msg);
- }
+ bnet_fsend(fd, levelcmd, "incremental", " ", 0);
+ send_since_time(jcr);
break;
case L_SINCE:
default:
extern "C" void *jobq_server(void *arg);
extern "C" void *sched_wait(void *arg);
-static int start_server(jobq_t *jq);
+static int start_server(jobq_t *jq);
+static bool acquire_resources(JCR *jcr);
* move it to the ready queue
*/
Dmsg0(2300, "Done check ready, now check wait queue.\n");
- while (!jq->waiting_jobs->empty() && !jq->quit) {
+ if (!jq->waiting_jobs->empty() && !jq->quit) {
int Priority;
je = (jobq_item_t *)jq->waiting_jobs->first();
jobq_item_t *re = (jobq_item_t *)jq->running_jobs->first();
for ( ; je; ) {
/* je is current job item on the queue, jn is the next one */
JCR *jcr = je->jcr;
- bool skip_this_jcr = false;
jobq_item_t *jn = (jobq_item_t *)jq->waiting_jobs->next(je);
+
Dmsg3(2300, "Examining Job=%d JobPri=%d want Pri=%d\n",
jcr->JobId, jcr->JobPriority, Priority);
+
/* Take only jobs of correct Priority */
if (jcr->JobPriority != Priority) {
set_jcr_job_status(jcr, JS_WaitPriority);
break;
}
- if (jcr->JobType == JT_RESTORE || jcr->JobType == JT_VERIFY) {
- /* Let only one Restore/verify job run at a time regardless of MaxConcurrentJobs */
- if (jcr->store->NumConcurrentJobs == 0) {
- jcr->store->NumConcurrentJobs = 1;
- } else {
- set_jcr_job_status(jcr, JS_WaitStoreRes);
- je = jn; /* point to next waiting job */
- continue;
- }
- /* We are not doing a Restore or Verify */
- } else if (jcr->store->NumConcurrentJobs == 0 &&
- jcr->store->NumConcurrentJobs < jcr->store->MaxConcurrentJobs) {
- /* Simple case, first job */
- jcr->store->NumConcurrentJobs = 1;
- } else if (jcr->store->NumConcurrentJobs < jcr->store->MaxConcurrentJobs) {
- /*
- * At this point, we already have at least one Job running
- * for this Storage daemon, so we must ensure that there
- * is no Volume conflict. In general, it should be OK, if
- * all Jobs pull from the same Pool, so we check the Pools.
- */
- JCR *njcr;
- lock_jcr_chain();
- for (njcr=jobs; njcr; njcr=njcr->next) {
- if (njcr->JobId == 0 || njcr == jcr) {
- continue;
- }
- if (njcr->pool != jcr->pool) {
- skip_this_jcr = true;
- break;
- }
- }
- unlock_jcr_chain();
- if (!skip_this_jcr) {
- jcr->store->NumConcurrentJobs++;
- }
- } else {
- skip_this_jcr = true;
- }
- if (skip_this_jcr) {
- set_jcr_job_status(jcr, JS_WaitStoreRes);
- je = jn; /* point to next waiting job */
- continue;
- }
- if (jcr->client->NumConcurrentJobs < jcr->client->MaxConcurrentJobs) {
- jcr->client->NumConcurrentJobs++;
- } else {
- /* Back out previous locks */
- jcr->store->NumConcurrentJobs--;
- set_jcr_job_status(jcr, JS_WaitClientRes);
- je = jn; /* point to next waiting job */
- continue;
- }
- if (jcr->job->NumConcurrentJobs < jcr->job->MaxConcurrentJobs) {
- jcr->job->NumConcurrentJobs++;
- } else {
- /* Back out previous locks */
- jcr->store->NumConcurrentJobs--;
- jcr->client->NumConcurrentJobs--;
- set_jcr_job_status(jcr, JS_WaitJobRes);
- je = jn; /* Point to next waiting job */
+ if (!acquire_resources(jcr)) {
+ je = jn; /* point to next waiting job */
continue;
}
+
/* Got all locks, now remove it from wait queue and append it
* to the ready queue
*/
Dmsg1(2300, "moved JobId=%d from wait to ready queue\n", je->jcr->JobId);
je = jn; /* Point to next waiting job */
} /* end for loop */
- break;
- } /* end while loop */
+
+ } /* end if */
+
Dmsg0(2300, "Done checking wait queue.\n");
/*
* If no more ready work and we are asked to quit, then do it
* important, release the lock so that a job that has
* terminated can give us the resource.
*/
- if ((stat = pthread_mutex_unlock(&jq->mutex)) != 0) {
- berrno be;
- Jmsg1(NULL, M_ERROR, 0, "pthread_mutex_unlock: ERR=%s\n", be.strerror(stat));
- jq->num_workers--;
- return NULL;
- }
+ V(jq->mutex);
bmicrosleep(2, 0); /* pause for 2 seconds */
- if ((stat = pthread_mutex_lock(&jq->mutex)) != 0) {
- berrno be;
- Jmsg1(NULL, M_ERROR, 0, "pthread_mutex_lock: ERR=%s\n", be.strerror(stat));
- jq->num_workers--;
- return NULL;
- }
+ P(jq->mutex);
/* Recompute work as something may have changed in last 2 secs */
work = !jq->ready_jobs->empty() || !jq->waiting_jobs->empty();
}
} /* end of big for loop */
Dmsg0(200, "unlock mutex\n");
- if ((stat = pthread_mutex_unlock(&jq->mutex)) != 0) {
- berrno be;
- Jmsg1(NULL, M_ERROR, 0, "pthread_mutex_unlock: ERR=%s\n", be.strerror(stat));
- }
+ V(jq->mutex);
Dmsg0(2300, "End jobq_server\n");
return NULL;
}
+
+/*
+ * See if we can acquire all the necessary resources for the job (JCR)
+ *
+ * Returns: true if successful
+ * false if resource failure
+ */
+static bool acquire_resources(JCR *jcr)
+{
+ bool skip_this_jcr = false;
+
+ if (jcr->JobType == JT_RESTORE || jcr->JobType == JT_VERIFY) {
+ /*
+ * Let only one Restore/verify job run at a time regardless
+ * of MaxConcurrentJobs.
+ */
+ if (jcr->store->NumConcurrentJobs == 0) {
+ jcr->store->NumConcurrentJobs = 1;
+ } else {
+ set_jcr_job_status(jcr, JS_WaitStoreRes);
+ return false;
+ }
+ /* We are not doing a Restore or Verify */
+ } else if (jcr->store->NumConcurrentJobs == 0 &&
+ jcr->store->NumConcurrentJobs < jcr->store->MaxConcurrentJobs) {
+ /* Simple case, first job */
+ jcr->store->NumConcurrentJobs = 1;
+ } else if (jcr->store->NumConcurrentJobs < jcr->store->MaxConcurrentJobs) {
+ /*
+ * At this point, we already have at least one Job running
+ * for this Storage daemon, so we must ensure that there
+ * is no Volume conflict. In general, it should be OK, if
+ * all Jobs pull from the same Pool, so we check the Pools.
+ */
+ JCR *njcr;
+ lock_jcr_chain();
+ for (njcr=jobs; njcr; njcr=njcr->next) {
+ if (njcr->JobId == 0 || njcr == jcr) {
+ continue;
+ }
+ if (njcr->pool != jcr->pool) {
+ skip_this_jcr = true;
+ break;
+ }
+ }
+ unlock_jcr_chain();
+ if (!skip_this_jcr) {
+ jcr->store->NumConcurrentJobs++;
+ }
+ } else {
+ skip_this_jcr = true;
+ }
+ if (skip_this_jcr) {
+ set_jcr_job_status(jcr, JS_WaitStoreRes);
+ return false;
+ }
+
+ if (jcr->client->NumConcurrentJobs < jcr->client->MaxConcurrentJobs) {
+ jcr->client->NumConcurrentJobs++;
+ } else {
+ /* Back out previous locks */
+ jcr->store->NumConcurrentJobs--;
+ set_jcr_job_status(jcr, JS_WaitClientRes);
+ return false;
+ }
+ if (jcr->job->NumConcurrentJobs < jcr->job->MaxConcurrentJobs) {
+ jcr->job->NumConcurrentJobs++;
+ } else {
+ /* Back out previous locks */
+ jcr->store->NumConcurrentJobs--;
+ jcr->client->NumConcurrentJobs--;
+ set_jcr_job_status(jcr, JS_WaitJobRes);
+ return false;
+ }
+ return true;
+}
const char *select_restore_del =
"SELECT DISTINCT DelCandidates.JobId "
"FROM Job,DelCandidates "
- "WHERE (Job.JobTdate<%s AND delCandidates.JobStatus!='T') OR "
+ "WHERE (Job.JobTdate<%s AND DelCandidates.JobStatus!='T') OR "
"(Job.JobTDate>%s "
"AND Job.ClientId=%u "
"AND Job.Type='R')";
const char *select_admin_del =
"SELECT DISTINCT DelCandidates.JobId "
"FROM Job,DelCandidates "
- "WHERE (Job.JobTdate<%s AND delCandidates.JobStatus!='T') OR "
+ "WHERE (Job.JobTdate<%s AND DelCandidates.JobStatus!='T') OR "
"(Job.JobTDate>%s "
"AND Job.ClientId=%u "
"AND Job.Type='D')";
{
BSOCK *dir = jcr->dir_bsock;
POOLMEM *level, *buf = NULL;
- struct tm tm;
- time_t mtime;
int mtime_only;
level = get_memory(dir->msglen+1);
/* Full backup requested? */
} else if (strcmp(level, "full") == 0) {
jcr->JobLevel = L_FULL;
- /*
- * Backup requested since <date> <time>
- * This form is also used for incremental and differential
- * This code is deprecated. See since_utime for new code.
- */
- } else if (strcmp(level, "since") == 0) {
- jcr->JobLevel = L_SINCE;
- if (sscanf(dir->msg, "level = since %d-%d-%d %d:%d:%d mtime_only=%d",
- &tm.tm_year, &tm.tm_mon, &tm.tm_mday,
- &tm.tm_hour, &tm.tm_min, &tm.tm_sec, &mtime_only) != 7) {
- goto bail_out;
- }
- tm.tm_year -= 1900;
- tm.tm_mon -= 1;
- tm.tm_wday = tm.tm_yday = 0;
- tm.tm_isdst = -1;
- mtime = mktime(&tm);
- Dmsg2(100, "Got since time: %s mtime_only=%d\n", ctime(&mtime), mtime_only);
- jcr->incremental = 1; /* set incremental or decremental backup */
- jcr->mtime = mtime; /* set since time */
+ } else if (strcmp(level, "differential") == 0) {
+ jcr->JobLevel = L_DIFFERENTIAL;
+ free_memory(level);
+ return 1;
+ } else if (strcmp(level, "incremental") == 0) {
+ jcr->JobLevel = L_INCREMENTAL;
+ free_memory(level);
+ return 1;
/*
* We get his UTC since time, then sync the clocks and correct it
* to agree with our clock.
buf = get_memory(dir->msglen+1);
utime_t since_time, adj;
btime_t his_time, bt_start, rt=0, bt_adj=0;
- jcr->JobLevel = L_SINCE;
if (sscanf(dir->msg, "level = since_utime %s mtime_only=%d",
buf, &mtime_only) != 2) {
goto bail_out;
const char *build_addresses_str(dlist *addrs, char *buf, int blen)
{
- if (addrs->size() == 0) {
+ if (!addrs || addrs->size() == 0) {
bstrncpy(buf, "", blen);
return buf;
}
int get_first_port_net_order(dlist * addrs)
{
- return ((IPADDR *)(addrs->first()))->get_port_net_order();
+ if (!addrs) {
+ return 0;
+ } else {
+ return ((IPADDR *)(addrs->first()))->get_port_net_order();
+ }
}
int get_first_port_host_order(dlist * addrs)
{
- return ((IPADDR *)(addrs->first()))->get_port_host_order();
+ if (!addrs) {
+ return 0;
+ } else {
+ return ((IPADDR *)(addrs->first()))->get_port_host_order();
+ }
}
void init_default_addresses(dlist **out, int port)
*/
/*
- Copyright (C) 2004 Kern Sibbald and John Walker
+ Copyright (C) 2004-2005 Kern Sibbald
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
}
/*
- * Extremely simple sscanf. Handles only %(u,d,ld,lu,lld,llu,c,nns)
+ * Extremely simple sscanf. Handles only %(u,d,ld,qd,qu,lu,lld,llu,c,nns)
*/
const int BIG = 1000;
int bsscanf(const char *buf, const char *fmt, ...)
return NULL;
}
- dev = init_dev(NULL, device);
+ dev = init_dev(jcr, NULL, device);
if (!dev) {
Jmsg1(jcr, M_FATAL, 0, _("Cannot init device %s\n"), dev_name);
return NULL;
*
*/
DEVICE *
-init_dev(DEVICE *dev, DEVRES *device)
+init_dev(JCR *jcr, DEVICE *dev, DEVRES *device)
{
struct stat statp;
bool tape, fifo;
if (dev) {
dev->dev_errno = errno;
}
- Jmsg2(NULL, M_FATAL, 0, _("Unable to stat device %s: ERR=%s\n"),
+ Jmsg2(jcr, M_FATAL, 0, _("Unable to stat device %s: ERR=%s\n"),
device->device_name, be.strerror());
return NULL;
}
if (dev) {
dev->dev_errno = ENODEV;
}
- Emsg2(M_FATAL, 0, _("%s is an unknown device type. Must be tape or directory. st_mode=%x\n"),
+ Jmsg2(jcr, M_FATAL, 0, _("%s is an unknown device type. Must be tape or directory. st_mode=%x\n"),
device->device_name, statp.st_mode);
return NULL;
}
if (stat(device->mount_point, &statp) < 0) {
berrno be;
dev->dev_errno = errno;
- Jmsg2(NULL, M_FATAL, 0, _("Unable to stat mount point %s: ERR=%s\n"),
+ Jmsg2(jcr, M_FATAL, 0, _("Unable to stat mount point %s: ERR=%s\n"),
device->mount_point, be.strerror());
return NULL;
}
if (!device->mount_command || !device->unmount_command) {
- Jmsg0(NULL, M_ERROR_TERM, 0, _("Mount and unmount commands must defined for a device which requires mount.\n"));
+ Jmsg0(jcr, M_ERROR_TERM, 0, _("Mount and unmount commands must defined for a device which requires mount.\n"));
}
if (!device->write_part_command) {
- Jmsg0(NULL, M_ERROR_TERM, 0, _("Write part command must be defined for a device which requires mount.\n"));
+ Jmsg0(jcr, M_ERROR_TERM, 0, _("Write part command must be defined for a device which requires mount.\n"));
}
dev->state |= ST_DVD;
}
if (dev->max_block_size > 1000000) {
- Emsg3(M_ERROR, 0, _("Block size %u on device %s is too large, using default %u\n"),
+ Jmsg3(jcr, M_ERROR, 0, _("Block size %u on device %s is too large, using default %u\n"),
dev->max_block_size, dev->dev_name, DEFAULT_BLOCK_SIZE);
dev->max_block_size = 0;
}
if (dev->max_block_size % TAPE_BSIZE != 0) {
- Emsg2(M_WARNING, 0, _("Max block size %u not multiple of device %s block size.\n"),
+ Jmsg2(jcr, M_WARNING, 0, _("Max block size %u not multiple of device %s block size.\n"),
dev->max_block_size, dev->dev_name);
}
berrno be;
dev->dev_errno = errstat;
Mmsg1(dev->errmsg, _("Unable to init mutex: ERR=%s\n"), be.strerror(errstat));
- Emsg0(M_FATAL, 0, dev->errmsg);
+ Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg);
}
if ((errstat = pthread_cond_init(&dev->wait, NULL)) != 0) {
berrno be;
dev->dev_errno = errstat;
Mmsg1(dev->errmsg, _("Unable to init cond variable: ERR=%s\n"), be.strerror(errstat));
- Emsg0(M_ERROR_TERM, 0, dev->errmsg);
+ Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg);
}
if ((errstat = pthread_cond_init(&dev->wait_next_vol, NULL)) != 0) {
berrno be;
dev->dev_errno = errstat;
Mmsg1(dev->errmsg, _("Unable to init cond variable: ERR=%s\n"), be.strerror(errstat));
- Emsg0(M_ERROR_TERM, 0, dev->errmsg);
+ Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg);
}
if ((errstat = pthread_mutex_init(&dev->spool_mutex, NULL)) != 0) {
berrno be;
dev->dev_errno = errstat;
Mmsg1(dev->errmsg, _("Unable to init mutex: ERR=%s\n"), be.strerror(errstat));
- Emsg0(M_ERROR_TERM, 0, dev->errmsg);
+ Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg);
}
if ((errstat = rwl_init(&dev->lock)) != 0) {
berrno be;
dev->dev_errno = errstat;
Mmsg1(dev->errmsg, _("Unable to init mutex: ERR=%s\n"), be.strerror(errstat));
- Emsg0(M_ERROR_TERM, 0, dev->errmsg);
+ Jmsg0(jcr, M_ERROR_TERM, 0, dev->errmsg);
}
dev->fd = -1;
const int name_len = MAX_NAME_LENGTH;
DCR *dcr;
UnlockRes();
+ if (!device->dev) {
+ device->dev = init_dev(jcr, NULL, device);
+ }
if (!device->dev) {
Jmsg(jcr, M_FATAL, 0, _("\n"
" Archive \"%s\" requested by DIR could not be opened or does not exist.\n"),
ok = reserve_device_for_read(jcr, device->dev);
}
if (!ok) {
- bnet_fsend(dir, _("Could not get dcr for device: %s\n"), dev_name.c_str());
+ bnet_fsend(dir, _("3927 Could not reserve device: %s\n"), dev_name.c_str());
free_dcr(jcr->dcr);
return false;
}
LockRes();
foreach_res(device, R_DEVICE) {
/* Find resource, and make sure we were able to open it */
- if (fnmatch(dev_name.c_str(), device->hdr.name, 0) == 0 &&
- device->dev) {
+ if (fnmatch(dev_name.c_str(), device->hdr.name, 0) == 0) {
+ if (!device->dev) {
+ device->dev = init_dev(jcr, NULL, device);
+ }
+ if (!device->dev) {
+ break;
+ }
DEVICE *dev = device->dev;
POOL_MEM VolumeName, MediaType;
UnlockRes();
/* From dev.c */
-DEVICE *init_dev(DEVICE *dev, DEVRES *device);
+DEVICE *init_dev(JCR *jcr, DEVICE *dev, DEVRES *device);
int open_dev(DEVICE *dev, char *VolName, int mode);
off_t lseek_dev(DEVICE *dev, off_t offset, int whence);
int open_first_part(DEVICE *dev);
foreach_res(device, R_DEVICE) {
Dmsg1(90, "calling init_dev %s\n", device->device_name);
- device->dev = init_dev(NULL, device);
+ device->dev = init_dev(NULL, NULL, device);
Dmsg1(10, "SD init done %s\n", device->device_name);
if (!device->dev) {
Jmsg1(NULL, M_ERROR, 0, _("Could not initialize %s\n"), device->device_name);
{"device", dev_items, R_DEVICE},
{"messages", msgs_items, R_MSGS},
{"autochanger", changer_items, R_AUTOCHANGER},
- {NULL, NULL, 0}
+ {NULL, NULL, 0}
};
return;
}
sendit(sock, "dump_resource type=%d\n", type);
- if (type < 0) { /* no recursion */
+ if (type < 0) { /* no recursion */
type = - type;
recurse = 0;
}
break;
case R_STORAGE:
sendit(sock, "Storage: name=%s SDaddr=%s SDport=%d SDDport=%d HB=%s\n",
- res->res_store.hdr.name,
- NPRT(get_first_address(res->res_store.sdaddrs, buf, sizeof(buf))),
- get_first_port_host_order(res->res_store.sdaddrs),
- get_first_port_host_order(res->res_store.sddaddrs),
- edit_utime(res->res_store.heartbeat_interval, buf, sizeof(buf)));
- foreach_dlist(p, res->res_store.sdaddrs) {
- sendit(sock, " SDaddr=%s SDport=%d\n",
- p->get_address(buf, sizeof(buf)), p->get_port_host_order());
- }
- foreach_dlist(p, res->res_store.sddaddrs) {
- sendit(sock, " SDDaddr=%s SDDport=%d\n",
- p->get_address(buf, sizeof(buf)), p->get_port_host_order());
- }
+ res->res_store.hdr.name,
+ NPRT(get_first_address(res->res_store.sdaddrs, buf, sizeof(buf))),
+ get_first_port_host_order(res->res_store.sdaddrs),
+ get_first_port_host_order(res->res_store.sddaddrs),
+ edit_utime(res->res_store.heartbeat_interval, buf, sizeof(buf)));
+ if (res->res_store.sdaddrs) {
+ foreach_dlist(p, res->res_store.sdaddrs) {
+ sendit(sock, " SDaddr=%s SDport=%d\n",
+ p->get_address(buf, sizeof(buf)), p->get_port_host_order());
+ }
+ }
+ if (res->res_store.sddaddrs) {
+ foreach_dlist(p, res->res_store.sddaddrs) {
+ sendit(sock, " SDDaddr=%s SDDport=%d\n",
+ p->get_address(buf, sizeof(buf)), p->get_port_host_order());
+ }
+ }
break;
case R_DEVICE:
sendit(sock, "Device: name=%s MediaType=%s Device=%s LabelType=%d\n",
- res->res_dev.hdr.name,
- res->res_dev.media_type, res->res_dev.device_name,
- res->res_dev.label_type);
+ res->res_dev.hdr.name,
+ res->res_dev.media_type, res->res_dev.device_name,
+ res->res_dev.label_type);
sendit(sock, " rew_wait=%d min_bs=%d max_bs=%d\n",
- res->res_dev.max_rewind_wait, res->res_dev.min_block_size,
- res->res_dev.max_block_size);
+ res->res_dev.max_rewind_wait, res->res_dev.min_block_size,
+ res->res_dev.max_block_size);
sendit(sock, " max_jobs=%d max_files=%" lld " max_size=%" lld "\n",
- res->res_dev.max_volume_jobs, res->res_dev.max_volume_files,
- res->res_dev.max_volume_size);
+ res->res_dev.max_volume_jobs, res->res_dev.max_volume_files,
+ res->res_dev.max_volume_size);
sendit(sock, " max_file_size=%" lld " capacity=%" lld "\n",
- res->res_dev.max_file_size, res->res_dev.volume_capacity);
+ res->res_dev.max_file_size, res->res_dev.volume_capacity);
sendit(sock, " spool_directory=%s\n", NPRT(res->res_dev.spool_directory));
sendit(sock, " max_spool_size=%" lld " max_job_spool_size=%" lld "\n",
- res->res_dev.max_spool_size, res->res_dev.max_job_spool_size);
+ res->res_dev.max_spool_size, res->res_dev.max_job_spool_size);
bstrncpy(buf, " ", sizeof(buf));
if (res->res_dev.cap_bits & CAP_EOF) {
bstrncat(buf, "CAP_EOF ", sizeof(buf));
case R_AUTOCHANGER:
DEVRES *dev;
sendit(sock, "Changer: name=%s Changer_devname=%s Changer_cmd=%s\n",
- res->res_changer.hdr.name,
- res->res_changer.changer_name, res->res_changer.changer_command);
+ res->res_changer.hdr.name,
+ res->res_changer.changer_name, res->res_changer.changer_command);
foreach_alist(dev, res->res_changer.device) {
sendit(sock, " --->Device: name=%s\n", dev->hdr.name);
}
switch (type) {
case R_DIRECTOR:
if (res->res_dir.password) {
- free(res->res_dir.password);
+ free(res->res_dir.password);
}
if (res->res_dir.address) {
- free(res->res_dir.address);
+ free(res->res_dir.address);
}
break;
case R_AUTOCHANGER:
if (res->res_changer.changer_name) {
- free(res->res_changer.changer_name);
+ free(res->res_changer.changer_name);
}
if (res->res_changer.changer_command) {
- free(res->res_changer.changer_command);
+ free(res->res_changer.changer_command);
+ }
+ if (res->res_changer.device) {
+ delete res->res_changer.device;
}
+ break;
case R_STORAGE:
if (res->res_store.sdaddrs) {
- free_addresses(res->res_store.sdaddrs);
+ free_addresses(res->res_store.sdaddrs);
}
if (res->res_store.sddaddrs) {
- free_addresses(res->res_store.sddaddrs);
+ free_addresses(res->res_store.sddaddrs);
}
if (res->res_store.working_directory) {
- free(res->res_store.working_directory);
+ free(res->res_store.working_directory);
}
if (res->res_store.pid_directory) {
- free(res->res_store.pid_directory);
+ free(res->res_store.pid_directory);
}
if (res->res_store.subsys_directory) {
- free(res->res_store.subsys_directory);
+ free(res->res_store.subsys_directory);
}
break;
case R_DEVICE:
if (res->res_dev.media_type) {
- free(res->res_dev.media_type);
+ free(res->res_dev.media_type);
}
if (res->res_dev.device_name) {
- free(res->res_dev.device_name);
+ free(res->res_dev.device_name);
}
if (res->res_dev.changer_name) {
- free(res->res_dev.changer_name);
+ free(res->res_dev.changer_name);
}
if (res->res_dev.changer_command) {
- free(res->res_dev.changer_command);
+ free(res->res_dev.changer_command);
}
if (res->res_dev.alert_command) {
- free(res->res_dev.alert_command);
+ free(res->res_dev.alert_command);
}
if (res->res_dev.spool_directory) {
- free(res->res_dev.spool_directory);
+ free(res->res_dev.spool_directory);
}
if (res->res_dev.mount_point) {
- free(res->res_dev.mount_point);
+ free(res->res_dev.mount_point);
}
if (res->res_dev.mount_command) {
- free(res->res_dev.mount_command);
+ free(res->res_dev.mount_command);
}
if (res->res_dev.unmount_command) {
- free(res->res_dev.unmount_command);
+ free(res->res_dev.unmount_command);
}
if (res->res_dev.write_part_command) {
- free(res->res_dev.write_part_command);
+ free(res->res_dev.write_part_command);
}
if (res->res_dev.free_space_command) {
- free(res->res_dev.free_space_command);
+ free(res->res_dev.free_space_command);
}
break;
case R_MSGS:
if (res->res_msgs.mail_cmd) {
- free(res->res_msgs.mail_cmd);
+ free(res->res_msgs.mail_cmd);
}
if (res->res_msgs.operator_cmd) {
- free(res->res_msgs.operator_cmd);
+ free(res->res_msgs.operator_cmd);
}
free_msgs_res((MSGS *)res); /* free message resource */
res = NULL;
*/
for (i=0; items[i].name; i++) {
if (items[i].flags & ITEM_REQUIRED) {
- if (!bit_is_set(i, res_all.res_dir.hdr.item_present)) {
+ if (!bit_is_set(i, res_all.res_dir.hdr.item_present)) {
Emsg2(M_ERROR_TERM, 0, _("\"%s\" item is required in \"%s\" resource, but not found.\n"),
- items[i].name, resources[rindex]);
- }
+ items[i].name, resources[rindex]);
+ }
}
/* If this triggers, take a look at lib/parse_conf.h */
if (i >= MAX_RES_ITEMS) {
case R_DIRECTOR:
case R_DEVICE:
case R_MSGS:
- break;
+ break;
/* Resources containing a resource or an alist */
case R_STORAGE:
- if ((res = (URES *)GetResWithName(R_STORAGE, res_all.res_dir.hdr.name)) == NULL) {
+ if ((res = (URES *)GetResWithName(R_STORAGE, res_all.res_dir.hdr.name)) == NULL) {
Emsg1(M_ERROR_TERM, 0, "Cannot find Storage resource \"%s\"\n", res_all.res_dir.hdr.name);
- }
- res->res_store.messages = res_all.res_store.messages;
- break;
+ }
+ res->res_store.messages = res_all.res_store.messages;
+ break;
case R_AUTOCHANGER:
- if ((res = (URES *)GetResWithName(type, res_all.res_changer.hdr.name)) == NULL) {
+ if ((res = (URES *)GetResWithName(type, res_all.res_changer.hdr.name)) == NULL) {
Emsg1(M_ERROR_TERM, 0, "Cannot find AutoChanger resource %s\n",
- res_all.res_changer.hdr.name);
- }
- /* we must explicitly copy the device alist pointer */
- res->res_changer.device = res_all.res_changer.device;
- break;
+ res_all.res_changer.hdr.name);
+ }
+ /* we must explicitly copy the device alist pointer */
+ res->res_changer.device = res_all.res_changer.device;
+ break;
default:
printf("Unknown resource type %d\n", type);
- error = 1;
- break;
+ error = 1;
+ break;
}
if (res_all.res_dir.hdr.name) {
- free(res_all.res_dir.hdr.name);
- res_all.res_dir.hdr.name = NULL;
+ free(res_all.res_dir.hdr.name);
+ res_all.res_dir.hdr.name = NULL;
}
if (res_all.res_dir.hdr.desc) {
- free(res_all.res_dir.hdr.desc);
- res_all.res_dir.hdr.desc = NULL;
+ free(res_all.res_dir.hdr.desc);
+ res_all.res_dir.hdr.desc = NULL;
}
return;
}
/* The following code is only executed on pass 1 */
switch (type) {
case R_DIRECTOR:
- size = sizeof(DIRRES);
- break;
+ size = sizeof(DIRRES);
+ break;
case R_STORAGE:
- size = sizeof(STORES);
- break;
+ size = sizeof(STORES);
+ break;
case R_DEVICE:
- size = sizeof(DEVRES);
- break;
+ size = sizeof(DEVRES);
+ break;
case R_MSGS:
- size = sizeof(MSGS);
- break;
+ size = sizeof(MSGS);
+ break;
case R_AUTOCHANGER:
- size = sizeof(AUTOCHANGER);
- break;
+ size = sizeof(AUTOCHANGER);
+ break;
default:
printf("Unknown resource type %d\n", type);
- error = 1;
- size = 1;
- break;
+ error = 1;
+ size = 1;
+ break;
}
/* Common */
if (!error) {
res = (URES *)malloc(size);
memcpy(res, &res_all, size);
if (!res_head[rindex]) {
- res_head[rindex] = (RES *)res; /* store first entry */
+ res_head[rindex] = (RES *)res; /* store first entry */
} else {
- RES *next;
- /* Add new res to end of chain */
- for (next=res_head[rindex]; next->next; next=next->next) {
- if (strcmp(next->name, res->res_dir.hdr.name) == 0) {
- Emsg2(M_ERROR_TERM, 0,
+ RES *next;
+ /* Add new res to end of chain */
+ for (next=res_head[rindex]; next->next; next=next->next) {
+ if (strcmp(next->name, res->res_dir.hdr.name) == 0) {
+ Emsg2(M_ERROR_TERM, 0,
_("Attempt to define second \"%s\" resource named \"%s\" is not permitted.\n"),
- resources[rindex].name, res->res_dir.hdr.name);
- }
- }
- next->next = (RES *)res;
+ resources[rindex].name, res->res_dir.hdr.name);
+ }
+ }
+ next->next = (RES *)res;
Dmsg2(90, "Inserting %s res: %s\n", res_to_str(type),
- res->res_dir.hdr.name);
+ res->res_dir.hdr.name);
}
}
}
/* */
#undef VERSION
-#define VERSION "1.37.3"
-#define BDATE "10 February 2005"
-#define LSMDATE "10Feb05"
+#define VERSION "1.37.4"
+#define BDATE "18 February 2005"
+#define LSMDATE "18Feb05"
/* Debug flags */
#undef DEBUG