/*
Bacula® - The Network Backup Solution
- Copyright (C) 2000-2009 Free Software Foundation Europe e.V.
+ Copyright (C) 2000-2011 Free Software Foundation Europe e.V.
The main author of Bacula is Kern Sibbald, with contributions from
many others, a complete list can be found in the file AUTHORS.
This program is Free Software; you can redistribute it and/or
- modify it under the terms of version two of the GNU General Public
+ modify it under the terms of version three of the GNU Affero General Public
License as published by the Free Software Foundation and included
in the file LICENSE.
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
- You should have received a copy of the GNU General Public License
+ You should have received a copy of the GNU Affero General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
*
* Kern Sibbald, January MM
*
- * Version $Id$
*/
void store_replace(LEX *lc, RES_ITEM *item, int index, int pass);
void store_acl(LEX *lc, RES_ITEM *item, int index, int pass);
void store_migtype(LEX *lc, RES_ITEM *item, int index, int pass);
+static void store_actiononpurge(LEX *lc, RES_ITEM *item, int index, int pass);
static void store_device(LEX *lc, RES_ITEM *item, int index, int pass);
static void store_runscript(LEX *lc, RES_ITEM *item, int index, int pass);
static void store_runscript_when(LEX *lc, RES_ITEM *item, int index, int pass);
{"mediatype", store_strname, ITEM(res_store.media_type), 0, ITEM_REQUIRED, 0},
{"autochanger", store_bool, ITEM(res_store.autochanger), 0, ITEM_DEFAULT, 0},
{"enabled", store_bool, ITEM(res_store.enabled), 0, ITEM_DEFAULT, true},
+ {"allowcompression", store_bool, ITEM(res_store.AllowCompress), 0, ITEM_DEFAULT, true},
{"heartbeatinterval", store_time, ITEM(res_store.heartbeat_interval), 0, ITEM_DEFAULT, 0},
{"maximumconcurrentjobs", store_pint32, ITEM(res_store.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
{"sddport", store_pint32, ITEM(res_store.SDDport), 0, 0, 0}, /* deprecated */
{"dbsocket", store_str, ITEM(res_cat.db_socket), 0, 0, 0},
/* Turned off for the moment */
{"multipleconnections", store_bit, ITEM(res_cat.mult_db_connections), 0, 0, 0},
+ {"disablebatchinsert", store_bool, ITEM(res_cat.disable_batch_insert), 0, ITEM_DEFAULT, false},
{NULL, NULL, {0}, 0, 0, 0}
};
{"maxstartdelay",store_time, ITEM(res_job.MaxStartDelay), 0, 0, 0},
{"maxfullinterval", store_time, ITEM(res_job.MaxFullInterval), 0, 0, 0},
{"maxdiffinterval", store_time, ITEM(res_job.MaxDiffInterval), 0, 0, 0},
- {"jobretention", store_time, ITEM(res_job.JobRetention), 0, 0, 0},
{"prefixlinks", store_bool, ITEM(res_job.PrefixLinks), 0, ITEM_DEFAULT, false},
{"prunejobs", store_bool, ITEM(res_job.PruneJobs), 0, ITEM_DEFAULT, false},
{"prunefiles", store_bool, ITEM(res_job.PruneFiles), 0, ITEM_DEFAULT, false},
{"prunevolumes",store_bool, ITEM(res_job.PruneVolumes), 0, ITEM_DEFAULT, false},
+ {"purgemigrationjob", store_bool, ITEM(res_job.PurgeMigrateJob), 0, ITEM_DEFAULT, false},
{"enabled", store_bool, ITEM(res_job.enabled), 0, ITEM_DEFAULT, true},
{"spoolattributes",store_bool, ITEM(res_job.SpoolAttributes), 0, ITEM_DEFAULT, false},
{"spooldata", store_bool, ITEM(res_job.spool_data), 0, ITEM_DEFAULT, false},
- {"spoolsize", store_size, ITEM(res_job.spool_size), 0, 0, 0},
+ {"spoolsize", store_size64, ITEM(res_job.spool_size), 0, 0, 0},
{"rerunfailedlevels", store_bool, ITEM(res_job.rerun_failed_levels), 0, ITEM_DEFAULT, false},
{"prefermountedvolumes", store_bool, ITEM(res_job.PreferMountedVolumes), 0, ITEM_DEFAULT, true},
{"runbeforejob", store_short_runscript, ITEM(res_job.RunScripts), 0, 0, 0},
{"maximumconcurrentjobs", store_pint32, ITEM(res_job.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
{"rescheduleonerror", store_bool, ITEM(res_job.RescheduleOnError), 0, ITEM_DEFAULT, false},
{"rescheduleinterval", store_time, ITEM(res_job.RescheduleInterval), 0, ITEM_DEFAULT, 60 * 30},
- {"rescheduletimes", store_pint32, ITEM(res_job.RescheduleTimes), 0, 0, 0},
+ {"rescheduletimes", store_pint32, ITEM(res_job.RescheduleTimes), 0, 0, 5},
{"priority", store_pint32, ITEM(res_job.Priority), 0, ITEM_DEFAULT, 10},
{"allowmixedpriority", store_bool, ITEM(res_job.allow_mixed_priority), 0, ITEM_DEFAULT, false},
{"writepartafterjob", store_bool, ITEM(res_job.write_part_after_job), 0, ITEM_DEFAULT, true},
{"runscript", store_runscript, ITEM(res_job.RunScripts), 0, ITEM_NO_EQUALS, 0},
{"selectiontype", store_migtype, ITEM(res_job.selection_type), 0, 0, 0},
{"accurate", store_bool, ITEM(res_job.accurate), 0,0,0},
- {"allowduplicatejobs", store_bool, ITEM(res_job.AllowDuplicateJobs), 0, ITEM_DEFAULT, false},
+ {"allowduplicatejobs", store_bool, ITEM(res_job.AllowDuplicateJobs), 0, ITEM_DEFAULT, true},
{"allowhigherduplicates", store_bool, ITEM(res_job.AllowHigherDuplicates), 0, ITEM_DEFAULT, true},
+ {"cancellowerlevelduplicates", store_bool, ITEM(res_job.CancelLowerLevelDuplicates), 0, ITEM_DEFAULT, false},
{"cancelqueuedduplicates", store_bool, ITEM(res_job.CancelQueuedDuplicates), 0, ITEM_DEFAULT, false},
{"cancelrunningduplicates", store_bool, ITEM(res_job.CancelRunningDuplicates), 0, ITEM_DEFAULT, false},
{"pluginoptions", store_str, ITEM(res_job.PluginOptions), 0, 0, 0},
* name handler value code flags default_value
*/
static RES_ITEM sch_items[] = { /* Schedule resource directives; this hunk is a whitespace-only realignment */
- {"name", store_name, ITEM(res_sch.hdr.name), 0, ITEM_REQUIRED, 0},
- {"description", store_str, ITEM(res_sch.hdr.desc), 0, 0, 0},
- {"run", store_run, ITEM(res_sch.run), 0, 0, 0},
+ {"name", store_name, ITEM(res_sch.hdr.name), 0, ITEM_REQUIRED, 0},
+ {"description", store_str, ITEM(res_sch.hdr.desc), 0, 0, 0},
+ {"run", store_run, ITEM(res_sch.run), 0, 0, 0},
{NULL, NULL, {0}, 0, 0, 0} /* table terminator */
};
{"usecatalog", store_bool, ITEM(res_pool.use_catalog), 0, ITEM_DEFAULT, true},
{"usevolumeonce", store_bool, ITEM(res_pool.use_volume_once), 0, 0, 0},
{"purgeoldestvolume", store_bool, ITEM(res_pool.purge_oldest_volume), 0, 0, 0},
+ {"actiononpurge", store_actiononpurge, ITEM(res_pool.action_on_purge), 0, 0, 0},
{"recycleoldestvolume", store_bool, ITEM(res_pool.recycle_oldest_volume), 0, 0, 0},
{"recyclecurrentvolume", store_bool, ITEM(res_pool.recycle_current_volume), 0, 0, 0},
{"maximumvolumes", store_pint32, ITEM(res_pool.max_volumes), 0, 0, 0},
{"maximumvolumejobs", store_pint32, ITEM(res_pool.MaxVolJobs), 0, 0, 0},
{"maximumvolumefiles", store_pint32, ITEM(res_pool.MaxVolFiles), 0, 0, 0},
- {"maximumvolumebytes", store_size, ITEM(res_pool.MaxVolBytes), 0, 0, 0},
+ {"maximumvolumebytes", store_size64, ITEM(res_pool.MaxVolBytes), 0, 0, 0},
{"catalogfiles", store_bool, ITEM(res_pool.catalog_files), 0, ITEM_DEFAULT, true},
{"volumeretention", store_time, ITEM(res_pool.VolRetention), 0, ITEM_DEFAULT, 60*60*24*365},
{"volumeuseduration", store_time, ITEM(res_pool.VolUseDuration), 0, 0, 0},
{"migrationtime", store_time, ITEM(res_pool.MigrationTime), 0, 0, 0},
- {"migrationhighbytes", store_size, ITEM(res_pool.MigrationHighBytes), 0, 0, 0},
- {"migrationlowbytes", store_size, ITEM(res_pool.MigrationLowBytes), 0, 0, 0},
+ {"migrationhighbytes", store_size64, ITEM(res_pool.MigrationHighBytes), 0, 0, 0},
+ {"migrationlowbytes", store_size64, ITEM(res_pool.MigrationLowBytes), 0, 0, 0},
{"nextpool", store_res, ITEM(res_pool.NextPool), R_POOL, 0, 0},
{"storage", store_alist_res, ITEM(res_pool.storage), R_STORAGE, 0, 0},
{"autoprune", store_bool, ITEM(res_pool.AutoPrune), 0, ITEM_DEFAULT, true},
{"scratchpool", store_res, ITEM(res_pool.ScratchPool), R_POOL, 0, 0},
{"copypool", store_alist_res, ITEM(res_pool.CopyPool), R_POOL, 0, 0},
{"catalog", store_res, ITEM(res_pool.catalog), R_CATALOG, 0, 0},
+ {"fileretention", store_time, ITEM(res_pool.FileRetention), 0, 0, 0},
+ {"jobretention", store_time, ITEM(res_pool.JobRetention), 0, 0, 0},
+
{NULL, NULL, {0}, 0, 0, 0}
};
if (res->res_job.MaxStartDelay) {
sendit(sock, _(" --> MaxStartDelay=%u\n"), res->res_job.MaxStartDelay);
}
+ if (res->res_job.MaxRunSchedTime) {
+ sendit(sock, _(" --> MaxRunSchedTime=%u\n"), res->res_job.MaxRunSchedTime);
+ }
if (res->res_job.storage) {
STORE *store;
foreach_alist(store, res->res_job.storage) {
}
sendit(sock, " N\n");
}
+ if (incexe->ignoredir) {
+ sendit(sock, " Z %s\n", incexe->ignoredir);
+ }
for (j=0; j<incexe->name_list.size(); j++) {
sendit(sock, " I %s\n", incexe->name_list.get(j));
}
NPRT(res->res_pool.label_format));
sendit(sock, _(" CleaningPrefix=%s LabelType=%d\n"),
NPRT(res->res_pool.cleaning_prefix), res->res_pool.LabelType);
- sendit(sock, _(" RecyleOldest=%d PurgeOldest=%d\n"),
+ sendit(sock, _(" RecyleOldest=%d PurgeOldest=%d ActionOnPurge=%d\n"),
res->res_pool.recycle_oldest_volume,
- res->res_pool.purge_oldest_volume);
+ res->res_pool.purge_oldest_volume,
+ res->res_pool.action_on_purge);
sendit(sock, _(" MaxVolJobs=%d MaxVolFiles=%d MaxVolBytes=%s\n"),
res->res_pool.MaxVolJobs,
res->res_pool.MaxVolFiles,
edit_utime(res->res_pool.MigrationTime, ed1, sizeof(ed1)),
edit_uint64(res->res_pool.MigrationHighBytes, ed2),
edit_uint64(res->res_pool.MigrationLowBytes, ed3));
+ sendit(sock, _(" JobRetention=%s FileRetention=%s\n"),
+ edit_utime(res->res_pool.JobRetention, ed1, sizeof(ed1)),
+ edit_utime(res->res_pool.FileRetention, ed2, sizeof(ed2)));
if (res->res_pool.NextPool) {
sendit(sock, _(" NextPool=%s\n"), res->res_pool.NextPool->name());
}
if (incexe->opts_list) {
free(incexe->opts_list);
}
+ if (incexe->ignoredir) {
+ free(incexe->ignoredir);
+ }
free(incexe);
}
}
}
+static void store_actiononpurge(LEX *lc, RES_ITEM *item, int index, int pass) /* parse Pool "ActionOnPurge"; only "Truncate" accepted */
+{
+ uint32_t *destination = (uint32_t*)item->value;
+ lex_get_token(lc, T_NAME);
+ if (strcasecmp(lc->str, "truncate") == 0) {
+ *destination = (*destination) | ON_PURGE_TRUNCATE; /* OR the flag in; other bits preserved */
+ } else {
+ scan_err2(lc, _("Expected one of: %s, got: %s"), "Truncate", lc->str);
+ return; /* NOTE(review): scan_err2 presumably aborts/longjmps; return is belt-and-braces — confirm */
+ }
+ scan_to_eol(lc);
+ set_bit(index, res_all.hdr.item_present); /* mark directive as seen for required/duplicate checks */
+}
+
/*
* Store Device. Note, the resource is created upon the
* first reference. The details of the resource are obtained
*/
static void store_device(LEX *lc, RES_ITEM *item, int index, int pass)
{
- int token;
URES *res;
int rindex = R_DEVICE - r_first;
int size = sizeof(DEVICE);
bool found = false;
if (pass == 1) {
- token = lex_get_token(lc, T_NAME);
+ lex_get_token(lc, T_NAME);
if (!res_head[rindex]) {
res = (URES *)malloc(size);
memset(res, 0, size);
*/
void store_migtype(LEX *lc, RES_ITEM *item, int index, int pass)
{
- int token, i;
+ int i;
- token = lex_get_token(lc, T_NAME);
+ lex_get_token(lc, T_NAME);
/* Store the type both pass 1 and pass 2 */
for (i=0; migtypes[i].type_name; i++) {
if (strcasecmp(lc->str, migtypes[i].type_name) == 0) {
*/
void store_jobtype(LEX *lc, RES_ITEM *item, int index, int pass)
{
- int token, i;
+ int i;
- token = lex_get_token(lc, T_NAME);
+ lex_get_token(lc, T_NAME);
/* Store the type both pass 1 and pass 2 */
for (i=0; jobtypes[i].type_name; i++) {
if (strcasecmp(lc->str, jobtypes[i].type_name) == 0) {
*/
void store_level(LEX *lc, RES_ITEM *item, int index, int pass)
{
- int token, i;
+ int i;
- token = lex_get_token(lc, T_NAME);
+ lex_get_token(lc, T_NAME);
/* Store the level pass 2 so that type is defined */
for (i=0; joblevels[i].level_name; i++) {
if (strcasecmp(lc->str, joblevels[i].level_name) == 0) {
void store_replace(LEX *lc, RES_ITEM *item, int index, int pass)
{
- int token, i;
- token = lex_get_token(lc, T_NAME);
+ int i;
+ lex_get_token(lc, T_NAME);
/* Scan Replacement options */
for (i=0; ReplaceOptions[i].name; i++) {
if (strcasecmp(lc->str, ReplaceOptions[i].name) == 0) {
int token;
for (;;) {
- token = lex_get_token(lc, T_STRING);
+ lex_get_token(lc, T_STRING);
if (pass == 1) {
if (((alist **)item->value)[item->code] == NULL) {
((alist **)item->value)[item->code] = New(alist(10, owned_by_alist));
/* Each runscript command takes 2 entries in commands list */
pm_strcpy(c, lc->str);
((RUNSCRIPT*) item->value)->commands->prepend(c); /* command line */
- ((RUNSCRIPT*) item->value)->commands->prepend((void *)item->code); /* command type */
+ ((RUNSCRIPT*) item->value)->commands->prepend((void *)(intptr_t)item->code); /* command type */
}
scan_to_eol(lc);
}
if (pass == 2) {
RUNSCRIPT *script = new_runscript();
- script->set_job_code_callback(job_code_callback_filesetname);
+ script->set_job_code_callback(job_code_callback_director);
script->set_command(lc->str);
* - POOLMEM command string (ex: /bin/true)
* - int command type (ex: SHELL_CMD)
*/
- res_runscript.set_job_code_callback(job_code_callback_filesetname);
+ res_runscript.set_job_code_callback(job_code_callback_director);
while ((c=(char*)res_runscript.commands->pop()) != NULL) {
t = (intptr_t)res_runscript.commands->pop();
RUNSCRIPT *script = new_runscript();
}
/* callback function for edit_job_codes */
-extern "C" char *job_code_callback_filesetname(JCR *jcr, const char* param)
+/* See ../lib/util.c, function edit_job_codes, for more remaining codes */
+extern "C" char *job_code_callback_director(JCR *jcr, const char* param)
{
- if (param[0] == 'f') {
- return jcr->fileset->name();
- } else {
- return NULL;
+ static char yes[] = "yes"; /* static so returned pointers stay valid after return */
+ static char no[] = "no";
+ switch (param[0]) { /* job-code character following '%' */
+ case 'f': /* fileset name */
+ if (jcr->fileset) { /* each resource pointer is NULL-checked before dereference */
+ return jcr->fileset->name();
+ }
+ break;
+ case 'h': /* client address */
+ if (jcr->client) {
+ return jcr->client->address;
+ }
+ break;
+ case 'p': /* pool name */
+ if (jcr->pool) {
+ return jcr->pool->name();
+ }
+ break;
+ case 'w': /* write storage name */
+ if (jcr->wstore) {
+ return jcr->wstore->name();
+ }
+ break;
+ case 'x': /* whether data spooling is enabled */
+ return jcr->spool_data ? yes : no;
+ break;
+ case 'D': /* this Director's name */
+ return my_name;
+ break;
}
+ return NULL; /* unknown code or resource not set */
}
bool parse_dir_config(CONFIG *config, const char *configfile, int exit_code)