/*
- Bacula® - The Network Backup Solution
-
- Copyright (C) 2000-2009 Free Software Foundation Europe e.V.
-
- The main author of Bacula is Kern Sibbald, with contributions from
- many others, a complete list can be found in the file AUTHORS.
- This program is Free Software; you can redistribute it and/or
- modify it under the terms of version two of the GNU General Public
- License as published by the Free Software Foundation and included
- in the file LICENSE.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA.
-
- Bacula® is a registered trademark of Kern Sibbald.
- The licensor of Bacula is the Free Software Foundation Europe
- (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
- Switzerland, email:ftf@fsfeurope.org.
+ Bacula(R) - The Network Backup Solution
+
+ Copyright (C) 2000-2017 Kern Sibbald
+
+ The original author of Bacula is Kern Sibbald, with contributions
+ from many others, a complete list can be found in the file AUTHORS.
+
+ You may use this file and others of this release according to the
+ license defined in the LICENSE file, which includes the Affero General
+ Public License, v3.0 ("AGPLv3") and some additional permissions and
+ terms pursuant to its AGPLv3 Section 7.
+
+ This notice must be preserved when any source code is
+ conveyed and/or propagated.
+
+ Bacula(R) is a registered trademark of Kern Sibbald.
*/
/*
* Main configuration file parser for Bacula Directors,
* for the resource records.
*
* Kern Sibbald, January MM
- *
- * Version $Id$
*/
*/
int32_t r_first = R_FIRST;
int32_t r_last = R_LAST;
-static RES *sres_head[R_LAST - R_FIRST + 1];
-RES **res_head = sres_head;
+RES_HEAD **res_head;
+
+/* Per-resource runtime state kept outside the resource table itself;
+ * NOTE(review): presumably these lists persist across configuration
+ * reloads -- confirm.  globals_mutex guards all four lists below. */
+static pthread_mutex_t globals_mutex = PTHREAD_MUTEX_INITIALIZER;
+dlist client_globals;
+dlist job_globals;
+dlist store_globals;
+dlist sched_globals;
+
/* Imported subroutines */
extern void store_run(LEX *lc, RES_ITEM *item, int index, int pass);
void store_replace(LEX *lc, RES_ITEM *item, int index, int pass);
void store_acl(LEX *lc, RES_ITEM *item, int index, int pass);
void store_migtype(LEX *lc, RES_ITEM *item, int index, int pass);
-static void store_device(LEX *lc, RES_ITEM *item, int index, int pass);
-static void store_runscript(LEX *lc, RES_ITEM *item, int index, int pass);
+void store_ac_res(LEX *lc, RES_ITEM *item, int index, int pass);
+void store_device(LEX *lc, RES_ITEM *item, int index, int pass);
+void store_actiononpurge(LEX *lc, RES_ITEM *item, int index, int pass);
static void store_runscript_when(LEX *lc, RES_ITEM *item, int index, int pass);
static void store_runscript_cmd(LEX *lc, RES_ITEM *item, int index, int pass);
static void store_short_runscript(LEX *lc, RES_ITEM *item, int index, int pass);
#endif
int32_t res_all_size = sizeof(res_all);
+/* Implementation of certain classes */
+
+/* Allocate and zero this client's CLIENT_GLOBALS record, name it after
+ * the resource, and append it to the global client_globals list.
+ * Called with globals_mutex held (see the set* callers below). */
+void CLIENT::create_client_globals()
+{
+ globals = (CLIENT_GLOBALS *)malloc(sizeof(CLIENT_GLOBALS));
+ memset(globals, 0, sizeof(CLIENT_GLOBALS));
+ globals->name = bstrdup(name());
+ client_globals.append(globals);
+}
+
+/* Return the client's current running-job count, or 0 when no globals
+ * record exists yet.  Read without taking globals_mutex. */
+int32_t CLIENT::getNumConcurrentJobs()
+{
+ if (!globals) {
+ return 0;
+ }
+ return globals->NumConcurrentJobs;
+}
+
+/* Record the number of jobs currently running on this Client.
+ * On first use the globals record is created and the .conf address and
+ * Enabled flag are copied into it.
+ * Fixes: validate num BEFORE publishing it (the ASSERT previously ran
+ * after the store and mutex release); cast num to long so it matches
+ * the %ld format specifier on LP64 platforms. */
+void CLIENT::setNumConcurrentJobs(int32_t num)
+{
+   ASSERT(num >= 0);
+   P(globals_mutex);
+   if (!globals) {
+      create_client_globals();
+      /* Copy .conf IP address and Enabled */
+      globals->enabled = Enabled;
+      globals->SetIPaddress = bstrdup(client_address);
+   }
+   globals->NumConcurrentJobs = num;
+   V(globals_mutex);
+   Dmsg2(200, "Set NumConcurrentJobs=%ld for Client %s\n",
+         (long)num, globals->name);
+}
+
+/* Return the address used to contact this client: the runtime
+ * SetIPaddress override when one has been set, otherwise the .conf
+ * client_address.  Read without taking globals_mutex. */
+char *CLIENT::address()
+{
+ if (!globals) {
+ return client_address;
+ }
+ if (!globals->SetIPaddress) {
+ return client_address;
+ }
+ return globals->SetIPaddress;
+}
+
+/* Override the .conf client address at runtime.  Creates the globals
+ * record on first use (copying the .conf Enabled flag) and frees any
+ * previous override before duplicating the new one. */
+void CLIENT::setAddress(char *addr)
+{
+ P(globals_mutex);
+ if (!globals) {
+ create_client_globals();
+ globals->enabled = Enabled; /* copy .conf variable */
+ }
+ if (globals->SetIPaddress) {
+ free(globals->SetIPaddress);
+ }
+ globals->SetIPaddress = bstrdup(addr);
+ V(globals_mutex);
+}
+
+/* Current enabled state: the runtime value when a globals record
+ * exists, otherwise the .conf Enabled setting. */
+bool CLIENT::is_enabled()
+{
+ if (!globals) {
+ return Enabled;
+ }
+ return globals->enabled;
+}
+
+/* Set the runtime enabled state.  On first use the globals record is
+ * created and the .conf address is copied so address() keeps working. */
+void CLIENT::setEnabled(bool val)
+{
+ P(globals_mutex);
+ if (!globals) {
+ create_client_globals();
+ globals->SetIPaddress = bstrdup(client_address); /* copy .conf variable */
+ }
+ globals->enabled = val;
+ V(globals_mutex);
+ Dmsg2(200, "Set Enabled=%d for Client %s\n",
+ val, globals->name);
+}
+
+/* Allocate and zero this job's JOB_GLOBALS record, name it after the
+ * resource, and append it to the global job_globals list.
+ * Called with globals_mutex held (see the set* callers below). */
+void JOB::create_job_globals()
+{
+ globals = (JOB_GLOBALS *)malloc(sizeof(JOB_GLOBALS));
+ memset(globals, 0, sizeof(JOB_GLOBALS));
+ globals->name = bstrdup(name());
+ job_globals.append(globals);
+}
+
+/* Return the job's current running-instance count, or 0 when no
+ * globals record exists yet.  Read without taking globals_mutex. */
+int32_t JOB::getNumConcurrentJobs()
+{
+ if (!globals) {
+ return 0;
+ }
+ return globals->NumConcurrentJobs;
+}
+
+/* Record the number of instances of this Job currently running.
+ * On first use the globals record is created and the .conf Enabled
+ * flag is copied into it.
+ * Fixes: validate num BEFORE publishing it (the ASSERT previously ran
+ * after the store and mutex release); cast num to long so it matches
+ * the %ld format specifier on LP64 platforms. */
+void JOB::setNumConcurrentJobs(int32_t num)
+{
+   ASSERT(num >= 0);
+   P(globals_mutex);
+   if (!globals) {
+      create_job_globals();
+      globals->enabled = Enabled;    /* copy .conf variable */
+   }
+   globals->NumConcurrentJobs = num;
+   V(globals_mutex);
+   Dmsg2(200, "Set NumConcurrentJobs=%ld for Job %s\n",
+         (long)num, globals->name);
+}
+
+/* Current enabled state: the runtime value when a globals record
+ * exists, otherwise the .conf Enabled setting. */
+bool JOB::is_enabled()
+{
+ if (!globals) {
+ return Enabled;
+ }
+ return globals->enabled;
+}
+
+/* Set the runtime enabled state, creating the globals record on
+ * first use. */
+void JOB::setEnabled(bool val)
+{
+ P(globals_mutex);
+ if (!globals) {
+ create_job_globals();
+ }
+ globals->enabled = val;
+ V(globals_mutex);
+ Dmsg2(200, "Set Enabled=%d for Job %s\n",
+ val, globals->name);
+}
+
+/* Allocate and zero this storage's STORE_GLOBALS record, name it after
+ * the resource, and append it to the global store_globals list.
+ * Called with globals_mutex held (see the set* callers below). */
+void STORE::create_store_globals()
+{
+ globals = (STORE_GLOBALS *)malloc(sizeof(STORE_GLOBALS));
+ memset(globals, 0, sizeof(STORE_GLOBALS));
+ globals->name = bstrdup(name());
+ store_globals.append(globals);
+}
+
+/* Return the storage's current read-job count, or 0 when no globals
+ * record exists yet.  Read without taking globals_mutex. */
+int32_t STORE::getNumConcurrentReadJobs()
+{
+ if (!globals) {
+ return 0;
+ }
+ return globals->NumConcurrentReadJobs;
+}
+
+/* Record the number of read jobs currently using this Storage.
+ * On first use the globals record is created and the .conf Enabled
+ * flag is copied into it.
+ * Fixes: validate num BEFORE publishing it (the ASSERT previously ran
+ * dead last, after the store, unlock and debug message); cast num to
+ * long so it matches the %ld format specifier on LP64 platforms. */
+void STORE::setNumConcurrentReadJobs(int32_t num)
+{
+   ASSERT(num >= 0);
+   P(globals_mutex);
+   if (!globals) {
+      create_store_globals();
+      globals->enabled = Enabled;    /* copy .conf variable */
+   }
+   globals->NumConcurrentReadJobs = num;
+   V(globals_mutex);
+   Dmsg2(200, "Set NumConcurrentReadJobs=%ld for Store %s\n",
+         (long)num, globals->name);
+}
+
+/* Return the storage's current running-job count, or 0 when no
+ * globals record exists yet.  Read without taking globals_mutex. */
+int32_t STORE::getNumConcurrentJobs()
+{
+ if (!globals) {
+ return 0;
+ }
+ return globals->NumConcurrentJobs;
+}
+
+/* Record the number of jobs currently using this Storage.
+ * On first use the globals record is created and the .conf Enabled
+ * flag is copied into it.
+ * Fixes: validate num BEFORE publishing it (the ASSERT previously ran
+ * last); cast num to long for the %ld format specifier; debug message
+ * now says "NumConcurrentJobs" like every sibling setter (was the
+ * inconsistent "numconcurrentJobs"). */
+void STORE::setNumConcurrentJobs(int32_t num)
+{
+   ASSERT(num >= 0);
+   P(globals_mutex);
+   if (!globals) {
+      create_store_globals();
+      globals->enabled = Enabled;    /* copy .conf variable */
+   }
+   globals->NumConcurrentJobs = num;
+   V(globals_mutex);
+   Dmsg2(200, "Set NumConcurrentJobs=%ld for Store %s\n",
+         (long)num, globals->name);
+}
+
+/* Current enabled state: the runtime value when a globals record
+ * exists, otherwise the .conf Enabled setting. */
+bool STORE::is_enabled()
+{
+ if (!globals) {
+ return Enabled;
+ }
+ return globals->enabled;
+}
-/* Definition of records permitted within each
+/* Set the runtime enabled state, creating the globals record on
+ * first use. */
+void STORE::setEnabled(bool val)
+{
+ P(globals_mutex);
+ if (!globals) {
+ create_store_globals();
+ }
+ globals->enabled = val;
+ V(globals_mutex);
+ Dmsg2(200, "Set Enabled=%d for Storage %s\n",
+ val, globals->name);
+}
+
+/* Allocate and zero this schedule's SCHED_GLOBALS record, name it
+ * after the resource, and append it to the global sched_globals list.
+ * Called with globals_mutex held (see setEnabled below).
+ * Fix: the allocation used sizeof(CLIENT_GLOBALS) while the record is
+ * used (and memset) as a SCHED_GLOBALS -- a heap overflow whenever
+ * SCHED_GLOBALS is the larger struct.  Allocate the correct type. */
+void SCHED::create_sched_globals()
+{
+   globals = (SCHED_GLOBALS *)malloc(sizeof(SCHED_GLOBALS));
+   memset(globals, 0, sizeof(SCHED_GLOBALS));
+   globals->name = bstrdup(name());
+   sched_globals.append(globals);
+}
+
+/* Current enabled state: the runtime value when a globals record
+ * exists, otherwise the .conf Enabled setting. */
+bool SCHED::is_enabled()
+{
+ if (!globals) {
+ return Enabled;
+ }
+ return globals->enabled;
+}
+
+/* Set the runtime enabled state, creating the globals record on
+ * first use. */
+void SCHED::setEnabled(bool val)
+{
+ P(globals_mutex);
+ if (!globals) {
+ create_sched_globals();
+ }
+ globals->enabled = val;
+ V(globals_mutex);
+ Dmsg2(200, "Set Enabled=%d for Schedule %s\n",
+ val, globals->name);
+}
+
+/*
+ * Definition of records permitted within each
* resource with the routine to process the record
* information. NOTE! quoted names must be in lower case.
*/
* name handler value code flags default_value
*/
static RES_ITEM dir_items[] = {
- {"name", store_name, ITEM(res_dir.hdr.name), 0, ITEM_REQUIRED, 0},
- {"description", store_str, ITEM(res_dir.hdr.desc), 0, 0, 0},
- {"messages", store_res, ITEM(res_dir.messages), R_MSGS, 0, 0},
- {"dirport", store_addresses_port, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101},
- {"diraddress", store_addresses_address, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101},
- {"diraddresses",store_addresses, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101},
- {"dirsourceaddress",store_addresses_address, ITEM(res_dir.DIRsrc_addr), 0, ITEM_DEFAULT, 0},
- {"queryfile", store_dir, ITEM(res_dir.query_file), 0, ITEM_REQUIRED, 0},
- {"workingdirectory", store_dir, ITEM(res_dir.working_directory), 0, ITEM_REQUIRED, 0},
- {"plugindirectory", store_dir, ITEM(res_dir.plugin_directory), 0, 0, 0},
- {"scriptsdirectory", store_dir, ITEM(res_dir.scripts_directory), 0, 0, 0},
- {"piddirectory", store_dir, ITEM(res_dir.pid_directory), 0, ITEM_REQUIRED, 0},
- {"subsysdirectory", store_dir, ITEM(res_dir.subsys_directory), 0, 0, 0},
- {"maximumconcurrentjobs", store_pint32, ITEM(res_dir.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
- {"maximumconsoleconnections", store_pint32, ITEM(res_dir.MaxConsoleConnect), 0, ITEM_DEFAULT, 20},
- {"password", store_password, ITEM(res_dir.password), 0, ITEM_REQUIRED, 0},
- {"fdconnecttimeout", store_time,ITEM(res_dir.FDConnectTimeout), 0, ITEM_DEFAULT, 3 * 60},
- {"sdconnecttimeout", store_time,ITEM(res_dir.SDConnectTimeout), 0, ITEM_DEFAULT, 30 * 60},
- {"heartbeatinterval", store_time, ITEM(res_dir.heartbeat_interval), 0, ITEM_DEFAULT, 0},
- {"tlsauthenticate", store_bool, ITEM(res_dir.tls_authenticate), 0, 0, 0},
- {"tlsenable", store_bool, ITEM(res_dir.tls_enable), 0, 0, 0},
- {"tlsrequire", store_bool, ITEM(res_dir.tls_require), 0, 0, 0},
- {"tlsverifypeer", store_bool, ITEM(res_dir.tls_verify_peer), 0, ITEM_DEFAULT, true},
- {"tlscacertificatefile", store_dir, ITEM(res_dir.tls_ca_certfile), 0, 0, 0},
- {"tlscacertificatedir", store_dir, ITEM(res_dir.tls_ca_certdir), 0, 0, 0},
- {"tlscertificate", store_dir, ITEM(res_dir.tls_certfile), 0, 0, 0},
- {"tlskey", store_dir, ITEM(res_dir.tls_keyfile), 0, 0, 0},
- {"tlsdhfile", store_dir, ITEM(res_dir.tls_dhfile), 0, 0, 0},
- {"tlsallowedcn", store_alist_str, ITEM(res_dir.tls_allowed_cns), 0, 0, 0},
- {"statisticsretention", store_time, ITEM(res_dir.stats_retention), 0, ITEM_DEFAULT, 60*60*24*31*12*5},
- {"verid", store_str, ITEM(res_dir.verid), 0, 0, 0},
+ /* NOTE(review): names are now CamelCase (display form); keyword
+  * matching is presumably still case-insensitive -- confirm against
+  * the parser before relying on case. */
+ {"Name", store_name, ITEM(res_dir.hdr.name), 0, ITEM_REQUIRED, 0},
+ {"Description", store_str, ITEM(res_dir.hdr.desc), 0, 0, 0},
+ {"Messages", store_res, ITEM(res_dir.messages), R_MSGS, 0, 0},
+ {"DirPort", store_addresses_port, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101},
+ {"DirAddress", store_addresses_address, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101},
+ {"DirAddresses",store_addresses, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101},
+ {"DirSourceAddress",store_addresses_address, ITEM(res_dir.DIRsrc_addr), 0, ITEM_DEFAULT, 0},
+ {"QueryFile", store_dir, ITEM(res_dir.query_file), 0, ITEM_REQUIRED, 0},
+ {"WorkingDirectory", store_dir, ITEM(res_dir.working_directory), 0, ITEM_REQUIRED, 0},
+ {"PluginDirectory", store_dir, ITEM(res_dir.plugin_directory), 0, 0, 0},
+ {"ScriptsDirectory", store_dir, ITEM(res_dir.scripts_directory), 0, 0, 0},
+ {"PidDirectory", store_dir, ITEM(res_dir.pid_directory), 0, ITEM_REQUIRED, 0},
+ {"SubsysDirectory", store_dir, ITEM(res_dir.subsys_directory), 0, 0, 0},
+ {"MaximumConcurrentJobs", store_pint32, ITEM(res_dir.MaxConcurrentJobs), 0, ITEM_DEFAULT, 20},
+ {"MaximumReloadRequests", store_pint32, ITEM(res_dir.MaxReload), 0, ITEM_DEFAULT, 32},
+ {"MaximumConsoleConnections", store_pint32, ITEM(res_dir.MaxConsoleConnect), 0, ITEM_DEFAULT, 20},
+ {"Password", store_password, ITEM(res_dir.password), 0, ITEM_REQUIRED, 0},
+ {"FdConnectTimeout", store_time,ITEM(res_dir.FDConnectTimeout), 0, ITEM_DEFAULT, 3 * 60},
+ {"SdConnectTimeout", store_time,ITEM(res_dir.SDConnectTimeout), 0, ITEM_DEFAULT, 30 * 60},
+ {"HeartbeatInterval", store_time, ITEM(res_dir.heartbeat_interval), 0, ITEM_DEFAULT, 5 * 60},
+ {"TlsAuthenticate", store_bool, ITEM(res_dir.tls_authenticate), 0, 0, 0},
+ {"TlsEnable", store_bool, ITEM(res_dir.tls_enable), 0, 0, 0},
+ {"TlsRequire", store_bool, ITEM(res_dir.tls_require), 0, 0, 0},
+ {"TlsVerifyPeer", store_bool, ITEM(res_dir.tls_verify_peer), 0, ITEM_DEFAULT, true},
+ {"TlsCaCertificateFile", store_dir, ITEM(res_dir.tls_ca_certfile), 0, 0, 0},
+ {"TlsCaCertificateDir", store_dir, ITEM(res_dir.tls_ca_certdir), 0, 0, 0},
+ {"TlsCertificate", store_dir, ITEM(res_dir.tls_certfile), 0, 0, 0},
+ {"TlsKey", store_dir, ITEM(res_dir.tls_keyfile), 0, 0, 0},
+ {"TlsDhFile", store_dir, ITEM(res_dir.tls_dhfile), 0, 0, 0},
+ {"TlsAllowedCn", store_alist_str, ITEM(res_dir.tls_allowed_cns), 0, 0, 0},
+ {"StatisticsRetention", store_time, ITEM(res_dir.stats_retention), 0, ITEM_DEFAULT, 60*60*24*31*12*5},
+ {"VerId", store_str, ITEM(res_dir.verid), 0, 0, 0},
+ {"CommCompression", store_bool, ITEM(res_dir.comm_compression), 0, ITEM_DEFAULT, true},
 {NULL, NULL, {0}, 0, 0, 0}
};
* name handler value code flags default_value
*/
static RES_ITEM con_items[] = {
- {"name", store_name, ITEM(res_con.hdr.name), 0, ITEM_REQUIRED, 0},
- {"description", store_str, ITEM(res_con.hdr.desc), 0, 0, 0},
- {"password", store_password, ITEM(res_con.password), 0, ITEM_REQUIRED, 0},
- {"jobacl", store_acl, ITEM(res_con.ACL_lists), Job_ACL, 0, 0},
- {"clientacl", store_acl, ITEM(res_con.ACL_lists), Client_ACL, 0, 0},
- {"storageacl", store_acl, ITEM(res_con.ACL_lists), Storage_ACL, 0, 0},
- {"scheduleacl", store_acl, ITEM(res_con.ACL_lists), Schedule_ACL, 0, 0},
- {"runacl", store_acl, ITEM(res_con.ACL_lists), Run_ACL, 0, 0},
- {"poolacl", store_acl, ITEM(res_con.ACL_lists), Pool_ACL, 0, 0},
- {"commandacl", store_acl, ITEM(res_con.ACL_lists), Command_ACL, 0, 0},
- {"filesetacl", store_acl, ITEM(res_con.ACL_lists), FileSet_ACL, 0, 0},
- {"catalogacl", store_acl, ITEM(res_con.ACL_lists), Catalog_ACL, 0, 0},
- {"whereacl", store_acl, ITEM(res_con.ACL_lists), Where_ACL, 0, 0},
- {"pluginoptionsacl", store_acl, ITEM(res_con.ACL_lists), PluginOptions_ACL, 0, 0},
- {"tlsauthenticate", store_bool, ITEM(res_con.tls_authenticate), 0, 0, 0},
- {"tlsenable", store_bool, ITEM(res_con.tls_enable), 0, 0, 0},
- {"tlsrequire", store_bool, ITEM(res_con.tls_require), 0, 0, 0},
- {"tlsverifypeer", store_bool, ITEM(res_con.tls_verify_peer), 0, ITEM_DEFAULT, true},
- {"tlscacertificatefile", store_dir, ITEM(res_con.tls_ca_certfile), 0, 0, 0},
- {"tlscacertificatedir", store_dir, ITEM(res_con.tls_ca_certdir), 0, 0, 0},
- {"tlscertificate", store_dir, ITEM(res_con.tls_certfile), 0, 0, 0},
- {"tlskey", store_dir, ITEM(res_con.tls_keyfile), 0, 0, 0},
- {"tlsdhfile", store_dir, ITEM(res_con.tls_dhfile), 0, 0, 0},
- {"tlsallowedcn", store_alist_str, ITEM(res_con.tls_allowed_cns), 0, 0, 0},
+ {"Name", store_name, ITEM(res_con.hdr.name), 0, ITEM_REQUIRED, 0},
+ {"Description", store_str, ITEM(res_con.hdr.desc), 0, 0, 0},
+ {"Password", store_password, ITEM(res_con.password), 0, ITEM_REQUIRED, 0},
+ /* All *Acl entries store into the shared res_con.ACL_lists; the code
+  * column (Job_ACL, Client_ACL, ...) selects which list is filled. */
+ {"JobAcl", store_acl, ITEM(res_con.ACL_lists), Job_ACL, 0, 0},
+ {"ClientAcl", store_acl, ITEM(res_con.ACL_lists), Client_ACL, 0, 0},
+ {"StorageAcl", store_acl, ITEM(res_con.ACL_lists), Storage_ACL, 0, 0},
+ {"ScheduleAcl", store_acl, ITEM(res_con.ACL_lists), Schedule_ACL, 0, 0},
+ {"RunAcl", store_acl, ITEM(res_con.ACL_lists), Run_ACL, 0, 0},
+ {"PoolAcl", store_acl, ITEM(res_con.ACL_lists), Pool_ACL, 0, 0},
+ {"CommandAcl", store_acl, ITEM(res_con.ACL_lists), Command_ACL, 0, 0},
+ {"FilesetAcl", store_acl, ITEM(res_con.ACL_lists), FileSet_ACL, 0, 0},
+ {"CatalogAcl", store_acl, ITEM(res_con.ACL_lists), Catalog_ACL, 0, 0},
+ {"WhereAcl", store_acl, ITEM(res_con.ACL_lists), Where_ACL, 0, 0},
+ {"RestoreClientAcl", store_acl, ITEM(res_con.ACL_lists), RestoreClient_ACL, 0, 0},
+ {"BackupClientAcl", store_acl, ITEM(res_con.ACL_lists), BackupClient_ACL, 0, 0},
+ {"PluginOptionsAcl", store_acl, ITEM(res_con.ACL_lists), PluginOptions_ACL, 0, 0},
+ {"TlsAuthenticate", store_bool, ITEM(res_con.tls_authenticate), 0, 0, 0},
+ {"TlsEnable", store_bool, ITEM(res_con.tls_enable), 0, 0, 0},
+ {"TlsRequire", store_bool, ITEM(res_con.tls_require), 0, 0, 0},
+ {"TlsVerifyPeer", store_bool, ITEM(res_con.tls_verify_peer), 0, ITEM_DEFAULT, true},
+ {"TlsCaCertificateFile", store_dir, ITEM(res_con.tls_ca_certfile), 0, 0, 0},
+ {"TlsCaCertificateDir", store_dir, ITEM(res_con.tls_ca_certdir), 0, 0, 0},
+ {"TlsCertificate", store_dir, ITEM(res_con.tls_certfile), 0, 0, 0},
+ {"TlsKey", store_dir, ITEM(res_con.tls_keyfile), 0, 0, 0},
+ {"TlsDhFile", store_dir, ITEM(res_con.tls_dhfile), 0, 0, 0},
+ {"TlsAllowedCn", store_alist_str, ITEM(res_con.tls_allowed_cns), 0, 0, 0},
 {NULL, NULL, {0}, 0, 0, 0}
};
*/
static RES_ITEM cli_items[] = {
- {"name", store_name, ITEM(res_client.hdr.name), 0, ITEM_REQUIRED, 0},
- {"description", store_str, ITEM(res_client.hdr.desc), 0, 0, 0},
- {"address", store_str, ITEM(res_client.address), 0, ITEM_REQUIRED, 0},
- {"fdaddress", store_str, ITEM(res_client.address), 0, 0, 0},
- {"fdport", store_pint32, ITEM(res_client.FDport), 0, ITEM_DEFAULT, 9102},
- {"password", store_password, ITEM(res_client.password), 0, ITEM_REQUIRED, 0},
+ {"Name", store_name, ITEM(res_client.hdr.name), 0, ITEM_REQUIRED, 0},
+ {"Description", store_str, ITEM(res_client.hdr.desc), 0, 0, 0},
+ /* lowercase "fdaddress"/"fdpassword" are deprecated aliases; per the
+  * note at the end of job_items, JSON tools skip lowercase names. */
+ {"fdaddress", store_str, ITEM(res_client.client_address), 0, 0, 0},
+ {"Address", store_str, ITEM(res_client.client_address), 0, ITEM_REQUIRED, 0},
+ {"FdPort", store_pint32, ITEM(res_client.FDport), 0, ITEM_DEFAULT, 9102},
 {"fdpassword", store_password, ITEM(res_client.password), 0, 0, 0},
- {"catalog", store_res, ITEM(res_client.catalog), R_CATALOG, ITEM_REQUIRED, 0},
- {"fileretention", store_time, ITEM(res_client.FileRetention), 0, ITEM_DEFAULT, 60*60*24*60},
- {"jobretention", store_time, ITEM(res_client.JobRetention), 0, ITEM_DEFAULT, 60*60*24*180},
- {"heartbeatinterval", store_time, ITEM(res_client.heartbeat_interval), 0, ITEM_DEFAULT, 0},
- {"autoprune", store_bool, ITEM(res_client.AutoPrune), 0, ITEM_DEFAULT, true},
- {"maximumconcurrentjobs", store_pint32, ITEM(res_client.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
- {"tlsauthenticate", store_bool, ITEM(res_client.tls_authenticate), 0, 0, 0},
- {"tlsenable", store_bool, ITEM(res_client.tls_enable), 0, 0, 0},
- {"tlsrequire", store_bool, ITEM(res_client.tls_require), 0, 0, 0},
- {"tlscacertificatefile", store_dir, ITEM(res_client.tls_ca_certfile), 0, 0, 0},
- {"tlscacertificatedir", store_dir, ITEM(res_client.tls_ca_certdir), 0, 0, 0},
- {"tlscertificate", store_dir, ITEM(res_client.tls_certfile), 0, 0, 0},
- {"tlskey", store_dir, ITEM(res_client.tls_keyfile), 0, 0, 0},
- {"tlsallowedcn", store_alist_str, ITEM(res_client.tls_allowed_cns), 0, 0, 0},
+ {"Password", store_password, ITEM(res_client.password), 0, ITEM_REQUIRED, 0},
+ {"FdStorageAddress", store_str, ITEM(res_client.fd_storage_address), 0, 0, 0},
+ {"Catalog", store_res, ITEM(res_client.catalog), R_CATALOG, ITEM_REQUIRED, 0},
+ {"FileRetention", store_time, ITEM(res_client.FileRetention), 0, ITEM_DEFAULT, 60*60*24*60},
+ {"JobRetention", store_time, ITEM(res_client.JobRetention), 0, ITEM_DEFAULT, 60*60*24*180},
+ {"HeartbeatInterval", store_time, ITEM(res_client.heartbeat_interval), 0, ITEM_DEFAULT, 5 * 60},
+ {"AutoPrune", store_bool, ITEM(res_client.AutoPrune), 0, ITEM_DEFAULT, true},
+ {"SDCallsClient", store_bool, ITEM(res_client.sd_calls_client), 0, ITEM_DEFAULT, false},
+ {"SnapshotRetention", store_time, ITEM(res_client.SnapRetention), 0, ITEM_DEFAULT, 0},
+ {"MaximumConcurrentJobs", store_pint32, ITEM(res_client.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
+ {"TlsAuthenticate", store_bool, ITEM(res_client.tls_authenticate), 0, 0, 0},
+ {"TlsEnable", store_bool, ITEM(res_client.tls_enable), 0, 0, 0},
+ {"TlsRequire", store_bool, ITEM(res_client.tls_require), 0, 0, 0},
+ {"TlsCaCertificateFile", store_dir, ITEM(res_client.tls_ca_certfile), 0, 0, 0},
+ {"TlsCaCertificateDir", store_dir, ITEM(res_client.tls_ca_certdir), 0, 0, 0},
+ {"TlsCertificate", store_dir, ITEM(res_client.tls_certfile), 0, 0, 0},
+ {"TlsKey", store_dir, ITEM(res_client.tls_keyfile), 0, 0, 0},
+ {"TlsAllowedCn", store_alist_str, ITEM(res_client.tls_allowed_cns), 0, 0, 0},
+ {"MaximumBandwidthPerJob", store_speed, ITEM(res_client.max_bandwidth), 0, 0, 0},
+ {"Enabled", store_bool, ITEM(res_client.Enabled), 0, ITEM_DEFAULT, true},
 {NULL, NULL, {0}, 0, 0, 0}
};
* name handler value code flags default_value
*/
static RES_ITEM store_items[] = {
- {"name", store_name, ITEM(res_store.hdr.name), 0, ITEM_REQUIRED, 0},
- {"description", store_str, ITEM(res_store.hdr.desc), 0, 0, 0},
- {"sdport", store_pint32, ITEM(res_store.SDport), 0, ITEM_DEFAULT, 9103},
- {"address", store_str, ITEM(res_store.address), 0, ITEM_REQUIRED, 0},
+ {"Name", store_name, ITEM(res_store.hdr.name), 0, ITEM_REQUIRED, 0},
+ {"Description", store_str, ITEM(res_store.hdr.desc), 0, 0, 0},
+ {"SdPort", store_pint32, ITEM(res_store.SDport), 0, ITEM_DEFAULT, 9103},
 {"sdaddress", store_str, ITEM(res_store.address), 0, 0, 0},
- {"password", store_password, ITEM(res_store.password), 0, ITEM_REQUIRED, 0},
+ {"Address", store_str, ITEM(res_store.address), 0, ITEM_REQUIRED, 0},
+ {"FdStorageAddress", store_str, ITEM(res_store.fd_storage_address), 0, 0, 0},
 {"sdpassword", store_password, ITEM(res_store.password), 0, 0, 0},
- {"device", store_device, ITEM(res_store.device), R_DEVICE, ITEM_REQUIRED, 0},
- {"mediatype", store_strname, ITEM(res_store.media_type), 0, ITEM_REQUIRED, 0},
- {"autochanger", store_bool, ITEM(res_store.autochanger), 0, ITEM_DEFAULT, 0},
- {"enabled", store_bool, ITEM(res_store.enabled), 0, ITEM_DEFAULT, true},
- {"heartbeatinterval", store_time, ITEM(res_store.heartbeat_interval), 0, ITEM_DEFAULT, 0},
- {"maximumconcurrentjobs", store_pint32, ITEM(res_store.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
+ {"Password", store_password, ITEM(res_store.password), 0, ITEM_REQUIRED, 0},
+ {"Device", store_device, ITEM(res_store.device), R_DEVICE, ITEM_REQUIRED, 0},
+ {"MediaType", store_strname, ITEM(res_store.media_type), 0, ITEM_REQUIRED, 0},
+ /*
+  * Big kludge, these two autochanger definitions must be in
+  * this order and together.
+  */
+ {"Autochanger", store_ac_res, ITEM(res_store.changer), 0, ITEM_DEFAULT, 0},
+ {"Autochanger", store_bool, ITEM(res_store.autochanger), 0, ITEM_DEFAULT, false},
+ {"SharedStorage", store_ac_res, ITEM(res_store.shared_storage), 1, ITEM_DEFAULT, 0},
+ {"Enabled", store_bool, ITEM(res_store.Enabled), 0, ITEM_DEFAULT, true},
+ {"AllowCompression", store_bool, ITEM(res_store.AllowCompress), 0, ITEM_DEFAULT, true},
+ {"HeartbeatInterval", store_time, ITEM(res_store.heartbeat_interval), 0, ITEM_DEFAULT, 5 * 60},
+ {"MaximumConcurrentJobs", store_pint32, ITEM(res_store.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
+ {"MaximumConcurrentReadjobs", store_pint32, ITEM(res_store.MaxConcurrentReadJobs), 0, ITEM_DEFAULT, 0},
 {"sddport", store_pint32, ITEM(res_store.SDDport), 0, 0, 0}, /* deprecated */
- {"tlsauthenticate", store_bool, ITEM(res_store.tls_authenticate), 0, 0, 0},
- {"tlsenable", store_bool, ITEM(res_store.tls_enable), 0, 0, 0},
- {"tlsrequire", store_bool, ITEM(res_store.tls_require), 0, 0, 0},
- {"tlscacertificatefile", store_dir, ITEM(res_store.tls_ca_certfile), 0, 0, 0},
- {"tlscacertificatedir", store_dir, ITEM(res_store.tls_ca_certdir), 0, 0, 0},
- {"tlscertificate", store_dir, ITEM(res_store.tls_certfile), 0, 0, 0},
- {"tlskey", store_dir, ITEM(res_store.tls_keyfile), 0, 0, 0},
+ {"TlsAuthenticate", store_bool, ITEM(res_store.tls_authenticate), 0, 0, 0},
+ {"TlsEnable", store_bool, ITEM(res_store.tls_enable), 0, 0, 0},
+ {"TlsRequire", store_bool, ITEM(res_store.tls_require), 0, 0, 0},
+ {"TlsCaCertificateFile", store_dir, ITEM(res_store.tls_ca_certfile), 0, 0, 0},
+ {"TlsCaCertificateDir", store_dir, ITEM(res_store.tls_ca_certdir), 0, 0, 0},
+ {"TlsCertificate", store_dir, ITEM(res_store.tls_certfile), 0, 0, 0},
+ {"TlsKey", store_dir, ITEM(res_store.tls_keyfile), 0, 0, 0},
 {NULL, NULL, {0}, 0, 0, 0}
};
* name handler value code flags default_value
*/
static RES_ITEM cat_items[] = {
- {"name", store_name, ITEM(res_cat.hdr.name), 0, ITEM_REQUIRED, 0},
- {"description", store_str, ITEM(res_cat.hdr.desc), 0, 0, 0},
- {"address", store_str, ITEM(res_cat.db_address), 0, 0, 0},
+ {"Name", store_name, ITEM(res_cat.hdr.name), 0, ITEM_REQUIRED, 0},
+ {"Description", store_str, ITEM(res_cat.hdr.desc), 0, 0, 0},
 {"dbaddress", store_str, ITEM(res_cat.db_address), 0, 0, 0},
- {"dbport", store_pint32, ITEM(res_cat.db_port), 0, 0, 0},
+ {"Address", store_str, ITEM(res_cat.db_address), 0, 0, 0},
+ {"DbPort", store_pint32, ITEM(res_cat.db_port), 0, 0, 0},
 /* keep this password as store_str for the moment */
- {"password", store_str, ITEM(res_cat.db_password), 0, 0, 0},
 {"dbpassword", store_str, ITEM(res_cat.db_password), 0, 0, 0},
+ {"Password", store_str, ITEM(res_cat.db_password), 0, 0, 0},
 {"dbuser", store_str, ITEM(res_cat.db_user), 0, 0, 0},
- {"user", store_str, ITEM(res_cat.db_user), 0, 0, 0},
- {"dbname", store_str, ITEM(res_cat.db_name), 0, ITEM_REQUIRED, 0},
+ {"User", store_str, ITEM(res_cat.db_user), 0, 0, 0},
+ {"DbName", store_str, ITEM(res_cat.db_name), 0, ITEM_REQUIRED, 0},
 {"dbdriver", store_str, ITEM(res_cat.db_driver), 0, 0, 0},
- {"dbsocket", store_str, ITEM(res_cat.db_socket), 0, 0, 0},
+ {"DbSocket", store_str, ITEM(res_cat.db_socket), 0, 0, 0},
+ /* lowercase dbssl* keywords: SSL options handed to the DB driver */
+ {"dbsslmode", store_str, ITEM(res_cat.db_ssl_mode), 0, 0, 0},
+ {"dbsslkey", store_str, ITEM(res_cat.db_ssl_key), 0, 0, 0},
+ {"dbsslcert", store_str, ITEM(res_cat.db_ssl_cert), 0, 0, 0},
+ {"dbsslca", store_str, ITEM(res_cat.db_ssl_ca), 0, 0, 0},
+ {"dbsslcapath", store_str, ITEM(res_cat.db_ssl_capath), 0, 0, 0},
+ /* Fix: a second identical {"DbSocket", ...} entry was added here by
+  * mistake (same handler, field and code as the one above) -- removed. */
 /* Turned off for the moment */
- {"multipleconnections", store_bit, ITEM(res_cat.mult_db_connections), 0, 0, 0},
+ {"MultipleConnections", store_bit, ITEM(res_cat.mult_db_connections), 0, 0, 0},
+ {"DisableBatchInsert", store_bool, ITEM(res_cat.disable_batch_insert), 0, ITEM_DEFAULT, false},
 {NULL, NULL, {0}, 0, 0, 0}
};
* name handler value code flags default_value
*/
RES_ITEM job_items[] = {
- {"name", store_name, ITEM(res_job.hdr.name), 0, ITEM_REQUIRED, 0},
- {"description", store_str, ITEM(res_job.hdr.desc), 0, 0, 0},
- {"type", store_jobtype, ITEM(res_job.JobType), 0, ITEM_REQUIRED, 0},
- {"level", store_level, ITEM(res_job.JobLevel), 0, 0, 0},
- {"messages", store_res, ITEM(res_job.messages), R_MSGS, ITEM_REQUIRED, 0},
- {"storage", store_alist_res, ITEM(res_job.storage), R_STORAGE, 0, 0},
- {"pool", store_res, ITEM(res_job.pool), R_POOL, ITEM_REQUIRED, 0},
- {"fullbackuppool", store_res, ITEM(res_job.full_pool), R_POOL, 0, 0},
- {"incrementalbackuppool", store_res, ITEM(res_job.inc_pool), R_POOL, 0, 0},
- {"differentialbackuppool", store_res, ITEM(res_job.diff_pool), R_POOL, 0, 0},
- {"client", store_res, ITEM(res_job.client), R_CLIENT, ITEM_REQUIRED, 0},
- {"fileset", store_res, ITEM(res_job.fileset), R_FILESET, ITEM_REQUIRED, 0},
- {"schedule", store_res, ITEM(res_job.schedule), R_SCHEDULE, 0, 0},
- {"verifyjob", store_res, ITEM(res_job.verify_job), R_JOB, 0, 0},
- {"jobtoverify", store_res, ITEM(res_job.verify_job), R_JOB, 0, 0},
- {"jobdefs", store_res, ITEM(res_job.jobdefs), R_JOBDEFS, 0, 0},
- {"run", store_alist_str, ITEM(res_job.run_cmds), 0, 0, 0},
+ {"Name", store_name, ITEM(res_job.hdr.name), 0, ITEM_REQUIRED, 0},
+ {"Description", store_str, ITEM(res_job.hdr.desc), 0, 0, 0},
+ {"Type", store_jobtype, ITEM(res_job.JobType), 0, ITEM_REQUIRED, 0},
+ {"Level", store_level, ITEM(res_job.JobLevel), 0, 0, 0},
+ {"Messages", store_res, ITEM(res_job.messages), R_MSGS, ITEM_REQUIRED, 0},
+ {"Storage", store_alist_res, ITEM(res_job.storage), R_STORAGE, 0, 0},
+ {"Pool", store_res, ITEM(res_job.pool), R_POOL, ITEM_REQUIRED, 0},
+ {"NextPool", store_res, ITEM(res_job.next_pool), R_POOL, 0, 0},
+ {"FullBackupPool", store_res, ITEM(res_job.full_pool), R_POOL, 0, 0},
+ {"VirtualFullBackupPool", store_res, ITEM(res_job.vfull_pool), R_POOL, 0, 0},
+ {"IncrementalBackupPool", store_res, ITEM(res_job.inc_pool), R_POOL, 0, 0},
+ {"DifferentialBackupPool", store_res, ITEM(res_job.diff_pool), R_POOL, 0, 0},
+ {"Client", store_res, ITEM(res_job.client), R_CLIENT, ITEM_REQUIRED, 0},
+ {"Fileset", store_res, ITEM(res_job.fileset), R_FILESET, ITEM_REQUIRED, 0},
+ {"Schedule", store_res, ITEM(res_job.schedule), R_SCHEDULE, 0, 0},
+ {"VerifyJob", store_res, ITEM(res_job.verify_job), R_JOB, 0, 0},
+ {"JobToVerify", store_res, ITEM(res_job.verify_job), R_JOB, 0, 0},
+ {"JobDefs", store_res, ITEM(res_job.jobdefs), R_JOBDEFS, 0, 0},
+ {"Run", store_alist_str, ITEM(res_job.run_cmds), 0, 0, 0},
/* Root of where to restore files */
- {"where", store_dir, ITEM(res_job.RestoreWhere), 0, 0, 0},
- {"regexwhere", store_str, ITEM(res_job.RegexWhere), 0, 0, 0},
- {"stripprefix", store_str, ITEM(res_job.strip_prefix), 0, 0, 0},
- {"addprefix", store_str, ITEM(res_job.add_prefix), 0, 0, 0},
- {"addsuffix", store_str, ITEM(res_job.add_suffix), 0, 0, 0},
+ {"Where", store_dir, ITEM(res_job.RestoreWhere), 0, 0, 0},
+ {"RegexWhere", store_str, ITEM(res_job.RegexWhere), 0, 0, 0},
+ {"StripPrefix", store_str, ITEM(res_job.strip_prefix), 0, 0, 0},
+ {"AddPrefix", store_str, ITEM(res_job.add_prefix), 0, 0, 0},
+ {"AddSuffix", store_str, ITEM(res_job.add_suffix), 0, 0, 0},
/* Where to find bootstrap during restore */
- {"bootstrap",store_dir, ITEM(res_job.RestoreBootstrap), 0, 0, 0},
+ {"Bootstrap",store_dir, ITEM(res_job.RestoreBootstrap), 0, 0, 0},
/* Where to write bootstrap file during backup */
- {"writebootstrap",store_dir, ITEM(res_job.WriteBootstrap), 0, 0, 0},
- {"writeverifylist",store_dir,ITEM(res_job.WriteVerifyList), 0, 0, 0},
- {"replace", store_replace, ITEM(res_job.replace), 0, ITEM_DEFAULT, REPLACE_ALWAYS},
- {"maxrunschedtime", store_time, ITEM(res_job.MaxRunSchedTime), 0, 0, 0},
- {"maxruntime", store_time, ITEM(res_job.MaxRunTime), 0, 0, 0},
+ {"WriteBootstrap",store_dir, ITEM(res_job.WriteBootstrap), 0, 0, 0},
+ {"WriteVerifyList",store_dir,ITEM(res_job.WriteVerifyList), 0, 0, 0},
+ {"Replace", store_replace, ITEM(res_job.replace), 0, ITEM_DEFAULT, REPLACE_ALWAYS},
+ {"MaximumBandwidth", store_speed, ITEM(res_job.max_bandwidth), 0, 0, 0},
+ {"MaxRunSchedTime", store_time, ITEM(res_job.MaxRunSchedTime), 0, 0, 0},
+ {"MaxRunTime", store_time, ITEM(res_job.MaxRunTime), 0, 0, 0},
/* xxxMaxWaitTime are deprecated */
{"fullmaxwaittime", store_time, ITEM(res_job.FullMaxRunTime), 0, 0, 0},
{"incrementalmaxwaittime", store_time, ITEM(res_job.IncMaxRunTime), 0, 0, 0},
{"differentialmaxwaittime", store_time, ITEM(res_job.DiffMaxRunTime), 0, 0, 0},
- {"fullmaxruntime", store_time, ITEM(res_job.FullMaxRunTime), 0, 0, 0},
- {"incrementalmaxruntime", store_time, ITEM(res_job.IncMaxRunTime), 0, 0, 0},
- {"differentialmaxruntime", store_time, ITEM(res_job.DiffMaxRunTime), 0, 0, 0},
- {"maxwaittime", store_time, ITEM(res_job.MaxWaitTime), 0, 0, 0},
- {"maxstartdelay",store_time, ITEM(res_job.MaxStartDelay), 0, 0, 0},
- {"maxfullinterval", store_time, ITEM(res_job.MaxFullInterval), 0, 0, 0},
- {"maxdiffinterval", store_time, ITEM(res_job.MaxDiffInterval), 0, 0, 0},
- {"jobretention", store_time, ITEM(res_job.JobRetention), 0, 0, 0},
- {"prefixlinks", store_bool, ITEM(res_job.PrefixLinks), 0, ITEM_DEFAULT, false},
- {"prunejobs", store_bool, ITEM(res_job.PruneJobs), 0, ITEM_DEFAULT, false},
- {"prunefiles", store_bool, ITEM(res_job.PruneFiles), 0, ITEM_DEFAULT, false},
- {"prunevolumes",store_bool, ITEM(res_job.PruneVolumes), 0, ITEM_DEFAULT, false},
- {"enabled", store_bool, ITEM(res_job.enabled), 0, ITEM_DEFAULT, true},
- {"spoolattributes",store_bool, ITEM(res_job.SpoolAttributes), 0, ITEM_DEFAULT, false},
- {"spooldata", store_bool, ITEM(res_job.spool_data), 0, ITEM_DEFAULT, false},
- {"spoolsize", store_size, ITEM(res_job.spool_size), 0, 0, 0},
- {"rerunfailedlevels", store_bool, ITEM(res_job.rerun_failed_levels), 0, ITEM_DEFAULT, false},
- {"prefermountedvolumes", store_bool, ITEM(res_job.PreferMountedVolumes), 0, ITEM_DEFAULT, true},
+ {"FullMaxRunTime", store_time, ITEM(res_job.FullMaxRunTime), 0, 0, 0},
+ {"IncrementalMaxRunTime", store_time, ITEM(res_job.IncMaxRunTime), 0, 0, 0},
+ {"DifferentialMaxRunTime", store_time, ITEM(res_job.DiffMaxRunTime), 0, 0, 0},
+ {"MaxWaitTime", store_time, ITEM(res_job.MaxWaitTime), 0, 0, 0},
+ {"MaxStartDelay",store_time, ITEM(res_job.MaxStartDelay), 0, 0, 0},
+ {"MaxFullInterval", store_time, ITEM(res_job.MaxFullInterval), 0, 0, 0},
+ {"MaxVirtualFullInterval", store_time, ITEM(res_job.MaxVirtualFullInterval), 0, 0, 0},
+ {"MaxDiffInterval", store_time, ITEM(res_job.MaxDiffInterval), 0, 0, 0},
+ {"PrefixLinks", store_bool, ITEM(res_job.PrefixLinks), 0, ITEM_DEFAULT, false},
+ {"PruneJobs", store_bool, ITEM(res_job.PruneJobs), 0, ITEM_DEFAULT, false},
+ {"PruneFiles", store_bool, ITEM(res_job.PruneFiles), 0, ITEM_DEFAULT, false},
+ {"PruneVolumes",store_bool, ITEM(res_job.PruneVolumes), 0, ITEM_DEFAULT, false},
+ {"PurgeMigrationJob", store_bool, ITEM(res_job.PurgeMigrateJob), 0, ITEM_DEFAULT, false},
+ {"Enabled", store_bool, ITEM(res_job.Enabled), 0, ITEM_DEFAULT, true},
+ {"SnapshotRetention", store_time, ITEM(res_job.SnapRetention), 0, ITEM_DEFAULT, 0},
+ {"SpoolAttributes",store_bool, ITEM(res_job.SpoolAttributes), 0, ITEM_DEFAULT, true},
+ {"SpoolData", store_bool, ITEM(res_job.spool_data), 0, ITEM_DEFAULT, false},
+ {"SpoolSize", store_size64, ITEM(res_job.spool_size), 0, 0, 0},
+ {"ReRunFailedLevels", store_bool, ITEM(res_job.rerun_failed_levels), 0, ITEM_DEFAULT, false},
+ {"PreferMountedVolumes", store_bool, ITEM(res_job.PreferMountedVolumes), 0, ITEM_DEFAULT, true},
+   /*
+    * JSON tools skip directives written in lowercase. Such entries are
+    * deprecated or are synonyms of another directive that follows,
+    * like User and dbuser.
+    */
{"runbeforejob", store_short_runscript, ITEM(res_job.RunScripts), 0, 0, 0},
{"runafterjob", store_short_runscript, ITEM(res_job.RunScripts), 0, 0, 0},
{"runafterfailedjob", store_short_runscript, ITEM(res_job.RunScripts), 0, 0, 0},
{"clientrunbeforejob", store_short_runscript, ITEM(res_job.RunScripts), 0, 0, 0},
{"clientrunafterjob", store_short_runscript, ITEM(res_job.RunScripts), 0, 0, 0},
- {"maximumconcurrentjobs", store_pint32, ITEM(res_job.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
- {"rescheduleonerror", store_bool, ITEM(res_job.RescheduleOnError), 0, ITEM_DEFAULT, false},
- {"rescheduleinterval", store_time, ITEM(res_job.RescheduleInterval), 0, ITEM_DEFAULT, 60 * 30},
- {"rescheduletimes", store_pint32, ITEM(res_job.RescheduleTimes), 0, 0, 0},
- {"priority", store_pint32, ITEM(res_job.Priority), 0, ITEM_DEFAULT, 10},
- {"allowmixedpriority", store_bool, ITEM(res_job.allow_mixed_priority), 0, ITEM_DEFAULT, false},
- {"writepartafterjob", store_bool, ITEM(res_job.write_part_after_job), 0, ITEM_DEFAULT, true},
- {"selectionpattern", store_str, ITEM(res_job.selection_pattern), 0, 0, 0},
- {"runscript", store_runscript, ITEM(res_job.RunScripts), 0, ITEM_NO_EQUALS, 0},
- {"selectiontype", store_migtype, ITEM(res_job.selection_type), 0, 0, 0},
- {"accurate", store_bool, ITEM(res_job.accurate), 0,0,0},
- {"allowduplicatejobs", store_bool, ITEM(res_job.AllowDuplicateJobs), 0, ITEM_DEFAULT, false},
+ {"consolerunbeforejob", store_short_runscript, ITEM(res_job.RunScripts), 0, 0, 0},
+ {"consolerunafterjob", store_short_runscript, ITEM(res_job.RunScripts), 0, 0, 0},
+ {"Runscript", store_runscript, ITEM(res_job.RunScripts), 0, ITEM_NO_EQUALS, 0},
+ {"MaximumConcurrentJobs", store_pint32, ITEM(res_job.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
+ {"MaximumSpawnedJobs", store_pint32, ITEM(res_job.MaxSpawnedJobs), 0, ITEM_DEFAULT, 600},
+ {"RescheduleOnError", store_bool, ITEM(res_job.RescheduleOnError), 0, ITEM_DEFAULT, false},
+ {"RescheduleIncompleteJobs", store_bool, ITEM(res_job.RescheduleIncompleteJobs), 0, ITEM_DEFAULT, true},
+ {"RescheduleInterval", store_time, ITEM(res_job.RescheduleInterval), 0, ITEM_DEFAULT, 60 * 30},
+ {"RescheduleTimes", store_pint32, ITEM(res_job.RescheduleTimes), 0, 0, 0},
+ {"Priority", store_pint32, ITEM(res_job.Priority), 0, ITEM_DEFAULT, 10},
+ {"BackupsToKeep", store_pint32, ITEM(res_job.BackupsToKeep), 0, ITEM_DEFAULT, 0},
+ {"AllowMixedPriority", store_bool, ITEM(res_job.allow_mixed_priority), 0, ITEM_DEFAULT, false},
+ {"WritePartAfterJob", store_bool, ITEM(res_job.write_part_after_job), 0, ITEM_DEFAULT, true},
+ {"SelectionPattern", store_str, ITEM(res_job.selection_pattern), 0, 0, 0},
+ {"SelectionType", store_migtype, ITEM(res_job.selection_type), 0, 0, 0},
+ {"Accurate", store_bool, ITEM(res_job.accurate), 0,0,0},
+ {"AllowDuplicateJobs", store_bool, ITEM(res_job.AllowDuplicateJobs), 0, ITEM_DEFAULT, true},
{"allowhigherduplicates", store_bool, ITEM(res_job.AllowHigherDuplicates), 0, ITEM_DEFAULT, true},
- {"cancelqueuedduplicates", store_bool, ITEM(res_job.CancelQueuedDuplicates), 0, ITEM_DEFAULT, false},
- {"cancelrunningduplicates", store_bool, ITEM(res_job.CancelRunningDuplicates), 0, ITEM_DEFAULT, false},
- {"pluginoptions", store_str, ITEM(res_job.PluginOptions), 0, 0, 0},
- {"base", store_alist_res, ITEM(res_job.base), R_JOB, 0, 0},
+ {"CancelLowerLevelDuplicates", store_bool, ITEM(res_job.CancelLowerLevelDuplicates), 0, ITEM_DEFAULT, false},
+ {"CancelQueuedDuplicates", store_bool, ITEM(res_job.CancelQueuedDuplicates), 0, ITEM_DEFAULT, false},
+ {"CancelRunningDuplicates", store_bool, ITEM(res_job.CancelRunningDuplicates), 0, ITEM_DEFAULT, false},
+ {"DeleteConsolidatedJobs", store_bool, ITEM(res_job.DeleteConsolidatedJobs), 0, ITEM_DEFAULT, false},
+ {"PluginOptions", store_str, ITEM(res_job.PluginOptions), 0, 0, 0},
+ {"Base", store_alist_res, ITEM(res_job.base), R_JOB, 0, 0},
{NULL, NULL, {0}, 0, 0, 0}
};
-/* FileSet resource
+/* Fileset resource
*
- * name handler value code flags default_value
+ * Name handler value code flags default_value
*/
static RES_ITEM fs_items[] = {
- {"name", store_name, ITEM(res_fs.hdr.name), 0, ITEM_REQUIRED, 0},
- {"description", store_str, ITEM(res_fs.hdr.desc), 0, 0, 0},
- {"include", store_inc, {0}, 0, ITEM_NO_EQUALS, 0},
- {"exclude", store_inc, {0}, 1, ITEM_NO_EQUALS, 0},
- {"ignorefilesetchanges", store_bool, ITEM(res_fs.ignore_fs_changes), 0, ITEM_DEFAULT, false},
- {"enablevss", store_bool, ITEM(res_fs.enable_vss), 0, ITEM_DEFAULT, true},
+ {"Name", store_name, ITEM(res_fs.hdr.name), 0, ITEM_REQUIRED, 0},
+ {"Description", store_str, ITEM(res_fs.hdr.desc), 0, 0, 0},
+ {"IgnoreFilesetChanges", store_bool, ITEM(res_fs.ignore_fs_changes), 0, ITEM_DEFAULT, false},
+ {"EnableVss", store_bool, ITEM(res_fs.enable_vss), 0, ITEM_DEFAULT, true},
+ {"EnableSnapshot",store_bool, ITEM(res_fs.enable_snapshot), 0, ITEM_DEFAULT, false},
+ {"Include", store_inc, {0}, 0, ITEM_NO_EQUALS, 0},
+ {"Exclude", store_inc, {0}, 1, ITEM_NO_EQUALS, 0},
{NULL, NULL, {0}, 0, 0, 0}
};
* name handler value code flags default_value
*/
static RES_ITEM sch_items[] = {
- {"name", store_name, ITEM(res_sch.hdr.name), 0, ITEM_REQUIRED, 0},
- {"description", store_str, ITEM(res_sch.hdr.desc), 0, 0, 0},
- {"run", store_run, ITEM(res_sch.run), 0, 0, 0},
+ {"Name", store_name, ITEM(res_sch.hdr.name), 0, ITEM_REQUIRED, 0},
+ {"Description", store_str, ITEM(res_sch.hdr.desc), 0, 0, 0},
+ {"Run", store_run, ITEM(res_sch.run), 0, 0, 0},
+ {"Enabled", store_bool, ITEM(res_sch.Enabled), 0, ITEM_DEFAULT, true},
{NULL, NULL, {0}, 0, 0, 0}
};
* name handler value code flags default_value
*/
static RES_ITEM pool_items[] = {
- {"name", store_name, ITEM(res_pool.hdr.name), 0, ITEM_REQUIRED, 0},
- {"description", store_str, ITEM(res_pool.hdr.desc), 0, 0, 0},
- {"pooltype", store_strname, ITEM(res_pool.pool_type), 0, ITEM_REQUIRED, 0},
- {"labelformat", store_strname, ITEM(res_pool.label_format), 0, 0, 0},
- {"labeltype", store_label, ITEM(res_pool.LabelType), 0, 0, 0},
- {"cleaningprefix", store_strname, ITEM(res_pool.cleaning_prefix), 0, 0, 0},
- {"usecatalog", store_bool, ITEM(res_pool.use_catalog), 0, ITEM_DEFAULT, true},
- {"usevolumeonce", store_bool, ITEM(res_pool.use_volume_once), 0, 0, 0},
- {"purgeoldestvolume", store_bool, ITEM(res_pool.purge_oldest_volume), 0, 0, 0},
- {"recycleoldestvolume", store_bool, ITEM(res_pool.recycle_oldest_volume), 0, 0, 0},
- {"recyclecurrentvolume", store_bool, ITEM(res_pool.recycle_current_volume), 0, 0, 0},
- {"maximumvolumes", store_pint32, ITEM(res_pool.max_volumes), 0, 0, 0},
- {"maximumvolumejobs", store_pint32, ITEM(res_pool.MaxVolJobs), 0, 0, 0},
- {"maximumvolumefiles", store_pint32, ITEM(res_pool.MaxVolFiles), 0, 0, 0},
- {"maximumvolumebytes", store_size, ITEM(res_pool.MaxVolBytes), 0, 0, 0},
- {"catalogfiles", store_bool, ITEM(res_pool.catalog_files), 0, ITEM_DEFAULT, true},
- {"volumeretention", store_time, ITEM(res_pool.VolRetention), 0, ITEM_DEFAULT, 60*60*24*365},
- {"volumeuseduration", store_time, ITEM(res_pool.VolUseDuration), 0, 0, 0},
- {"migrationtime", store_time, ITEM(res_pool.MigrationTime), 0, 0, 0},
- {"migrationhighbytes", store_size, ITEM(res_pool.MigrationHighBytes), 0, 0, 0},
- {"migrationlowbytes", store_size, ITEM(res_pool.MigrationLowBytes), 0, 0, 0},
- {"nextpool", store_res, ITEM(res_pool.NextPool), R_POOL, 0, 0},
- {"storage", store_alist_res, ITEM(res_pool.storage), R_STORAGE, 0, 0},
- {"autoprune", store_bool, ITEM(res_pool.AutoPrune), 0, ITEM_DEFAULT, true},
- {"recycle", store_bool, ITEM(res_pool.Recycle), 0, ITEM_DEFAULT, true},
- {"recyclepool", store_res, ITEM(res_pool.RecyclePool), R_POOL, 0, 0},
- {"scratchpool", store_res, ITEM(res_pool.ScratchPool), R_POOL, 0, 0},
- {"copypool", store_alist_res, ITEM(res_pool.CopyPool), R_POOL, 0, 0},
- {"catalog", store_res, ITEM(res_pool.catalog), R_CATALOG, 0, 0},
+ {"Name", store_name, ITEM(res_pool.hdr.name), 0, ITEM_REQUIRED, 0},
+ {"Description", store_str, ITEM(res_pool.hdr.desc), 0, 0, 0},
+ {"PoolType", store_strname, ITEM(res_pool.pool_type), 0, ITEM_REQUIRED, 0},
+ {"LabelFormat", store_strname, ITEM(res_pool.label_format), 0, 0, 0},
+ {"LabelType", store_label, ITEM(res_pool.LabelType), 0, 0, 0},
+ {"CleaningPrefix", store_strname, ITEM(res_pool.cleaning_prefix), 0, 0, 0},
+ {"UseCatalog", store_bool, ITEM(res_pool.use_catalog), 0, ITEM_DEFAULT, true},
+ {"UseVolumeOnce", store_bool, ITEM(res_pool.use_volume_once), 0, 0, 0},
+ {"PurgeOldestVolume", store_bool, ITEM(res_pool.purge_oldest_volume), 0, 0, 0},
+ {"ActionOnPurge", store_actiononpurge, ITEM(res_pool.action_on_purge), 0, 0, 0},
+ {"RecycleOldestVolume", store_bool, ITEM(res_pool.recycle_oldest_volume), 0, 0, 0},
+ {"RecycleCurrentVolume", store_bool, ITEM(res_pool.recycle_current_volume), 0, 0, 0},
+ {"MaximumVolumes", store_pint32, ITEM(res_pool.max_volumes), 0, 0, 0},
+ {"MaximumVolumeJobs", store_pint32, ITEM(res_pool.MaxVolJobs), 0, 0, 0},
+ {"MaximumVolumeFiles", store_pint32, ITEM(res_pool.MaxVolFiles), 0, 0, 0},
+ {"MaximumVolumeBytes", store_size64, ITEM(res_pool.MaxVolBytes), 0, 0, 0},
+ {"CatalogFiles", store_bool, ITEM(res_pool.catalog_files), 0, ITEM_DEFAULT, true},
+ {"CacheRetention", store_time, ITEM(res_pool.CacheRetention), 0, 0, 0},
+ {"VolumeRetention", store_time, ITEM(res_pool.VolRetention), 0, ITEM_DEFAULT, 60*60*24*365},
+ {"VolumeUseDuration", store_time, ITEM(res_pool.VolUseDuration), 0, 0, 0},
+ {"MigrationTime", store_time, ITEM(res_pool.MigrationTime), 0, 0, 0},
+ {"MigrationHighBytes", store_size64, ITEM(res_pool.MigrationHighBytes), 0, 0, 0},
+ {"MigrationLowBytes", store_size64, ITEM(res_pool.MigrationLowBytes), 0, 0, 0},
+ {"NextPool", store_res, ITEM(res_pool.NextPool), R_POOL, 0, 0},
+ {"Storage", store_alist_res, ITEM(res_pool.storage), R_STORAGE, 0, 0},
+ {"AutoPrune", store_bool, ITEM(res_pool.AutoPrune), 0, ITEM_DEFAULT, true},
+ {"Recycle", store_bool, ITEM(res_pool.Recycle), 0, ITEM_DEFAULT, true},
+ {"RecyclePool", store_res, ITEM(res_pool.RecyclePool), R_POOL, 0, 0},
+ {"ScratchPool", store_res, ITEM(res_pool.ScratchPool), R_POOL, 0, 0},
+ {"CopyPool", store_alist_res, ITEM(res_pool.CopyPool), R_POOL, 0, 0},
+ {"Catalog", store_res, ITEM(res_pool.catalog), R_CATALOG, 0, 0},
+ {"FileRetention", store_time, ITEM(res_pool.FileRetention), 0, 0, 0},
+ {"JobRetention", store_time, ITEM(res_pool.JobRetention), 0, 0, 0},
+
{NULL, NULL, {0}, 0, 0, 0}
};
* name handler value code flags default_value
*/
static RES_ITEM counter_items[] = {
- {"name", store_name, ITEM(res_counter.hdr.name), 0, ITEM_REQUIRED, 0},
- {"description", store_str, ITEM(res_counter.hdr.desc), 0, 0, 0},
- {"minimum", store_int32, ITEM(res_counter.MinValue), 0, ITEM_DEFAULT, 0},
- {"maximum", store_pint32, ITEM(res_counter.MaxValue), 0, ITEM_DEFAULT, INT32_MAX},
- {"wrapcounter", store_res, ITEM(res_counter.WrapCounter), R_COUNTER, 0, 0},
- {"catalog", store_res, ITEM(res_counter.Catalog), R_CATALOG, 0, 0},
+ {"Name", store_name, ITEM(res_counter.hdr.name), 0, ITEM_REQUIRED, 0},
+ {"Description", store_str, ITEM(res_counter.hdr.desc), 0, 0, 0},
+ {"Minimum", store_int32, ITEM(res_counter.MinValue), 0, ITEM_DEFAULT, 0},
+ {"Maximum", store_pint32, ITEM(res_counter.MaxValue), 0, ITEM_DEFAULT, INT32_MAX},
+ {"WrapCounter", store_res, ITEM(res_counter.WrapCounter), R_COUNTER, 0, 0},
+ {"Catalog", store_res, ITEM(res_counter.Catalog), R_CATALOG, 0, 0},
{NULL, NULL, {0}, 0, 0, 0}
};
* NOTE!!! keep it in the same order as the R_codes
* or eliminate all resources[rindex].name
*
- * name items rcode res_head
+ * name items rcode
*/
RES_TABLE resources[] = {
- {"director", dir_items, R_DIRECTOR},
- {"client", cli_items, R_CLIENT},
- {"job", job_items, R_JOB},
- {"storage", store_items, R_STORAGE},
- {"catalog", cat_items, R_CATALOG},
- {"schedule", sch_items, R_SCHEDULE},
- {"fileset", fs_items, R_FILESET},
- {"pool", pool_items, R_POOL},
- {"messages", msgs_items, R_MSGS},
- {"counter", counter_items, R_COUNTER},
- {"console", con_items, R_CONSOLE},
- {"jobdefs", job_items, R_JOBDEFS},
- {"device", NULL, R_DEVICE}, /* info obtained from SD */
+ {"Director", dir_items, R_DIRECTOR},
+ {"Client", cli_items, R_CLIENT},
+ {"Job", job_items, R_JOB},
+ {"Storage", store_items, R_STORAGE},
+ {"Catalog", cat_items, R_CATALOG},
+ {"Schedule", sch_items, R_SCHEDULE},
+ {"Fileset", fs_items, R_FILESET},
+ {"Pool", pool_items, R_POOL},
+ {"Messages", msgs_items, R_MSGS},
+ {"Counter", counter_items, R_COUNTER},
+ {"Console", con_items, R_CONSOLE},
+ {"JobDefs", job_items, R_JOBDEFS},
+ {"Device", NULL, R_DEVICE}, /* info obtained from SD */
+ {"Autochanger", store_items, R_AUTOCHANGER}, /* alias for R_STORAGE */
{NULL, NULL, 0}
};
{"VolumeToCatalog", L_VERIFY_VOLUME_TO_CATALOG, JT_VERIFY},
{"DiskToCatalog", L_VERIFY_DISK_TO_CATALOG, JT_VERIFY},
{"Data", L_VERIFY_DATA, JT_VERIFY},
+ {"Full", L_FULL, JT_COPY},
+ {"Incremental", L_INCREMENTAL, JT_COPY},
+ {"Differential", L_DIFFERENTIAL, JT_COPY},
+ {"Full", L_FULL, JT_MIGRATE},
+ {"Incremental", L_INCREMENTAL, JT_MIGRATE},
+ {"Differential", L_DIFFERENTIAL, JT_MIGRATE},
{" ", L_NONE, JT_ADMIN},
{" ", L_NONE, JT_RESTORE},
{NULL, 0, 0}
};
+
/* Keywords (RHS) permitted in Job type records
*
* type_name job_type
*/
-struct s_jt jobtypes[] = {
- {"backup", JT_BACKUP},
- {"admin", JT_ADMIN},
- {"verify", JT_VERIFY},
- {"restore", JT_RESTORE},
- {"migrate", JT_MIGRATE},
- {"copy", JT_COPY},
+s_jt jobtypes[] = {
+ {"Backup", JT_BACKUP},
+ {"Admin", JT_ADMIN},
+ {"Verify", JT_VERIFY},
+ {"Restore", JT_RESTORE},
+ {"Migrate", JT_MIGRATE},
+ {"Copy", JT_COPY},
{NULL, 0}
};
*
* type_name job_type
*/
-struct s_jt migtypes[] = {
- {"smallestvolume", MT_SMALLEST_VOL},
- {"oldestvolume", MT_OLDEST_VOL},
- {"pooloccupancy", MT_POOL_OCCUPANCY},
- {"pooltime", MT_POOL_TIME},
- {"pooluncopiedjobs", MT_POOL_UNCOPIED_JOBS},
- {"client", MT_CLIENT},
- {"volume", MT_VOLUME},
- {"job", MT_JOB},
- {"sqlquery", MT_SQLQUERY},
+s_jt migtypes[] = {
+ {"SmallestVolume", MT_SMALLEST_VOL},
+ {"OldestVolume", MT_OLDEST_VOL},
+ {"PoolOccupancy", MT_POOL_OCCUPANCY},
+ {"PoolTime", MT_POOL_TIME},
+ {"PoolUncopiedJobs", MT_POOL_UNCOPIED_JOBS},
+ {"Client", MT_CLIENT},
+ {"Volume", MT_VOLUME},
+ {"Job", MT_JOB},
+ {"SqlQuery", MT_SQLQUERY},
{NULL, 0}
};
/* Options permitted in Restore replace= */
-struct s_kw ReplaceOptions[] = {
- {"always", REPLACE_ALWAYS},
- {"ifnewer", REPLACE_IFNEWER},
- {"ifolder", REPLACE_IFOLDER},
- {"never", REPLACE_NEVER},
+s_kw ReplaceOptions[] = {
+ {"Always", REPLACE_ALWAYS},
+ {"IfNewer", REPLACE_IFNEWER},
+ {"IfOlder", REPLACE_IFOLDER},
+ {"Never", REPLACE_NEVER},
{NULL, 0}
};
}
/* Dump contents of resource */
-void dump_resource(int type, RES *reshdr, void sendit(void *sock, const char *fmt, ...), void *sock)
+void dump_resource(int type, RES *ares, void sendit(void *sock, const char *fmt, ...), void *sock)
{
- URES *res = (URES *)reshdr;
+ RES *next;
+ URES *res = (URES *)ares;
bool recurse = true;
char ed1[100], ed2[100], ed3[100];
DEVICE *dev;
+ UAContext *ua = (UAContext *)sock;
if (res == NULL) {
sendit(sock, _("No %s resource defined\n"), res_to_str(type));
return;
}
if (type < 0) { /* no recursion */
- type = - type;
+ type = -type;
recurse = false;
}
switch (type) {
case R_DIRECTOR:
sendit(sock, _("Director: name=%s MaxJobs=%d FDtimeout=%s SDtimeout=%s\n"),
- reshdr->name, res->res_dir.MaxConcurrentJobs,
+ ares->name, res->res_dir.MaxConcurrentJobs,
edit_uint64(res->res_dir.FDConnectTimeout, ed1),
edit_uint64(res->res_dir.SDConnectTimeout, ed2));
if (res->res_dir.query_file) {
break;
case R_CLIENT:
- sendit(sock, _("Client: name=%s address=%s FDport=%d MaxJobs=%u\n"),
- res->res_client.hdr.name, res->res_client.address, res->res_client.FDport,
- res->res_client.MaxConcurrentJobs);
+ if (!acl_access_ok(ua, Client_ACL, res->res_client.name())) {
+ break;
+ }
+ sendit(sock, _("Client: Name=%s Enabled=%d Address=%s FDport=%d MaxJobs=%u NumJobs=%u\n"),
+ res->res_client.name(), res->res_client.is_enabled(),
+ res->res_client.address(), res->res_client.FDport,
+ res->res_client.MaxConcurrentJobs, res->res_client.getNumConcurrentJobs());
sendit(sock, _(" JobRetention=%s FileRetention=%s AutoPrune=%d\n"),
edit_utime(res->res_client.JobRetention, ed1, sizeof(ed1)),
edit_utime(res->res_client.FileRetention, ed2, sizeof(ed2)),
res->res_client.AutoPrune);
+ if (res->res_client.fd_storage_address) {
+ sendit(sock, " FDStorageAddress=%s\n", res->res_client.fd_storage_address);
+ }
+ if (res->res_client.max_bandwidth) {
+ sendit(sock, _(" MaximumBandwidth=%lld\n"),
+ res->res_client.max_bandwidth);
+ }
if (res->res_client.catalog) {
sendit(sock, _(" --> "));
dump_resource(-R_CATALOG, (RES *)res->res_client.catalog, sendit, sock);
dev->VolumeName, dev->MediaType);
break;
+ case R_AUTOCHANGER:
case R_STORAGE:
- sendit(sock, _("Storage: name=%s address=%s SDport=%d MaxJobs=%u\n"
-" DeviceName=%s MediaType=%s StorageId=%s\n"),
+ if (!acl_access_ok(ua, Storage_ACL, res->res_store.hdr.name)) {
+ break;
+ }
+ sendit(sock, _("%s: name=%s address=%s SDport=%d MaxJobs=%u NumJobs=%u\n"
+" DeviceName=%s MediaType=%s StorageId=%s Autochanger=%d\n"),
+ res->res_store.changer == &res->res_store ? "Autochanger" : "Storage",
res->res_store.hdr.name, res->res_store.address, res->res_store.SDport,
res->res_store.MaxConcurrentJobs,
+ res->res_store.getNumConcurrentJobs(),
res->res_store.dev_name(),
res->res_store.media_type,
- edit_int64(res->res_store.StorageId, ed1));
+ edit_int64(res->res_store.StorageId, ed1),
+ res->res_store.autochanger);
+ if (res->res_store.fd_storage_address) {
+ sendit(sock, " FDStorageAddress=%s\n", res->res_store.fd_storage_address);
+ }
+ if (res->res_store.ac_group) {
+ STORE *shstore = res->res_store.shared_storage;
+ sendit(sock, " AC group=%s ShareStore=%s\n", res->res_store.ac_group,
+ shstore?shstore->name():"*none*");
+ }
+ if (res->res_store.changer && res->res_store.changer != &res->res_store) {
+ sendit(sock, _(" Parent --> "));
+ dump_resource(-R_STORAGE, (RES *)res->res_store.changer, sendit, sock);
+ }
break;
case R_CATALOG:
+ if (!acl_access_ok(ua, Catalog_ACL, res->res_cat.hdr.name)) {
+ break;
+ }
sendit(sock, _("Catalog: name=%s address=%s DBport=%d db_name=%s\n"
" db_driver=%s db_user=%s MutliDBConn=%d\n"),
res->res_cat.hdr.name, NPRT(res->res_cat.db_address),
- res->res_cat.db_port, res->res_cat.db_name,
+ res->res_cat.db_port, res->res_cat.db_name,
NPRT(res->res_cat.db_driver), NPRT(res->res_cat.db_user),
res->res_cat.mult_db_connections);
break;
case R_JOB:
case R_JOBDEFS:
+ if (!acl_access_ok(ua, Job_ACL, res->res_job.hdr.name)) {
+ break;
+ }
sendit(sock, _("%s: name=%s JobType=%d level=%s Priority=%d Enabled=%d\n"),
type == R_JOB ? _("Job") : _("JobDefs"),
res->res_job.hdr.name, res->res_job.JobType,
level_to_str(res->res_job.JobLevel), res->res_job.Priority,
- res->res_job.enabled);
- sendit(sock, _(" MaxJobs=%u Resched=%d Times=%d Interval=%s Spool=%d WritePartAfterJob=%d\n"),
- res->res_job.MaxConcurrentJobs,
+ res->res_job.is_enabled());
+ sendit(sock, _(" MaxJobs=%u NumJobs=%u Resched=%d Times=%d Interval=%s Spool=%d WritePartAfterJob=%d\n"),
+ res->res_job.MaxConcurrentJobs,
+ res->res_job.getNumConcurrentJobs(),
res->res_job.RescheduleOnError, res->res_job.RescheduleTimes,
edit_uint64_with_commas(res->res_job.RescheduleInterval, ed1),
res->res_job.spool_data, res->res_job.write_part_after_job);
if (res->res_job.JobType == JT_BACKUP) {
sendit(sock, _(" Accurate=%d\n"), res->res_job.accurate);
}
+ if (res->res_job.max_bandwidth) {
+ sendit(sock, _(" MaximumBandwidth=%lld\n"),
+ res->res_job.max_bandwidth);
+ }
if (res->res_job.JobType == JT_MIGRATE || res->res_job.JobType == JT_COPY) {
sendit(sock, _(" SelectionType=%d\n"), res->res_job.selection_type);
}
+ if (res->res_job.JobType == JT_RESTORE) {
+ sendit(sock, _(" PrefixLinks=%d\n"), res->res_job.PrefixLinks);
+ }
if (res->res_job.client) {
sendit(sock, _(" --> "));
dump_resource(-R_CLIENT, (RES *)res->res_job.client, sendit, sock);
if (res->res_job.MaxStartDelay) {
sendit(sock, _(" --> MaxStartDelay=%u\n"), res->res_job.MaxStartDelay);
}
+ if (res->res_job.MaxRunSchedTime) {
+ sendit(sock, _(" --> MaxRunSchedTime=%u\n"), res->res_job.MaxRunSchedTime);
+ }
if (res->res_job.storage) {
STORE *store;
foreach_alist(store, res->res_job.storage) {
sendit(sock, _(" --> "));
dump_resource(-R_POOL, (RES *)res->res_job.pool, sendit, sock);
}
+ if (res->res_job.vfull_pool) {
+ sendit(sock, _(" --> VFullBackup"));
+ dump_resource(-R_POOL, (RES *)res->res_job.vfull_pool, sendit, sock);
+ }
if (res->res_job.full_pool) {
- sendit(sock, _(" --> "));
+ sendit(sock, _(" --> FullBackup"));
dump_resource(-R_POOL, (RES *)res->res_job.full_pool, sendit, sock);
}
if (res->res_job.inc_pool) {
- sendit(sock, _(" --> "));
+ sendit(sock, _(" --> IncrementalBackup"));
dump_resource(-R_POOL, (RES *)res->res_job.inc_pool, sendit, sock);
}
if (res->res_job.diff_pool) {
- sendit(sock, _(" --> "));
+ sendit(sock, _(" --> DifferentialBackup"));
dump_resource(-R_POOL, (RES *)res->res_job.diff_pool, sendit, sock);
}
- if (res->res_job.verify_job) {
- sendit(sock, _(" --> "));
- dump_resource(-type, (RES *)res->res_job.verify_job, sendit, sock);
+ if (res->res_job.next_pool) {
+ sendit(sock, _(" --> Next")); /* Pool will be added by dump_resource */
+ dump_resource(-R_POOL, (RES *)res->res_job.next_pool, sendit, sock);
+ }
+ if (res->res_job.JobType == JT_VERIFY && res->res_job.verify_job) {
+ sendit(sock, _(" --> JobToVerify %s"), (RES *)res->res_job.verify_job->name());
}
if (res->res_job.run_cmds) {
char *runcmd;
case R_FILESET:
{
int i, j, k;
- sendit(sock, _("FileSet: name=%s\n"), res->res_fs.hdr.name);
+ if (!acl_access_ok(ua, FileSet_ACL, res->res_fs.hdr.name)) {
+ break;
+ }
+ sendit(sock, _("FileSet: name=%s IgnoreFileSetChanges=%d\n"), res->res_fs.hdr.name, res->res_fs.ignore_fs_changes);
for (i=0; i<res->res_fs.num_includes; i++) {
INCEXE *incexe = res->res_fs.include_items[i];
for (j=0; j<incexe->num_opts; j++) {
}
sendit(sock, " N\n");
}
+ if (incexe->ignoredir) {
+ sendit(sock, " Z %s\n", incexe->ignoredir);
+ }
for (j=0; j<incexe->name_list.size(); j++) {
sendit(sock, " I %s\n", incexe->name_list.get(j));
}
if (incexe->plugin_list.size()) {
sendit(sock, " N\n");
}
-
- }
+ } /* end for over includes */
for (i=0; i<res->res_fs.num_excludes; i++) {
INCEXE *incexe = res->res_fs.exclude_items[i];
}
}
break;
- }
+ } /* end case R_FILESET */
case R_SCHEDULE:
+ if (!acl_access_ok(ua, Schedule_ACL, res->res_sch.hdr.name)) {
+ break;
+ }
+
if (res->res_sch.run) {
int i;
RUN *run = res->res_sch.run;
char buf[1000], num[30];
- sendit(sock, _("Schedule: name=%s\n"), res->res_sch.hdr.name);
+ sendit(sock, _("Schedule: Name=%s Enabled=%d\n"),
+ res->res_sch.hdr.name, res->res_sch.is_enabled());
if (!run) {
break;
}
next_run:
sendit(sock, _(" --> Run Level=%s\n"), level_to_str(run->level));
+ if (run->MaxRunSchedTime) {
+ sendit(sock, _(" MaxRunSchedTime=%u\n"), run->MaxRunSchedTime);
+ }
+ if (run->Priority) {
+ sendit(sock, _(" Priority=%u\n"), run->Priority);
+ }
bstrncpy(buf, _(" hour="), sizeof(buf));
for (i=0; i<24; i++) {
if (bit_is_set(i, run->hour)) {
bstrncat(buf, "\n", sizeof(buf));
sendit(sock, buf);
bstrncpy(buf, _(" mday="), sizeof(buf));
- for (i=0; i<31; i++) {
+ for (i=0; i<32; i++) {
if (bit_is_set(i, run->mday)) {
bsnprintf(num, sizeof(num), "%d ", i);
bstrncat(buf, num, sizeof(buf));
bstrncat(buf, "\n", sizeof(buf));
sendit(sock, buf);
bstrncpy(buf, _(" wom="), sizeof(buf));
- for (i=0; i<5; i++) {
+ for (i=0; i<6; i++) {
if (bit_is_set(i, run->wom)) {
bsnprintf(num, sizeof(num), "%d ", i);
bstrncat(buf, num, sizeof(buf));
sendit(sock, _(" --> "));
dump_resource(-R_POOL, (RES *)run->pool, sendit, sock);
}
+ if (run->next_pool) {
+ sendit(sock, _(" --> Next")); /* Pool will be added by dump_resource */
+ dump_resource(-R_POOL, (RES *)run->next_pool, sendit, sock);
+ }
if (run->storage) {
sendit(sock, _(" --> "));
dump_resource(-R_STORAGE, (RES *)run->storage, sendit, sock);
break;
case R_POOL:
+ if (!acl_access_ok(ua, Pool_ACL, res->res_pool.hdr.name)) {
+ break;
+ }
sendit(sock, _("Pool: name=%s PoolType=%s\n"), res->res_pool.hdr.name,
res->res_pool.pool_type);
sendit(sock, _(" use_cat=%d use_once=%d cat_files=%d\n"),
NPRT(res->res_pool.label_format));
sendit(sock, _(" CleaningPrefix=%s LabelType=%d\n"),
NPRT(res->res_pool.cleaning_prefix), res->res_pool.LabelType);
- sendit(sock, _(" RecyleOldest=%d PurgeOldest=%d\n"),
+ sendit(sock, _(" RecyleOldest=%d PurgeOldest=%d ActionOnPurge=%d\n"),
res->res_pool.recycle_oldest_volume,
- res->res_pool.purge_oldest_volume);
+ res->res_pool.purge_oldest_volume,
+ res->res_pool.action_on_purge);
sendit(sock, _(" MaxVolJobs=%d MaxVolFiles=%d MaxVolBytes=%s\n"),
- res->res_pool.MaxVolJobs,
+ res->res_pool.MaxVolJobs,
res->res_pool.MaxVolFiles,
edit_uint64(res->res_pool.MaxVolBytes, ed1));
sendit(sock, _(" MigTime=%s MigHiBytes=%s MigLoBytes=%s\n"),
edit_utime(res->res_pool.MigrationTime, ed1, sizeof(ed1)),
edit_uint64(res->res_pool.MigrationHighBytes, ed2),
edit_uint64(res->res_pool.MigrationLowBytes, ed3));
+ sendit(sock, _(" CacheRetention=%s\n"),
+ edit_utime(res->res_pool.CacheRetention, ed1, sizeof(ed1)));
+ sendit(sock, _(" JobRetention=%s FileRetention=%s\n"),
+ edit_utime(res->res_pool.JobRetention, ed1, sizeof(ed1)),
+ edit_utime(res->res_pool.FileRetention, ed2, sizeof(ed2)));
if (res->res_pool.NextPool) {
sendit(sock, _(" NextPool=%s\n"), res->res_pool.NextPool->name());
}
sendit(sock, _("Unknown resource type %d in dump_resource.\n"), type);
break;
}
- if (recurse && res->res_dir.hdr.next) {
- dump_resource(type, res->res_dir.hdr.next, sendit, sock);
+ if (recurse) {
+ next = GetNextRes(0, (RES *)res);
+ if (next) {
+ dump_resource(type, next, sendit, sock);
+ }
}
}
if (incexe->opts_list) {
free(incexe->opts_list);
}
+ if (incexe->ignoredir) {
+ free(incexe->ignoredir);
+ }
free(incexe);
}
+
/*
* Free memory of resource -- called when daemon terminates.
* NB, we don't need to worry about freeing any references
* resource chain is traversed. Mainly we worry about freeing
* allocated strings (names).
*/
-void free_resource(RES *sres, int type)
+void free_resource(RES *rres, int type)
{
int num;
- RES *nres; /* next resource if linked */
- URES *res = (URES *)sres;
+ URES *res = (URES *)rres;
- if (res == NULL)
+ if (res == NULL) {
return;
+ }
+ Dmsg3(200, "type=%d res=%p name=%s\n", type, res, res->res_dir.hdr.name);
/* common stuff -- free the resource name and description */
- nres = (RES *)res->res_dir.hdr.next;
if (res->res_dir.hdr.name) {
free(res->res_dir.hdr.name);
}
if (res->res_dir.DIRsrc_addr) {
free_addresses(res->res_dir.DIRsrc_addr);
}
- if (res->res_dir.tls_ctx) {
+ if (res->res_dir.tls_ctx) {
free_tls_context(res->res_dir.tls_ctx);
}
if (res->res_dir.tls_ca_certfile) {
if (res->res_con.password) {
free(res->res_con.password);
}
- if (res->res_con.tls_ctx) {
+ if (res->res_con.tls_ctx) {
free_tls_context(res->res_con.tls_ctx);
}
if (res->res_con.tls_ca_certfile) {
}
break;
case R_CLIENT:
- if (res->res_client.address) {
- free(res->res_client.address);
+ if (res->res_client.client_address) {
+ free(res->res_client.client_address);
+ }
+ if (res->res_client.fd_storage_address) {
+ free(res->res_client.fd_storage_address);
}
if (res->res_client.password) {
free(res->res_client.password);
}
- if (res->res_client.tls_ctx) {
+ if (res->res_client.tls_ctx) {
free_tls_context(res->res_client.tls_ctx);
}
if (res->res_client.tls_ca_certfile) {
delete res->res_client.tls_allowed_cns;
}
break;
+ case R_AUTOCHANGER:
case R_STORAGE:
if (res->res_store.address) {
free(res->res_store.address);
}
+ if (res->res_store.fd_storage_address) {
+ free(res->res_store.fd_storage_address);
+ }
if (res->res_store.password) {
free(res->res_store.password);
}
if (res->res_store.media_type) {
free(res->res_store.media_type);
}
+ if (res->res_store.ac_group) {
+ free_pool_memory(res->res_store.ac_group);
+ }
if (res->res_store.device) {
delete res->res_store.device;
}
- if (res->res_store.tls_ctx) {
+ if (res->res_store.tls_ctx) {
free_tls_context(res->res_store.tls_ctx);
}
if (res->res_store.tls_ca_certfile) {
if (res->res_cat.db_password) {
free(res->res_cat.db_password);
}
+ if (res->res_cat.db_ssl_mode) {
+ free(res->res_cat.db_ssl_mode);
+ }
+ if (res->res_cat.db_ssl_key) {
+ free(res->res_cat.db_ssl_key);
+ }
+ if (res->res_cat.db_ssl_cert) {
+ free(res->res_cat.db_ssl_cert);
+ }
+ if (res->res_cat.db_ssl_ca) {
+ free(res->res_cat.db_ssl_ca);
+ }
+ if (res->res_cat.db_ssl_capath) {
+ free(res->res_cat.db_ssl_capath);
+ }
+ if (res->res_cat.db_ssl_cipher) {
+ free(res->res_cat.db_ssl_cipher);
+ }
break;
case R_FILESET:
if ((num=res->res_fs.num_includes)) {
if (res) {
free(res);
}
- if (nres) {
- free_resource(nres, type);
- }
}
/*
* pointers because they may not have been defined until
* later in pass 1.
*/
-void save_resource(int type, RES_ITEM *items, int pass)
+bool save_resource(CONFIG *config, int type, RES_ITEM *items, int pass)
{
URES *res;
int rindex = type - r_first;
for (i=0; items[i].name; i++) {
if (items[i].flags & ITEM_REQUIRED) {
if (!bit_is_set(i, res_all.res_dir.hdr.item_present)) {
- Emsg2(M_ERROR_TERM, 0, _("%s item is required in %s resource, but not found.\n"),
- items[i].name, resources[rindex]);
+ Mmsg(config->m_errmsg, _("\"%s\" directive is required in \"%s\" resource, but not found.\n"),
+ items[i].name, resources[rindex].name);
+ return false;
}
}
/* If this triggers, take a look at lib/parse_conf.h */
if (i >= MAX_RES_ITEMS) {
- Emsg1(M_ERROR_TERM, 0, _("Too many items in %s resource\n"), resources[rindex]);
+ Mmsg(config->m_errmsg, _("Too many directives in \"%s\" resource\n"), resources[rindex].name);
+ return false;
}
}
} else if (type == R_JOB) {
*/
if (items[0].flags & ITEM_REQUIRED) {
if (!bit_is_set(0, res_all.res_dir.hdr.item_present)) {
- Emsg2(M_ERROR_TERM, 0, _("%s item is required in %s resource, but not found.\n"),
- items[0].name, resources[rindex]);
+ Mmsg(config->m_errmsg, _("\"%s\" directive is required in \"%s\" resource, but not found.\n"),
+ items[0].name, resources[rindex].name);
+ return false;
}
}
}
case R_POOL:
/* Find resource saved in pass 1 */
if ((res = (URES *)GetResWithName(R_POOL, res_all.res_con.hdr.name)) == NULL) {
- Emsg1(M_ERROR_TERM, 0, _("Cannot find Pool resource %s\n"), res_all.res_con.hdr.name);
+ Mmsg(config->m_errmsg, _("Cannot find Pool resource %s\n"), res_all.res_con.hdr.name);
+ return false;
}
/* Explicitly copy resource pointers from this pass (res_all) */
res->res_pool.NextPool = res_all.res_pool.NextPool;
break;
case R_CONSOLE:
if ((res = (URES *)GetResWithName(R_CONSOLE, res_all.res_con.hdr.name)) == NULL) {
- Emsg1(M_ERROR_TERM, 0, _("Cannot find Console resource %s\n"), res_all.res_con.hdr.name);
+ Mmsg(config->m_errmsg, _("Cannot find Console resource %s\n"), res_all.res_con.hdr.name);
+ return false;
}
res->res_con.tls_allowed_cns = res_all.res_con.tls_allowed_cns;
break;
case R_DIRECTOR:
if ((res = (URES *)GetResWithName(R_DIRECTOR, res_all.res_dir.hdr.name)) == NULL) {
- Emsg1(M_ERROR_TERM, 0, _("Cannot find Director resource %s\n"), res_all.res_dir.hdr.name);
+ Mmsg(config->m_errmsg, _("Cannot find Director resource %s\n"), res_all.res_dir.hdr.name);
+ return false;
}
res->res_dir.messages = res_all.res_dir.messages;
res->res_dir.tls_allowed_cns = res_all.res_dir.tls_allowed_cns;
break;
+ case R_AUTOCHANGER: /* alias for R_STORAGE */
case R_STORAGE:
+ type = R_STORAGE; /* force Storage type */
if ((res = (URES *)GetResWithName(type, res_all.res_store.hdr.name)) == NULL) {
- Emsg1(M_ERROR_TERM, 0, _("Cannot find Storage resource %s\n"),
- res_all.res_dir.hdr.name);
+ Mmsg(config->m_errmsg, _("Cannot find Storage resource %s\n"),
+ res_all.res_dir.hdr.name);
+ return false;
}
/* we must explicitly copy the device alist pointer */
res->res_store.device = res_all.res_store.device;
+ res->res_store.changer = res_all.res_store.changer;
+ res->res_store.shared_storage = res_all.res_store.shared_storage;
+ res->res_store.autochanger = res_all.res_store.autochanger;
+ if (strcasecmp(resources[rindex].name, "autochanger") == 0) {
+ res->res_store.changer = &res->res_store;
+ res->res_store.autochanger = true;
+ }
break;
case R_JOB:
case R_JOBDEFS:
if ((res = (URES *)GetResWithName(type, res_all.res_dir.hdr.name)) == NULL) {
- Emsg1(M_ERROR_TERM, 0, _("Cannot find Job resource %s\n"),
- res_all.res_dir.hdr.name);
+ Mmsg(config->m_errmsg, _("Cannot find Job resource %s\n"),
+ res_all.res_dir.hdr.name);
+ return false;
}
res->res_job.messages = res_all.res_job.messages;
res->res_job.schedule = res_all.res_job.schedule;
res->res_job.storage = res_all.res_job.storage;
res->res_job.base = res_all.res_job.base;
res->res_job.pool = res_all.res_job.pool;
+ res->res_job.next_pool = res_all.res_job.next_pool;
res->res_job.full_pool = res_all.res_job.full_pool;
+ res->res_job.vfull_pool = res_all.res_job.vfull_pool;
res->res_job.inc_pool = res_all.res_job.inc_pool;
res->res_job.diff_pool = res_all.res_job.diff_pool;
res->res_job.verify_job = res_all.res_job.verify_job;
res->res_job.RunScripts = res_all.res_job.RunScripts;
/* TODO: JobDefs where/regexwhere doesn't work well (but this
- * is not very useful)
+ * is not very useful)
* We have to set_bit(index, res_all.hdr.item_present);
* or something like that
*/
/* we take RegexWhere before all other options */
- if (!res->res_job.RegexWhere
+ if (!res->res_job.RegexWhere
&&
(res->res_job.strip_prefix ||
res->res_job.add_suffix ||
break;
case R_COUNTER:
if ((res = (URES *)GetResWithName(R_COUNTER, res_all.res_counter.hdr.name)) == NULL) {
- Emsg1(M_ERROR_TERM, 0, _("Cannot find Counter resource %s\n"), res_all.res_counter.hdr.name);
+ Mmsg(config->m_errmsg, _("Cannot find Counter resource %s\n"), res_all.res_counter.hdr.name);
+ return false;
}
res->res_counter.Catalog = res_all.res_counter.Catalog;
res->res_counter.WrapCounter = res_all.res_counter.WrapCounter;
break;
case R_CLIENT:
- if ((res = (URES *)GetResWithName(R_CLIENT, res_all.res_client.hdr.name)) == NULL) {
- Emsg1(M_ERROR_TERM, 0, _("Cannot find Client resource %s\n"), res_all.res_client.hdr.name);
+ if ((res = (URES *)GetResWithName(R_CLIENT, res_all.res_client.name())) == NULL) {
+ Mmsg(config->m_errmsg, _("Cannot find Client resource %s\n"), res_all.res_client.name());
+ return false;
}
res->res_client.catalog = res_all.res_client.catalog;
res->res_client.tls_allowed_cns = res_all.res_client.tls_allowed_cns;
* in by run_conf.c during pass 2, so here we jam the pointer
* into the Schedule resource.
*/
- if ((res = (URES *)GetResWithName(R_SCHEDULE, res_all.res_client.hdr.name)) == NULL) {
- Emsg1(M_ERROR_TERM, 0, _("Cannot find Schedule resource %s\n"), res_all.res_client.hdr.name);
+ if ((res = (URES *)GetResWithName(R_SCHEDULE, res_all.res_client.name())) == NULL) {
+ Mmsg(config->m_errmsg, _("Cannot find Schedule resource %s\n"), res_all.res_client.name());
+ return false;
}
res->res_sch.run = res_all.res_sch.run;
break;
free(res_all.res_dir.hdr.desc);
res_all.res_dir.hdr.desc = NULL;
}
- return;
+ return true;
+ }
+
+ /* R_AUTOCHANGER is alias so turn it into an R_STORAGE */
+ if (type == R_AUTOCHANGER) {
+ type = R_STORAGE;
+ rindex = type - r_first;
}
/*
break;
default:
printf(_("Unknown resource type %d in save_resource.\n"), type);
- error = true;
+ error = true;
break;
}
/* Common */
if (!error) {
- res = (URES *)malloc(size);
- memcpy(res, &res_all, size);
- if (!res_head[rindex]) {
- res_head[rindex] = (RES *)res; /* store first entry */
- Dmsg3(900, "Inserting first %s res: %s index=%d\n", res_to_str(type),
- res->res_dir.hdr.name, rindex);
- } else {
- RES *next, *last;
- if (res->res_dir.hdr.name == NULL) {
- Emsg1(M_ERROR_TERM, 0, _("Name item is required in %s resource, but not found.\n"),
- resources[rindex]);
- }
- /* Add new res to end of chain */
- for (last=next=res_head[rindex]; next; next=next->next) {
- last = next;
- if (strcmp(next->name, res->res_dir.hdr.name) == 0) {
- Emsg2(M_ERROR_TERM, 0,
- _("Attempt to define second %s resource named \"%s\" is not permitted.\n"),
- resources[rindex].name, res->res_dir.hdr.name);
- }
- }
- last->next = (RES *)res;
- Dmsg4(900, _("Inserting %s res: %s index=%d pass=%d\n"), res_to_str(type),
- res->res_dir.hdr.name, rindex, pass);
+ if (!config->insert_res(rindex, size)) {
+ return false;
}
}
+ return true;
+}
+
+/*
+ * Store an ActionOnPurge directive value.
+ * Only the "Truncate" keyword is accepted; it ORs the
+ * ON_PURGE_TRUNCATE flag into the uint32_t that item->value
+ * points to, then marks the directive as present.
+ */
+void store_actiononpurge(LEX *lc, RES_ITEM *item, int index, int pass)
+{
+   uint32_t *destination = (uint32_t*)item->value;
+   lex_get_token(lc, T_NAME);
+   if (strcasecmp(lc->str, "truncate") == 0) {
+      /* OR the flag in so any other action bits are preserved */
+      *destination = (*destination) | ON_PURGE_TRUNCATE;
+   } else {
+      scan_err2(lc, _("Expected one of: %s, got: %s"), "Truncate", lc->str);
+      return;
+   }
+   scan_to_eol(lc);
+   set_bit(index, res_all.hdr.item_present);
}
+/*
+ * Store an autochanger (Storage) resource reference. Used by the
+ * Autochanger and SharedStorage directives.
+ * item->value receives the looked-up RES pointer; the following
+ * RES_ITEM (item + 1) is the companion bool flag that records
+ * whether the reference was set.
+ */
+void store_ac_res(LEX *lc, RES_ITEM *item, int index, int pass)
+{
+   RES *res;
+   RES_ITEM *next = item + 1;      /* companion bool flag item */
+
+   lex_get_token(lc, T_NAME);
+   Dmsg1(100, "Got name=%s\n", lc->str);
+   /*
+    * For backward compatibility, "Autochanger = yes|no" is still
+    * accepted: in that case only the bool in the next item is set
+    * and no Storage resource is looked up.
+    */
+   if (strcasecmp(item->name, "autochanger") == 0) {
+      if (strcasecmp(lc->str, "yes") == 0 || strcasecmp(lc->str, "true") == 0) {
+         *(bool *)(next->value) = true;
+         Dmsg2(100, "Item=%s got value=%s\n", item->name, lc->str);
+         scan_to_eol(lc);
+         return;
+      } else if (strcasecmp(lc->str, "no") == 0 || strcasecmp(lc->str, "false") == 0) {
+         *(bool *)(next->value) = false;
+         Dmsg2(100, "Item=%s got value=%s\n", item->name, lc->str);
+         scan_to_eol(lc);
+         return;
+      }
+   }
+   Dmsg2(100, "Item=%s got value=%s\n", item->name, lc->str);
+
+   /* Resource pointers can only be resolved in pass 2, after all
+    * resources have been created in pass 1. */
+   if (pass == 2) {
+      res = GetResWithName(R_STORAGE, lc->str);
+      if (res == NULL) {
+         scan_err3(lc, _("Could not find Storage Resource %s referenced on line %d : %s\n"),
+            lc->str, lc->line_no, lc->line);
+         return;
+      }
+      if (*(item->value)) {
+         scan_err3(lc, _("Attempt to redefine Storage resource \"%s\" referenced on line %d : %s\n"),
+            item->name, lc->line_no, lc->line);
+         return;
+      }
+      Dmsg2(100, "Store %s value=%p\n", lc->str, res);
+      *(item->value) = (char *)res;
+      *(bool *)(next->value) = true;
+   }
+   scan_to_eol(lc);
+   set_bit(index, res_all.hdr.item_present);
+}
+
+
/*
* Store Device. Note, the resource is created upon the
* first reference. The details of the resource are obtained
* later from the SD.
*/
-static void store_device(LEX *lc, RES_ITEM *item, int index, int pass)
+void store_device(LEX *lc, RES_ITEM *item, int index, int pass)
{
- int token;
- URES *res;
int rindex = R_DEVICE - r_first;
int size = sizeof(DEVICE);
- bool found = false;
if (pass == 1) {
- token = lex_get_token(lc, T_NAME);
- if (!res_head[rindex]) {
- res = (URES *)malloc(size);
- memset(res, 0, size);
- res->res_dev.hdr.name = bstrdup(lc->str);
- res_head[rindex] = (RES *)res; /* store first entry */
- Dmsg3(900, "Inserting first %s res: %s index=%d\n", res_to_str(R_DEVICE),
- res->res_dir.hdr.name, rindex);
+ URES *ures;
+ RES *res;
+
+ lex_get_token(lc, T_NAME);
+ rblist *list = res_head[rindex]->res_list;
+ ures = (URES *)malloc(size);
+ memset(ures, 0, size);
+ ures->res_dev.hdr.name = bstrdup(lc->str);
+ res = (RES *)ures;
+ if (list->empty()) {
+ list->insert(res, res_compare);
+ res_head[rindex]->first = res;
+ res_head[rindex]->last = res;
} else {
- RES *next;
- /* See if it is already defined */
- for (next=res_head[rindex]; next->next; next=next->next) {
- if (strcmp(next->name, lc->str) == 0) {
- found = true;
- break;
- }
- }
- if (!found) {
- res = (URES *)malloc(size);
- memset(res, 0, size);
- res->res_dev.hdr.name = bstrdup(lc->str);
- next->next = (RES *)res;
- Dmsg4(900, "Inserting %s res: %s index=%d pass=%d\n", res_to_str(R_DEVICE),
- res->res_dir.hdr.name, rindex, pass);
+ RES *item, *prev;
+ prev = res_head[rindex]->last;
+ item = (RES *)list->insert(res, res_compare);
+ if (item == res) {
+ prev->res_next = res;
+ res_head[rindex]->last = res;
+ } else {
+ /* res not inserted */
+ free(ures->res_dev.hdr.name);
+ free(ures);
}
}
-
scan_to_eol(lc);
set_bit(index, res_all.hdr.item_present);
} else {
*/
void store_migtype(LEX *lc, RES_ITEM *item, int index, int pass)
{
- int token, i;
+ int i;
- token = lex_get_token(lc, T_NAME);
+ lex_get_token(lc, T_NAME);
/* Store the type both pass 1 and pass 2 */
for (i=0; migtypes[i].type_name; i++) {
if (strcasecmp(lc->str, migtypes[i].type_name) == 0) {
*/
void store_jobtype(LEX *lc, RES_ITEM *item, int index, int pass)
{
- int token, i;
+ int i;
- token = lex_get_token(lc, T_NAME);
+ lex_get_token(lc, T_NAME);
/* Store the type both pass 1 and pass 2 */
for (i=0; jobtypes[i].type_name; i++) {
if (strcasecmp(lc->str, jobtypes[i].type_name) == 0) {
*/
void store_level(LEX *lc, RES_ITEM *item, int index, int pass)
{
- int token, i;
+ int i;
- token = lex_get_token(lc, T_NAME);
+ lex_get_token(lc, T_NAME);
/* Store the level pass 2 so that type is defined */
for (i=0; joblevels[i].level_name; i++) {
if (strcasecmp(lc->str, joblevels[i].level_name) == 0) {
void store_replace(LEX *lc, RES_ITEM *item, int index, int pass)
{
- int token, i;
- token = lex_get_token(lc, T_NAME);
+ int i;
+ lex_get_token(lc, T_NAME);
/* Scan Replacement options */
for (i=0; ReplaceOptions[i].name; i++) {
if (strcasecmp(lc->str, ReplaceOptions[i].name) == 0) {
int token;
for (;;) {
- token = lex_get_token(lc, T_STRING);
+ lex_get_token(lc, T_STRING);
if (pass == 1) {
if (((alist **)item->value)[item->code] == NULL) {
((alist **)item->value)[item->code] = New(alist(10, owned_by_alist));
*(uint32_t *)(item->value) = SCRIPT_After;
} else if (strcasecmp(lc->str, "aftervss") == 0) {
*(uint32_t *)(item->value) = SCRIPT_AfterVSS;
+ } else if (strcasecmp(lc->str, "aftersnapshot") == 0) {
+ *(uint32_t *)(item->value) = SCRIPT_AfterVSS;
} else if (strcasecmp(lc->str, "always") == 0) {
*(uint32_t *)(item->value) = SCRIPT_Any;
} else {
}
/* Store a runscript->target
- *
+ *
*/
static void store_runscript_target(LEX *lc, RES_ITEM *item, int index, int pass)
{
/* Each runscript command takes 2 entries in commands list */
pm_strcpy(c, lc->str);
((RUNSCRIPT*) item->value)->commands->prepend(c); /* command line */
- ((RUNSCRIPT*) item->value)->commands->prepend((void *)item->code); /* command type */
+ ((RUNSCRIPT*) item->value)->commands->prepend((void *)(intptr_t)item->code); /* command type */
}
scan_to_eol(lc);
}
if (pass == 2) {
RUNSCRIPT *script = new_runscript();
- script->set_job_code_callback(job_code_callback_filesetname);
+ script->set_job_code_callback(job_code_callback_director);
script->set_command(lc->str);
/* TODO: remove all script->old_proto with bacula 1.42 */
- if (strcmp(item->name, "runbeforejob") == 0) {
+ if (strcasecmp(item->name, "runbeforejob") == 0) {
script->when = SCRIPT_Before;
script->fail_on_error = true;
script->set_target("");
- } else if (strcmp(item->name, "runafterjob") == 0) {
+ } else if (strcasecmp(item->name, "runafterjob") == 0) {
script->when = SCRIPT_After;
script->on_success = true;
script->on_failure = false;
script->set_target("");
-
- } else if (strcmp(item->name, "clientrunafterjob") == 0) {
+
+ } else if (strcasecmp(item->name, "clientrunbeforejob") == 0) {
+ script->old_proto = true;
+ script->when = SCRIPT_Before;
+ script->set_target("%c");
+ script->fail_on_error = true;
+
+ } else if (strcasecmp(item->name, "clientrunafterjob") == 0) {
script->old_proto = true;
script->when = SCRIPT_After;
script->set_target("%c");
script->on_success = true;
script->on_failure = false;
- } else if (strcmp(item->name, "clientrunbeforejob") == 0) {
- script->old_proto = true;
+ } else if (strcasecmp(item->name, "consolerunbeforejob") == 0) {
script->when = SCRIPT_Before;
- script->set_target("%c");
+ script->set_target("");
script->fail_on_error = true;
+ script->set_command(NPRT(script->command), CONSOLE_CMD);
- } else if (strcmp(item->name, "runafterfailedjob") == 0) {
+ } else if (strcasecmp(item->name, "consolerunafterjob") == 0) {
+ script->when = SCRIPT_After;
+ script->set_target("");
+ script->on_success = true;
+ script->on_failure = false;
+ script->set_command(NPRT(script->command), CONSOLE_CMD);
+
+ } else if (strcasecmp(item->name, "runafterfailedjob") == 0) {
script->when = SCRIPT_After;
script->on_failure = true;
script->on_success = false;
if (*runscripts == NULL) {
*runscripts = New(alist(10, not_owned_by_alist));
}
-
+
(*runscripts)->append(script);
script->debug();
}
-
scan_to_eol(lc);
+ set_bit(index, res_all.hdr.item_present);
}
-/* Store a bool in a bit field without modifing res_all.hdr
+/* Store a bool in a bit field without modifying res_all.hdr
* We can also add an option to store_bool to skip res_all.hdr
*/
void store_runscript_bool(LEX *lc, RES_ITEM *item, int index, int pass)
* name handler value code flags default_value
*/
static RES_ITEM runscript_items[] = {
- {"command", store_runscript_cmd, {(char **)&res_runscript}, SHELL_CMD, 0, 0},
- {"console", store_runscript_cmd, {(char **)&res_runscript}, CONSOLE_CMD, 0, 0},
- {"target", store_runscript_target,{(char **)&res_runscript}, 0, 0, 0},
+ {"command", store_runscript_cmd, {(char **)&res_runscript}, SHELL_CMD, 0, 0},
+ {"console", store_runscript_cmd, {(char **)&res_runscript}, CONSOLE_CMD, 0, 0},
+ {"target", store_runscript_target,{(char **)&res_runscript}, 0, 0, 0},
{"runsonsuccess", store_runscript_bool, {(char **)&res_runscript.on_success},0, 0, 0},
{"runsonfailure", store_runscript_bool, {(char **)&res_runscript.on_failure},0, 0, 0},
{"failjobonerror",store_runscript_bool, {(char **)&res_runscript.fail_on_error},0, 0, 0},
* resource. We treat the RunScript like a sort of
* mini-resource within the Job resource.
*/
-static void store_runscript(LEX *lc, RES_ITEM *item, int index, int pass)
+void store_runscript(LEX *lc, RES_ITEM *item, int index, int pass)
{
char *c;
int token, i, t;
Dmsg1(200, "store_runscript: begin store_runscript pass=%i\n", pass);
token = lex_get_token(lc, T_SKIP_EOL);
-
+
if (token != T_BOB) {
scan_err1(lc, _("Expecting open brace. Got %s"), lc->str);
}
/* setting on_success, on_failure, fail_on_error */
- res_runscript.reset_default();
+ res_runscript.reset_default();
if (pass == 2) {
res_runscript.commands = New(alist(10, not_owned_by_alist));
if (token != T_EQUALS) {
scan_err1(lc, _("expected an equals, got: %s"), lc->str);
}
-
+
/* Call item handler */
runscript_items[i].handler(lc, &runscript_items[i], i, pass);
i = -1;
break;
}
}
-
+
if (i >=0) {
scan_err1(lc, _("Keyword %s not permitted in this resource"), lc->str);
}
}
/*
* commands list contains 2 values per command
- * - POOLMEM command string (ex: /bin/true)
+ * - POOLMEM command string (ex: /bin/true)
* - int command type (ex: SHELL_CMD)
*/
- res_runscript.set_job_code_callback(job_code_callback_filesetname);
+ res_runscript.set_job_code_callback(job_code_callback_director);
while ((c=(char*)res_runscript.commands->pop()) != NULL) {
t = (intptr_t)res_runscript.commands->pop();
RUNSCRIPT *script = new_runscript();
script->command = c;
script->cmd_type = t;
/* target is taken from res_runscript, each runscript object have
- * a copy
+ * a copy
*/
script->target = NULL;
script->set_target(res_runscript.target);
}
delete res_runscript.commands;
/* setting on_success, on_failure... cleanup target field */
- res_runscript.reset_default(true);
+ res_runscript.reset_default(true);
}
scan_to_eol(lc);
}
/* callback function for edit_job_codes */
-extern "C" char *job_code_callback_filesetname(JCR *jcr, const char* param)
+/* See ../lib/util.c, function edit_job_codes, for more remaining codes */
+/*
+ * Codes handled here:
+ *   f  FileSet name            h  Client address
+ *   p  Pool name               w  Write Storage name
+ *   x  Spool data (yes/no)     D  Director's name (my_name)
+ *   C  Cloned (yes/no)         I  JobId of jcr->wjcr (0 if none)
+ * Returns "" when jcr is NULL or the requested value is unavailable.
+ * buf/buflen are only used for the 'I' code, which requires
+ * buflen >= 50 so the edited 64-bit JobId always fits.
+ */
+extern "C" char *job_code_callback_director(JCR *jcr, const char* param, char *buf, int buflen)
{
-   if (param[0] == 'f') {
-      return jcr->fileset->name();
-   } else {
-      return NULL;
+   static char yes[] = "yes";
+   static char no[] = "no";
+   static char nothing[] = "";
+
+   if (jcr == NULL) {
+      return nothing;
+   }
+   switch (param[0]) {
+   case 'f':
+      if (jcr->fileset) {
+         return jcr->fileset->name();
+      }
+      break;
+   case 'h':
+      if (jcr->client && jcr->client->address()) {
+         return jcr->client->address();
+      }
+      break;
+   case 'p':
+      if (jcr->pool) {
+         return jcr->pool->name();
+      }
+      break;
+   case 'w':
+      if (jcr->wstore) {
+         return jcr->wstore->name();
+      }
+      break;
+   case 'x':
+      return jcr->spool_data ? yes : no;
+   case 'D':
+      return my_name;
+   case 'C':
+      return jcr->cloned ? yes : no;
+   case 'I':
+      /* Caller must supply a buffer; skip silently if too small */
+      if (buflen >= 50) {
+         if (jcr->wjcr) {
+            edit_uint64(jcr->wjcr->JobId, buf);
+            return buf;
+         } else {
+            edit_uint64(0, buf);
+            return buf;
+         }
+      }
}
+   return nothing;
}
+/*
+ * Parse the Director's configuration file.  Initializes the CONFIG
+ * object with the Director's resource table (resources/res_head)
+ * and the res_all work area, then runs the parser.
+ */
bool parse_dir_config(CONFIG *config, const char *configfile, int exit_code)
{
   config->init(configfile, NULL, exit_code, (void *)&res_all, res_all_size,
-                r_first, r_last, resources, res_head);
+                r_first, r_last, resources, &res_head);
   return config->parse_config();
}