* Version $Id$
*/
/*
- Copyright (C) 2000-2003 Kern Sibbald and John Walker
+ Copyright (C) 2000-2004 Kern Sibbald and John Walker
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
*/
int r_first = R_FIRST;
int r_last = R_LAST;
-
-pthread_mutex_t res_mutex = PTHREAD_MUTEX_INITIALIZER;
+static RES *sres_head[R_LAST - R_FIRST + 1];
+RES **res_head = sres_head;
/* Imported subroutines */
-extern void store_run(LEX *lc, struct res_items *item, int index, int pass);
-extern void store_finc(LEX *lc, struct res_items *item, int index, int pass);
-extern void store_inc(LEX *lc, struct res_items *item, int index, int pass);
+extern void store_run(LEX *lc, RES_ITEM *item, int index, int pass);
+extern void store_finc(LEX *lc, RES_ITEM *item, int index, int pass);
+extern void store_inc(LEX *lc, RES_ITEM *item, int index, int pass);
/* Forward referenced subroutines */
-static void store_backup(LEX *lc, struct res_items *item, int index, int pass);
-static void store_restore(LEX *lc, struct res_items *item, int index, int pass);
-static void store_jobtype(LEX *lc, struct res_items *item, int index, int pass);
-static void store_level(LEX *lc, struct res_items *item, int index, int pass);
-static void store_replace(LEX *lc, struct res_items *item, int index, int pass);
+void store_jobtype(LEX *lc, RES_ITEM *item, int index, int pass);
+void store_level(LEX *lc, RES_ITEM *item, int index, int pass);
+void store_replace(LEX *lc, RES_ITEM *item, int index, int pass);
+void store_acl(LEX *lc, RES_ITEM *item, int index, int pass);
/* We build the current resource here as we are
*
* name handler value code flags default_value
*/
-static struct res_items dir_items[] = {
+static RES_ITEM dir_items[] = {
{"name", store_name, ITEM(res_dir.hdr.name), 0, ITEM_REQUIRED, 0},
{"description", store_str, ITEM(res_dir.hdr.desc), 0, 0, 0},
{"messages", store_res, ITEM(res_dir.messages), R_MSGS, 0, 0},
- {"dirport", store_pint, ITEM(res_dir.DIRport), 0, ITEM_DEFAULT, 9101},
- {"diraddress", store_str, ITEM(res_dir.DIRaddr), 0, 0, 0},
+ {"dirport", store_addresses_port, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101},
+ {"diraddress", store_addresses_address, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101},
+ {"diraddresses",store_addresses, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101},
{"queryfile", store_dir, ITEM(res_dir.query_file), 0, ITEM_REQUIRED, 0},
{"workingdirectory", store_dir, ITEM(res_dir.working_directory), 0, ITEM_REQUIRED, 0},
{"piddirectory",store_dir, ITEM(res_dir.pid_directory), 0, ITEM_REQUIRED, 0},
*
* name handler value code flags default_value
*/
-static struct res_items con_items[] = {
+static RES_ITEM con_items[] = {
{"name", store_name, ITEM(res_con.hdr.name), 0, ITEM_REQUIRED, 0},
{"description", store_str, ITEM(res_con.hdr.desc), 0, 0, 0},
{"enablessl", store_yesno, ITEM(res_con.enable_ssl), 1, ITEM_DEFAULT, 0},
{"password", store_password, ITEM(res_con.password), 0, ITEM_REQUIRED, 0},
+ {"jobacl", store_acl, ITEM(res_con.ACL_lists), Job_ACL, 0, 0},
+ {"clientacl", store_acl, ITEM(res_con.ACL_lists), Client_ACL, 0, 0},
+ {"storageacl", store_acl, ITEM(res_con.ACL_lists), Storage_ACL, 0, 0},
+ {"scheduleacl", store_acl, ITEM(res_con.ACL_lists), Schedule_ACL, 0, 0},
+ {"runacl", store_acl, ITEM(res_con.ACL_lists), Run_ACL, 0, 0},
+ {"poolacl", store_acl, ITEM(res_con.ACL_lists), Pool_ACL, 0, 0},
+ {"commandacl", store_acl, ITEM(res_con.ACL_lists), Command_ACL, 0, 0},
+ {"filesetacl", store_acl, ITEM(res_con.ACL_lists), FileSet_ACL, 0, 0},
+ {"catalogacl", store_acl, ITEM(res_con.ACL_lists), Catalog_ACL, 0, 0},
{NULL, NULL, NULL, 0, 0, 0}
};
* name handler value code flags default_value
*/
-static struct res_items cli_items[] = {
+static RES_ITEM cli_items[] = {
{"name", store_name, ITEM(res_client.hdr.name), 0, ITEM_REQUIRED, 0},
{"description", store_str, ITEM(res_client.hdr.desc), 0, 0, 0},
{"address", store_str, ITEM(res_client.address), 0, ITEM_REQUIRED, 0},
*
* name handler value code flags default_value
*/
-static struct res_items store_items[] = {
+static RES_ITEM store_items[] = {
{"name", store_name, ITEM(res_store.hdr.name), 0, ITEM_REQUIRED, 0},
{"description", store_str, ITEM(res_store.hdr.desc), 0, 0, 0},
{"sdport", store_pint, ITEM(res_store.SDport), 0, ITEM_DEFAULT, 9103},
*
* name handler value code flags default_value
*/
-static struct res_items cat_items[] = {
+static RES_ITEM cat_items[] = {
{"name", store_name, ITEM(res_cat.hdr.name), 0, ITEM_REQUIRED, 0},
{"description", store_str, ITEM(res_cat.hdr.desc), 0, 0, 0},
{"address", store_str, ITEM(res_cat.db_address), 0, 0, 0},
*
* name handler value code flags default_value
*/
-static struct res_items job_items[] = {
- {"name", store_name, ITEM(res_job.hdr.name), 0, ITEM_REQUIRED, 0},
- {"description", store_str, ITEM(res_job.hdr.desc), 0, 0, 0},
- {"backup", store_backup, ITEM(res_job), JT_BACKUP, 0, 0},
- {"verify", store_backup, ITEM(res_job), JT_VERIFY, 0, 0},
- {"restore", store_restore, ITEM(res_job), JT_RESTORE, 0, 0},
- {"schedule", store_res, ITEM(res_job.schedule), R_SCHEDULE, 0, 0},
- {"type", store_jobtype, ITEM(res_job), 0, 0, 0},
- {"level", store_level, ITEM(res_job), 0, 0, 0},
- {"messages", store_res, ITEM(res_job.messages), R_MSGS, 0, 0},
- {"storage", store_res, ITEM(res_job.storage), R_STORAGE, 0, 0},
- {"pool", store_res, ITEM(res_job.pool), R_POOL, 0, 0},
- {"client", store_res, ITEM(res_job.client), R_CLIENT, 0, 0},
- {"fileset", store_res, ITEM(res_job.fileset), R_FILESET, 0, 0},
- {"verifyjob", store_res, ITEM(res_job.verify_job), R_JOB, 0, 0},
- {"where", store_dir, ITEM(res_job.RestoreWhere), 0, 0, 0},
- {"replace", store_replace, ITEM(res_job.replace), 0, ITEM_DEFAULT, REPLACE_ALWAYS},
- {"bootstrap",store_dir, ITEM(res_job.RestoreBootstrap), 0, 0, 0},
- {"maxruntime", store_time, ITEM(res_job.MaxRunTime), 0, 0, 0},
- {"maxstartdelay", store_time,ITEM(res_job.MaxStartDelay), 0, 0, 0},
+RES_ITEM job_items[] = {
+ {"name", store_name, ITEM(res_job.hdr.name), 0, ITEM_REQUIRED, 0},
+ {"description", store_str, ITEM(res_job.hdr.desc), 0, 0, 0},
+ {"type", store_jobtype, ITEM(res_job.JobType), 0, ITEM_REQUIRED, 0},
+ {"level", store_level, ITEM(res_job.level), 0, 0, 0},
+ {"messages", store_res, ITEM(res_job.messages), R_MSGS, ITEM_REQUIRED, 0},
+ {"storage", store_res, ITEM(res_job.storage), R_STORAGE, ITEM_REQUIRED, 0},
+ {"pool", store_res, ITEM(res_job.pool), R_POOL, ITEM_REQUIRED, 0},
+ {"fullbackuppool", store_res, ITEM(res_job.full_pool), R_POOL, 0, 0},
+ {"incrementalbackuppool", store_res, ITEM(res_job.inc_pool), R_POOL, 0, 0},
+ {"differentialbackuppool", store_res, ITEM(res_job.dif_pool), R_POOL, 0, 0},
+ {"client", store_res, ITEM(res_job.client), R_CLIENT, ITEM_REQUIRED, 0},
+ {"fileset", store_res, ITEM(res_job.fileset), R_FILESET, ITEM_REQUIRED, 0},
+ {"schedule", store_res, ITEM(res_job.schedule), R_SCHEDULE, 0, 0},
+ {"verifyjob", store_res, ITEM(res_job.verify_job), R_JOB, 0, 0},
+ {"jobdefs", store_res, ITEM(res_job.jobdefs), R_JOBDEFS, 0, 0},
+ {"where", store_dir, ITEM(res_job.RestoreWhere), 0, 0, 0},
+ {"bootstrap",store_dir, ITEM(res_job.RestoreBootstrap), 0, 0, 0},
+ {"writebootstrap",store_dir, ITEM(res_job.WriteBootstrap), 0, 0, 0},
+ {"replace", store_replace, ITEM(res_job.replace), 0, ITEM_DEFAULT, REPLACE_ALWAYS},
+ {"maxruntime", store_time, ITEM(res_job.MaxRunTime), 0, 0, 0},
+ {"maxwaittime", store_time, ITEM(res_job.MaxWaitTime), 0, 0, 0},
+ {"maxstartdelay",store_time, ITEM(res_job.MaxStartDelay), 0, 0, 0},
+ {"jobretention", store_time, ITEM(res_job.JobRetention), 0, 0, 0},
{"prefixlinks", store_yesno, ITEM(res_job.PrefixLinks), 1, ITEM_DEFAULT, 0},
{"prunejobs", store_yesno, ITEM(res_job.PruneJobs), 1, ITEM_DEFAULT, 0},
{"prunefiles", store_yesno, ITEM(res_job.PruneFiles), 1, ITEM_DEFAULT, 0},
- {"prunevolumes", store_yesno, ITEM(res_job.PruneVolumes), 1, ITEM_DEFAULT, 0},
+ {"prunevolumes",store_yesno, ITEM(res_job.PruneVolumes), 1, ITEM_DEFAULT, 0},
+ {"spoolattributes",store_yesno, ITEM(res_job.SpoolAttributes), 1, ITEM_DEFAULT, 0},
+ {"spooldata", store_yesno, ITEM(res_job.spool_data), 1, ITEM_DEFAULT, 0},
{"runbeforejob", store_str, ITEM(res_job.RunBeforeJob), 0, 0, 0},
{"runafterjob", store_str, ITEM(res_job.RunAfterJob), 0, 0, 0},
+ {"runafterfailedjob", store_str, ITEM(res_job.RunAfterFailedJob), 0, 0, 0},
{"clientrunbeforejob", store_str, ITEM(res_job.ClientRunBeforeJob), 0, 0, 0},
{"clientrunafterjob", store_str, ITEM(res_job.ClientRunAfterJob), 0, 0, 0},
- {"spoolattributes", store_yesno, ITEM(res_job.SpoolAttributes), 1, ITEM_DEFAULT, 0},
- {"writebootstrap", store_dir, ITEM(res_job.WriteBootstrap), 0, 0, 0},
{"maximumconcurrentjobs", store_pint, ITEM(res_job.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
{"rescheduleonerror", store_yesno, ITEM(res_job.RescheduleOnError), 1, ITEM_DEFAULT, 0},
{"rescheduleinterval", store_time, ITEM(res_job.RescheduleInterval), 0, ITEM_DEFAULT, 60 * 30},
*
* name handler value code flags default_value
*/
-static struct res_items fs_items[] = {
+static RES_ITEM fs_items[] = {
{"name", store_name, ITEM(res_fs.hdr.name), 0, ITEM_REQUIRED, 0},
{"description", store_str, ITEM(res_fs.hdr.desc), 0, 0, 0},
{"include", store_inc, NULL, 0, ITEM_NO_EQUALS, 0},
*
* name handler value code flags default_value
*/
-static struct res_items sch_items[] = {
+static RES_ITEM sch_items[] = {
{"name", store_name, ITEM(res_sch.hdr.name), 0, ITEM_REQUIRED, 0},
{"description", store_str, ITEM(res_sch.hdr.desc), 0, 0, 0},
{"run", store_run, ITEM(res_sch.run), 0, 0, 0},
{NULL, NULL, NULL, 0, 0, 0}
};
-/* Group resource -- not implemented
- *
- * name handler value code flags default_value
- */
-static struct res_items group_items[] = {
- {"name", store_name, ITEM(res_group.hdr.name), 0, ITEM_REQUIRED, 0},
- {"description", store_str, ITEM(res_group.hdr.desc), 0, 0, 0},
- {NULL, NULL, NULL, 0, 0, 0}
-};
-
/* Pool resource
*
* name handler value code flags default_value
*/
-static struct res_items pool_items[] = {
+static RES_ITEM pool_items[] = {
{"name", store_name, ITEM(res_pool.hdr.name), 0, ITEM_REQUIRED, 0},
{"description", store_str, ITEM(res_pool.hdr.desc), 0, 0, 0},
{"pooltype", store_strname, ITEM(res_pool.pool_type), 0, ITEM_REQUIRED, 0},
* Counter Resource
* name handler value code flags default_value
*/
-static struct res_items counter_items[] = {
+static RES_ITEM counter_items[] = {
{"name", store_name, ITEM(res_counter.hdr.name), 0, ITEM_REQUIRED, 0},
{"description", store_str, ITEM(res_counter.hdr.desc), 0, 0, 0},
{"minimum", store_int, ITEM(res_counter.MinValue), 0, ITEM_DEFAULT, 0},
/* Message resource */
-extern struct res_items msgs_items[];
+extern RES_ITEM msgs_items[];
/*
* This is the master resource definition.
*
* name items rcode res_head
*/
-struct s_res resources[] = {
- {"director", dir_items, R_DIRECTOR, NULL},
- {"client", cli_items, R_CLIENT, NULL},
- {"job", job_items, R_JOB, NULL},
- {"storage", store_items, R_STORAGE, NULL},
- {"catalog", cat_items, R_CATALOG, NULL},
- {"schedule", sch_items, R_SCHEDULE, NULL},
- {"fileset", fs_items, R_FILESET, NULL},
- {"group", group_items, R_GROUP, NULL},
- {"pool", pool_items, R_POOL, NULL},
- {"messages", msgs_items, R_MSGS, NULL},
- {"counter", counter_items, R_COUNTER, NULL},
- {"console", con_items, R_CONSOLE, NULL},
- {NULL, NULL, 0, NULL}
+RES_TABLE resources[] = {
+ {"director", dir_items, R_DIRECTOR},
+ {"client", cli_items, R_CLIENT},
+ {"job", job_items, R_JOB},
+ {"storage", store_items, R_STORAGE},
+ {"catalog", cat_items, R_CATALOG},
+ {"schedule", sch_items, R_SCHEDULE},
+ {"fileset", fs_items, R_FILESET},
+ {"pool", pool_items, R_POOL},
+ {"messages", msgs_items, R_MSGS},
+ {"counter", counter_items, R_COUNTER},
+ {"console", con_items, R_CONSOLE},
+ {"jobdefs", job_items, R_JOBDEFS},
+ {NULL, NULL, 0}
};
{"VolumeToCatalog", L_VERIFY_VOLUME_TO_CATALOG, JT_VERIFY},
{"DiskToCatalog", L_VERIFY_DISK_TO_CATALOG, JT_VERIFY},
{"Data", L_VERIFY_DATA, JT_VERIFY},
- {NULL, 0}
+ {" ", L_NONE, JT_ADMIN},
+ {" ", L_NONE, JT_RESTORE},
+ {NULL, 0, 0}
};
/* Keywords (RHS) permitted in Job type records
{NULL, 0}
};
+#ifdef old_deprecated_code
/* Keywords (RHS) permitted in Backup and Verify records */
static struct s_kw BakVerFields[] = {
{"bootstrap", 'B'}, /* bootstrap file */
{NULL, 0}
};
+#endif
/* Options permitted in Restore replace= */
struct s_kw ReplaceOptions[] = {
{NULL, 0}
};
-char *level_to_str(int level)
+const char *level_to_str(int level)
{
int i;
static char level_no[30];
- char *str = level_no;
+ const char *str = level_no;
- sprintf(level_no, "%d", level); /* default if not found */
+ bsnprintf(level_no, sizeof(level_no), "%d", level); /* default if not found */
for (i=0; joblevels[i].level_name; i++) {
if (level == joblevels[i].level) {
str = joblevels[i].level_name;
}
/* Dump contents of resource */
-void dump_resource(int type, RES *reshdr, void sendit(void *sock, char *fmt, ...), void *sock)
+void dump_resource(int type, RES *reshdr, void sendit(void *sock, const char *fmt, ...), void *sock)
{
URES *res = (URES *)reshdr;
bool recurse = true;
res->res_client.hdr.name, res->res_client.address, res->res_client.FDport,
res->res_client.MaxConcurrentJobs);
sendit(sock, " JobRetention=%s FileRetention=%s AutoPrune=%d\n",
- edit_utime(res->res_client.JobRetention, ed1),
- edit_utime(res->res_client.FileRetention, ed2),
+ edit_utime(res->res_client.JobRetention, ed1, sizeof(ed1)),
+ edit_utime(res->res_client.FileRetention, ed2, sizeof(ed2)),
res->res_client.AutoPrune);
if (res->res_client.catalog) {
sendit(sock, " --> ");
res->res_cat.db_port, res->res_cat.db_name, NPRT(res->res_cat.db_user));
break;
case R_JOB:
- sendit(sock, "Job: name=%s JobType=%d level=%s Priority=%d MaxJobs=%u\n",
+ case R_JOBDEFS:
+ sendit(sock, "%s: name=%s JobType=%d level=%s Priority=%d MaxJobs=%u\n",
+ type == R_JOB ? "Job" : "JobDefs",
res->res_job.hdr.name, res->res_job.JobType,
level_to_str(res->res_job.level), res->res_job.Priority,
res->res_job.MaxConcurrentJobs);
- sendit(sock, " Resched=%d Times=%d Interval=%s\n",
+ sendit(sock, " Resched=%d Times=%d Interval=%s Spool=%d\n",
res->res_job.RescheduleOnError, res->res_job.RescheduleTimes,
- edit_uint64_with_commas(res->res_job.RescheduleInterval, ed1));
+ edit_uint64_with_commas(res->res_job.RescheduleInterval, ed1),
+ res->res_job.spool_data);
if (res->res_job.client) {
sendit(sock, " --> ");
dump_resource(-R_CLIENT, (RES *)res->res_job.client, sendit, sock);
if (res->res_job.RunAfterJob) {
sendit(sock, " --> RunAfter=%s\n", NPRT(res->res_job.RunAfterJob));
}
+ if (res->res_job.RunAfterFailedJob) {
+ sendit(sock, " --> RunAfterFailed=%s\n", NPRT(res->res_job.RunAfterFailedJob));
+ }
if (res->res_job.WriteBootstrap) {
sendit(sock, " --> WriteBootstrap=%s\n", NPRT(res->res_job.WriteBootstrap));
}
if (res->res_job.pool) {
sendit(sock, " --> ");
dump_resource(-R_POOL, (RES *)res->res_job.pool, sendit, sock);
- } else {
- sendit(sock, "!!! No Pool resource\n");
+ }
+ if (res->res_job.full_pool) {
+ sendit(sock, " --> ");
+ dump_resource(-R_POOL, (RES *)res->res_job.full_pool, sendit, sock);
+ }
+ if (res->res_job.inc_pool) {
+ sendit(sock, " --> ");
+ dump_resource(-R_POOL, (RES *)res->res_job.inc_pool, sendit, sock);
+ }
+ if (res->res_job.dif_pool) {
+ sendit(sock, " --> ");
+ dump_resource(-R_POOL, (RES *)res->res_job.dif_pool, sendit, sock);
}
if (res->res_job.verify_job) {
sendit(sock, " --> ");
- dump_resource(-R_JOB, (RES *)res->res_job.verify_job, sendit, sock);
+ dump_resource(-type, (RES *)res->res_job.verify_job, sendit, sock);
}
break;
if (res->res_job.messages) {
}
break;
case R_FILESET:
+ {
+ int i, j, k;
sendit(sock, "FileSet: name=%s\n", res->res_fs.hdr.name);
- for (int i=0; i<res->res_fs.num_includes; i++) {
+ for (i=0; i<res->res_fs.num_includes; i++) {
INCEXE *incexe = res->res_fs.include_items[i];
- for (int j=0; j<incexe->name_list.size(); j++) {
- sendit(sock, " Inc: %s\n", incexe->name_list.get(j));
+ for (j=0; j<incexe->num_opts; j++) {
+ FOPTS *fo = incexe->opts_list[j];
+ sendit(sock, " O %s\n", fo->opts);
+ for (k=0; k<fo->regex.size(); k++) {
+ sendit(sock, " R %s\n", fo->regex.get(k));
+ }
+ for (k=0; k<fo->wild.size(); k++) {
+ sendit(sock, " W %s\n", fo->wild.get(k));
+ }
+ for (k=0; k<fo->base.size(); k++) {
+ sendit(sock, " B %s\n", fo->base.get(k));
+ }
+ if (fo->reader) {
+ sendit(sock, " D %s\n", fo->reader);
+ }
+ if (fo->writer) {
+ sendit(sock, " T %s\n", fo->writer);
+ }
+ sendit(sock, " N\n");
+ }
+ for (j=0; j<incexe->name_list.size(); j++) {
+ sendit(sock, " I %s\n", incexe->name_list.get(j));
+ }
+ if (incexe->name_list.size()) {
+ sendit(sock, " N\n");
}
}
- for (int i=0; i<res->res_fs.num_excludes; i++) {
+
+ for (i=0; i<res->res_fs.num_excludes; i++) {
INCEXE *incexe = res->res_fs.exclude_items[i];
- for (int j=0; j<incexe->name_list.size(); j++) {
- sendit(sock, " Exc: %s\n", incexe->name_list.get(j));
+ for (j=0; j<incexe->name_list.size(); j++) {
+ sendit(sock, " E %s\n", incexe->name_list.get(j));
+ }
+ if (incexe->name_list.size()) {
+ sendit(sock, " N\n");
}
}
break;
+ }
case R_SCHEDULE:
if (res->res_sch.run) {
int i;
RUN *run = res->res_sch.run;
- char buf[1000], num[10];
+ char buf[1000], num[30];
sendit(sock, "Schedule: name=%s\n", res->res_sch.hdr.name);
if (!run) {
break;
bstrncpy(buf, " hour=", sizeof(buf));
for (i=0; i<24; i++) {
if (bit_is_set(i, run->hour)) {
- sprintf(num, "%d ", i);
+ bsnprintf(num, sizeof(num), "%d ", i);
bstrncat(buf, num, sizeof(buf));
}
}
- strcat(buf, "\n");
+ bstrncat(buf, "\n", sizeof(buf));
sendit(sock, buf);
- strcpy(buf, " mday=");
+ bstrncpy(buf, " mday=", sizeof(buf));
for (i=0; i<31; i++) {
if (bit_is_set(i, run->mday)) {
- sprintf(num, "%d ", i+1);
- strcat(buf, num);
+ bsnprintf(num, sizeof(num), "%d ", i);
+ bstrncat(buf, num, sizeof(buf));
}
}
- strcat(buf, "\n");
+ bstrncat(buf, "\n", sizeof(buf));
sendit(sock, buf);
- strcpy(buf, " month=");
+ bstrncpy(buf, " month=", sizeof(buf));
for (i=0; i<12; i++) {
if (bit_is_set(i, run->month)) {
- sprintf(num, "%d ", i+1);
- strcat(buf, num);
+ bsnprintf(num, sizeof(num), "%d ", i);
+ bstrncat(buf, num, sizeof(buf));
}
}
- strcat(buf, "\n");
+ bstrncat(buf, "\n", sizeof(buf));
sendit(sock, buf);
- strcpy(buf, " wday=");
+ bstrncpy(buf, " wday=", sizeof(buf));
for (i=0; i<7; i++) {
if (bit_is_set(i, run->wday)) {
- sprintf(num, "%d ", i+1);
- strcat(buf, num);
+ bsnprintf(num, sizeof(num), "%d ", i);
+ bstrncat(buf, num, sizeof(buf));
}
}
- strcat(buf, "\n");
+ bstrncat(buf, "\n", sizeof(buf));
sendit(sock, buf);
- strcpy(buf, " wpos=");
+ bstrncpy(buf, " wom=", sizeof(buf));
for (i=0; i<5; i++) {
- if (bit_is_set(i, run->wpos)) {
- sprintf(num, "%d ", i+1);
- strcat(buf, num);
+ if (bit_is_set(i, run->wom)) {
+ bsnprintf(num, sizeof(num), "%d ", i);
+ bstrncat(buf, num, sizeof(buf));
+ }
+ }
+ bstrncat(buf, "\n", sizeof(buf));
+ sendit(sock, buf);
+ bstrncpy(buf, " woy=", sizeof(buf));
+ for (i=0; i<54; i++) {
+ if (bit_is_set(i, run->woy)) {
+ bsnprintf(num, sizeof(num), "%d ", i);
+ bstrncat(buf, num, sizeof(buf));
}
}
- strcat(buf, "\n");
+ bstrncat(buf, "\n", sizeof(buf));
sendit(sock, buf);
sendit(sock, " mins=%d\n", run->minute);
if (run->pool) {
sendit(sock, "Schedule: name=%s\n", res->res_sch.hdr.name);
}
break;
- case R_GROUP:
- sendit(sock, "Group: name=%s\n", res->res_group.hdr.name);
- break;
case R_POOL:
sendit(sock, "Pool: name=%s PoolType=%s\n", res->res_pool.hdr.name,
res->res_pool.pool_type);
res->res_pool.accept_any_volume, res->res_pool.catalog_files);
sendit(sock, " max_vols=%d auto_prune=%d VolRetention=%s\n",
res->res_pool.max_volumes, res->res_pool.AutoPrune,
- edit_utime(res->res_pool.VolRetention, ed1));
+ edit_utime(res->res_pool.VolRetention, ed1, sizeof(ed1)));
sendit(sock, " VolUse=%s recycle=%d LabelFormat=%s\n",
- edit_utime(res->res_pool.VolUseDuration, ed1),
+ edit_utime(res->res_pool.VolUseDuration, ed1, sizeof(ed1)),
res->res_pool.Recycle,
NPRT(res->res_pool.label_format));
sendit(sock, " CleaningPrefix=%s\n",
incexe->name_list.destroy();
for (int i=0; i<incexe->num_opts; i++) {
FOPTS *fopt = incexe->opts_list[i];
- fopt->match.destroy();
- fopt->base_list.destroy();
+ fopt->regex.destroy();
+ fopt->wild.destroy();
+ fopt->base.destroy();
+ if (fopt->reader) {
+ free(fopt->reader);
+ }
+ if (fopt->writer) {
+ free(fopt->writer);
+ }
free(fopt);
}
if (incexe->opts_list) {
}
/*
- * Free memory of resource.
+ * Free memory of resource -- called when daemon terminates.
* NB, we don't need to worry about freeing any references
* to other resources as they will be freed when that
* resource chain is traversed. Mainly we worry about freeing
* allocated strings (names).
*/
-void free_resource(int type)
+void free_resource(RES *sres, int type)
{
int num;
- URES *res;
- RES *nres;
- int rindex = type - r_first;
-
- res = (URES *)resources[rindex].res_head;
+ RES *nres; /* next resource if linked */
+ URES *res = (URES *)sres;
if (res == NULL)
return;
if (res->res_dir.query_file) {
free(res->res_dir.query_file);
}
- if (res->res_dir.DIRaddr) {
- free(res->res_dir.DIRaddr);
+ if (res->res_dir.DIRaddrs) {
+ free_addresses(res->res_dir.DIRaddrs);
}
break;
case R_COUNTER:
if (res->res_con.password) {
free(res->res_con.password);
}
+ for (int i=0; i<Num_ACL; i++) {
+ if (res->res_con.ACL_lists[i]) {
+ delete res->res_con.ACL_lists[i];
+ res->res_con.ACL_lists[i] = NULL;
+ }
+ }
break;
case R_CLIENT:
if (res->res_client.address) {
}
break;
case R_JOB:
+ case R_JOBDEFS:
if (res->res_job.RestoreWhere) {
free(res->res_job.RestoreWhere);
}
if (res->res_job.RunAfterJob) {
free(res->res_job.RunAfterJob);
}
+ if (res->res_job.RunAfterFailedJob) {
+ free(res->res_job.RunAfterFailedJob);
+ }
if (res->res_job.ClientRunBeforeJob) {
free(res->res_job.ClientRunBeforeJob);
}
free_msgs_res((MSGS *)res); /* free message resource */
res = NULL;
break;
- case R_GROUP:
- break;
default:
printf("Unknown resource type %d in free_resource.\n", type);
}
if (res) {
free(res);
}
- resources[rindex].res_head = nres;
if (nres) {
- free_resource(type);
+ free_resource(nres, type);
}
}
* pointers because they may not have been defined until
* later in pass 1.
*/
-void save_resource(int type, struct res_items *items, int pass)
+void save_resource(int type, RES_ITEM *items, int pass)
{
URES *res;
int rindex = type - r_first;
int i, size;
int error = 0;
- /*
- * Ensure that all required items are present
- */
- for (i=0; items[i].name; i++) {
- if (items[i].flags & ITEM_REQUIRED) {
- if (!bit_is_set(i, res_all.res_dir.hdr.item_present)) {
- Emsg2(M_ERROR_TERM, 0, "%s item is required in %s resource, but not found.\n",
- items[i].name, resources[rindex]);
- }
- }
- /* If this triggers, take a look at lib/parse_conf.h */
- if (i >= MAX_RES_ITEMS) {
- Emsg1(M_ERROR_TERM, 0, "Too many items in %s resource\n", resources[rindex]);
+ /* Check Job requirements after applying JobDefs */
+ if (type != R_JOB && type != R_JOBDEFS) {
+ /*
+ * Ensure that all required items are present
+ */
+ for (i=0; items[i].name; i++) {
+ if (items[i].flags & ITEM_REQUIRED) {
+ if (!bit_is_set(i, res_all.res_dir.hdr.item_present)) {
+ Emsg2(M_ERROR_TERM, 0, "%s item is required in %s resource, but not found.\n",
+	       items[i].name, resources[rindex].name);
+ }
+ }
+ /* If this triggers, take a look at lib/parse_conf.h */
+ if (i >= MAX_RES_ITEMS) {
+	    Emsg1(M_ERROR_TERM, 0, "Too many items in %s resource\n", resources[rindex].name);
+ }
}
}
- /* During pass 2 in each "store" routine, we looked up pointers
+ /*
+ * During pass 2 in each "store" routine, we looked up pointers
* to all the resources referrenced in the current resource, now we
* must copy their addresses from the static record to the allocated
* record.
case R_CONSOLE:
case R_CATALOG:
case R_STORAGE:
- case R_GROUP:
case R_POOL:
case R_MSGS:
case R_FILESET:
res->res_dir.messages = res_all.res_dir.messages;
break;
case R_JOB:
- if ((res = (URES *)GetResWithName(R_JOB, res_all.res_dir.hdr.name)) == NULL) {
- Emsg1(M_ERROR_TERM, 0, "Cannot find Job resource %s\n", res_all.res_dir.hdr.name);
+ case R_JOBDEFS:
+ if ((res = (URES *)GetResWithName(type, res_all.res_dir.hdr.name)) == NULL) {
+ Emsg1(M_ERROR_TERM, 0, "Cannot find Job resource %s\n",
+ res_all.res_dir.hdr.name);
}
res->res_job.messages = res_all.res_job.messages;
res->res_job.schedule = res_all.res_job.schedule;
res->res_job.fileset = res_all.res_job.fileset;
res->res_job.storage = res_all.res_job.storage;
res->res_job.pool = res_all.res_job.pool;
+ res->res_job.full_pool = res_all.res_job.full_pool;
+ res->res_job.inc_pool = res_all.res_job.inc_pool;
+ res->res_job.dif_pool = res_all.res_job.dif_pool;
res->res_job.verify_job = res_all.res_job.verify_job;
- if (res->res_job.JobType == 0) {
- Emsg1(M_ERROR_TERM, 0, "Job Type not defined for Job resource %s\n", res_all.res_dir.hdr.name);
- }
- if (res->res_job.level != 0) {
- int i;
- for (i=0; joblevels[i].level_name; i++) {
- if (joblevels[i].level == res->res_job.level &&
- joblevels[i].job_type == res->res_job.JobType) {
- i = 0;
- break;
- }
- }
- if (i != 0) {
- Emsg1(M_ERROR_TERM, 0, "Inappropriate level specified in Job resource %s\n",
- res_all.res_dir.hdr.name);
- }
- }
+ res->res_job.jobdefs = res_all.res_job.jobdefs;
break;
case R_COUNTER:
if ((res = (URES *)GetResWithName(R_COUNTER, res_all.res_counter.hdr.name)) == NULL) {
res->res_client.catalog = res_all.res_client.catalog;
break;
case R_SCHEDULE:
- /* Schedule is a bit different in that it contains a RUN record
+ /*
+ * Schedule is a bit different in that it contains a RUN record
* chain which isn't a "named" resource. This chain was linked
* in by run_conf.c during pass 2, so here we jam the pointer
* into the Schedule resource.
size = sizeof(CAT);
break;
case R_JOB:
+ case R_JOBDEFS:
size = sizeof(JOB);
break;
case R_FILESET:
case R_SCHEDULE:
size = sizeof(SCHED);
break;
- case R_GROUP:
- size = sizeof(GROUP);
- break;
case R_POOL:
size = sizeof(POOL);
break;
if (!error) {
res = (URES *)malloc(size);
memcpy(res, &res_all, size);
- if (!resources[rindex].res_head) {
- resources[rindex].res_head = (RES *)res; /* store first entry */
- Dmsg3(200, "Inserting first %s res: %s index=%d\n", res_to_str(type),
+ if (!res_head[rindex]) {
+ res_head[rindex] = (RES *)res; /* store first entry */
+ Dmsg3(900, "Inserting first %s res: %s index=%d\n", res_to_str(type),
res->res_dir.hdr.name, rindex);
} else {
RES *next;
/* Add new res to end of chain */
- for (next=resources[rindex].res_head; next->next; next=next->next) {
+ for (next=res_head[rindex]; next->next; next=next->next) {
if (strcmp(next->name, res->res_dir.hdr.name) == 0) {
Emsg2(M_ERROR_TERM, 0,
_("Attempt to define second %s resource named \"%s\" is not permitted.\n"),
}
}
next->next = (RES *)res;
- Dmsg4(200, "Inserting %s res: %s index=%d pass=%d\n", res_to_str(type),
+ Dmsg4(900, "Inserting %s res: %s index=%d pass=%d\n", res_to_str(type),
res->res_dir.hdr.name, rindex, pass);
}
}
* Store JobType (backup, verify, restore)
*
*/
-static void store_jobtype(LEX *lc, struct res_items *item, int index, int pass)
+void store_jobtype(LEX *lc, RES_ITEM *item, int index, int pass)
{
int token, i;
/* Store the type both pass 1 and pass 2 */
for (i=0; jobtypes[i].type_name; i++) {
if (strcasecmp(lc->str, jobtypes[i].type_name) == 0) {
- ((JOB *)(item->value))->JobType = jobtypes[i].job_type;
+ *(int *)(item->value) = jobtypes[i].job_type;
i = 0;
break;
}
* Store Job Level (Full, Incremental, ...)
*
*/
-static void store_level(LEX *lc, struct res_items *item, int index, int pass)
+void store_level(LEX *lc, RES_ITEM *item, int index, int pass)
{
int token, i;
/* Store the level pass 2 so that type is defined */
for (i=0; joblevels[i].level_name; i++) {
if (strcasecmp(lc->str, joblevels[i].level_name) == 0) {
- ((JOB *)(item->value))->level = joblevels[i].level;
+ *(int *)(item->value) = joblevels[i].level;
i = 0;
break;
}
set_bit(index, res_all.hdr.item_present);
}
-static void store_replace(LEX *lc, struct res_items *item, int index, int pass)
+void store_replace(LEX *lc, RES_ITEM *item, int index, int pass)
{
int token, i;
token = lex_get_token(lc, T_NAME);
set_bit(index, res_all.hdr.item_present);
}
+/*
+ * Store ACL (access control list)
+ *
+ */
+void store_acl(LEX *lc, RES_ITEM *item, int index, int pass)
+{
+ int token;
+
+ for (;;) {
+ token = lex_get_token(lc, T_NAME);
+ if (pass == 1) {
+ if (((alist **)item->value)[item->code] == NULL) {
+ ((alist **)item->value)[item->code] = New(alist(10, owned_by_alist));
+// Dmsg1(900, "Defined new ACL alist at %d\n", item->code);
+ }
+ ((alist **)item->value)[item->code]->append(bstrdup(lc->str));
+// Dmsg2(900, "Appended to %d %s\n", item->code, lc->str);
+ }
+ token = lex_get_token(lc, T_ALL);
+ if (token == T_COMMA) {
+ continue; /* get another ACL */
+ }
+ break;
+ }
+ set_bit(index, res_all.hdr.item_present);
+}
+
+
+#ifdef old_deprecated_code
/*
* Store backup/verify info for Job record
*
*
* Backup = Client=<client-name> FileSet=<FileSet-name> Level=<level>
*/
-static void store_backup(LEX *lc, struct res_items *item, int index, int pass)
+static void store_backup(LEX *lc, RES_ITEM *item, int index, int pass)
{
int token, i;
RES *res;
while ((token = lex_get_token(lc, T_ALL)) != T_EOL) {
bool found = false;
- Dmsg1(150, "store_backup got token=%s\n", lex_tok_to_str(token));
+ Dmsg1(900, "store_backup got token=%s\n", lex_tok_to_str(token));
if (token != T_IDENTIFIER && token != T_UNQUOTED_STRING && token != T_QUOTED_STRING) {
scan_err1(lc, "Expected a backup/verify keyword, got: %s", lc->str);
}
- Dmsg1(190, "Got keyword: %s\n", lc->str);
+ Dmsg1(900, "Got keyword: %s\n", lc->str);
for (i=0; BakVerFields[i].name; i++) {
if (strcasecmp(lc->str, BakVerFields[i].name) == 0) {
found = true;
scan_err1(lc, "Expected an equals, got: %s", lc->str);
}
token = lex_get_token(lc, T_NAME);
- Dmsg1(190, "Got value: %s\n", lc->str);
+ Dmsg1(900, "Got value: %s\n", lc->str);
switch (BakVerFields[i].token) {
case 'C':
/* Find Client Resource */
* Restore = JobId=<job-id> Where=<root-directory> Replace=<options> Bootstrap=<file>
*
*/
-static void store_restore(LEX *lc, struct res_items *item, int index, int pass)
+static void store_restore(LEX *lc, RES_ITEM *item, int index, int pass)
{
int token, i;
RES *res;
lc->options |= LOPT_NO_IDENT; /* make spaces significant */
- Dmsg0(190, "Enter store_restore()\n");
+ Dmsg0(900, "Enter store_restore()\n");
((JOB *)(item->value))->JobType = item->code;
while ((token = lex_get_token(lc, T_ALL)) != T_EOL) {
scan_err1(lc, "expected a name, got: %s", lc->str);
}
for (i=0; RestoreFields[i].name; i++) {
- Dmsg1(190, "Restore kw=%s\n", lc->str);
+ Dmsg1(900, "Restore kw=%s\n", lc->str);
if (strcasecmp(lc->str, RestoreFields[i].name) == 0) {
found = true;
if (lex_get_token(lc, T_ALL) != T_EQUALS) {
scan_err1(lc, "Expected an equals, got: %s", lc->str);
}
token = lex_get_token(lc, T_ALL);
- Dmsg1(190, "Restore value=%s\n", lc->str);
+ Dmsg1(900, "Restore value=%s\n", lc->str);
switch (RestoreFields[i].token) {
case 'B':
/* Bootstrap */
}
errno = 0;
res_all.res_job.RestoreJobId = strtol(lc->str, NULL, 0);
- Dmsg1(190, "RestorJobId=%d\n", res_all.res_job.RestoreJobId);
+ Dmsg1(900, "RestorJobId=%d\n", res_all.res_job.RestoreJobId);
if (errno != 0) {
scan_err1(lc, "expected an integer number, got: %s", lc->str);
}
lc->options = options; /* reset original options */
set_bit(index, res_all.hdr.item_present);
}
+#endif