X-Git-Url: https://git.sur5r.net/?a=blobdiff_plain;f=bacula%2Fsrc%2Fdird%2Fdird_conf.c;h=491212b0f65b2e9f57cd49ef635a5da2ee6f3c43;hb=3a0d7d1ee99ae3750af1f9fa63c3b7e5cadd879d;hp=d98f8b9cd73ece335933337aecb191f29e0bc1e9;hpb=5eb4d01116ff4fa301d4b38de5cefc57635cd8e4;p=bacula%2Fbacula

diff --git a/bacula/src/dird/dird_conf.c b/bacula/src/dird/dird_conf.c
index d98f8b9cd7..491212b0f6 100644
--- a/bacula/src/dird/dird_conf.c
+++ b/bacula/src/dird/dird_conf.c
@@ -22,7 +22,7 @@
  *   Version $Id$
  */
 /*
-   Copyright (C) 2000-2003 Kern Sibbald and John Walker
+   Copyright (C) 2000-2004 Kern Sibbald and John Walker
 
    This program is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License as
@@ -51,21 +51,18 @@ int r_first = R_FIRST;
 int r_last  = R_LAST;
 
-pthread_mutex_t res_mutex = PTHREAD_MUTEX_INITIALIZER;
-
 /* Imported subroutines */
-extern void store_run(LEX *lc, struct res_items *item, int index, int pass);
-extern void store_finc(LEX *lc, struct res_items *item, int index, int pass);
-extern void store_inc(LEX *lc, struct res_items *item, int index, int pass);
+extern void store_run(LEX *lc, RES_ITEM *item, int index, int pass);
+extern void store_finc(LEX *lc, RES_ITEM *item, int index, int pass);
+extern void store_inc(LEX *lc, RES_ITEM *item, int index, int pass);
 
 /* Forward referenced subroutines */
-static void store_backup(LEX *lc, struct res_items *item, int index, int pass);
-static void store_restore(LEX *lc, struct res_items *item, int index, int pass);
-static void store_jobtype(LEX *lc, struct res_items *item, int index, int pass);
-static void store_level(LEX *lc, struct res_items *item, int index, int pass);
-static void store_replace(LEX *lc, struct res_items *item, int index, int pass);
+void store_jobtype(LEX *lc, RES_ITEM *item, int index, int pass);
+void store_level(LEX *lc, RES_ITEM *item, int index, int pass);
+void store_replace(LEX *lc, RES_ITEM *item, int index, int pass);
+void store_acl(LEX *lc, RES_ITEM *item, int index, int pass);
 
 /* We build the current resource here as we are
@@ -86,7 +83,7 @@ int res_all_size = sizeof(res_all);
  *
  *   name          handler      value                  code  flags  default_value
  */
-static struct res_items dir_items[] = {
+static RES_ITEM dir_items[] = {
    {"name",        store_name,     ITEM(res_dir.hdr.name), 0, ITEM_REQUIRED, 0},
    {"description", store_str,      ITEM(res_dir.hdr.desc), 0, 0, 0},
    {"messages",    store_res,      ITEM(res_dir.messages), R_MSGS, 0, 0},
@@ -110,11 +107,20 @@ static struct res_items dir_items[] = {
  *
  *   name          handler      value                  code  flags  default_value
  */
-static struct res_items con_items[] = {
+static RES_ITEM con_items[] = {
    {"name",        store_name,     ITEM(res_con.hdr.name), 0, ITEM_REQUIRED, 0},
    {"description", store_str,      ITEM(res_con.hdr.desc), 0, 0, 0},
    {"enablessl",   store_yesno,    ITEM(res_con.enable_ssl), 1, ITEM_DEFAULT, 0},
    {"password",    store_password, ITEM(res_con.password), 0, ITEM_REQUIRED, 0},
+   {"jobacl",      store_acl,      ITEM(res_con.ACL_lists), Job_ACL, 0, 0},
+   {"clientacl",   store_acl,      ITEM(res_con.ACL_lists), Client_ACL, 0, 0},
+   {"storageacl",  store_acl,      ITEM(res_con.ACL_lists), Storage_ACL, 0, 0},
+   {"scheduleacl", store_acl,      ITEM(res_con.ACL_lists), Schedule_ACL, 0, 0},
+   {"runacl",      store_acl,      ITEM(res_con.ACL_lists), Run_ACL, 0, 0},
+   {"poolacl",     store_acl,      ITEM(res_con.ACL_lists), Pool_ACL, 0, 0},
+   {"commandacl",  store_acl,      ITEM(res_con.ACL_lists), Command_ACL, 0, 0},
+   {"filesetacl",  store_acl,      ITEM(res_con.ACL_lists), FileSet_ACL, 0, 0},
+   {"catalogacl",  store_acl,      ITEM(res_con.ACL_lists), Catalog_ACL, 0, 0},
    {NULL, NULL, NULL, 0, 0, 0}
 };
 
@@ -125,7 +131,7 @@ static struct res_items con_items[] = {
  *
  *   name          handler      value                  code  flags  default_value
  */
-static struct res_items cli_items[] = {
+static RES_ITEM cli_items[] = {
    {"name",        store_name,     ITEM(res_client.hdr.name), 0, ITEM_REQUIRED, 0},
    {"description", store_str,      ITEM(res_client.hdr.desc), 0, 0, 0},
    {"address",     store_str,      ITEM(res_client.address),  0, ITEM_REQUIRED, 0},
@@ -146,7 +152,7 @@ static struct res_items cli_items[] = {
  *
  *   name          handler      value                  code  flags  default_value
  */
-static struct res_items store_items[] = {
+static RES_ITEM store_items[] = {
    {"name",        store_name,     ITEM(res_store.hdr.name),  0, ITEM_REQUIRED, 0},
    {"description", store_str,      ITEM(res_store.hdr.desc),  0, 0, 0},
    {"sdport",      store_pint,     ITEM(res_store.SDport),    0, ITEM_DEFAULT, 9103},
@@ -169,7 +175,7 @@ static struct res_items store_items[] = {
  *
  *   name          handler      value                  code  flags  default_value
  */
-static struct res_items cat_items[] = {
+static RES_ITEM cat_items[] = {
    {"name",        store_name,     ITEM(res_cat.hdr.name),    0, ITEM_REQUIRED, 0},
    {"description", store_str,      ITEM(res_cat.hdr.desc),    0, 0, 0},
    {"address",     store_str,      ITEM(res_cat.db_address),  0, 0, 0},
@@ -189,37 +195,41 @@ static struct res_items cat_items[] = {
  *
  *   name          handler      value                  code  flags  default_value
  */
-static struct res_items job_items[] = {
-   {"name",        store_name,    ITEM(res_job.hdr.name),  0, ITEM_REQUIRED, 0},
-   {"description", store_str,     ITEM(res_job.hdr.desc),  0, 0, 0},
-   {"backup",      store_backup,  ITEM(res_job),           JT_BACKUP, 0, 0},
-   {"verify",      store_backup,  ITEM(res_job),           JT_VERIFY, 0, 0},
-   {"restore",     store_restore, ITEM(res_job),           JT_RESTORE, 0, 0},
-   {"schedule",    store_res,     ITEM(res_job.schedule),  R_SCHEDULE, 0, 0},
-   {"type",        store_jobtype, ITEM(res_job),           0, 0, 0},
-   {"level",       store_level,   ITEM(res_job),           0, 0, 0},
-   {"messages",    store_res,     ITEM(res_job.messages),  R_MSGS, 0, 0},
-   {"storage",     store_res,     ITEM(res_job.storage),   R_STORAGE, 0, 0},
-   {"pool",        store_res,     ITEM(res_job.pool),      R_POOL, 0, 0},
-   {"client",      store_res,     ITEM(res_job.client),    R_CLIENT, 0, 0},
-   {"fileset",     store_res,     ITEM(res_job.fileset),   R_FILESET, 0, 0},
-   {"verifyjob",   store_res,     ITEM(res_job.verify_job), R_JOB, 0, 0},
-   {"where",       store_dir,     ITEM(res_job.RestoreWhere), 0, 0, 0},
-   {"replace",     store_replace, ITEM(res_job.replace),   0, ITEM_DEFAULT, REPLACE_ALWAYS},
-   {"bootstrap",   store_dir,     ITEM(res_job.RestoreBootstrap), 0, 0, 0},
-   {"maxruntime",    store_time,  ITEM(res_job.MaxRunTime),    0, 0, 0},
-   {"maxstartdelay", store_time,  ITEM(res_job.MaxStartDelay), 0, 0, 0},
+RES_ITEM job_items[] = {
+   {"name",        store_name,    ITEM(res_job.hdr.name),   0, ITEM_REQUIRED, 0},
+   {"description", store_str,     ITEM(res_job.hdr.desc),   0, 0, 0},
+   {"type",        store_jobtype, ITEM(res_job.JobType),    0, ITEM_REQUIRED, 0},
+   {"level",       store_level,   ITEM(res_job.level),      0, 0, 0},
+   {"messages",    store_res,     ITEM(res_job.messages),   R_MSGS, ITEM_REQUIRED, 0},
+   {"storage",     store_res,     ITEM(res_job.storage),    R_STORAGE, ITEM_REQUIRED, 0},
+   {"pool",        store_res,     ITEM(res_job.pool),       R_POOL, ITEM_REQUIRED, 0},
+   {"fullbackuppool",         store_res, ITEM(res_job.full_pool), R_POOL, 0, 0},
+   {"incrementalbackuppool",  store_res, ITEM(res_job.inc_pool),  R_POOL, 0, 0},
+   {"differentialbackuppool", store_res, ITEM(res_job.dif_pool),  R_POOL, 0, 0},
+   {"client",      store_res,     ITEM(res_job.client),     R_CLIENT, ITEM_REQUIRED, 0},
+   {"fileset",     store_res,     ITEM(res_job.fileset),    R_FILESET, ITEM_REQUIRED, 0},
+   {"schedule",    store_res,     ITEM(res_job.schedule),   R_SCHEDULE, 0, 0},
+   {"verifyjob",   store_res,     ITEM(res_job.verify_job), R_JOB, 0, 0},
+   {"jobdefs",     store_res,     ITEM(res_job.jobdefs),    R_JOBDEFS, 0, 0},
+   {"where",       store_dir,     ITEM(res_job.RestoreWhere), 0, 0, 0},
+   {"bootstrap",   store_dir,     ITEM(res_job.RestoreBootstrap), 0, 0, 0},
+   {"writebootstrap", store_dir,  ITEM(res_job.WriteBootstrap), 0, 0, 0},
+   {"replace",     store_replace, ITEM(res_job.replace),    0, ITEM_DEFAULT, REPLACE_ALWAYS},
+   {"maxruntime",    store_time,  ITEM(res_job.MaxRunTime),    0, 0, 0},
+   {"maxwaittime",   store_time,  ITEM(res_job.MaxWaitTime),   0, 0, 0},
+   {"maxstartdelay", store_time,  ITEM(res_job.MaxStartDelay), 0, 0, 0},
+   {"jobretention",  store_time,  ITEM(res_job.JobRetention),  0, 0, 0},
    {"prefixlinks", store_yesno, ITEM(res_job.PrefixLinks), 1, ITEM_DEFAULT, 0},
    {"prunejobs",   store_yesno, ITEM(res_job.PruneJobs),   1, ITEM_DEFAULT, 0},
    {"prunefiles",  store_yesno, ITEM(res_job.PruneFiles),  1, ITEM_DEFAULT, 0},
-   {"prunevolumes", store_yesno, ITEM(res_job.PruneVolumes), 1, ITEM_DEFAULT, 0},
+   {"prunevolumes",    store_yesno, ITEM(res_job.PruneVolumes),    1, ITEM_DEFAULT, 0},
+   {"spoolattributes", store_yesno, ITEM(res_job.SpoolAttributes), 1, ITEM_DEFAULT, 0},
+   {"spooldata",       store_yesno, ITEM(res_job.spool_data),      1, ITEM_DEFAULT, 0},
    {"runbeforejob",        store_str, ITEM(res_job.RunBeforeJob),       0, 0, 0},
    {"runafterjob",         store_str, ITEM(res_job.RunAfterJob),        0, 0, 0},
    {"runafterfailedjob",   store_str, ITEM(res_job.RunAfterFailedJob),  0, 0, 0},
    {"clientrunbeforejob",  store_str, ITEM(res_job.ClientRunBeforeJob), 0, 0, 0},
    {"clientrunafterjob",   store_str, ITEM(res_job.ClientRunAfterJob),  0, 0, 0},
-   {"spoolattributes", store_yesno, ITEM(res_job.SpoolAttributes), 1, ITEM_DEFAULT, 0},
-   {"writebootstrap",  store_dir,   ITEM(res_job.WriteBootstrap),  0, 0, 0},
    {"maximumconcurrentjobs", store_pint,  ITEM(res_job.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
    {"rescheduleonerror",     store_yesno, ITEM(res_job.RescheduleOnError), 1, ITEM_DEFAULT, 0},
    {"rescheduleinterval",    store_time,  ITEM(res_job.RescheduleInterval), 0, ITEM_DEFAULT, 60 * 30},
@@ -232,7 +242,7 @@ static struct res_items job_items[] = {
  *
  *   name          handler      value                  code  flags  default_value
  */
-static struct res_items fs_items[] = {
+static RES_ITEM fs_items[] = {
    {"name",        store_name, ITEM(res_fs.hdr.name), 0, ITEM_REQUIRED, 0},
    {"description", store_str,  ITEM(res_fs.hdr.desc), 0, 0, 0},
    {"include",     store_inc,  NULL,                  0, ITEM_NO_EQUALS, 0},
@@ -245,28 +255,18 @@ static struct res_items fs_items[] = {
  *
  *   name          handler      value                  code  flags  default_value
  */
-static struct res_items sch_items[] = {
+static RES_ITEM sch_items[] = {
    {"name",        store_name, ITEM(res_sch.hdr.name), 0, ITEM_REQUIRED, 0},
    {"description", store_str,  ITEM(res_sch.hdr.desc), 0, 0, 0},
    {"run",         store_run,  ITEM(res_sch.run),      0, 0, 0},
    {NULL, NULL, NULL, 0, 0, 0}
 };
 
-/* Group resource -- not implemented
- *
- *   name          handler      value                  code  flags  default_value
- */
-static struct res_items group_items[] = {
-   {"name",        store_name, ITEM(res_group.hdr.name), 0, ITEM_REQUIRED, 0},
-   {"description", store_str,  ITEM(res_group.hdr.desc), 0, 0, 0},
-   {NULL, NULL, NULL, 0, 0, 0}
-};
-
 /* Pool resource
  *
  *   name          handler      value                  code  flags  default_value
  */
-static struct res_items pool_items[] = {
+static RES_ITEM pool_items[] = {
    {"name",        store_name,    ITEM(res_pool.hdr.name),  0, ITEM_REQUIRED, 0},
    {"description", store_str,     ITEM(res_pool.hdr.desc),  0, 0, 0},
    {"pooltype",    store_strname, ITEM(res_pool.pool_type), 0, ITEM_REQUIRED, 0},
@@ -294,7 +294,7 @@ static struct res_items pool_items[] = {
  * Counter Resource
  *   name          handler      value                  code  flags  default_value
  */
-static struct res_items counter_items[] = {
+static RES_ITEM counter_items[] = {
    {"name",        store_name, ITEM(res_counter.hdr.name), 0, ITEM_REQUIRED, 0},
    {"description", store_str,  ITEM(res_counter.hdr.desc), 0, 0, 0},
    {"minimum",     store_int,  ITEM(res_counter.MinValue), 0, ITEM_DEFAULT, 0},
@@ -306,7 +306,7 @@ static struct res_items counter_items[] = {
 
 /* Message resource */
-extern struct res_items msgs_items[];
+extern RES_ITEM msgs_items[];
 
 /*
  * This is the master resource definition.
@@ -317,7 +317,7 @@ extern struct res_items msgs_items[];
  *
  *  name             items          rcode        res_head
  */
-struct s_res resources[] = {
+RES_TABLE resources[] = {
    {"director",      dir_items,     R_DIRECTOR,  NULL},
    {"client",        cli_items,     R_CLIENT,    NULL},
    {"job",           job_items,     R_JOB,       NULL},
@@ -325,11 +325,11 @@ struct s_res resources[] = {
    {"catalog",       cat_items,     R_CATALOG,   NULL},
    {"schedule",      sch_items,     R_SCHEDULE,  NULL},
    {"fileset",       fs_items,      R_FILESET,   NULL},
-   {"group",         group_items,   R_GROUP,     NULL},
    {"pool",          pool_items,    R_POOL,      NULL},
    {"messages",      msgs_items,    R_MSGS,      NULL},
    {"counter",       counter_items, R_COUNTER,   NULL},
    {"console",       con_items,     R_CONSOLE,   NULL},
+   {"jobdefs",       job_items,     R_JOBDEFS,   NULL},
    {NULL,            NULL,          0,           NULL}
 };
 
@@ -351,7 +351,7 @@ struct s_jl joblevels[] = {
    {"Data",          L_VERIFY_DATA,       JT_VERIFY},
    {" ",             L_NONE,              JT_ADMIN},
    {" ",             L_NONE,              JT_RESTORE},
-   {NULL,            0}
+   {NULL,            0, 0}
 };
 
 /* Keywords (RHS) permitted in Job type records
@@ -366,6 +366,7 @@ struct s_jt jobtypes[] = {
    {NULL,            0}
 };
 
+#ifdef old_deprecated_code
 /* Keywords (RHS) permitted in Backup and Verify records */
 static struct s_kw BakVerFields[] = {
@@ -385,6 +386,7 @@ static struct s_kw RestoreFields[] = {
    {"bootstrap",     'B'},      /* bootstrap file */
    {NULL,            0}
 };
+#endif
 
 /* Options permitted in Restore replace= */
 struct s_kw ReplaceOptions[] = {
@@ -401,7 +403,7 @@ char *level_to_str(int level)
    static char level_no[30];
    char *str = level_no;
 
-   sprintf(level_no, "%d", level);    /* default if not found */
+   bsnprintf(level_no, sizeof(level_no), "%d", level);    /* default if not found */
    for (i=0; joblevels[i].level_name; i++) {
       if (level == joblevels[i].level) {
          str = joblevels[i].level_name;
@@ -488,13 +490,16 @@ void dump_resource(int type, RES *reshdr, void sendit(void *sock, char *fmt, ...
       res->res_cat.db_port, res->res_cat.db_name, NPRT(res->res_cat.db_user));
       break;
    case R_JOB:
-      sendit(sock, "Job: name=%s JobType=%d level=%s Priority=%d MaxJobs=%u\n",
+   case R_JOBDEFS:
+      sendit(sock, "%s: name=%s JobType=%d level=%s Priority=%d MaxJobs=%u\n",
+         type == R_JOB ? "Job" : "JobDefs",
         res->res_job.hdr.name, res->res_job.JobType,
         level_to_str(res->res_job.level), res->res_job.Priority,
         res->res_job.MaxConcurrentJobs);
-      sendit(sock, " Resched=%d Times=%d Interval=%s\n",
+      sendit(sock, " Resched=%d Times=%d Interval=%s Spool=%d\n",
        res->res_job.RescheduleOnError, res->res_job.RescheduleTimes,
-       edit_uint64_with_commas(res->res_job.RescheduleInterval, ed1));
+       edit_uint64_with_commas(res->res_job.RescheduleInterval, ed1),
+       res->res_job.spool_data);
       if (res->res_job.client) {
          sendit(sock, " --> ");
         dump_resource(-R_CLIENT, (RES *)res->res_job.client, sendit, sock);
@@ -532,12 +537,22 @@ void dump_resource(int type, RES *reshdr, void sendit(void *sock, char *fmt, ...
       if (res->res_job.pool) {
        sendit(sock, " --> ");
        dump_resource(-R_POOL, (RES *)res->res_job.pool, sendit, sock);
-      } else {
-        sendit(sock, "!!! No Pool resource\n");
+      }
+      if (res->res_job.full_pool) {
+         sendit(sock, " --> ");
+         dump_resource(-R_POOL, (RES *)res->res_job.full_pool, sendit, sock);
+      }
+      if (res->res_job.inc_pool) {
+         sendit(sock, " --> ");
+         dump_resource(-R_POOL, (RES *)res->res_job.inc_pool, sendit, sock);
+      }
+      if (res->res_job.dif_pool) {
+         sendit(sock, " --> ");
+         dump_resource(-R_POOL, (RES *)res->res_job.dif_pool, sendit, sock);
       }
       if (res->res_job.verify_job) {
          sendit(sock, " --> ");
-         dump_resource(-R_JOB, (RES *)res->res_job.verify_job, sendit, sock);
+         dump_resource(-type, (RES *)res->res_job.verify_job, sendit, sock);
       }
       break;
       if (res->res_job.messages) {
@@ -546,25 +561,49 @@ void dump_resource(int type, RES *reshdr, void sendit(void *sock, char *fmt, ...
       }
       break;
    case R_FILESET:
+   {
+      int i, j, k;
       sendit(sock, "FileSet: name=%s\n", res->res_fs.hdr.name);
-      for (int i=0; i<res->res_fs.num_includes; i++) {
+      for (i=0; i<res->res_fs.num_includes; i++) {
          INCEXE *incexe = res->res_fs.include_items[i];
-         for (int j=0; j<incexe->name_list.size(); j++) {
-            sendit(sock, " Inc: %s\n", incexe->name_list.get(j));
+         for (j=0; j<incexe->num_opts; j++) {
+            FOPTS *fo = incexe->opts_list[j];
+            sendit(sock, " O %s\n", fo->opts);
+            for (k=0; k<fo->regex.size(); k++) {
+               sendit(sock, " R %s\n", fo->regex.get(k));
+            }
+            for (k=0; k<fo->wild.size(); k++) {
+               sendit(sock, " W %s\n", fo->wild.get(k));
+            }
+            for (k=0; k<fo->base.size(); k++) {
+               sendit(sock, " B %s\n", fo->base.get(k));
+            }
+            sendit(sock, " N\n");
+         }
+         for (j=0; j<incexe->name_list.size(); j++) {
+            sendit(sock, " I %s\n", incexe->name_list.get(j));
+         }
+         if (incexe->name_list.size()) {
+            sendit(sock, " N\n");
          }
       }
-      for (int i=0; i<res->res_fs.num_excludes; i++) {
+
+      for (i=0; i<res->res_fs.num_excludes; i++) {
          INCEXE *incexe = res->res_fs.exclude_items[i];
-         for (int j=0; j<incexe->name_list.size(); j++) {
-            sendit(sock, " Exc: %s\n", incexe->name_list.get(j));
+         for (j=0; j<incexe->name_list.size(); j++) {
+            sendit(sock, " E %s\n", incexe->name_list.get(j));
+         }
+         if (incexe->name_list.size()) {
+            sendit(sock, " N\n");
          }
       }
       break;
+   }
    case R_SCHEDULE:
      if (res->res_sch.run) {
         int i;
         RUN *run = res->res_sch.run;
-        char buf[1000], num[10];
+        char buf[1000], num[30];
         sendit(sock, "Schedule: name=%s\n", res->res_sch.hdr.name);
         if (!run) {
            break;
         }
@@ -574,47 +613,56 @@ next_run:
        bstrncpy(buf, " hour=", sizeof(buf));
        for (i=0; i<24; i++) {
           if (bit_is_set(i, run->hour)) {
-             sprintf(num, "%d ", i);
+             bsnprintf(num, sizeof(num), "%d ", i);
              bstrncat(buf, num, sizeof(buf));
           }
        }
-       strcat(buf, "\n");
+       bstrncat(buf, "\n", sizeof(buf));
        sendit(sock, buf);
-       strcpy(buf, " mday=");
+       bstrncpy(buf, " mday=", sizeof(buf));
        for (i=0; i<31; i++) {
           if (bit_is_set(i, run->mday)) {
-             sprintf(num, "%d ", i+1);
-             strcat(buf, num);
+             bsnprintf(num, sizeof(num), "%d ", i);
+             bstrncat(buf, num, sizeof(buf));
           }
        }
-       strcat(buf, "\n");
+       bstrncat(buf, "\n", sizeof(buf));
        sendit(sock, buf);
-       strcpy(buf, " month=");
+       bstrncpy(buf, " month=", sizeof(buf));
       for (i=0; i<12; i++) {
          if (bit_is_set(i, run->month)) {
-            sprintf(num, "%d ", i+1);
-            strcat(buf, num);
+            bsnprintf(num, sizeof(num), "%d ", i);
+            bstrncat(buf, num, sizeof(buf));
         }
       }
-      strcat(buf, "\n");
+      bstrncat(buf, "\n", sizeof(buf));
      sendit(sock, buf);
-     strcpy(buf, " wday=");
+     bstrncpy(buf, " wday=", sizeof(buf));
      for (i=0; i<7; i++) {
         if (bit_is_set(i, run->wday)) {
-           sprintf(num, "%d ", i+1);
-           strcat(buf, num);
+           bsnprintf(num, sizeof(num), "%d ", i);
+           bstrncat(buf, num, sizeof(buf));
        }
      }
-     strcat(buf, "\n");
+     bstrncat(buf, "\n", sizeof(buf));
      sendit(sock, buf);
-     strcpy(buf, " wpos=");
+     bstrncpy(buf, " wom=", sizeof(buf));
      for (i=0; i<5; i++) {
-        if (bit_is_set(i, run->wpos)) {
-           sprintf(num, "%d ", i+1);
-           strcat(buf, num);
+        if (bit_is_set(i, run->wom)) {
+           bsnprintf(num, sizeof(num), "%d ", i);
+           bstrncat(buf, num, sizeof(buf));
+        }
+     }
+     bstrncat(buf, "\n", sizeof(buf));
+     sendit(sock, buf);
+     bstrncpy(buf, " woy=", sizeof(buf));
+     for (i=0; i<54; i++) {
+        if (bit_is_set(i, run->woy)) {
+           bsnprintf(num, sizeof(num), "%d ", i);
+           bstrncat(buf, num, sizeof(buf));
        }
      }
-     strcat(buf, "\n");
+     bstrncat(buf, "\n", sizeof(buf));
      sendit(sock, buf);
      sendit(sock, " mins=%d\n", run->minute);
      if (run->pool) {
@@ -638,9 +686,6 @@ next_run:
      sendit(sock, "Schedule: name=%s\n", res->res_sch.hdr.name);
    }
    break;
-   case R_GROUP:
-      sendit(sock, "Group: name=%s\n", res->res_group.hdr.name);
-      break;
    case R_POOL:
      sendit(sock, "Pool: name=%s PoolType=%s\n", res->res_pool.hdr.name,
             res->res_pool.pool_type);
@@ -684,8 +729,9 @@ static void free_incexe(INCEXE *incexe)
    incexe->name_list.destroy();
    for (int i=0; i<incexe->num_opts; i++) {
       FOPTS *fopt = incexe->opts_list[i];
-      fopt->match.destroy();
-      fopt->base_list.destroy();
+      fopt->regex.destroy();
+      fopt->wild.destroy();
+      fopt->base.destroy();
       free(fopt);
    }
    if (incexe->opts_list) {
@@ -695,20 +741,17 @@ static void free_incexe(INCEXE *incexe)
 }
 
 /*
- * Free memory of resource.
+ * Free memory of resource -- called when daemon terminates.
  * NB, we don't need to worry about freeing any references
  * to other resources as they will be freed when that
  * resource chain is traversed.  Mainly we worry about freeing
  * allocated strings (names).
 */
-void free_resource(int type)
+void free_resource(RES *sres, int type)
 {
    int num;
-   URES *res;
-   RES *nres;
-   int rindex = type - r_first;
-
-   res = (URES *)resources[rindex].res_head;
+   RES *nres;                    /* next resource if linked */
+   URES *res = (URES *)sres;
 
    if (res == NULL)
       return;
@@ -749,6 +792,12 @@ void free_resource(int type)
       if (res->res_con.password) {
          free(res->res_con.password);
       }
+      for (int i=0; i<Num_ACL; i++) {
+         if (res->res_con.ACL_lists[i]) {
+            delete res->res_con.ACL_lists[i];
+            res->res_con.ACL_lists[i] = NULL;
+         }
+      }
       break;
    case R_CLIENT:
       if (res->res_client.address) {
@@ -828,6 +877,7 @@ void free_resource(int type)
       }
       break;
    case R_JOB:
+   case R_JOBDEFS:
      if (res->res_job.RestoreWhere) {
         free(res->res_job.RestoreWhere);
      }
@@ -863,8 +913,6 @@ void free_resource(int type)
      free_msgs_res((MSGS *)res);      /* free message resource */
      res = NULL;
      break;
-   case R_GROUP:
-     break;
    default:
      printf("Unknown resource type %d in free_resource.\n", type);
    }
@@ -872,9 +920,8 @@ void free_resource(int type)
   if (res) {
      free(res);
   }
-  resources[rindex].res_head = nres;
  if (nres) {
-     free_resource(type);
+     free_resource(nres, type);
  }
 }
 
@@ -884,30 +931,34 @@ void free_resource(int type)
  * pointers because they may not have been defined until
  * later in pass 1.
 */
-void save_resource(int type, struct res_items *items, int pass)
+void save_resource(int type, RES_ITEM *items, int pass)
 {
    URES *res;
    int rindex = type - r_first;
    int i, size;
    int error = 0;
 
-   /*
-    * Ensure that all required items are present
-    */
-   for (i=0; items[i].name; i++) {
-      if (items[i].flags & ITEM_REQUIRED) {
-         if (!bit_is_set(i, res_all.res_dir.hdr.item_present)) {
-            Emsg2(M_ERROR_TERM, 0, "%s item is required in %s resource, but not found.\n",
-              items[i].name, resources[rindex]);
-         }
-      }
-      /* If this triggers, take a look at lib/parse_conf.h */
-      if (i >= MAX_RES_ITEMS) {
-         Emsg1(M_ERROR_TERM, 0, "Too many items in %s resource\n", resources[rindex]);
+   /* Check Job requirements after applying JobDefs */
+   if (type != R_JOB && type != R_JOBDEFS) {
+      /*
+       * Ensure that all required items are present
+       */
+      for (i=0; items[i].name; i++) {
+         if (items[i].flags & ITEM_REQUIRED) {
+            if (!bit_is_set(i, res_all.res_dir.hdr.item_present)) {
+               Emsg2(M_ERROR_TERM, 0, "%s item is required in %s resource, but not found.\n",
+                 items[i].name, resources[rindex]);
+            }
+         }
+         /* If this triggers, take a look at lib/parse_conf.h */
+         if (i >= MAX_RES_ITEMS) {
+            Emsg1(M_ERROR_TERM, 0, "Too many items in %s resource\n", resources[rindex]);
+         }
      }
    }
 
-   /* During pass 2 in each "store" routine, we looked up pointers
+   /*
+    * During pass 2 in each "store" routine, we looked up pointers
    * to all the resources referrenced in the current resource, now we
    * must copy their addresses from the static record to the allocated
    * record.
@@ -918,7 +969,6 @@ void save_resource(int type, struct res_items *items, int pass)
    case R_CONSOLE:
    case R_CATALOG:
    case R_STORAGE:
-   case R_GROUP:
    case R_POOL:
    case R_MSGS:
    case R_FILESET:
@@ -932,8 +982,10 @@ void save_resource(int type, struct res_items *items, int pass)
       res->res_dir.messages = res_all.res_dir.messages;
       break;
    case R_JOB:
-      if ((res = (URES *)GetResWithName(R_JOB, res_all.res_dir.hdr.name)) == NULL) {
-         Emsg1(M_ERROR_TERM, 0, "Cannot find Job resource %s\n", res_all.res_dir.hdr.name);
+   case R_JOBDEFS:
+      if ((res = (URES *)GetResWithName(type, res_all.res_dir.hdr.name)) == NULL) {
+         Emsg1(M_ERROR_TERM, 0, "Cannot find Job resource %s\n",
+               res_all.res_dir.hdr.name);
       }
       res->res_job.messages = res_all.res_job.messages;
       res->res_job.schedule = res_all.res_job.schedule;
@@ -941,24 +993,11 @@ void save_resource(int type, struct res_items *items, int pass)
       res->res_job.fileset = res_all.res_job.fileset;
       res->res_job.storage = res_all.res_job.storage;
       res->res_job.pool = res_all.res_job.pool;
+      res->res_job.full_pool = res_all.res_job.full_pool;
+      res->res_job.inc_pool = res_all.res_job.inc_pool;
+      res->res_job.dif_pool = res_all.res_job.dif_pool;
       res->res_job.verify_job = res_all.res_job.verify_job;
-      if (res->res_job.JobType == 0) {
-         Emsg1(M_ERROR_TERM, 0, "Job Type not defined for Job resource %s\n", res_all.res_dir.hdr.name);
-      }
-      if (res->res_job.level != 0) {
-         int i;
-         for (i=0; joblevels[i].level_name; i++) {
-            if (joblevels[i].level == res->res_job.level &&
-                joblevels[i].job_type == res->res_job.JobType) {
-               i = 0;
-               break;
-            }
-         }
-         if (i != 0) {
-            Emsg1(M_ERROR_TERM, 0, "Inappropriate level specified in Job resource %s\n",
-               res_all.res_dir.hdr.name);
-         }
-      }
+      res->res_job.jobdefs = res_all.res_job.jobdefs;
       break;
    case R_COUNTER:
      if ((res = (URES *)GetResWithName(R_COUNTER, res_all.res_counter.hdr.name)) == NULL) {
@@ -975,7 +1014,8 @@ void save_resource(int type, struct res_items *items, int pass)
      res->res_client.catalog = res_all.res_client.catalog;
      break;
   case R_SCHEDULE:
-      /* Schedule is a bit different in that it contains a RUN record
+      /*
+       * Schedule is a bit different in that it contains a RUN record
       * chain which isn't a "named" resource. This chain was linked
       * in by run_conf.c during pass 2, so here we jam the pointer
       * into the Schedule resource.
@@ -1024,6 +1064,7 @@ void save_resource(int type, struct res_items *items, int pass)
      size = sizeof(CAT);
      break;
   case R_JOB:
+   case R_JOBDEFS:
      size = sizeof(JOB);
      break;
   case R_FILESET:
@@ -1032,9 +1073,6 @@ void save_resource(int type, struct res_items *items, int pass)
   case R_SCHEDULE:
      size = sizeof(SCHED);
      break;
-   case R_GROUP:
-      size = sizeof(GROUP);
-      break;
   case R_POOL:
      size = sizeof(POOL);
      break;
@@ -1052,6 +1090,9 @@ void save_resource(int type, struct res_items *items, int pass)
   }
   /* Common */
   if (!error) {
+      if (type == R_JOBDEFS) {
+         Dmsg0(200, "Storing JobDefs definition.\n");
+      }
      res = (URES *)malloc(size);
      memcpy(res, &res_all, size);
      if (!resources[rindex].res_head) {
@@ -1079,7 +1120,7 @@ void save_resource(int type, struct res_items *items, int pass)
 * Store JobType (backup, verify, restore)
 *
 */
-static void store_jobtype(LEX *lc, struct res_items *item, int index, int pass)
+void store_jobtype(LEX *lc, RES_ITEM *item, int index, int pass)
 {
   int token, i;
 
@@ -1087,7 +1128,7 @@ static void store_jobtype(LEX *lc, struct res_items *item, int index, int pass)
   /* Store the type both pass 1 and pass 2 */
   for (i=0; jobtypes[i].type_name; i++) {
     if (strcasecmp(lc->str, jobtypes[i].type_name) == 0) {
-        ((JOB *)(item->value))->JobType = jobtypes[i].job_type;
+        *(int *)(item->value) = jobtypes[i].job_type;
       i = 0;
       break;
     }
@@ -1103,7 +1144,7 @@ static void store_jobtype(LEX *lc, struct res_items *item, int index, int pass)
 * Store Job Level (Full, Incremental, ...)
 *
 */
-static void store_level(LEX *lc, struct res_items *item, int index, int pass)
+void store_level(LEX *lc, RES_ITEM *item, int index, int pass)
 {
   int token, i;
 
@@ -1111,7 +1152,7 @@ static void store_level(LEX *lc, struct res_items *item, int index, int pass)
   /* Store the level pass 2 so that type is defined */
   for (i=0; joblevels[i].level_name; i++) {
     if (strcasecmp(lc->str, joblevels[i].level_name) == 0) {
-        ((JOB *)(item->value))->level = joblevels[i].level;
+        *(int *)(item->value) = joblevels[i].level;
       i = 0;
       break;
     }
@@ -1123,7 +1164,7 @@ static void store_level(LEX *lc, struct res_items *item, int index, int pass)
   set_bit(index, res_all.hdr.item_present);
 }
 
-static void store_replace(LEX *lc, struct res_items *item, int index, int pass)
+void store_replace(LEX *lc, RES_ITEM *item, int index, int pass)
 {
   int token, i;
   token = lex_get_token(lc, T_NAME);
@@ -1142,6 +1183,35 @@ static void store_replace(LEX *lc, struct res_items *item, int index, int pass)
   set_bit(index, res_all.hdr.item_present);
 }
 
+/*
+ * Store ACL (access control list)
+ *
+ */
+void store_acl(LEX *lc, RES_ITEM *item, int index, int pass)
+{
+   int token;
+
+   for (;;) {
+      token = lex_get_token(lc, T_NAME);
+      if (pass == 1) {
+         if (((alist **)item->value)[item->code] == NULL) {
+            ((alist **)item->value)[item->code] = new alist(10, owned_by_alist);
+//          Dmsg1(400, "Defined new ACL alist at %d\n", item->code);
+         }
+         ((alist **)item->value)[item->code]->append(bstrdup(lc->str));
+//       Dmsg2(400, "Appended to %d %s\n", item->code, lc->str);
+      }
+      token = lex_get_token(lc, T_ALL);
+      if (token == T_COMMA) {
+         continue;                    /* get another ACL */
+      }
+      break;
+   }
+   set_bit(index, res_all.hdr.item_present);
+}
+
+
+#ifdef old_deprecated_code
 /*
 * Store backup/verify info for Job record
 *
@@ -1149,7 +1219,7 @@ static void store_replace(LEX *lc, struct res_items *item, int index, int pass)
 *
 *    Backup = Client=<client-name> FileSet=<fileset-name> Level=<level>
 */
-static void store_backup(LEX *lc, struct res_items *item, int index, int pass)
+static void store_backup(LEX *lc, RES_ITEM *item, int index, int pass)
 {
   int token, i;
   RES *res;
@@ -1231,7 +1301,7 @@ static void store_backup(LEX *lc, struct res_items *item, int index, int pass)
 *    Restore = JobId=<job-id> Where=<root-directory> Replace=<yes|no> Bootstrap=<file>
 *
 */
-static void store_restore(LEX *lc, struct res_items *item, int index, int pass)
+static void store_restore(LEX *lc, RES_ITEM *item, int index, int pass)
 {
   int token, i;
   RES *res;
@@ -1338,3 +1408,4 @@ static void store_restore(LEX *lc, struct res_items *item, int index, int pass)
   lc->options = options;             /* reset original options */
   set_bit(index, res_all.hdr.item_present);
 }
+#endif
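
Editorial note (not part of the patch above): the new store_acl() routine stores each Console ACL directive as an alist of strings, indexed by the directive's code (Job_ACL, Client_ACL, and so on). The sketch below shows how such a list might be consulted; it only uses alist calls that appear in this diff (size(), get()), but the helper name console_acl_allows(), the CONRES struct name, and the "*all*" wildcard convention are assumptions of this sketch, not something the diff itself defines.

/*
 * Illustrative sketch only -- not part of this change.
 * Assumes the Bacula director headers (Console resource, alist) are available.
 */
#include "bacula.h"
#include "dird.h"

static bool console_acl_allows(CONRES *cons, int acl_code, const char *name)
{
   alist *list = cons->ACL_lists[acl_code];   /* filled in by store_acl() */

   if (!list) {
      return false;                   /* no ACL of this kind was configured */
   }
   for (int i = 0; i < list->size(); i++) {
      char *item = (char *)list->get(i);
      /* "*all*" is assumed here as a match-everything entry */
      if (strcasecmp(item, "*all*") == 0 || strcasecmp(item, name) == 0) {
         return true;
      }
   }
   return false;
}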
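Likewise, the three new Job fields (full_pool, inc_pool, dif_pool) parsed from fullbackuppool, incrementalbackuppool and differentialbackuppool let a job override its pool per backup level. The selection logic itself is not in this diff; the sketch below only illustrates the intent, and the helper name select_pool_for_level() is hypothetical.

/* Illustrative sketch only -- the real pool selection is done elsewhere in the director. */
static POOL *select_pool_for_level(JOB *job, int level)
{
   switch (level) {
   case L_FULL:
      return job->full_pool ? job->full_pool : job->pool;
   case L_INCREMENTAL:
      return job->inc_pool ? job->inc_pool : job->pool;
   case L_DIFFERENTIAL:
      return job->dif_pool ? job->dif_pool : job->pool;
   default:
      return job->pool;
   }
}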