X-Git-Url: https://git.sur5r.net/?a=blobdiff_plain;f=bacula%2Fsrc%2Fdird%2Fdird_conf.c;h=491212b0f65b2e9f57cd49ef635a5da2ee6f3c43;hb=3a0d7d1ee99ae3750af1f9fa63c3b7e5cadd879d;hp=f41dce96e5d31066b13b56fc92fe985a9b6761e6;hpb=5d9617445ed5fd5601274adb713635dafd2f6019;p=bacula%2Fbacula

diff --git a/bacula/src/dird/dird_conf.c b/bacula/src/dird/dird_conf.c
index f41dce96e5..491212b0f6 100644
--- a/bacula/src/dird/dird_conf.c
+++ b/bacula/src/dird/dird_conf.c
@@ -51,20 +51,18 @@ int r_first = R_FIRST;
 int r_last = R_LAST;
 
-pthread_mutex_t res_mutex = PTHREAD_MUTEX_INITIALIZER;
-
 /* Imported subroutines */
-extern void store_run(LEX *lc, struct res_items *item, int index, int pass);
-extern void store_finc(LEX *lc, struct res_items *item, int index, int pass);
-extern void store_inc(LEX *lc, struct res_items *item, int index, int pass);
+extern void store_run(LEX *lc, RES_ITEM *item, int index, int pass);
+extern void store_finc(LEX *lc, RES_ITEM *item, int index, int pass);
+extern void store_inc(LEX *lc, RES_ITEM *item, int index, int pass);
 
 /* Forward referenced subroutines */
 
-void store_jobtype(LEX *lc, struct res_items *item, int index, int pass);
-void store_level(LEX *lc, struct res_items *item, int index, int pass);
-void store_replace(LEX *lc, struct res_items *item, int index, int pass);
-void store_acl(LEX *lc, struct res_items *item, int index, int pass);
+void store_jobtype(LEX *lc, RES_ITEM *item, int index, int pass);
+void store_level(LEX *lc, RES_ITEM *item, int index, int pass);
+void store_replace(LEX *lc, RES_ITEM *item, int index, int pass);
+void store_acl(LEX *lc, RES_ITEM *item, int index, int pass);
 
 /* We build the current resource here as we are
@@ -85,7 +83,7 @@ int res_all_size = sizeof(res_all);
  *
  * name handler value code flags default_value
  */
-static struct res_items dir_items[] = {
+static RES_ITEM dir_items[] = {
    {"name", store_name, ITEM(res_dir.hdr.name), 0, ITEM_REQUIRED, 0},
    {"description", store_str, ITEM(res_dir.hdr.desc), 0, 0, 0},
    {"messages", store_res, ITEM(res_dir.messages), R_MSGS, 0, 0},
@@ -109,7 +107,7 @@ static struct res_items dir_items[] = {
  *
  * name handler value code flags default_value
  */
-static struct res_items con_items[] = {
+static RES_ITEM con_items[] = {
    {"name", store_name, ITEM(res_con.hdr.name), 0, ITEM_REQUIRED, 0},
    {"description", store_str, ITEM(res_con.hdr.desc), 0, 0, 0},
    {"enablessl", store_yesno, ITEM(res_con.enable_ssl), 1, ITEM_DEFAULT, 0},
@@ -133,7 +131,7 @@ static struct res_items con_items[] = {
  * name handler value code flags default_value
  */
-static struct res_items cli_items[] = {
+static RES_ITEM cli_items[] = {
    {"name", store_name, ITEM(res_client.hdr.name), 0, ITEM_REQUIRED, 0},
    {"description", store_str, ITEM(res_client.hdr.desc), 0, 0, 0},
    {"address", store_str, ITEM(res_client.address), 0, ITEM_REQUIRED, 0},
@@ -154,7 +152,7 @@ static struct res_items cli_items[] = {
  *
  * name handler value code flags default_value
  */
-static struct res_items store_items[] = {
+static RES_ITEM store_items[] = {
    {"name", store_name, ITEM(res_store.hdr.name), 0, ITEM_REQUIRED, 0},
    {"description", store_str, ITEM(res_store.hdr.desc), 0, 0, 0},
    {"sdport", store_pint, ITEM(res_store.SDport), 0, ITEM_DEFAULT, 9103},
@@ -177,7 +175,7 @@ static struct res_items store_items[] = {
  *
  * name handler value code flags default_value
  */
-static struct res_items cat_items[] = {
+static RES_ITEM cat_items[] = {
    {"name", store_name, ITEM(res_cat.hdr.name), 0, ITEM_REQUIRED, 0},
    {"description", store_str, ITEM(res_cat.hdr.desc), 0, 0, 0},
    {"address", store_str, ITEM(res_cat.db_address), 0, 0, 0},
@@ -197,7 +195,7 @@ static struct res_items cat_items[] = {
  *
  * name handler value code flags default_value
  */
-struct res_items job_items[] = {
+RES_ITEM job_items[] = {
    {"name", store_name, ITEM(res_job.hdr.name), 0, ITEM_REQUIRED, 0},
    {"description", store_str, ITEM(res_job.hdr.desc), 0, 0, 0},
    {"type", store_jobtype, ITEM(res_job.JobType), 0, ITEM_REQUIRED, 0},
@@ -205,6 +203,9 @@ struct res_items job_items[] = {
    {"messages", store_res, ITEM(res_job.messages), R_MSGS, ITEM_REQUIRED, 0},
    {"storage", store_res, ITEM(res_job.storage), R_STORAGE, ITEM_REQUIRED, 0},
    {"pool", store_res, ITEM(res_job.pool), R_POOL, ITEM_REQUIRED, 0},
+   {"fullbackuppool", store_res, ITEM(res_job.full_pool), R_POOL, 0, 0},
+   {"incrementalbackuppool", store_res, ITEM(res_job.inc_pool), R_POOL, 0, 0},
+   {"differentialbackuppool", store_res, ITEM(res_job.dif_pool), R_POOL, 0, 0},
    {"client", store_res, ITEM(res_job.client), R_CLIENT, ITEM_REQUIRED, 0},
    {"fileset", store_res, ITEM(res_job.fileset), R_FILESET, ITEM_REQUIRED, 0},
    {"schedule", store_res, ITEM(res_job.schedule), R_SCHEDULE, 0, 0},
@@ -223,6 +224,7 @@ struct res_items job_items[] = {
    {"prunefiles", store_yesno, ITEM(res_job.PruneFiles), 1, ITEM_DEFAULT, 0},
    {"prunevolumes",store_yesno, ITEM(res_job.PruneVolumes), 1, ITEM_DEFAULT, 0},
    {"spoolattributes",store_yesno, ITEM(res_job.SpoolAttributes), 1, ITEM_DEFAULT, 0},
+   {"spooldata", store_yesno, ITEM(res_job.spool_data), 1, ITEM_DEFAULT, 0},
    {"runbeforejob", store_str, ITEM(res_job.RunBeforeJob), 0, 0, 0},
    {"runafterjob", store_str, ITEM(res_job.RunAfterJob), 0, 0, 0},
    {"runafterfailedjob", store_str, ITEM(res_job.RunAfterFailedJob), 0, 0, 0},
@@ -240,7 +242,7 @@ struct res_items job_items[] = {
  *
  * name handler value code flags default_value
  */
-static struct res_items fs_items[] = {
+static RES_ITEM fs_items[] = {
    {"name", store_name, ITEM(res_fs.hdr.name), 0, ITEM_REQUIRED, 0},
    {"description", store_str, ITEM(res_fs.hdr.desc), 0, 0, 0},
    {"include", store_inc, NULL, 0, ITEM_NO_EQUALS, 0},
@@ -253,7 +255,7 @@ static struct res_items fs_items[] = {
  *
  * name handler value code flags default_value
  */
-static struct res_items sch_items[] = {
+static RES_ITEM sch_items[] = {
    {"name", store_name, ITEM(res_sch.hdr.name), 0, ITEM_REQUIRED, 0},
    {"description", store_str, ITEM(res_sch.hdr.desc), 0, 0, 0},
    {"run", store_run, ITEM(res_sch.run), 0, 0, 0},
@@ -264,7 +266,7 @@ static struct res_items sch_items[] = {
  *
  * name handler value code flags default_value
  */
-static struct res_items pool_items[] = {
+static RES_ITEM pool_items[] = {
    {"name", store_name, ITEM(res_pool.hdr.name), 0, ITEM_REQUIRED, 0},
    {"description", store_str, ITEM(res_pool.hdr.desc), 0, 0, 0},
    {"pooltype", store_strname, ITEM(res_pool.pool_type), 0, ITEM_REQUIRED, 0},
@@ -292,7 +294,7 @@ static struct res_items pool_items[] = {
  * Counter Resource
  * name handler value code flags default_value
  */
-static struct res_items counter_items[] = {
+static RES_ITEM counter_items[] = {
    {"name", store_name, ITEM(res_counter.hdr.name), 0, ITEM_REQUIRED, 0},
    {"description", store_str, ITEM(res_counter.hdr.desc), 0, 0, 0},
    {"minimum", store_int, ITEM(res_counter.MinValue), 0, ITEM_DEFAULT, 0},
@@ -304,7 +306,7 @@ static struct res_items counter_items[] = {
 
 /* Message resource */
-extern struct res_items msgs_items[];
+extern RES_ITEM msgs_items[];
 
 /*
  * This is the master resource definition.
@@ -315,7 +317,7 @@ extern struct res_items msgs_items[];
  *
  * name items rcode res_head
  */
-struct s_res resources[] = {
+RES_TABLE resources[] = {
    {"director", dir_items, R_DIRECTOR, NULL},
    {"client", cli_items, R_CLIENT, NULL},
    {"job", job_items, R_JOB, NULL},
@@ -349,7 +351,7 @@ struct s_jl joblevels[] = {
    {"Data", L_VERIFY_DATA, JT_VERIFY},
    {" ", L_NONE, JT_ADMIN},
    {" ", L_NONE, JT_RESTORE},
-   {NULL, 0}
+   {NULL, 0, 0}
 };
 
 /* Keywords (RHS) permitted in Job type records
@@ -494,9 +496,10 @@ void dump_resource(int type, RES *reshdr, void sendit(void *sock, char *fmt, ...
         res->res_job.hdr.name, res->res_job.JobType,
         level_to_str(res->res_job.level), res->res_job.Priority,
         res->res_job.MaxConcurrentJobs);
-      sendit(sock, " Resched=%d Times=%d Interval=%s\n",
+      sendit(sock, " Resched=%d Times=%d Interval=%s Spool=%d\n",
         res->res_job.RescheduleOnError, res->res_job.RescheduleTimes,
-        edit_uint64_with_commas(res->res_job.RescheduleInterval, ed1));
+        edit_uint64_with_commas(res->res_job.RescheduleInterval, ed1),
+        res->res_job.spool_data);
      if (res->res_job.client) {
         sendit(sock, " --> ");
         dump_resource(-R_CLIENT, (RES *)res->res_job.client, sendit, sock);
@@ -534,8 +537,18 @@ void dump_resource(int type, RES *reshdr, void sendit(void *sock, char *fmt, ...
      if (res->res_job.pool) {
         sendit(sock, " --> ");
         dump_resource(-R_POOL, (RES *)res->res_job.pool, sendit, sock);
-     } else {
-        sendit(sock, "!!! No Pool resource\n");
+     }
+     if (res->res_job.full_pool) {
+        sendit(sock, " --> ");
+        dump_resource(-R_POOL, (RES *)res->res_job.full_pool, sendit, sock);
+     }
+     if (res->res_job.inc_pool) {
+        sendit(sock, " --> ");
+        dump_resource(-R_POOL, (RES *)res->res_job.inc_pool, sendit, sock);
+     }
+     if (res->res_job.dif_pool) {
+        sendit(sock, " --> ");
+        dump_resource(-R_POOL, (RES *)res->res_job.dif_pool, sendit, sock);
+     }
      if (res->res_job.verify_job) {
         sendit(sock, " --> ");
@@ -548,20 +561,44 @@ void dump_resource(int type, RES *reshdr, void sendit(void *sock, char *fmt, ...
      }
      break;
   case R_FILESET:
+   {
+      int i, j, k;
      sendit(sock, "FileSet: name=%s\n", res->res_fs.hdr.name);
-      for (int i=0; i<res->res_fs.num_includes; i++) {
+      for (i=0; i<res->res_fs.num_includes; i++) {
         INCEXE *incexe = res->res_fs.include_items[i];
-         for (int j=0; j<incexe->name_list.size(); j++) {
-            sendit(sock, " Inc: %s\n", incexe->name_list.get(j));
+         for (j=0; j<incexe->num_opts; j++) {
+            FOPTS *fo = incexe->opts_list[j];
+            sendit(sock, " O %s\n", fo->opts);
+            for (k=0; k<fo->regex.size(); k++) {
+               sendit(sock, " R %s\n", fo->regex.get(k));
+            }
+            for (k=0; k<fo->wild.size(); k++) {
+               sendit(sock, " W %s\n", fo->wild.get(k));
+            }
+            for (k=0; k<fo->base.size(); k++) {
+               sendit(sock, " B %s\n", fo->base.get(k));
+            }
+            sendit(sock, " N\n");
+         }
+         for (j=0; j<incexe->name_list.size(); j++) {
+            sendit(sock, " I %s\n", incexe->name_list.get(j));
+         }
+         if (incexe->name_list.size()) {
+            sendit(sock, " N\n");
         }
      }
-      for (int i=0; i<res->res_fs.num_excludes; i++) {
+
+      for (i=0; i<res->res_fs.num_excludes; i++) {
         INCEXE *incexe = res->res_fs.exclude_items[i];
-         for (int j=0; j<incexe->name_list.size(); j++) {
-            sendit(sock, " Exc: %s\n", incexe->name_list.get(j));
+         for (j=0; j<incexe->name_list.size(); j++) {
+            sendit(sock, " E %s\n", incexe->name_list.get(j));
+         }
+         if (incexe->name_list.size()) {
+            sendit(sock, " N\n");
         }
      }
      break;
+   }
   case R_SCHEDULE:
      if (res->res_sch.run) {
         int i;
@@ -692,8 +729,9 @@ static void free_incexe(INCEXE *incexe)
   incexe->name_list.destroy();
   for (int i=0; i<incexe->num_opts; i++) {
      FOPTS *fopt = incexe->opts_list[i];
-     fopt->match.destroy();
-     fopt->base_list.destroy();
+     fopt->regex.destroy();
+     fopt->wild.destroy();
+     fopt->base.destroy();
      free(fopt);
   }
   if (incexe->opts_list) {
@@ -709,14 +747,11 @@ static void free_incexe(INCEXE *incexe)
  * resource chain is traversed. Mainly we worry about freeing
  * allocated strings (names).
  */
-void free_resource(int type)
+void free_resource(RES *sres, int type)
 {
    int num;
-   URES *res;
-   RES *nres;
-   int rindex = type - r_first;
-
-   res = (URES *)resources[rindex].res_head;
+   RES *nres;  /* next resource if linked */
+   URES *res = (URES *)sres;
 
    if (res == NULL)
      return;
@@ -885,9 +920,8 @@ void free_resource(int type)
   if (res) {
      free(res);
   }
-   resources[rindex].res_head = nres;
   if (nres) {
-      free_resource(type);
+      free_resource(nres, type);
   }
 }
@@ -897,7 +931,7 @@ void free_resource(int type)
  * pointers because they may not have been defined until
  * later in pass 1.
  */
-void save_resource(int type, struct res_items *items, int pass)
+void save_resource(int type, RES_ITEM *items, int pass)
 {
    URES *res;
    int rindex = type - r_first;
@@ -959,6 +993,9 @@ void save_resource(int type, struct res_items *items, int pass)
      res->res_job.fileset = res_all.res_job.fileset;
      res->res_job.storage = res_all.res_job.storage;
      res->res_job.pool = res_all.res_job.pool;
+      res->res_job.full_pool = res_all.res_job.full_pool;
+      res->res_job.inc_pool = res_all.res_job.inc_pool;
+      res->res_job.dif_pool = res_all.res_job.dif_pool;
      res->res_job.verify_job = res_all.res_job.verify_job;
      res->res_job.jobdefs = res_all.res_job.jobdefs;
      break;
@@ -1083,7 +1120,7 @@ void save_resource(int type, struct res_items *items, int pass)
  * Store JobType (backup, verify, restore)
  *
  */
-void store_jobtype(LEX *lc, struct res_items *item, int index, int pass)
+void store_jobtype(LEX *lc, RES_ITEM *item, int index, int pass)
 {
    int token, i;
@@ -1107,7 +1144,7 @@ void store_jobtype(LEX *lc, struct res_items *item, int index, int pass)
  * Store Job Level (Full, Incremental, ...)
  *
  */
-void store_level(LEX *lc, struct res_items *item, int index, int pass)
+void store_level(LEX *lc, RES_ITEM *item, int index, int pass)
 {
    int token, i;
@@ -1127,7 +1164,7 @@ void store_level(LEX *lc, struct res_items *item, int index, int pass)
    set_bit(index, res_all.hdr.item_present);
 }
 
-void store_replace(LEX *lc, struct res_items *item, int index, int pass)
+void store_replace(LEX *lc, RES_ITEM *item, int index, int pass)
 {
    int token, i;
    token = lex_get_token(lc, T_NAME);
@@ -1150,7 +1187,7 @@ void store_replace(LEX *lc, struct res_items *item, int index, int pass)
  * Store ACL (access control list)
  *
  */
-void store_acl(LEX *lc, struct res_items *item, int index, int pass)
+void store_acl(LEX *lc, RES_ITEM *item, int index, int pass)
 {
    int token;
@@ -1182,7 +1219,7 @@ void store_acl(LEX *lc, struct res_items *item, int index, int pass)
  *
  * Backup = Client= FileSet= Level=
  */
-static void store_backup(LEX *lc, struct res_items *item, int index, int pass)
+static void store_backup(LEX *lc, RES_ITEM *item, int index, int pass)
 {
    int token, i;
    RES *res;
@@ -1264,7 +1301,7 @@ static void store_backup(LEX *lc, struct res_items *item, int index, int pass)
  * Restore = JobId= Where= Replace= Bootstrap=
  *
  */
-static void store_restore(LEX *lc, struct res_items *item, int index, int pass)
+static void store_restore(LEX *lc, RES_ITEM *item, int index, int pass)
 {
    int token, i;
    RES *res;
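
Note on usage: the new job_items entries above ("fullbackuppool", "incrementalbackuppool" and "differentialbackuppool", stored with store_res/R_POOL, plus "spooldata", stored with store_yesno) define four additional Job-resource directives that the Director's parser will now accept. A minimal sketch of how they might appear in bacula-dir.conf follows; all resource names (NightlySave, Full-Pool, client1-fd, etc.) are illustrative placeholders, not part of this patch, and the level-dependent choice between the pool overrides is made elsewhere in the Director, not in this file.

   # Hypothetical Job resource using the directives added by this patch.
   # Resource names below are placeholders.
   Job {
     Name = "NightlySave"
     Type = Backup
     Client = client1-fd
     FileSet = "Full Set"
     Storage = File
     Messages = Standard
     Pool = Default                       # fallback when no level-specific pool applies
     FullBackupPool = Full-Pool           # pool override for Full backups
     IncrementalBackupPool = Inc-Pool     # pool override for Incremental backups
     DifferentialBackupPool = Diff-Pool   # pool override for Differential backups
     SpoolData = yes                      # yes/no flag; printed as Spool=%d by dump_resource()
   }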