2 * Main configuration file parser for Bacula Directors,
3 * some parts may be split into separate files such as
4 * the schedule configuration (run_conf.c).
6 * Note, the configuration file parser consists of three parts
8 * 1. The generic lexical scanner in lib/lex.c and lib/lex.h
10 * 2. The generic config scanner in lib/parse_config.c and
12 * These files contain the parser code, some utility
13 * routines, and the common store routines (name, int,
16 * 3. The daemon specific file, which contains the Resource
17 * definitions as well as any specific store routines
18 * for the resource records.
20 * Kern Sibbald, January MM
25 Copyright (C) 2000-2004 Kern Sibbald and John Walker
27 This program is free software; you can redistribute it and/or
28 modify it under the terms of the GNU General Public License as
29 published by the Free Software Foundation; either version 2 of
30 the License, or (at your option) any later version.
32 This program is distributed in the hope that it will be useful,
33 but WITHOUT ANY WARRANTY; without even the implied warranty of
34 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
35 General Public License for more details.
37 You should have received a copy of the GNU General Public
38 License along with this program; if not, write to the Free
39 Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
47 /* Define the first and last resource ID record
48 * types. Note, these should be unique for each
49 * daemon though not a requirement.
/* NOTE(review): lines are elided in this extract -- the matching
 * "int r_last = R_LAST;" definition (referenced by the R_LAST arithmetic
 * below) is not visible here; confirm against the full file. */
51 int r_first = R_FIRST;
/* Static storage for the per-type resource chains; one head slot per
 * resource type in [R_FIRST, R_LAST]. Exposed via the res_head pointer. */
53 static RES *sres_head[R_LAST - R_FIRST + 1];
54 RES **res_head = sres_head;
56 /* Imported subroutines */
/* Store routines implemented elsewhere (e.g. the schedule parser) */
57 extern void store_run(LEX *lc, RES_ITEM *item, int index, int pass);
58 extern void store_finc(LEX *lc, RES_ITEM *item, int index, int pass);
59 extern void store_inc(LEX *lc, RES_ITEM *item, int index, int pass);
62 /* Forward referenced subroutines */
/* Daemon-specific store routines referenced by the tables below */
64 void store_jobtype(LEX *lc, RES_ITEM *item, int index, int pass);
65 void store_level(LEX *lc, RES_ITEM *item, int index, int pass);
66 void store_replace(LEX *lc, RES_ITEM *item, int index, int pass);
67 void store_acl(LEX *lc, RES_ITEM *item, int index, int pass);
70 /* We build the current resource here as we are
71 * scanning the resource configuration definition,
72 * then move it to allocated memory when the resource
/* NOTE(review): the definition of res_all itself (original lines 73-75)
 * is not visible in this extract. */
76 int res_all_size = sizeof(res_all);
79 /* Definition of records permitted within each
80 * resource with the routine to process the record
81 * information. NOTE! quoted names must be in lower case.
86 * name handler value code flags default_value
/* Director resource directives. Each row maps a lower-case keyword to its
 * store routine, the destination field in res_dir, an optional code value,
 * ITEM_* flags and a default. NOTE(review): the closing "};" line is not
 * visible in this extract (elided). */
88 static RES_ITEM dir_items[] = {
89 {"name", store_name, ITEM(res_dir.hdr.name), 0, ITEM_REQUIRED, 0},
90 {"description", store_str, ITEM(res_dir.hdr.desc), 0, 0, 0},
91 {"messages", store_res, ITEM(res_dir.messages), R_MSGS, 0, 0},
92 {"dirport", store_addresses_port, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101},
93 {"diraddress", store_addresses_address, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101},
94 {"diraddresses",store_addresses, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101},
95 {"queryfile", store_dir, ITEM(res_dir.query_file), 0, ITEM_REQUIRED, 0},
96 {"workingdirectory", store_dir, ITEM(res_dir.working_directory), 0, ITEM_REQUIRED, 0},
97 {"scriptsdirectory", store_dir, ITEM(res_dir.scripts_directory), 0, 0, 0},
98 {"piddirectory",store_dir, ITEM(res_dir.pid_directory), 0, ITEM_REQUIRED, 0},
99 {"subsysdirectory", store_dir, ITEM(res_dir.subsys_directory), 0, 0, 0},
100 {"requiressl", store_yesno, ITEM(res_dir.require_ssl), 1, ITEM_DEFAULT, 0},
101 {"enablessl", store_yesno, ITEM(res_dir.enable_ssl), 1, ITEM_DEFAULT, 0},
102 {"maximumconcurrentjobs", store_pint, ITEM(res_dir.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
103 {"password", store_password, ITEM(res_dir.password), 0, ITEM_REQUIRED, 0},
/* Connect timeouts default to 30 minutes (60 * 30 seconds) */
104 {"fdconnecttimeout", store_time,ITEM(res_dir.FDConnectTimeout), 0, ITEM_DEFAULT, 60 * 30},
105 {"sdconnecttimeout", store_time,ITEM(res_dir.SDConnectTimeout), 0, ITEM_DEFAULT, 60 * 30},
106 {NULL, NULL, NULL, 0, 0, 0}
112 * name handler value code flags default_value
/* Console resource directives. The *acl rows all share the ACL_lists
 * destination; the code column selects which ACL slot is filled.
 * NOTE(review): closing "};" elided in this extract. */
114 static RES_ITEM con_items[] = {
115 {"name", store_name, ITEM(res_con.hdr.name), 0, ITEM_REQUIRED, 0},
116 {"description", store_str, ITEM(res_con.hdr.desc), 0, 0, 0},
117 {"enablessl", store_yesno, ITEM(res_con.enable_ssl), 1, ITEM_DEFAULT, 0},
118 {"password", store_password, ITEM(res_con.password), 0, ITEM_REQUIRED, 0},
119 {"jobacl", store_acl, ITEM(res_con.ACL_lists), Job_ACL, 0, 0},
120 {"clientacl", store_acl, ITEM(res_con.ACL_lists), Client_ACL, 0, 0},
121 {"storageacl", store_acl, ITEM(res_con.ACL_lists), Storage_ACL, 0, 0},
122 {"scheduleacl", store_acl, ITEM(res_con.ACL_lists), Schedule_ACL, 0, 0},
123 {"runacl", store_acl, ITEM(res_con.ACL_lists), Run_ACL, 0, 0},
124 {"poolacl", store_acl, ITEM(res_con.ACL_lists), Pool_ACL, 0, 0},
125 {"commandacl", store_acl, ITEM(res_con.ACL_lists), Command_ACL, 0, 0},
126 {"filesetacl", store_acl, ITEM(res_con.ACL_lists), FileSet_ACL, 0, 0},
127 {"catalogacl", store_acl, ITEM(res_con.ACL_lists), Catalog_ACL, 0, 0},
128 {NULL, NULL, NULL, 0, 0, 0}
133 * Client or File daemon resource
135 * name handler value code flags default_value
/* Client (File daemon) resource directives. "fdaddress"/"fdpassword" are
 * aliases writing to the same fields as "address"/"password".
 * NOTE(review): closing "};" elided in this extract. */
138 static RES_ITEM cli_items[] = {
139 {"name", store_name, ITEM(res_client.hdr.name), 0, ITEM_REQUIRED, 0},
140 {"description", store_str, ITEM(res_client.hdr.desc), 0, 0, 0},
141 {"address", store_str, ITEM(res_client.address), 0, ITEM_REQUIRED, 0},
142 {"fdaddress", store_str, ITEM(res_client.address), 0, 0, 0},
143 {"fdport", store_pint, ITEM(res_client.FDport), 0, ITEM_DEFAULT, 9102},
144 {"password", store_password, ITEM(res_client.password), 0, ITEM_REQUIRED, 0},
145 {"fdpassword", store_password, ITEM(res_client.password), 0, 0, 0},
146 {"catalog", store_res, ITEM(res_client.catalog), R_CATALOG, ITEM_REQUIRED, 0},
/* Defaults: file retention 60 days, job retention 180 days (in seconds) */
147 {"fileretention", store_time, ITEM(res_client.FileRetention), 0, ITEM_DEFAULT, 60*60*24*60},
148 {"jobretention", store_time, ITEM(res_client.JobRetention), 0, ITEM_DEFAULT, 60*60*24*180},
149 {"autoprune", store_yesno, ITEM(res_client.AutoPrune), 1, ITEM_DEFAULT, 1},
150 {"enablessl", store_yesno, ITEM(res_client.enable_ssl), 1, ITEM_DEFAULT, 0},
151 {"maximumconcurrentjobs", store_pint, ITEM(res_client.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
152 {NULL, NULL, NULL, 0, 0, 0}
155 /* Storage daemon resource
157 * name handler value code flags default_value
/* Storage daemon resource directives. "sdaddress"/"sdpassword"/
 * "sddevicename" alias the unprefixed fields; "sddport" is kept only for
 * backward compatibility (marked deprecated below).
 * NOTE(review): closing "};" elided in this extract. */
159 static RES_ITEM store_items[] = {
160 {"name", store_name, ITEM(res_store.hdr.name), 0, ITEM_REQUIRED, 0},
161 {"description", store_str, ITEM(res_store.hdr.desc), 0, 0, 0},
162 {"sdport", store_pint, ITEM(res_store.SDport), 0, ITEM_DEFAULT, 9103},
163 {"address", store_str, ITEM(res_store.address), 0, ITEM_REQUIRED, 0},
164 {"sdaddress", store_str, ITEM(res_store.address), 0, 0, 0},
165 {"password", store_password, ITEM(res_store.password), 0, ITEM_REQUIRED, 0},
166 {"sdpassword", store_password, ITEM(res_store.password), 0, 0, 0},
167 {"device", store_strname, ITEM(res_store.dev_name), 0, ITEM_REQUIRED, 0},
168 {"sddevicename", store_strname, ITEM(res_store.dev_name), 0, 0, 0},
169 {"mediatype", store_strname, ITEM(res_store.media_type), 0, ITEM_REQUIRED, 0},
170 {"autochanger", store_yesno, ITEM(res_store.autochanger), 1, ITEM_DEFAULT, 0},
171 {"enablessl", store_yesno, ITEM(res_store.enable_ssl), 1, ITEM_DEFAULT, 0},
172 {"maximumconcurrentjobs", store_pint, ITEM(res_store.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
173 {"sddport", store_pint, ITEM(res_store.SDDport), 0, 0, 0}, /* deprecated */
174 {NULL, NULL, NULL, 0, 0, 0}
178 * Catalog Resource Directives
180 * name handler value code flags default_value
/* Catalog resource directives -- database connection parameters.
 * NOTE(review): closing "};" elided in this extract. */
182 static RES_ITEM cat_items[] = {
183 {"name", store_name, ITEM(res_cat.hdr.name), 0, ITEM_REQUIRED, 0},
184 {"description", store_str, ITEM(res_cat.hdr.desc), 0, 0, 0},
185 {"address", store_str, ITEM(res_cat.db_address), 0, 0, 0},
186 {"dbaddress", store_str, ITEM(res_cat.db_address), 0, 0, 0},
187 {"dbport", store_pint, ITEM(res_cat.db_port), 0, 0, 0},
188 /* keep this password as store_str for the moment */
189 {"password", store_str, ITEM(res_cat.db_password), 0, 0, 0},
190 {"dbpassword", store_str, ITEM(res_cat.db_password), 0, 0, 0},
191 {"user", store_str, ITEM(res_cat.db_user), 0, 0, 0},
192 {"dbname", store_str, ITEM(res_cat.db_name), 0, ITEM_REQUIRED, 0},
193 {"dbsocket", store_str, ITEM(res_cat.db_socket), 0, 0, 0},
194 {"multipleconnections", store_yesno, ITEM(res_cat.mult_db_connections), 1, 0, 0},
195 {NULL, NULL, NULL, 0, 0, 0}
199 * Job Resource Directives
201 * name handler value code flags default_value
/* Job (and JobDefs -- see the resources[] master table) directives.
 * Non-static because the table is shared with the JobDefs entry.
 * NOTE(review): closing "};" elided in this extract. */
203 RES_ITEM job_items[] = {
204 {"name", store_name, ITEM(res_job.hdr.name), 0, ITEM_REQUIRED, 0},
205 {"description", store_str, ITEM(res_job.hdr.desc), 0, 0, 0},
206 {"type", store_jobtype, ITEM(res_job.JobType), 0, ITEM_REQUIRED, 0},
207 {"level", store_level, ITEM(res_job.JobLevel), 0, 0, 0},
208 {"messages", store_res, ITEM(res_job.messages), R_MSGS, ITEM_REQUIRED, 0},
/* "storage" accumulates into an alist; default column here carries the
 * MAX_STORE capacity rather than a value default */
209 {"storage", store_alist_res, ITEM(res_job.storage), R_STORAGE, ITEM_REQUIRED, MAX_STORE},
210 {"pool", store_res, ITEM(res_job.pool), R_POOL, ITEM_REQUIRED, 0},
211 {"fullbackuppool", store_res, ITEM(res_job.full_pool), R_POOL, 0, 0},
212 {"incrementalbackuppool", store_res, ITEM(res_job.inc_pool), R_POOL, 0, 0},
213 {"differentialbackuppool", store_res, ITEM(res_job.dif_pool), R_POOL, 0, 0},
214 {"client", store_res, ITEM(res_job.client), R_CLIENT, ITEM_REQUIRED, 0},
215 {"fileset", store_res, ITEM(res_job.fileset), R_FILESET, ITEM_REQUIRED, 0},
216 {"schedule", store_res, ITEM(res_job.schedule), R_SCHEDULE, 0, 0},
217 {"verifyjob", store_res, ITEM(res_job.verify_job), R_JOB, 0, 0},
218 {"jobdefs", store_res, ITEM(res_job.jobdefs), R_JOBDEFS, 0, 0},
219 {"where", store_dir, ITEM(res_job.RestoreWhere), 0, 0, 0},
220 {"bootstrap",store_dir, ITEM(res_job.RestoreBootstrap), 0, 0, 0},
221 {"writebootstrap",store_dir, ITEM(res_job.WriteBootstrap), 0, 0, 0},
222 {"replace", store_replace, ITEM(res_job.replace), 0, ITEM_DEFAULT, REPLACE_ALWAYS},
223 {"maxruntime", store_time, ITEM(res_job.MaxRunTime), 0, 0, 0},
224 {"maxwaittime", store_time, ITEM(res_job.MaxWaitTime), 0, 0, 0},
225 {"maxstartdelay",store_time, ITEM(res_job.MaxStartDelay), 0, 0, 0},
226 {"jobretention", store_time, ITEM(res_job.JobRetention), 0, 0, 0},
227 {"prefixlinks", store_yesno, ITEM(res_job.PrefixLinks), 1, ITEM_DEFAULT, 0},
228 {"prunejobs", store_yesno, ITEM(res_job.PruneJobs), 1, ITEM_DEFAULT, 0},
229 {"prunefiles", store_yesno, ITEM(res_job.PruneFiles), 1, ITEM_DEFAULT, 0},
230 {"prunevolumes",store_yesno, ITEM(res_job.PruneVolumes), 1, ITEM_DEFAULT, 0},
231 {"spoolattributes",store_yesno, ITEM(res_job.SpoolAttributes), 1, ITEM_DEFAULT, 0},
232 {"spooldata", store_yesno, ITEM(res_job.spool_data), 1, ITEM_DEFAULT, 0},
233 {"rerunfailedlevels", store_yesno, ITEM(res_job.rerun_failed_levels), 1, ITEM_DEFAULT, 0},
234 {"runbeforejob", store_str, ITEM(res_job.RunBeforeJob), 0, 0, 0},
235 {"runafterjob", store_str, ITEM(res_job.RunAfterJob), 0, 0, 0},
236 {"runafterfailedjob", store_str, ITEM(res_job.RunAfterFailedJob), 0, 0, 0},
237 {"clientrunbeforejob", store_str, ITEM(res_job.ClientRunBeforeJob), 0, 0, 0},
238 {"clientrunafterjob", store_str, ITEM(res_job.ClientRunAfterJob), 0, 0, 0},
239 {"maximumconcurrentjobs", store_pint, ITEM(res_job.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
240 {"rescheduleonerror", store_yesno, ITEM(res_job.RescheduleOnError), 1, ITEM_DEFAULT, 0},
/* Reschedule interval default: 30 minutes */
241 {"rescheduleinterval", store_time, ITEM(res_job.RescheduleInterval), 0, ITEM_DEFAULT, 60 * 30},
242 {"rescheduletimes", store_pint, ITEM(res_job.RescheduleTimes), 0, 0, 0},
243 {"priority", store_pint, ITEM(res_job.Priority), 0, ITEM_DEFAULT, 10},
244 {NULL, NULL, NULL, 0, 0, 0}
249 * name handler value code flags default_value
/* FileSet resource directives. include/exclude have no destination field
 * (value NULL, ITEM_NO_EQUALS) -- store_inc builds INCEXE lists itself,
 * using the code column (0=include, 1=exclude).
 * NOTE(review): closing "};" elided in this extract. */
251 static RES_ITEM fs_items[] = {
252 {"name", store_name, ITEM(res_fs.hdr.name), 0, ITEM_REQUIRED, 0},
253 {"description", store_str, ITEM(res_fs.hdr.desc), 0, 0, 0},
254 {"include", store_inc, NULL, 0, ITEM_NO_EQUALS, 0},
255 {"exclude", store_inc, NULL, 1, ITEM_NO_EQUALS, 0},
256 {"ignorefilesetchanges", store_yesno, ITEM(res_fs.ignore_fs_changes), 1, ITEM_DEFAULT, 0},
257 {NULL, NULL, NULL, 0, 0, 0}
260 /* Schedule -- see run_conf.c */
263 * name handler value code flags default_value
/* Schedule resource directives; "run" records are parsed by store_run
 * (implemented in run_conf.c). NOTE(review): closing "};" elided. */
265 static RES_ITEM sch_items[] = {
266 {"name", store_name, ITEM(res_sch.hdr.name), 0, ITEM_REQUIRED, 0},
267 {"description", store_str, ITEM(res_sch.hdr.desc), 0, 0, 0},
268 {"run", store_run, ITEM(res_sch.run), 0, 0, 0},
269 {NULL, NULL, NULL, 0, 0, 0}
274 * name handler value code flags default_value
/* Pool resource directives. NOTE(review): closing "};" elided. */
276 static RES_ITEM pool_items[] = {
277 {"name", store_name, ITEM(res_pool.hdr.name), 0, ITEM_REQUIRED, 0},
278 {"description", store_str, ITEM(res_pool.hdr.desc), 0, 0, 0},
279 {"pooltype", store_strname, ITEM(res_pool.pool_type), 0, ITEM_REQUIRED, 0},
280 {"labelformat", store_strname, ITEM(res_pool.label_format), 0, 0, 0},
281 {"cleaningprefix", store_strname, ITEM(res_pool.cleaning_prefix), 0, 0, 0},
282 {"usecatalog", store_yesno, ITEM(res_pool.use_catalog), 1, ITEM_DEFAULT, 1},
283 {"usevolumeonce", store_yesno, ITEM(res_pool.use_volume_once), 1, 0, 0},
284 {"purgeoldestvolume", store_yesno, ITEM(res_pool.purge_oldest_volume), 1, 0, 0},
285 {"recycleoldestvolume", store_yesno, ITEM(res_pool.recycle_oldest_volume), 1, 0, 0},
286 {"recyclecurrentvolume", store_yesno, ITEM(res_pool.recycle_current_volume), 1, 0, 0},
287 {"maximumvolumes", store_pint, ITEM(res_pool.max_volumes), 0, 0, 0},
288 {"maximumvolumejobs", store_pint, ITEM(res_pool.MaxVolJobs), 0, 0, 0},
289 {"maximumvolumefiles", store_pint, ITEM(res_pool.MaxVolFiles), 0, 0, 0},
290 {"maximumvolumebytes", store_size, ITEM(res_pool.MaxVolBytes), 0, 0, 0},
291 {"acceptanyvolume", store_yesno, ITEM(res_pool.accept_any_volume), 1, ITEM_DEFAULT, 1},
292 {"catalogfiles", store_yesno, ITEM(res_pool.catalog_files), 1, ITEM_DEFAULT, 1},
/* Volume retention defaults to one year (in seconds) */
293 {"volumeretention", store_time, ITEM(res_pool.VolRetention), 0, ITEM_DEFAULT, 60*60*24*365},
294 {"volumeuseduration", store_time, ITEM(res_pool.VolUseDuration),0, 0, 0},
295 {"autoprune", store_yesno, ITEM(res_pool.AutoPrune), 1, ITEM_DEFAULT, 1},
296 {"recycle", store_yesno, ITEM(res_pool.Recycle), 1, ITEM_DEFAULT, 1},
297 {NULL, NULL, NULL, 0, 0, 0}
302 * name handler value code flags default_value
/* Counter resource directives. NOTE(review): closing "};" elided. */
304 static RES_ITEM counter_items[] = {
305 {"name", store_name, ITEM(res_counter.hdr.name), 0, ITEM_REQUIRED, 0},
306 {"description", store_str, ITEM(res_counter.hdr.desc), 0, 0, 0},
307 {"minimum", store_int, ITEM(res_counter.MinValue), 0, ITEM_DEFAULT, 0},
308 {"maximum", store_pint, ITEM(res_counter.MaxValue), 0, ITEM_DEFAULT, INT32_MAX},
309 {"wrapcounter", store_res, ITEM(res_counter.WrapCounter), R_COUNTER, 0, 0},
310 {"catalog", store_res, ITEM(res_counter.Catalog), R_CATALOG, 0, 0},
311 {NULL, NULL, NULL, 0, 0, 0}
315 /* Message resource */
/* Messages directive table is shared with the other daemons and defined
 * elsewhere (message-resource module). */
316 extern RES_ITEM msgs_items[];
319 * This is the master resource definition.
320 * It must have one item for each of the resources.
322 * NOTE!!! keep it in the same order as the R_codes
323 * or eliminate all resources[rindex].name
325 * name items rcode res_head
/* Master table: maps each resource keyword to its directive table and
 * R_ code. "jobdefs" deliberately reuses job_items. NOTE(review): the
 * NULL terminator row and closing "};" are elided in this extract. */
327 RES_TABLE resources[] = {
328 {"director", dir_items, R_DIRECTOR},
329 {"client", cli_items, R_CLIENT},
330 {"job", job_items, R_JOB},
331 {"storage", store_items, R_STORAGE},
332 {"catalog", cat_items, R_CATALOG},
333 {"schedule", sch_items, R_SCHEDULE},
334 {"fileset", fs_items, R_FILESET},
335 {"pool", pool_items, R_POOL},
336 {"messages", msgs_items, R_MSGS},
337 {"counter", counter_items, R_COUNTER},
338 {"console", con_items, R_CONSOLE},
339 {"jobdefs", job_items, R_JOBDEFS},
344 /* Keywords (RHS) permitted in Job Level records
346 * level_name level job_type
/* Level keyword table: each level keyword is valid only for the listed
 * job type. The two " " rows give Admin/Restore jobs a no-level entry.
 * NOTE(review): the table terminator and closing "};" are elided. */
348 struct s_jl joblevels[] = {
349 {"Full", L_FULL, JT_BACKUP},
350 {"Base", L_BASE, JT_BACKUP},
351 {"Incremental", L_INCREMENTAL, JT_BACKUP},
352 {"Differential", L_DIFFERENTIAL, JT_BACKUP},
353 {"Since", L_SINCE, JT_BACKUP},
354 {"Catalog", L_VERIFY_CATALOG, JT_VERIFY},
355 {"InitCatalog", L_VERIFY_INIT, JT_VERIFY},
356 {"VolumeToCatalog", L_VERIFY_VOLUME_TO_CATALOG, JT_VERIFY},
357 {"DiskToCatalog", L_VERIFY_DISK_TO_CATALOG, JT_VERIFY},
358 {"Data", L_VERIFY_DATA, JT_VERIFY},
359 {" ", L_NONE, JT_ADMIN},
360 {" ", L_NONE, JT_RESTORE},
364 /* Keywords (RHS) permitted in Job type records
/* Job "type" keyword table. NOTE(review): terminator and "};" elided;
 * an "admin" row (original line 370) also appears to be elided. */
368 struct s_jt jobtypes[] = {
369 {"backup", JT_BACKUP},
371 {"verify", JT_VERIFY},
372 {"restore", JT_RESTORE},
/* NOTE(review): the matching #endif for this block is not visible in
 * this extract; the tables inside are compiled out. */
376 #ifdef old_deprecated_code
378 /* Keywords (RHS) permitted in Backup and Verify records */
379 static struct s_kw BakVerFields[] = {
386 /* Keywords (RHS) permitted in Restore records */
387 static struct s_kw RestoreFields[] = {
390 {"jobid", 'J'}, /* JobId to restore */
391 {"where", 'W'}, /* root of restore */
392 {"replace", 'R'}, /* replacement options */
393 {"bootstrap", 'B'}, /* bootstrap file */
398 /* Options permitted in Restore replace= */
/* replace= keyword table used by store_replace. NOTE(review): terminator
 * and "};" elided in this extract. */
399 struct s_kw ReplaceOptions[] = {
400 {"always", REPLACE_ALWAYS},
401 {"ifnewer", REPLACE_IFNEWER},
402 {"ifolder", REPLACE_IFOLDER},
403 {"never", REPLACE_NEVER},
/* Map a numeric job level to its keyword from joblevels[]; falls back to
 * the decimal value formatted into a static buffer (not thread-safe --
 * the fallback result is shared static storage).
 * NOTE(review): the function's braces and return statement are elided in
 * this extract; only the core lines are visible. */
407 const char *level_to_str(int level)
410 static char level_no[30];
411 const char *str = level_no;
413 bsnprintf(level_no, sizeof(level_no), "%d", level); /* default if not found */
414 for (i=0; joblevels[i].level_name; i++) {
415 if (level == joblevels[i].level) {
416 str = joblevels[i].level_name;
423 /* Dump contents of resource */
/* Print a human-readable dump of one resource (and the resources it
 * references) through the sendit callback. A negative `type` means
 * "print this one resource without recursing to chained siblings".
 * NOTE(review): this extract is heavily elided -- the switch statement,
 * its case labels, closing braces and several statements are missing,
 * so read the original-line-number gaps with care.
 * NOTE(review): two apparent typos in runtime strings below --
 * "MutliDBConn" (Catalog dump) and "RecyleOldest" (Pool dump); fixing
 * them changes program output, so flagging only. */
424 void dump_resource(int type, RES *reshdr, void sendit(void *sock, const char *fmt, ...), void *sock)
426 URES *res = (URES *)reshdr;
428 char ed1[100], ed2[100];
431 sendit(sock, "No %s resource defined\n", res_to_str(type));
434 if (type < 0) { /* no recursion */
/* -- Director resource dump -- */
440 sendit(sock, "Director: name=%s MaxJobs=%d FDtimeout=%s SDtimeout=%s\n",
441 reshdr->name, res->res_dir.MaxConcurrentJobs,
442 edit_uint64(res->res_dir.FDConnectTimeout, ed1),
443 edit_uint64(res->res_dir.SDConnectTimeout, ed2));
444 if (res->res_dir.query_file) {
445 sendit(sock, " query_file=%s\n", res->res_dir.query_file);
447 if (res->res_dir.messages) {
448 sendit(sock, " --> ");
449 dump_resource(-R_MSGS, (RES *)res->res_dir.messages, sendit, sock);
/* -- Console resource dump -- */
453 sendit(sock, "Console: name=%s SSL=%d\n",
454 res->res_con.hdr.name, res->res_con.enable_ssl);
/* -- Counter resource dump (with/without wrap counter) -- */
457 if (res->res_counter.WrapCounter) {
458 sendit(sock, "Counter: name=%s min=%d max=%d cur=%d wrapcntr=%s\n",
459 res->res_counter.hdr.name, res->res_counter.MinValue,
460 res->res_counter.MaxValue, res->res_counter.CurrentValue,
461 res->res_counter.WrapCounter->hdr.name);
463 sendit(sock, "Counter: name=%s min=%d max=%d\n",
464 res->res_counter.hdr.name, res->res_counter.MinValue,
465 res->res_counter.MaxValue);
467 if (res->res_counter.Catalog) {
468 sendit(sock, " --> ");
469 dump_resource(-R_CATALOG, (RES *)res->res_counter.Catalog, sendit, sock);
/* -- Client resource dump -- */
474 sendit(sock, "Client: name=%s address=%s FDport=%d MaxJobs=%u\n",
475 res->res_client.hdr.name, res->res_client.address, res->res_client.FDport,
476 res->res_client.MaxConcurrentJobs);
477 sendit(sock, " JobRetention=%s FileRetention=%s AutoPrune=%d\n",
478 edit_utime(res->res_client.JobRetention, ed1, sizeof(ed1)),
479 edit_utime(res->res_client.FileRetention, ed2, sizeof(ed2)),
480 res->res_client.AutoPrune);
481 if (res->res_client.catalog) {
482 sendit(sock, " --> ");
483 dump_resource(-R_CATALOG, (RES *)res->res_client.catalog, sendit, sock);
/* -- Storage resource dump -- */
487 sendit(sock, "Storage: name=%s address=%s SDport=%d MaxJobs=%u\n"
488 " DeviceName=%s MediaType=%s\n",
489 res->res_store.hdr.name, res->res_store.address, res->res_store.SDport,
490 res->res_store.MaxConcurrentJobs,
491 res->res_store.dev_name, res->res_store.media_type);
/* -- Catalog resource dump -- */
494 sendit(sock, "Catalog: name=%s address=%s DBport=%d db_name=%s\n"
495 " db_user=%s MutliDBConn=%d\n",
496 res->res_cat.hdr.name, NPRT(res->res_cat.db_address),
497 res->res_cat.db_port, res->res_cat.db_name, NPRT(res->res_cat.db_user),
498 res->res_cat.mult_db_connections);
/* -- Job / JobDefs dump (shared code path, label picked by type) -- */
502 sendit(sock, "%s: name=%s JobType=%d level=%s Priority=%d MaxJobs=%u\n",
503 type == R_JOB ? "Job" : "JobDefs",
504 res->res_job.hdr.name, res->res_job.JobType,
505 level_to_str(res->res_job.JobLevel), res->res_job.Priority,
506 res->res_job.MaxConcurrentJobs);
507 sendit(sock, " Resched=%d Times=%d Interval=%s Spool=%d\n",
508 res->res_job.RescheduleOnError, res->res_job.RescheduleTimes,
509 edit_uint64_with_commas(res->res_job.RescheduleInterval, ed1),
510 res->res_job.spool_data);
511 if (res->res_job.client) {
512 sendit(sock, " --> ");
513 dump_resource(-R_CLIENT, (RES *)res->res_job.client, sendit, sock);
515 if (res->res_job.fileset) {
516 sendit(sock, " --> ");
517 dump_resource(-R_FILESET, (RES *)res->res_job.fileset, sendit, sock);
519 if (res->res_job.schedule) {
520 sendit(sock, " --> ");
521 dump_resource(-R_SCHEDULE, (RES *)res->res_job.schedule, sendit, sock);
523 if (res->res_job.RestoreWhere) {
524 sendit(sock, " --> Where=%s\n", NPRT(res->res_job.RestoreWhere));
526 if (res->res_job.RestoreBootstrap) {
527 sendit(sock, " --> Bootstrap=%s\n", NPRT(res->res_job.RestoreBootstrap));
529 if (res->res_job.RunBeforeJob) {
530 sendit(sock, " --> RunBefore=%s\n", NPRT(res->res_job.RunBeforeJob));
532 if (res->res_job.RunAfterJob) {
533 sendit(sock, " --> RunAfter=%s\n", NPRT(res->res_job.RunAfterJob));
535 if (res->res_job.RunAfterFailedJob) {
536 sendit(sock, " --> RunAfterFailed=%s\n", NPRT(res->res_job.RunAfterFailedJob));
538 if (res->res_job.WriteBootstrap) {
539 sendit(sock, " --> WriteBootstrap=%s\n", NPRT(res->res_job.WriteBootstrap));
/* Storage dump disabled: storage[] holds alist entries, not a RES* */
541 if (res->res_job.storage[0]) {
542 sendit(sock, " --> ");
544 // dump_resource(-R_STORAGE, (RES *)res->res_job.storage, sendit, sock);
546 if (res->res_job.pool) {
547 sendit(sock, " --> ");
548 dump_resource(-R_POOL, (RES *)res->res_job.pool, sendit, sock);
550 if (res->res_job.full_pool) {
551 sendit(sock, " --> ");
552 dump_resource(-R_POOL, (RES *)res->res_job.full_pool, sendit, sock);
554 if (res->res_job.inc_pool) {
555 sendit(sock, " --> ");
556 dump_resource(-R_POOL, (RES *)res->res_job.inc_pool, sendit, sock);
558 if (res->res_job.dif_pool) {
559 sendit(sock, " --> ");
560 dump_resource(-R_POOL, (RES *)res->res_job.dif_pool, sendit, sock);
562 if (res->res_job.verify_job) {
563 sendit(sock, " --> ");
/* -type keeps the Job/JobDefs distinction while suppressing recursion */
564 dump_resource(-type, (RES *)res->res_job.verify_job, sendit, sock);
567 if (res->res_job.messages) {
568 sendit(sock, " --> ");
569 dump_resource(-R_MSGS, (RES *)res->res_job.messages, sendit, sock);
/* -- FileSet dump: one-letter record codes (O/R/W/B/X/D/T/N/I/E) match
 * the wire format sent to the File daemon -- */
575 sendit(sock, "FileSet: name=%s\n", res->res_fs.hdr.name);
576 for (i=0; i<res->res_fs.num_includes; i++) {
577 INCEXE *incexe = res->res_fs.include_items[i];
578 for (j=0; j<incexe->num_opts; j++) {
579 FOPTS *fo = incexe->opts_list[j];
580 sendit(sock, " O %s\n", fo->opts);
581 for (k=0; k<fo->regex.size(); k++) {
582 sendit(sock, " R %s\n", fo->regex.get(k));
584 for (k=0; k<fo->wild.size(); k++) {
585 sendit(sock, " W %s\n", fo->wild.get(k));
587 for (k=0; k<fo->base.size(); k++) {
588 sendit(sock, " B %s\n", fo->base.get(k));
590 for (k=0; k<fo->fstype.size(); k++) {
591 sendit(sock, " X %s\n", fo->fstype.get(k));
594 sendit(sock, " D %s\n", fo->reader);
597 sendit(sock, " T %s\n", fo->writer);
599 sendit(sock, " N\n");
601 for (j=0; j<incexe->name_list.size(); j++) {
602 sendit(sock, " I %s\n", incexe->name_list.get(j));
604 if (incexe->name_list.size()) {
605 sendit(sock, " N\n");
609 for (i=0; i<res->res_fs.num_excludes; i++) {
610 INCEXE *incexe = res->res_fs.exclude_items[i];
611 for (j=0; j<incexe->name_list.size(); j++) {
612 sendit(sock, " E %s\n", incexe->name_list.get(j));
614 if (incexe->name_list.size()) {
615 sendit(sock, " N\n");
/* -- Schedule dump: formats each Run record's hour/mday/month/wday/
 * wom/woy bitmaps as space-separated index lists -- */
621 if (res->res_sch.run) {
623 RUN *run = res->res_sch.run;
624 char buf[1000], num[30];
625 sendit(sock, "Schedule: name=%s\n", res->res_sch.hdr.name);
630 sendit(sock, " --> Run Level=%s\n", level_to_str(run->level));
631 bstrncpy(buf, " hour=", sizeof(buf));
632 for (i=0; i<24; i++) {
633 if (bit_is_set(i, run->hour)) {
634 bsnprintf(num, sizeof(num), "%d ", i);
635 bstrncat(buf, num, sizeof(buf));
638 bstrncat(buf, "\n", sizeof(buf));
640 bstrncpy(buf, " mday=", sizeof(buf));
641 for (i=0; i<31; i++) {
642 if (bit_is_set(i, run->mday)) {
643 bsnprintf(num, sizeof(num), "%d ", i);
644 bstrncat(buf, num, sizeof(buf));
647 bstrncat(buf, "\n", sizeof(buf));
649 bstrncpy(buf, " month=", sizeof(buf));
650 for (i=0; i<12; i++) {
651 if (bit_is_set(i, run->month)) {
652 bsnprintf(num, sizeof(num), "%d ", i);
653 bstrncat(buf, num, sizeof(buf));
656 bstrncat(buf, "\n", sizeof(buf));
658 bstrncpy(buf, " wday=", sizeof(buf));
659 for (i=0; i<7; i++) {
660 if (bit_is_set(i, run->wday)) {
661 bsnprintf(num, sizeof(num), "%d ", i);
662 bstrncat(buf, num, sizeof(buf));
665 bstrncat(buf, "\n", sizeof(buf));
667 bstrncpy(buf, " wom=", sizeof(buf));
668 for (i=0; i<5; i++) {
669 if (bit_is_set(i, run->wom)) {
670 bsnprintf(num, sizeof(num), "%d ", i);
671 bstrncat(buf, num, sizeof(buf));
674 bstrncat(buf, "\n", sizeof(buf));
676 bstrncpy(buf, " woy=", sizeof(buf));
677 for (i=0; i<54; i++) {
678 if (bit_is_set(i, run->woy)) {
679 bsnprintf(num, sizeof(num), "%d ", i);
680 bstrncat(buf, num, sizeof(buf));
683 bstrncat(buf, "\n", sizeof(buf));
685 sendit(sock, " mins=%d\n", run->minute);
687 sendit(sock, " --> ");
688 dump_resource(-R_POOL, (RES *)run->pool, sendit, sock);
691 sendit(sock, " --> ");
692 dump_resource(-R_STORAGE, (RES *)run->storage, sendit, sock);
695 sendit(sock, " --> ");
696 dump_resource(-R_MSGS, (RES *)run->msgs, sendit, sock);
698 /* If another Run record is chained in, go print it */
704 sendit(sock, "Schedule: name=%s\n", res->res_sch.hdr.name);
/* -- Pool dump -- */
708 sendit(sock, "Pool: name=%s PoolType=%s\n", res->res_pool.hdr.name,
709 res->res_pool.pool_type);
710 sendit(sock, " use_cat=%d use_once=%d acpt_any=%d cat_files=%d\n",
711 res->res_pool.use_catalog, res->res_pool.use_volume_once,
712 res->res_pool.accept_any_volume, res->res_pool.catalog_files);
713 sendit(sock, " max_vols=%d auto_prune=%d VolRetention=%s\n",
714 res->res_pool.max_volumes, res->res_pool.AutoPrune,
715 edit_utime(res->res_pool.VolRetention, ed1, sizeof(ed1)));
716 sendit(sock, " VolUse=%s recycle=%d LabelFormat=%s\n",
717 edit_utime(res->res_pool.VolUseDuration, ed1, sizeof(ed1)),
718 res->res_pool.Recycle,
719 NPRT(res->res_pool.label_format));
720 sendit(sock, " CleaningPrefix=%s\n",
721 NPRT(res->res_pool.cleaning_prefix));
722 sendit(sock, " RecyleOldest=%d PurgeOldest=%d MaxVolJobs=%d MaxVolFiles=%d\n",
723 res->res_pool.recycle_oldest_volume,
724 res->res_pool.purge_oldest_volume,
725 res->res_pool.MaxVolJobs, res->res_pool.MaxVolFiles);
/* -- Messages dump -- */
728 sendit(sock, "Messages: name=%s\n", res->res_msgs.hdr.name);
729 if (res->res_msgs.mail_cmd)
730 sendit(sock, " mailcmd=%s\n", res->res_msgs.mail_cmd);
731 if (res->res_msgs.operator_cmd)
732 sendit(sock, " opcmd=%s\n", res->res_msgs.operator_cmd);
735 sendit(sock, "Unknown resource type %d in dump_resource.\n", type);
/* Follow the sibling chain unless recursion was suppressed above */
738 if (recurse && res->res_dir.hdr.next) {
739 dump_resource(type, res->res_dir.hdr.next, sendit, sock);
744 * Free all the members of an INCEXE structure
/* Release the name list and every FOPTS entry of one include/exclude
 * item, then the opts_list array itself.
 * NOTE(review): lines are elided here (original lines 755-762) -- the
 * frees of other fopt members and of fopt itself are not visible;
 * confirm against the full file before modifying. */
746 static void free_incexe(INCEXE *incexe)
748 incexe->name_list.destroy();
749 for (int i=0; i<incexe->num_opts; i++) {
750 FOPTS *fopt = incexe->opts_list[i];
751 fopt->regex.destroy();
752 fopt->wild.destroy();
753 fopt->base.destroy();
754 fopt->fstype.destroy();
763 if (incexe->opts_list) {
764 free(incexe->opts_list);
770 * Free memory of resource -- called when daemon terminates.
771 * NB, we don't need to worry about freeing any references
772 * to other resources as they will be freed when that
773 * resource chain is traversed. Mainly we worry about freeing
774 * allocated strings (names).
/* NOTE(review): this extract is heavily elided -- the switch statement,
 * case labels and many closing braces are missing. The visible lines
 * show the per-type cleanup bodies only. */
776 void free_resource(RES *sres, int type)
779 RES *nres; /* next resource if linked */
780 URES *res = (URES *)sres;
785 /* common stuff -- free the resource name and description */
786 nres = (RES *)res->res_dir.hdr.next;
787 if (res->res_dir.hdr.name) {
788 free(res->res_dir.hdr.name);
790 if (res->res_dir.hdr.desc) {
791 free(res->res_dir.hdr.desc);
/* -- Director cleanup -- */
796 if (res->res_dir.working_directory) {
797 free(res->res_dir.working_directory);
799 if (res->res_dir.scripts_directory) {
/* cast drops a const qualifier so free() accepts the pointer */
800 free((char *)res->res_dir.scripts_directory);
802 if (res->res_dir.pid_directory) {
803 free(res->res_dir.pid_directory);
805 if (res->res_dir.subsys_directory) {
806 free(res->res_dir.subsys_directory);
808 if (res->res_dir.password) {
809 free(res->res_dir.password);
811 if (res->res_dir.query_file) {
812 free(res->res_dir.query_file);
814 if (res->res_dir.DIRaddrs) {
815 free_addresses(res->res_dir.DIRaddrs);
/* -- Console cleanup: ACL lists are C++ objects, hence delete -- */
821 if (res->res_con.password) {
822 free(res->res_con.password);
824 for (int i=0; i<Num_ACL; i++) {
825 if (res->res_con.ACL_lists[i]) {
826 delete res->res_con.ACL_lists[i];
827 res->res_con.ACL_lists[i] = NULL;
/* -- Client cleanup -- */
832 if (res->res_client.address) {
833 free(res->res_client.address);
835 if (res->res_client.password) {
836 free(res->res_client.password);
/* -- Storage cleanup -- */
840 if (res->res_store.address) {
841 free(res->res_store.address);
843 if (res->res_store.password) {
844 free(res->res_store.password);
846 if (res->res_store.media_type) {
847 free(res->res_store.media_type);
849 if (res->res_store.dev_name) {
850 free(res->res_store.dev_name);
/* -- Catalog cleanup -- */
854 if (res->res_cat.db_address) {
855 free(res->res_cat.db_address);
857 if (res->res_cat.db_socket) {
858 free(res->res_cat.db_socket);
860 if (res->res_cat.db_user) {
861 free(res->res_cat.db_user);
863 if (res->res_cat.db_name) {
864 free(res->res_cat.db_name);
866 if (res->res_cat.db_password) {
867 free(res->res_cat.db_password);
/* -- FileSet cleanup.
 * NOTE(review): the loop lines around the free_incexe() calls (original
 * lines 872 and 879) are elided; as shown, `num` holds the count, so
 * indexing include_items[num]/exclude_items[num] would be out of bounds
 * without the elided decrementing loop -- confirm against full file. */
871 if ((num=res->res_fs.num_includes)) {
873 free_incexe(res->res_fs.include_items[num]);
875 free(res->res_fs.include_items);
877 res->res_fs.num_includes = 0;
878 if ((num=res->res_fs.num_excludes)) {
880 free_incexe(res->res_fs.exclude_items[num]);
882 free(res->res_fs.exclude_items);
884 res->res_fs.num_excludes = 0;
/* -- Pool cleanup -- */
887 if (res->res_pool.pool_type) {
888 free(res->res_pool.pool_type);
890 if (res->res_pool.label_format) {
891 free(res->res_pool.label_format);
893 if (res->res_pool.cleaning_prefix) {
894 free(res->res_pool.cleaning_prefix);
/* -- Schedule cleanup.
 * NOTE(review): the RUN-chain walking/freeing lines are elided. */
898 if (res->res_sch.run) {
900 nrun = res->res_sch.run;
/* -- Job / JobDefs cleanup -- */
910 if (res->res_job.RestoreWhere) {
911 free(res->res_job.RestoreWhere);
913 if (res->res_job.RestoreBootstrap) {
914 free(res->res_job.RestoreBootstrap);
916 if (res->res_job.WriteBootstrap) {
917 free(res->res_job.WriteBootstrap);
919 if (res->res_job.RunBeforeJob) {
920 free(res->res_job.RunBeforeJob);
922 if (res->res_job.RunAfterJob) {
923 free(res->res_job.RunAfterJob);
925 if (res->res_job.RunAfterFailedJob) {
926 free(res->res_job.RunAfterFailedJob);
928 if (res->res_job.ClientRunBeforeJob) {
929 free(res->res_job.ClientRunBeforeJob);
931 if (res->res_job.ClientRunAfterJob) {
932 free(res->res_job.ClientRunAfterJob);
/* storage[] slots are alists built by store_alist_res -- delete them */
934 for (int i=0; i < MAX_STORE; i++) {
935 if (res->res_job.storage[i]) {
936 delete (alist *)res->res_job.storage[i];
/* -- Messages cleanup -- */
941 if (res->res_msgs.mail_cmd) {
942 free(res->res_msgs.mail_cmd);
944 if (res->res_msgs.operator_cmd) {
945 free(res->res_msgs.operator_cmd);
947 free_msgs_res((MSGS *)res); /* free message resource */
951 printf("Unknown resource type %d in free_resource.\n", type);
953 /* Common stuff again -- free the resource, recurse to next one */
958 free_resource(nres, type);
/*
 * NOTE(review): this excerpt retains the original source line numbers and
 * omits many intermediate lines (switch/case labels, closing braces,
 * break statements).  The comments below describe only what the visible
 * fragments establish; code lines are unchanged.
 */
963 * Save the new resource by chaining it into the head list for
964 * the resource. If this is pass 2, we update any resource
965 * pointers because they may not have been defined until
968 void save_resource(int type, RES_ITEM *items, int pass)
/* Index of this resource type into the res_head[] chain array
 * (types are offsets from r_first; see sres_head sizing in the header). */
971 int rindex = type - r_first;
975 /* Check Job requirements after applying JobDefs */
976 if (type != R_JOB && type != R_JOBDEFS) {
/* Walk the item table (terminated by a NULL name) and terminate the
 * daemon (M_ERROR_TERM) if any ITEM_REQUIRED entry never had its bit
 * set in the item_present bitmap during parsing. */
978 * Ensure that all required items are present
980 for (i=0; items[i].name; i++) {
981 if (items[i].flags & ITEM_REQUIRED) {
982 if (!bit_is_set(i, res_all.res_dir.hdr.item_present)) {
/* NOTE(review): resources[rindex] is passed to a %s conversion here and
 * at line 989 below, while line 1142 passes resources[rindex].name --
 * these two probably should also use .name; confirm against the full
 * file and the resources[] table definition. */
983 Emsg2(M_ERROR_TERM, 0, "%s item is required in %s resource, but not found.\n",
984 items[i].name, resources[rindex]);
/* Sanity guard: the item_present bitmap holds only MAX_RES_ITEMS bits. */
987 /* If this triggers, take a look at lib/parse_conf.h */
988 if (i >= MAX_RES_ITEMS) {
989 Emsg1(M_ERROR_TERM, 0, "Too many items in %s resource\n", resources[rindex]);
/* ---- pass 2: patch inter-resource pointers into the pass-1 copies ---- */
995 * During pass 2 in each "store" routine, we looked up pointers
996 * to all the resources referenced in the current resource, now we
997 * must copy their addresses from the static record to the allocated
1002 /* Resources not containing a resource */
1011 /* Resources containing another resource */
/* Director: look up the pass-1 record by name (fatal if missing) and
 * copy in the messages pointer resolved during pass 2. */
1013 if ((res = (URES *)GetResWithName(R_DIRECTOR, res_all.res_dir.hdr.name)) == NULL) {
1014 Emsg1(M_ERROR_TERM, 0, "Cannot find Director resource %s\n", res_all.res_dir.hdr.name);
1016 res->res_dir.messages = res_all.res_dir.messages;
/* Job (lookup uses the current type, so this branch presumably also
 * covers R_JOBDEFS -- confirm against the elided case labels): copy
 * every resolved resource pointer from the static res_all into the
 * allocated pass-1 record. */
1020 if ((res = (URES *)GetResWithName(type, res_all.res_dir.hdr.name)) == NULL) {
1021 Emsg1(M_ERROR_TERM, 0, "Cannot find Job resource %s\n",
1022 res_all.res_dir.hdr.name);
1024 res->res_job.messages = res_all.res_job.messages;
1025 res->res_job.schedule = res_all.res_job.schedule;
1026 res->res_job.client = res_all.res_job.client;
1027 res->res_job.fileset = res_all.res_job.fileset;
/* storage[] is an array of MAX_STORE alist pointers; copy each slot. */
1028 for (int i=0; i < MAX_STORE; i++) {
1029 res->res_job.storage[i] = res_all.res_job.storage[i];
1031 res->res_job.pool = res_all.res_job.pool;
1032 res->res_job.full_pool = res_all.res_job.full_pool;
1033 res->res_job.inc_pool = res_all.res_job.inc_pool;
1034 res->res_job.dif_pool = res_all.res_job.dif_pool;
1035 res->res_job.verify_job = res_all.res_job.verify_job;
1036 res->res_job.jobdefs = res_all.res_job.jobdefs;
/* Counter: copy the Catalog and WrapCounter pointers resolved in pass 2. */
1039 if ((res = (URES *)GetResWithName(R_COUNTER, res_all.res_counter.hdr.name)) == NULL) {
1040 Emsg1(M_ERROR_TERM, 0, "Cannot find Counter resource %s\n", res_all.res_counter.hdr.name);
1042 res->res_counter.Catalog = res_all.res_counter.Catalog;
1043 res->res_counter.WrapCounter = res_all.res_counter.WrapCounter;
/* Client: copy the resolved catalog pointer. */
1047 if ((res = (URES *)GetResWithName(R_CLIENT, res_all.res_client.hdr.name)) == NULL) {
1048 Emsg1(M_ERROR_TERM, 0, "Cannot find Client resource %s\n", res_all.res_client.hdr.name);
1050 res->res_client.catalog = res_all.res_client.catalog;
1054 * Schedule is a bit different in that it contains a RUN record
1055 * chain which isn't a "named" resource. This chain was linked
1056 * in by run_conf.c during pass 2, so here we jam the pointer
1057 * into the Schedule resource.
/* The lookup below reads the name through res_client -- presumably the
 * hdr fields of every member overlay in the URES union, so this is the
 * same name field; verify against the URES declaration. */
1059 if ((res = (URES *)GetResWithName(R_SCHEDULE, res_all.res_client.hdr.name)) == NULL) {
1060 Emsg1(M_ERROR_TERM, 0, "Cannot find Schedule resource %s\n", res_all.res_client.hdr.name);
1062 res->res_sch.run = res_all.res_sch.run;
/* Unknown type during pass 2 is reported but non-fatal (M_ERROR,
 * not M_ERROR_TERM). */
1065 Emsg1(M_ERROR, 0, "Unknown resource type %d in save_resource.\n", type);
1069 /* Note, the resource name was already saved during pass 1,
1070 * so here, we can just release it.
/* Free the scratch name/desc held in the static res_all and NULL them
 * so the next resource parse starts clean. */
1072 if (res_all.res_dir.hdr.name) {
1073 free(res_all.res_dir.hdr.name);
1074 res_all.res_dir.hdr.name = NULL;
1076 if (res_all.res_dir.hdr.desc) {
1077 free(res_all.res_dir.hdr.desc);
1078 res_all.res_dir.hdr.desc = NULL;
/* ---- pass 1: allocate a permanent copy and chain it ---- */
1084 * The following code is only executed during pass 1
/* Per-type allocation sizes for the malloc below (the surrounding
 * switch/case labels are elided in this excerpt). */
1088 size = sizeof(DIRRES);
1091 size = sizeof(CONRES);
1094 size =sizeof(CLIENT);
1097 size = sizeof(STORE);
1107 size = sizeof(FILESET);
1110 size = sizeof(SCHED);
1113 size = sizeof(POOL);
1116 size = sizeof(MSGS);
1119 size = sizeof(COUNTER);
/* NOTE(review): "save_resrouce" is a typo in the message text; it is a
 * runtime string, so it is left unchanged here.  Also note this branch
 * uses printf while the pass-2 counterpart uses Emsg1 -- inconsistent. */
1122 printf("Unknown resource type %d in save_resrouce.\n", type);
/* Copy the fully-parsed static res_all into a freshly allocated record
 * of the type-specific size chosen above.
 * NOTE(review): the malloc return is not checked before memcpy. */
1129 res = (URES *)malloc(size);
1130 memcpy(res, &res_all, size);
/* Empty chain for this type: the new record becomes the head. */
1131 if (!res_head[rindex]) {
1132 res_head[rindex] = (RES *)res; /* store first entry */
1133 Dmsg3(900, "Inserting first %s res: %s index=%d\n", res_to_str(type),
1134 res->res_dir.hdr.name, rindex);
/* Non-empty chain: walk to the tail, terminating if a resource of the
 * same type with the same name already exists, then append. */
1137 /* Add new res to end of chain */
1138 for (next=res_head[rindex]; next->next; next=next->next) {
1139 if (strcmp(next->name, res->res_dir.hdr.name) == 0) {
1140 Emsg2(M_ERROR_TERM, 0,
1141 _("Attempt to define second %s resource named \"%s\" is not permitted.\n"),
1142 resources[rindex].name, res->res_dir.hdr.name);
1145 next->next = (RES *)res;
1146 Dmsg4(900, "Inserting %s res: %s index=%d pass=%d\n", res_to_str(type),
1147 res->res_dir.hdr.name, rindex, pass);
/* NOTE(review): lines keep their original numbers; declarations, closing
 * braces and the presumed loop "break" are elided from this excerpt. */
1153 * Store JobType (backup, verify, restore)
1156 void store_jobtype(LEX *lc, RES_ITEM *item, int index, int pass)
/* Fetch the next token from the lexer; T_NAME tells the lexer what
 * token class is expected here. */
1160 token = lex_get_token(lc, T_NAME);
1161 /* Store the type both pass 1 and pass 2 */
/* Case-insensitive linear scan of the jobtypes[] keyword table
 * (terminated by a NULL type_name). */
1162 for (i=0; jobtypes[i].type_name; i++) {
1163 if (strcasecmp(lc->str, jobtypes[i].type_name) == 0) {
/* item->value addresses the int job-type field of the resource being
 * built; store the matched code there. */
1164 *(int *)(item->value) = jobtypes[i].job_type;
/* Reached when no table entry matched -- the elided lines presumably
 * break out of the loop on a match; confirm against the full file. */
1170 scan_err1(lc, "Expected a Job Type keyword, got: %s", lc->str);
/* Mark this item as present so the ITEM_REQUIRED check in
 * save_resource() is satisfied. */
1173 set_bit(index, res_all.hdr.item_present);
/* NOTE(review): lines keep their original numbers; declarations, closing
 * braces and the presumed loop "break" are elided from this excerpt. */
1177 * Store Job Level (Full, Incremental, ...)
1180 void store_level(LEX *lc, RES_ITEM *item, int index, int pass)
/* Fetch the level keyword token from the lexer. */
1184 token = lex_get_token(lc, T_NAME);
1185 /* Store the level pass 2 so that type is defined */
/* Case-insensitive linear scan of the joblevels[] keyword table
 * (terminated by a NULL level_name). */
1186 for (i=0; joblevels[i].level_name; i++) {
1187 if (strcasecmp(lc->str, joblevels[i].level_name) == 0) {
/* item->value addresses the int level field of the resource being
 * built; store the matched level code there. */
1188 *(int *)(item->value) = joblevels[i].level;
/* Reached when no table entry matched -- the elided lines presumably
 * break out of the loop on a match; confirm against the full file. */
1194 scan_err1(lc, "Expected a Job Level keyword, got: %s", lc->str);
/* Mark this item as present for the ITEM_REQUIRED check. */
1197 set_bit(index, res_all.hdr.item_present);
/* Store the restore Replace option (e.g. always/never -- see the
 * ReplaceOptions[] table).  NOTE(review): lines keep their original
 * numbers; declarations, closing braces and the presumed loop "break"
 * are elided from this excerpt. */
1200 void store_replace(LEX *lc, RES_ITEM *item, int index, int pass)
/* Fetch the replacement-option keyword token from the lexer. */
1203 token = lex_get_token(lc, T_NAME);
1204 /* Scan Replacement options */
/* Case-insensitive linear scan of the ReplaceOptions[] table
 * (terminated by a NULL name). */
1205 for (i=0; ReplaceOptions[i].name; i++) {
1206 if (strcasecmp(lc->str, ReplaceOptions[i].name) == 0) {
/* item->value addresses the int replace field of the resource being
 * built; store the matched option token there. */
1207 *(int *)(item->value) = ReplaceOptions[i].token;
/* Reached when no table entry matched -- the elided lines presumably
 * break out of the loop on a match; confirm against the full file. */
1213 scan_err1(lc, "Expected a Restore replacement option, got: %s", lc->str);
/* Mark this item as present for the ITEM_REQUIRED check. */
1216 set_bit(index, res_all.hdr.item_present);
1220 * Store ACL (access control list)
1223 void store_acl(LEX *lc, RES_ITEM *item, int index, int pass)
1228 token = lex_get_token(lc, T_NAME);
1230 if (((alist **)item->value)[item->code] == NULL) {
1231 ((alist **)item->value)[item->code] = New(alist(10, owned_by_alist));
1232 Dmsg1(900, "Defined new ACL alist at %d\n", item->code);
1234 ((alist **)item->value)[item->code]->append(bstrdup(lc->str));
1235 Dmsg2(900, "Appended to %d %s\n", item->code, lc->str);
1237 token = lex_get_token(lc, T_ALL);
1238 if (token == T_COMMA) {
1239 continue; /* get another ACL */
1243 set_bit(index, res_all.hdr.item_present);