2 * Main configuration file parser for Bacula Directors,
3 * some parts may be split into separate files such as
4 * the schedule configuration (run_config.c).
6 * Note, the configuration file parser consists of three parts
8 * 1. The generic lexical scanner in lib/lex.c and lib/lex.h
10 * 2. The generic config scanner in lib/parse_config.c and
12 * These files contain the parser code, some utility
13 * routines, and the common store routines (name, int,
16 * 3. The daemon specific file, which contains the Resource
17 * definitions as well as any specific store routines
18 * for the resource records.
20 * Kern Sibbald, January MM
25 Copyright (C) 2000-2005 Kern Sibbald
27 This program is free software; you can redistribute it and/or
28 modify it under the terms of the GNU General Public License as
29 published by the Free Software Foundation; either version 2 of
30 the License, or (at your option) any later version.
32 This program is distributed in the hope that it will be useful,
33 but WITHOUT ANY WARRANTY; without even the implied warranty of
34 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
35 General Public License for more details.
37 You should have received a copy of the GNU General Public
38 License along with this program; if not, write to the Free
39 Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
/* NOTE(review): this file is a sampled extract; original line numbers are
 * embedded at the start of each line and some lines (including the comment
 * terminator below) are missing from view.  Code kept byte-identical. */
47 /* Define the first and last resource ID record
48 * types. Note, these should be unique for each
49 * daemon though not a requirement.
/* First resource id known to this daemon; R_LAST presumably bounds the
 * table below -- the r_last definition is not visible in this extract. */
51 int r_first = R_FIRST;
/* One chain head per resource type; res_head is the exported alias used
 * by the generic parser in lib/parse_conf. */
53 static RES *sres_head[R_LAST - R_FIRST + 1];
54 RES **res_head = sres_head;
56 /* Imported subroutines */
/* Store routines implemented elsewhere (run_conf.c / inc_conf.c --
 * TODO confirm exact files); same signature as the local store_* helpers. */
57 extern void store_run(LEX *lc, RES_ITEM *item, int index, int pass);
58 extern void store_finc(LEX *lc, RES_ITEM *item, int index, int pass);
59 extern void store_inc(LEX *lc, RES_ITEM *item, int index, int pass);
62 /* Forward referenced subroutines */
/* Daemon-specific store routines referenced by the RES_ITEM tables below. */
64 void store_jobtype(LEX *lc, RES_ITEM *item, int index, int pass);
65 void store_level(LEX *lc, RES_ITEM *item, int index, int pass);
66 void store_replace(LEX *lc, RES_ITEM *item, int index, int pass);
67 void store_acl(LEX *lc, RES_ITEM *item, int index, int pass);
68 static void store_device(LEX *lc, RES_ITEM *item, int index, int pass);
71 /* We build the current resource here as we are
72 * scanning the resource configuration definition,
73 * then move it to allocated memory when the resource
/* Size of the static staging area res_all (definition not visible in this
 * extract) -- used by the generic parser when copying to allocated memory. */
77 int res_all_size = sizeof(res_all);
80 /* Definition of records permitted within each
81 * resource with the routine to process the record
82 * information. NOTE! quoted names must be in lower case.
87 * name handler value code flags default_value
/* Director resource directives.  Columns: name, handler, target field,
 * code, flags, default_value.  Quoted names must be lower case.
 * Table is terminated by the all-NULL sentinel entry. */
89 static RES_ITEM dir_items[] = {
90 {"name", store_name, ITEM(res_dir.hdr.name), 0, ITEM_REQUIRED, 0},
91 {"description", store_str, ITEM(res_dir.hdr.desc), 0, 0, 0},
92 {"messages", store_res, ITEM(res_dir.messages), R_MSGS, 0, 0},
93 {"dirport", store_addresses_port, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101},
94 {"diraddress", store_addresses_address, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101},
95 {"diraddresses",store_addresses, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101},
96 {"queryfile", store_dir, ITEM(res_dir.query_file), 0, ITEM_REQUIRED, 0},
97 {"workingdirectory", store_dir, ITEM(res_dir.working_directory), 0, ITEM_REQUIRED, 0},
98 {"scriptsdirectory", store_dir, ITEM(res_dir.scripts_directory), 0, 0, 0},
99 {"piddirectory",store_dir, ITEM(res_dir.pid_directory), 0, ITEM_REQUIRED, 0},
100 {"subsysdirectory", store_dir, ITEM(res_dir.subsys_directory), 0, 0, 0},
101 {"requiressl", store_yesno, ITEM(res_dir.require_ssl), 1, ITEM_DEFAULT, 0},
102 {"enablessl", store_yesno, ITEM(res_dir.enable_ssl), 1, ITEM_DEFAULT, 0},
103 {"maximumconcurrentjobs", store_pint, ITEM(res_dir.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
104 {"password", store_password, ITEM(res_dir.password), 0, ITEM_REQUIRED, 0},
/* Connect timeouts default to 30 minutes (60 * 30 seconds). */
105 {"fdconnecttimeout", store_time,ITEM(res_dir.FDConnectTimeout), 0, ITEM_DEFAULT, 60 * 30},
106 {"sdconnecttimeout", store_time,ITEM(res_dir.SDConnectTimeout), 0, ITEM_DEFAULT, 60 * 30},
107 {NULL, NULL, NULL, 0, 0, 0}
113 * name handler value code flags default_value
/* Console resource directives.  The ACL entries all share the ACL_lists
 * array; the code column selects which ACL list is populated. */
115 static RES_ITEM con_items[] = {
116 {"name", store_name, ITEM(res_con.hdr.name), 0, ITEM_REQUIRED, 0},
117 {"description", store_str, ITEM(res_con.hdr.desc), 0, 0, 0},
118 {"enablessl", store_yesno, ITEM(res_con.enable_ssl), 1, ITEM_DEFAULT, 0},
119 {"password", store_password, ITEM(res_con.password), 0, ITEM_REQUIRED, 0},
120 {"jobacl", store_acl, ITEM(res_con.ACL_lists), Job_ACL, 0, 0},
121 {"clientacl", store_acl, ITEM(res_con.ACL_lists), Client_ACL, 0, 0},
122 {"storageacl", store_acl, ITEM(res_con.ACL_lists), Storage_ACL, 0, 0},
123 {"scheduleacl", store_acl, ITEM(res_con.ACL_lists), Schedule_ACL, 0, 0},
124 {"runacl", store_acl, ITEM(res_con.ACL_lists), Run_ACL, 0, 0},
125 {"poolacl", store_acl, ITEM(res_con.ACL_lists), Pool_ACL, 0, 0},
126 {"commandacl", store_acl, ITEM(res_con.ACL_lists), Command_ACL, 0, 0},
127 {"filesetacl", store_acl, ITEM(res_con.ACL_lists), FileSet_ACL, 0, 0},
128 {"catalogacl", store_acl, ITEM(res_con.ACL_lists), Catalog_ACL, 0, 0},
129 {NULL, NULL, NULL, 0, 0, 0}
134 * Client or File daemon resource
136 * name handler value code flags default_value
/* Client (File daemon) resource directives.  "fdaddress"/"fdpassword"
 * are aliases that store into the same fields as "address"/"password". */
139 static RES_ITEM cli_items[] = {
140 {"name", store_name, ITEM(res_client.hdr.name), 0, ITEM_REQUIRED, 0},
141 {"description", store_str, ITEM(res_client.hdr.desc), 0, 0, 0},
142 {"address", store_str, ITEM(res_client.address), 0, ITEM_REQUIRED, 0},
143 {"fdaddress", store_str, ITEM(res_client.address), 0, 0, 0},
144 {"fdport", store_pint, ITEM(res_client.FDport), 0, ITEM_DEFAULT, 9102},
145 {"password", store_password, ITEM(res_client.password), 0, ITEM_REQUIRED, 0},
146 {"fdpassword", store_password, ITEM(res_client.password), 0, 0, 0},
147 {"catalog", store_res, ITEM(res_client.catalog), R_CATALOG, ITEM_REQUIRED, 0},
/* Retention defaults: files 60 days, jobs 180 days (in seconds). */
148 {"fileretention", store_time, ITEM(res_client.FileRetention), 0, ITEM_DEFAULT, 60*60*24*60},
149 {"jobretention", store_time, ITEM(res_client.JobRetention), 0, ITEM_DEFAULT, 60*60*24*180},
150 {"autoprune", store_yesno, ITEM(res_client.AutoPrune), 1, ITEM_DEFAULT, 1},
151 {"enablessl", store_yesno, ITEM(res_client.enable_ssl), 1, ITEM_DEFAULT, 0},
152 {"maximumconcurrentjobs", store_pint, ITEM(res_client.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
153 {NULL, NULL, NULL, 0, 0, 0}
156 /* Storage daemon resource
158 * name handler value code flags default_value
/* Storage daemon resource directives.  "sdaddress"/"sdpassword" alias
 * "address"/"password"; "sddport" is kept only for old configurations. */
160 static RES_ITEM store_items[] = {
161 {"name", store_name, ITEM(res_store.hdr.name), 0, ITEM_REQUIRED, 0},
162 {"description", store_str, ITEM(res_store.hdr.desc), 0, 0, 0},
163 {"sdport", store_pint, ITEM(res_store.SDport), 0, ITEM_DEFAULT, 9103},
164 {"address", store_str, ITEM(res_store.address), 0, ITEM_REQUIRED, 0},
165 {"sdaddress", store_str, ITEM(res_store.address), 0, 0, 0},
166 {"password", store_password, ITEM(res_store.password), 0, ITEM_REQUIRED, 0},
167 {"sdpassword", store_password, ITEM(res_store.password), 0, 0, 0},
168 {"device", store_device, ITEM(res_store.device), R_DEVICE, ITEM_REQUIRED, 0},
169 {"mediatype", store_strname, ITEM(res_store.media_type), 0, ITEM_REQUIRED, 0},
170 {"autochanger", store_yesno, ITEM(res_store.autochanger), 1, ITEM_DEFAULT, 0},
171 {"enablessl", store_yesno, ITEM(res_store.enable_ssl), 1, ITEM_DEFAULT, 0},
172 {"maximumconcurrentjobs", store_pint, ITEM(res_store.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
173 {"sddport", store_pint, ITEM(res_store.SDDport), 0, 0, 0}, /* deprecated */
174 {NULL, NULL, NULL, 0, 0, 0}
178 * Catalog Resource Directives
180 * name handler value code flags default_value
/* Catalog (database) resource directives.  "address"/"password" alias the
 * db-prefixed names.  Note no field here is ITEM_REQUIRED except dbname. */
182 static RES_ITEM cat_items[] = {
183 {"name", store_name, ITEM(res_cat.hdr.name), 0, ITEM_REQUIRED, 0},
184 {"description", store_str, ITEM(res_cat.hdr.desc), 0, 0, 0},
185 {"address", store_str, ITEM(res_cat.db_address), 0, 0, 0},
186 {"dbaddress", store_str, ITEM(res_cat.db_address), 0, 0, 0},
187 {"dbport", store_pint, ITEM(res_cat.db_port), 0, 0, 0},
188 /* keep this password as store_str for the moment */
189 {"password", store_str, ITEM(res_cat.db_password), 0, 0, 0},
190 {"dbpassword", store_str, ITEM(res_cat.db_password), 0, 0, 0},
191 {"user", store_str, ITEM(res_cat.db_user), 0, 0, 0},
192 {"dbname", store_str, ITEM(res_cat.db_name), 0, ITEM_REQUIRED, 0},
193 {"dbsocket", store_str, ITEM(res_cat.db_socket), 0, 0, 0},
194 {"multipleconnections", store_yesno, ITEM(res_cat.mult_db_connections), 1, 0, 0},
195 {NULL, NULL, NULL, 0, 0, 0}
199 * Job Resource Directives
201 * name handler value code flags default_value
/* Job resource directives.  Not static: this table is shared by both the
 * "job" and "jobdefs" entries of the master resources[] table below.
 * ITEM_REQUIRED here is only enforced after JobDefs are applied (see the
 * type != R_JOB && type != R_JOBDEFS check in save_resource). */
203 RES_ITEM job_items[] = {
204 {"name", store_name, ITEM(res_job.hdr.name), 0, ITEM_REQUIRED, 0},
205 {"description", store_str, ITEM(res_job.hdr.desc), 0, 0, 0},
206 {"type", store_jobtype, ITEM(res_job.JobType), 0, ITEM_REQUIRED, 0},
207 {"level", store_level, ITEM(res_job.JobLevel), 0, 0, 0},
208 {"messages", store_res, ITEM(res_job.messages), R_MSGS, ITEM_REQUIRED, 0},
209 {"storage", store_alist_res, ITEM(res_job.storage), R_STORAGE, ITEM_REQUIRED, 0},
210 {"pool", store_res, ITEM(res_job.pool), R_POOL, ITEM_REQUIRED, 0},
/* Level-specific pool overrides (optional). */
211 {"fullbackuppool", store_res, ITEM(res_job.full_pool), R_POOL, 0, 0},
212 {"incrementalbackuppool", store_res, ITEM(res_job.inc_pool), R_POOL, 0, 0},
213 {"differentialbackuppool", store_res, ITEM(res_job.dif_pool), R_POOL, 0, 0},
214 {"client", store_res, ITEM(res_job.client), R_CLIENT, ITEM_REQUIRED, 0},
215 {"fileset", store_res, ITEM(res_job.fileset), R_FILESET, ITEM_REQUIRED, 0},
216 {"schedule", store_res, ITEM(res_job.schedule), R_SCHEDULE, 0, 0},
217 {"verifyjob", store_res, ITEM(res_job.verify_job), R_JOB, 0, 0},
218 {"jobdefs", store_res, ITEM(res_job.jobdefs), R_JOBDEFS, 0, 0},
219 {"run", store_alist_str, ITEM(res_job.run_cmds), 0, 0, 0},
220 {"where", store_dir, ITEM(res_job.RestoreWhere), 0, 0, 0},
221 {"bootstrap",store_dir, ITEM(res_job.RestoreBootstrap), 0, 0, 0},
222 {"writebootstrap",store_dir, ITEM(res_job.WriteBootstrap), 0, 0, 0},
223 {"replace", store_replace, ITEM(res_job.replace), 0, ITEM_DEFAULT, REPLACE_ALWAYS},
224 {"maxruntime", store_time, ITEM(res_job.MaxRunTime), 0, 0, 0},
225 {"fullmaxwaittime", store_time, ITEM(res_job.FullMaxWaitTime), 0, 0, 0},
226 {"incrementalmaxwaittime", store_time, ITEM(res_job.IncMaxWaitTime), 0, 0, 0},
227 {"differentialmaxwaittime", store_time, ITEM(res_job.DiffMaxWaitTime), 0, 0, 0},
228 {"maxwaittime", store_time, ITEM(res_job.MaxWaitTime), 0, 0, 0},
229 {"maxstartdelay",store_time, ITEM(res_job.MaxStartDelay), 0, 0, 0},
230 {"jobretention", store_time, ITEM(res_job.JobRetention), 0, 0, 0},
231 {"prefixlinks", store_yesno, ITEM(res_job.PrefixLinks), 1, ITEM_DEFAULT, 0},
232 {"prunejobs", store_yesno, ITEM(res_job.PruneJobs), 1, ITEM_DEFAULT, 0},
233 {"prunefiles", store_yesno, ITEM(res_job.PruneFiles), 1, ITEM_DEFAULT, 0},
234 {"prunevolumes",store_yesno, ITEM(res_job.PruneVolumes), 1, ITEM_DEFAULT, 0},
235 {"spoolattributes",store_yesno, ITEM(res_job.SpoolAttributes), 1, ITEM_DEFAULT, 0},
236 {"spooldata", store_yesno, ITEM(res_job.spool_data), 1, ITEM_DEFAULT, 0},
237 {"rerunfailedlevels", store_yesno, ITEM(res_job.rerun_failed_levels), 1, ITEM_DEFAULT, 0},
238 {"runbeforejob", store_str, ITEM(res_job.RunBeforeJob), 0, 0, 0},
239 {"runafterjob", store_str, ITEM(res_job.RunAfterJob), 0, 0, 0},
240 {"runafterfailedjob", store_str, ITEM(res_job.RunAfterFailedJob), 0, 0, 0},
241 {"clientrunbeforejob", store_str, ITEM(res_job.ClientRunBeforeJob), 0, 0, 0},
242 {"clientrunafterjob", store_str, ITEM(res_job.ClientRunAfterJob), 0, 0, 0},
243 {"maximumconcurrentjobs", store_pint, ITEM(res_job.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
244 {"rescheduleonerror", store_yesno, ITEM(res_job.RescheduleOnError), 1, ITEM_DEFAULT, 0},
245 {"rescheduleinterval", store_time, ITEM(res_job.RescheduleInterval), 0, ITEM_DEFAULT, 60 * 30},
246 {"rescheduletimes", store_pint, ITEM(res_job.RescheduleTimes), 0, 0, 0},
247 {"priority", store_pint, ITEM(res_job.Priority), 0, ITEM_DEFAULT, 10},
248 {"writepartafterjob", store_yesno, ITEM(res_job.write_part_after_job), 1, ITEM_DEFAULT, 0},
249 {NULL, NULL, NULL, 0, 0, 0}
254 * name handler value code flags default_value
/* FileSet resource directives.  include/exclude share store_inc; the code
 * column (0 = include, 1 = exclude) selects the behavior.  ITEM_NO_EQUALS
 * means the directive takes a block rather than "name = value". */
256 static RES_ITEM fs_items[] = {
257 {"name", store_name, ITEM(res_fs.hdr.name), 0, ITEM_REQUIRED, 0},
258 {"description", store_str, ITEM(res_fs.hdr.desc), 0, 0, 0},
259 {"include", store_inc, NULL, 0, ITEM_NO_EQUALS, 0},
260 {"exclude", store_inc, NULL, 1, ITEM_NO_EQUALS, 0},
261 {"ignorefilesetchanges", store_yesno, ITEM(res_fs.ignore_fs_changes), 1, ITEM_DEFAULT, 0},
262 {NULL, NULL, NULL, 0, 0, 0}
265 /* Schedule -- see run_conf.c */
268 * name handler value code flags default_value
/* Schedule resource directives; "run" records are parsed by store_run
 * in run_conf.c (see extern declaration above). */
270 static RES_ITEM sch_items[] = {
271 {"name", store_name, ITEM(res_sch.hdr.name), 0, ITEM_REQUIRED, 0},
272 {"description", store_str, ITEM(res_sch.hdr.desc), 0, 0, 0},
273 {"run", store_run, ITEM(res_sch.run), 0, 0, 0},
274 {NULL, NULL, NULL, 0, 0, 0}
279 * name handler value code flags default_value
/* Pool resource directives.  VolRetention defaults to one year
 * (60*60*24*365 seconds). */
281 static RES_ITEM pool_items[] = {
282 {"name", store_name, ITEM(res_pool.hdr.name), 0, ITEM_REQUIRED, 0},
283 {"description", store_str, ITEM(res_pool.hdr.desc), 0, 0, 0},
284 {"pooltype", store_strname, ITEM(res_pool.pool_type), 0, ITEM_REQUIRED, 0},
285 {"labelformat", store_strname, ITEM(res_pool.label_format), 0, 0, 0},
286 {"labeltype", store_label, ITEM(res_pool.LabelType), 0, 0, 0},
287 {"cleaningprefix", store_strname, ITEM(res_pool.cleaning_prefix), 0, 0, 0},
288 {"usecatalog", store_yesno, ITEM(res_pool.use_catalog), 1, ITEM_DEFAULT, 1},
289 {"usevolumeonce", store_yesno, ITEM(res_pool.use_volume_once),1, 0, 0},
290 {"purgeoldestvolume", store_yesno, ITEM(res_pool.purge_oldest_volume), 1, 0, 0},
291 {"recycleoldestvolume", store_yesno, ITEM(res_pool.recycle_oldest_volume), 1, 0, 0},
292 {"recyclecurrentvolume", store_yesno, ITEM(res_pool.recycle_current_volume), 1, 0, 0},
293 {"maximumvolumes", store_pint, ITEM(res_pool.max_volumes), 0, 0, 0},
294 {"maximumvolumejobs", store_pint, ITEM(res_pool.MaxVolJobs), 0, 0, 0},
295 {"maximumvolumefiles", store_pint, ITEM(res_pool.MaxVolFiles), 0, 0, 0},
296 {"maximumvolumebytes", store_size, ITEM(res_pool.MaxVolBytes), 0, 0, 0},
297 {"acceptanyvolume", store_yesno, ITEM(res_pool.accept_any_volume), 1, ITEM_DEFAULT, 1},
298 {"catalogfiles", store_yesno, ITEM(res_pool.catalog_files), 1, ITEM_DEFAULT, 1},
299 {"volumeretention", store_time, ITEM(res_pool.VolRetention), 0, ITEM_DEFAULT, 60*60*24*365},
300 {"volumeuseduration", store_time, ITEM(res_pool.VolUseDuration), 0, 0, 0},
301 {"autoprune", store_yesno, ITEM(res_pool.AutoPrune), 1, ITEM_DEFAULT, 1},
302 {"recycle", store_yesno, ITEM(res_pool.Recycle), 1, ITEM_DEFAULT, 1},
303 {NULL, NULL, NULL, 0, 0, 0}
308 * name handler value code flags default_value
/* Counter resource directives; maximum defaults to INT32_MAX. */
310 static RES_ITEM counter_items[] = {
311 {"name", store_name, ITEM(res_counter.hdr.name), 0, ITEM_REQUIRED, 0},
312 {"description", store_str, ITEM(res_counter.hdr.desc), 0, 0, 0},
313 {"minimum", store_int, ITEM(res_counter.MinValue), 0, ITEM_DEFAULT, 0},
314 {"maximum", store_pint, ITEM(res_counter.MaxValue), 0, ITEM_DEFAULT, INT32_MAX},
315 {"wrapcounter", store_res, ITEM(res_counter.WrapCounter), R_COUNTER, 0, 0},
316 {"catalog", store_res, ITEM(res_counter.Catalog), R_CATALOG, 0, 0},
317 {NULL, NULL, NULL, 0, 0, 0}
321 /* Message resource */
/* Message resource item table defined in the shared messages code. */
322 extern RES_ITEM msgs_items[];
325 * This is the master resource definition.
326 * It must have one item for each of the resources.
328 * NOTE!!! keep it in the same order as the R_codes
329 * or eliminate all resources[rindex].name
331 * name items rcode res_head
/* Master table mapping resource keyword -> item table -> R_ code.  Note
 * "job" and "jobdefs" deliberately share job_items; "device" has no item
 * table because its info is obtained from the SD.  The table terminator
 * (and any res_head column entries) are missing from this extract. */
333 RES_TABLE resources[] = {
334 {"director", dir_items, R_DIRECTOR},
335 {"client", cli_items, R_CLIENT},
336 {"job", job_items, R_JOB},
337 {"storage", store_items, R_STORAGE},
338 {"catalog", cat_items, R_CATALOG},
339 {"schedule", sch_items, R_SCHEDULE},
340 {"fileset", fs_items, R_FILESET},
341 {"pool", pool_items, R_POOL},
342 {"messages", msgs_items, R_MSGS},
343 {"counter", counter_items, R_COUNTER},
344 {"console", con_items, R_CONSOLE},
345 {"jobdefs", job_items, R_JOBDEFS},
346 {"device", NULL, R_DEVICE}, /* info obtained from SD */
351 /* Keywords (RHS) permitted in Job Level records
353 * level_name level job_type
/* Job Level keywords, each tied to the job type it is valid for.  The
 * blank-named entries give Admin/Restore jobs an L_NONE level.  The table
 * terminator is missing from this extract. */
355 struct s_jl joblevels[] = {
356 {"Full", L_FULL, JT_BACKUP},
357 {"Base", L_BASE, JT_BACKUP},
358 {"Incremental", L_INCREMENTAL, JT_BACKUP},
359 {"Differential", L_DIFFERENTIAL, JT_BACKUP},
360 {"Since", L_SINCE, JT_BACKUP},
361 {"Catalog", L_VERIFY_CATALOG, JT_VERIFY},
362 {"InitCatalog", L_VERIFY_INIT, JT_VERIFY},
363 {"VolumeToCatalog", L_VERIFY_VOLUME_TO_CATALOG, JT_VERIFY},
364 {"DiskToCatalog", L_VERIFY_DISK_TO_CATALOG, JT_VERIFY},
365 {"Data", L_VERIFY_DATA, JT_VERIFY},
366 {" ", L_NONE, JT_ADMIN},
367 {" ", L_NONE, JT_RESTORE},
371 /* Keywords (RHS) permitted in Job type records
/* Job Type keywords.  Further entries (e.g. admin) and the terminator
 * are missing from this extract -- note original line 377 is absent. */
375 struct s_jt jobtypes[] = {
376 {"backup", JT_BACKUP},
378 {"verify", JT_VERIFY},
379 {"restore", JT_RESTORE},
384 /* Options permitted in Restore replace= */
385 struct s_kw ReplaceOptions[] = {
386 {"always", REPLACE_ALWAYS},
387 {"ifnewer", REPLACE_IFNEWER},
388 {"ifolder", REPLACE_IFOLDER},
389 {"never", REPLACE_NEVER},
/* Map a job level code to its keyword string; falls back to a static
 * "%c (%d)" buffer when the level is not in joblevels[].  NOTE(review):
 * the static buffer makes this non-reentrant; also the function's
 * opening/closing braces and return statement are missing from this
 * extract. */
393 const char *level_to_str(int level)
396 static char level_no[30];
397 const char *str = level_no;
399 bsnprintf(level_no, sizeof(level_no), "%c (%d)", level, level); /* default if not found */
400 for (i=0; joblevels[i].level_name; i++) {
401 if (level == joblevels[i].level) {
402 str = joblevels[i].level_name;
409 /* Dump contents of resource */
/* Print a human-readable dump of one resource (and the resources it
 * references) through the sendit callback.  A negative type suppresses
 * recursion along the hdr.next chain (see the tail of the function).
 * NOTE(review): the switch(type) statement, its case labels, and many
 * closing braces are missing from this extract; the sendit format
 * strings identify which resource each section dumps. */
410 void dump_resource(int type, RES *reshdr, void sendit(void *sock, const char *fmt, ...), void *sock)
412 URES *res = (URES *)reshdr;
414 char ed1[100], ed2[100];
418 sendit(sock, "No %s resource defined\n", res_to_str(type));
421 if (type < 0) { /* no recursion */
/* Director resource dump */
427 sendit(sock, "Director: name=%s MaxJobs=%d FDtimeout=%s SDtimeout=%s\n",
428 reshdr->name, res->res_dir.MaxConcurrentJobs,
429 edit_uint64(res->res_dir.FDConnectTimeout, ed1),
430 edit_uint64(res->res_dir.SDConnectTimeout, ed2));
431 if (res->res_dir.query_file) {
432 sendit(sock, " query_file=%s\n", res->res_dir.query_file);
434 if (res->res_dir.messages) {
435 sendit(sock, " --> ");
436 dump_resource(-R_MSGS, (RES *)res->res_dir.messages, sendit, sock);
/* Console resource dump */
440 sendit(sock, "Console: name=%s SSL=%d\n",
441 res->res_con.hdr.name, res->res_con.enable_ssl);
/* Counter resource dump (with/without wrap counter) */
444 if (res->res_counter.WrapCounter) {
445 sendit(sock, "Counter: name=%s min=%d max=%d cur=%d wrapcntr=%s\n",
446 res->res_counter.hdr.name, res->res_counter.MinValue,
447 res->res_counter.MaxValue, res->res_counter.CurrentValue,
448 res->res_counter.WrapCounter->hdr.name);
450 sendit(sock, "Counter: name=%s min=%d max=%d\n",
451 res->res_counter.hdr.name, res->res_counter.MinValue,
452 res->res_counter.MaxValue);
454 if (res->res_counter.Catalog) {
455 sendit(sock, " --> ");
456 dump_resource(-R_CATALOG, (RES *)res->res_counter.Catalog, sendit, sock);
/* Client resource dump */
461 sendit(sock, "Client: name=%s address=%s FDport=%d MaxJobs=%u\n",
462 res->res_client.hdr.name, res->res_client.address, res->res_client.FDport,
463 res->res_client.MaxConcurrentJobs);
464 sendit(sock, " JobRetention=%s FileRetention=%s AutoPrune=%d\n",
465 edit_utime(res->res_client.JobRetention, ed1, sizeof(ed1)),
466 edit_utime(res->res_client.FileRetention, ed2, sizeof(ed2)),
467 res->res_client.AutoPrune);
468 if (res->res_client.catalog) {
469 sendit(sock, " --> ");
470 dump_resource(-R_CATALOG, (RES *)res->res_client.catalog, sendit, sock);
/* Device resource dump -- "dev" is presumably a DEVICE* local declared
 * on a line missing from this extract; TODO confirm against full source */
476 sendit(sock, "Device: name=%s ok=%d num_writers=%d max_writers=%d\n"
477 " reserved=%d open=%d append=%d read=%d labeled=%d offline=%d autochgr=%d\n"
478 " poolid=%s volname=%s MediaType=%s\n",
479 dev->hdr.name, dev->found, dev->num_writers, dev->max_writers,
480 dev->reserved, dev->open, dev->append, dev->read, dev->labeled,
481 dev->offline, dev->autochanger,
482 edit_uint64(dev->PoolId, ed1),
483 dev->VolumeName, dev->MediaType);
/* Storage resource dump */
486 sendit(sock, "Storage: name=%s address=%s SDport=%d MaxJobs=%u\n"
487 " DeviceName=%s MediaType=%s StorageId=%s\n",
488 res->res_store.hdr.name, res->res_store.address, res->res_store.SDport,
489 res->res_store.MaxConcurrentJobs,
490 res->res_store.dev_name(),
491 res->res_store.media_type,
492 edit_int64(res->res_store.StorageId, ed1));
/* Catalog resource dump (note the "MutliDBConn" typo is in the original
 * output string -- left untouched here since it is runtime output) */
495 sendit(sock, "Catalog: name=%s address=%s DBport=%d db_name=%s\n"
496 " db_user=%s MutliDBConn=%d\n",
497 res->res_cat.hdr.name, NPRT(res->res_cat.db_address),
498 res->res_cat.db_port, res->res_cat.db_name, NPRT(res->res_cat.db_user),
499 res->res_cat.mult_db_connections);
/* Job / JobDefs resource dump (shared code, label chosen from type) */
503 sendit(sock, "%s: name=%s JobType=%d level=%s Priority=%d MaxJobs=%u\n",
504 type == R_JOB ? "Job" : "JobDefs",
505 res->res_job.hdr.name, res->res_job.JobType,
506 level_to_str(res->res_job.JobLevel), res->res_job.Priority,
507 res->res_job.MaxConcurrentJobs);
508 sendit(sock, " Resched=%d Times=%d Interval=%s Spool=%d WritePartAfterJob=%d\n",
509 res->res_job.RescheduleOnError, res->res_job.RescheduleTimes,
510 edit_uint64_with_commas(res->res_job.RescheduleInterval, ed1),
511 res->res_job.spool_data, res->res_job.write_part_after_job);
512 if (res->res_job.client) {
513 sendit(sock, " --> ");
514 dump_resource(-R_CLIENT, (RES *)res->res_job.client, sendit, sock);
516 if (res->res_job.fileset) {
517 sendit(sock, " --> ");
518 dump_resource(-R_FILESET, (RES *)res->res_job.fileset, sendit, sock);
520 if (res->res_job.schedule) {
521 sendit(sock, " --> ");
522 dump_resource(-R_SCHEDULE, (RES *)res->res_job.schedule, sendit, sock);
524 if (res->res_job.RestoreWhere) {
525 sendit(sock, " --> Where=%s\n", NPRT(res->res_job.RestoreWhere));
527 if (res->res_job.RestoreBootstrap) {
528 sendit(sock, " --> Bootstrap=%s\n", NPRT(res->res_job.RestoreBootstrap));
530 if (res->res_job.RunBeforeJob) {
531 sendit(sock, " --> RunBefore=%s\n", NPRT(res->res_job.RunBeforeJob));
533 if (res->res_job.RunAfterJob) {
534 sendit(sock, " --> RunAfter=%s\n", NPRT(res->res_job.RunAfterJob));
536 if (res->res_job.RunAfterFailedJob) {
537 sendit(sock, " --> RunAfterFailed=%s\n", NPRT(res->res_job.RunAfterFailedJob));
539 if (res->res_job.WriteBootstrap) {
540 sendit(sock, " --> WriteBootstrap=%s\n", NPRT(res->res_job.WriteBootstrap));
542 if (res->res_job.storage) {
/* "store" loop variable declaration missing from this extract */
544 foreach_alist(store, res->res_job.storage) {
545 sendit(sock, " --> ");
546 dump_resource(-R_STORAGE, (RES *)store, sendit, sock);
549 if (res->res_job.pool) {
550 sendit(sock, " --> ");
551 dump_resource(-R_POOL, (RES *)res->res_job.pool, sendit, sock);
553 if (res->res_job.full_pool) {
554 sendit(sock, " --> ");
555 dump_resource(-R_POOL, (RES *)res->res_job.full_pool, sendit, sock);
557 if (res->res_job.inc_pool) {
558 sendit(sock, " --> ");
559 dump_resource(-R_POOL, (RES *)res->res_job.inc_pool, sendit, sock);
561 if (res->res_job.dif_pool) {
562 sendit(sock, " --> ");
563 dump_resource(-R_POOL, (RES *)res->res_job.dif_pool, sendit, sock);
565 if (res->res_job.verify_job) {
566 sendit(sock, " --> ");
567 dump_resource(-type, (RES *)res->res_job.verify_job, sendit, sock);
569 if (res->res_job.run_cmds) {
571 foreach_alist(runcmd, res->res_job.run_cmds) {
572 sendit(sock, " --> Run=%s\n", runcmd);
575 if (res->res_job.messages) {
576 sendit(sock, " --> ");
577 dump_resource(-R_MSGS, (RES *)res->res_job.messages, sendit, sock);
/* FileSet resource dump: per-include options, then name lists */
583 sendit(sock, "FileSet: name=%s\n", res->res_fs.hdr.name);
584 for (i=0; i<res->res_fs.num_includes; i++) {
585 INCEXE *incexe = res->res_fs.include_items[i];
586 for (j=0; j<incexe->num_opts; j++) {
587 FOPTS *fo = incexe->opts_list[j];
588 sendit(sock, " O %s\n", fo->opts);
589 for (k=0; k<fo->regex.size(); k++) {
590 sendit(sock, " R %s\n", fo->regex.get(k));
592 for (k=0; k<fo->regexdir.size(); k++) {
593 sendit(sock, " RD %s\n", fo->regexdir.get(k));
595 for (k=0; k<fo->regexfile.size(); k++) {
596 sendit(sock, " RF %s\n", fo->regexfile.get(k));
598 for (k=0; k<fo->wild.size(); k++) {
599 sendit(sock, " W %s\n", fo->wild.get(k));
601 for (k=0; k<fo->wilddir.size(); k++) {
602 sendit(sock, " WD %s\n", fo->wilddir.get(k));
604 for (k=0; k<fo->wildfile.size(); k++) {
605 sendit(sock, " WF %s\n", fo->wildfile.get(k));
607 for (k=0; k<fo->base.size(); k++) {
608 sendit(sock, " B %s\n", fo->base.get(k));
610 for (k=0; k<fo->fstype.size(); k++) {
611 sendit(sock, " X %s\n", fo->fstype.get(k));
/* reader/writer guards presumably on lines missing from this extract */
614 sendit(sock, " D %s\n", fo->reader);
617 sendit(sock, " T %s\n", fo->writer);
619 sendit(sock, " N\n");
621 for (j=0; j<incexe->name_list.size(); j++) {
622 sendit(sock, " I %s\n", incexe->name_list.get(j));
624 if (incexe->name_list.size()) {
625 sendit(sock, " N\n");
629 for (i=0; i<res->res_fs.num_excludes; i++) {
630 INCEXE *incexe = res->res_fs.exclude_items[i];
631 for (j=0; j<incexe->name_list.size(); j++) {
632 sendit(sock, " E %s\n", incexe->name_list.get(j));
634 if (incexe->name_list.size()) {
635 sendit(sock, " N\n");
/* Schedule resource dump: expand each RUN record's bit masks */
641 if (res->res_sch.run) {
643 RUN *run = res->res_sch.run;
644 char buf[1000], num[30];
645 sendit(sock, "Schedule: name=%s\n", res->res_sch.hdr.name);
650 sendit(sock, " --> Run Level=%s\n", level_to_str(run->level));
651 bstrncpy(buf, " hour=", sizeof(buf));
652 for (i=0; i<24; i++) {
653 if (bit_is_set(i, run->hour)) {
654 bsnprintf(num, sizeof(num), "%d ", i);
655 bstrncat(buf, num, sizeof(buf));
658 bstrncat(buf, "\n", sizeof(buf));
660 bstrncpy(buf, " mday=", sizeof(buf));
661 for (i=0; i<31; i++) {
662 if (bit_is_set(i, run->mday)) {
663 bsnprintf(num, sizeof(num), "%d ", i);
664 bstrncat(buf, num, sizeof(buf));
667 bstrncat(buf, "\n", sizeof(buf));
669 bstrncpy(buf, " month=", sizeof(buf));
670 for (i=0; i<12; i++) {
671 if (bit_is_set(i, run->month)) {
672 bsnprintf(num, sizeof(num), "%d ", i);
673 bstrncat(buf, num, sizeof(buf));
676 bstrncat(buf, "\n", sizeof(buf));
678 bstrncpy(buf, " wday=", sizeof(buf));
679 for (i=0; i<7; i++) {
680 if (bit_is_set(i, run->wday)) {
681 bsnprintf(num, sizeof(num), "%d ", i);
682 bstrncat(buf, num, sizeof(buf));
685 bstrncat(buf, "\n", sizeof(buf));
687 bstrncpy(buf, " wom=", sizeof(buf));
688 for (i=0; i<5; i++) {
689 if (bit_is_set(i, run->wom)) {
690 bsnprintf(num, sizeof(num), "%d ", i);
691 bstrncat(buf, num, sizeof(buf));
694 bstrncat(buf, "\n", sizeof(buf));
696 bstrncpy(buf, " woy=", sizeof(buf));
697 for (i=0; i<54; i++) {
698 if (bit_is_set(i, run->woy)) {
699 bsnprintf(num, sizeof(num), "%d ", i);
700 bstrncat(buf, num, sizeof(buf));
703 bstrncat(buf, "\n", sizeof(buf));
705 sendit(sock, " mins=%d\n", run->minute);
707 sendit(sock, " --> ");
708 dump_resource(-R_POOL, (RES *)run->pool, sendit, sock);
711 sendit(sock, " --> ");
712 dump_resource(-R_STORAGE, (RES *)run->storage, sendit, sock);
715 sendit(sock, " --> ");
716 dump_resource(-R_MSGS, (RES *)run->msgs, sendit, sock);
718 /* If another Run record is chained in, go print it */
724 sendit(sock, "Schedule: name=%s\n", res->res_sch.hdr.name);
/* Pool resource dump */
728 sendit(sock, "Pool: name=%s PoolType=%s\n", res->res_pool.hdr.name,
729 res->res_pool.pool_type);
730 sendit(sock, " use_cat=%d use_once=%d acpt_any=%d cat_files=%d\n",
731 res->res_pool.use_catalog, res->res_pool.use_volume_once,
732 res->res_pool.accept_any_volume, res->res_pool.catalog_files);
733 sendit(sock, " max_vols=%d auto_prune=%d VolRetention=%s\n",
734 res->res_pool.max_volumes, res->res_pool.AutoPrune,
735 edit_utime(res->res_pool.VolRetention, ed1, sizeof(ed1)));
736 sendit(sock, " VolUse=%s recycle=%d LabelFormat=%s\n",
737 edit_utime(res->res_pool.VolUseDuration, ed1, sizeof(ed1)),
738 res->res_pool.Recycle,
739 NPRT(res->res_pool.label_format));
740 sendit(sock, " CleaningPrefix=%s LabelType=%d\n",
741 NPRT(res->res_pool.cleaning_prefix), res->res_pool.LabelType);
742 sendit(sock, " RecyleOldest=%d PurgeOldest=%d MaxVolJobs=%d MaxVolFiles=%d\n",
743 res->res_pool.recycle_oldest_volume,
744 res->res_pool.purge_oldest_volume,
745 res->res_pool.MaxVolJobs, res->res_pool.MaxVolFiles);
/* Messages resource dump */
748 sendit(sock, "Messages: name=%s\n", res->res_msgs.hdr.name);
749 if (res->res_msgs.mail_cmd)
750 sendit(sock, " mailcmd=%s\n", res->res_msgs.mail_cmd);
751 if (res->res_msgs.operator_cmd)
752 sendit(sock, " opcmd=%s\n", res->res_msgs.operator_cmd);
/* default case of the (missing) switch */
755 sendit(sock, "Unknown resource type %d in dump_resource.\n", type);
/* Recurse along the chain unless suppressed via negative type above. */
758 if (recurse && res->res_dir.hdr.next) {
759 dump_resource(type, res->res_dir.hdr.next, sendit, sock);
764 * Free all the members of an INCEXE structure
/* Free all the members of one INCEXE structure: the name list, every
 * FOPTS entry's alists, then the opts_list array itself.  NOTE(review):
 * several lines (e.g. freeing fopt itself, reader/writer strings, and
 * closing braces) are missing from this extract. */
766 static void free_incexe(INCEXE *incexe)
768 incexe->name_list.destroy();
769 for (int i=0; i<incexe->num_opts; i++) {
770 FOPTS *fopt = incexe->opts_list[i];
771 fopt->regex.destroy();
772 fopt->regexdir.destroy();
773 fopt->regexfile.destroy();
774 fopt->wild.destroy();
775 fopt->wilddir.destroy();
776 fopt->wildfile.destroy();
777 fopt->base.destroy();
778 fopt->fstype.destroy();
787 if (incexe->opts_list) {
788 free(incexe->opts_list);
794 * Free memory of resource -- called when daemon terminates.
795 * NB, we don't need to worry about freeing any references
796 * to other resources as they will be freed when that
797 * resource chain is traversed. Mainly we worry about freeing
798 * allocated strings (names).
/* Free the memory owned by one resource record, then recurse to the next
 * record on the same chain (nres).  Cross-resource pointers are NOT freed
 * here -- they are released when their own chain is traversed (see the
 * comment above the function).  NOTE(review): the switch(type) scaffolding,
 * case labels, and several closing braces are missing from this extract. */
800 void free_resource(RES *sres, int type)
803 RES *nres; /* next resource if linked */
804 URES *res = (URES *)sres;
809 /* common stuff -- free the resource name and description */
810 nres = (RES *)res->res_dir.hdr.next;
811 if (res->res_dir.hdr.name) {
812 free(res->res_dir.hdr.name);
814 if (res->res_dir.hdr.desc) {
815 free(res->res_dir.hdr.desc);
/* Director resource cleanup */
820 if (res->res_dir.working_directory) {
821 free(res->res_dir.working_directory);
823 if (res->res_dir.scripts_directory) {
824 free((char *)res->res_dir.scripts_directory);
826 if (res->res_dir.pid_directory) {
827 free(res->res_dir.pid_directory);
829 if (res->res_dir.subsys_directory) {
830 free(res->res_dir.subsys_directory);
832 if (res->res_dir.password) {
833 free(res->res_dir.password);
835 if (res->res_dir.query_file) {
836 free(res->res_dir.query_file);
838 if (res->res_dir.DIRaddrs) {
839 free_addresses(res->res_dir.DIRaddrs);
/* Console resource cleanup: password plus every ACL list */
846 if (res->res_con.password) {
847 free(res->res_con.password);
849 for (int i=0; i<Num_ACL; i++) {
850 if (res->res_con.ACL_lists[i]) {
851 delete res->res_con.ACL_lists[i];
852 res->res_con.ACL_lists[i] = NULL;
/* Client resource cleanup */
857 if (res->res_client.address) {
858 free(res->res_client.address);
860 if (res->res_client.password) {
861 free(res->res_client.password);
/* Storage resource cleanup */
865 if (res->res_store.address) {
866 free(res->res_store.address);
868 if (res->res_store.password) {
869 free(res->res_store.password);
871 if (res->res_store.media_type) {
872 free(res->res_store.media_type);
874 if (res->res_store.device) {
875 delete res->res_store.device;
/* Catalog resource cleanup */
879 if (res->res_cat.db_address) {
880 free(res->res_cat.db_address);
882 if (res->res_cat.db_socket) {
883 free(res->res_cat.db_socket);
885 if (res->res_cat.db_user) {
886 free(res->res_cat.db_user);
888 if (res->res_cat.db_name) {
889 free(res->res_cat.db_name);
891 if (res->res_cat.db_password) {
892 free(res->res_cat.db_password);
/* FileSet resource cleanup: include/exclude item arrays (the loops
 * decrementing num through the arrays are partially missing here) */
896 if ((num=res->res_fs.num_includes)) {
898 free_incexe(res->res_fs.include_items[num]);
900 free(res->res_fs.include_items);
902 res->res_fs.num_includes = 0;
903 if ((num=res->res_fs.num_excludes)) {
905 free_incexe(res->res_fs.exclude_items[num]);
907 free(res->res_fs.exclude_items);
909 res->res_fs.num_excludes = 0;
/* Pool resource cleanup */
912 if (res->res_pool.pool_type) {
913 free(res->res_pool.pool_type);
915 if (res->res_pool.label_format) {
916 free(res->res_pool.label_format);
918 if (res->res_pool.cleaning_prefix) {
919 free(res->res_pool.cleaning_prefix);
/* Schedule resource cleanup (RUN-chain walk missing from this extract) */
923 if (res->res_sch.run) {
925 nrun = res->res_sch.run;
/* Job / JobDefs resource cleanup */
935 if (res->res_job.RestoreWhere) {
936 free(res->res_job.RestoreWhere);
938 if (res->res_job.RestoreBootstrap) {
939 free(res->res_job.RestoreBootstrap);
941 if (res->res_job.WriteBootstrap) {
942 free(res->res_job.WriteBootstrap);
944 if (res->res_job.RunBeforeJob) {
945 free(res->res_job.RunBeforeJob);
947 if (res->res_job.RunAfterJob) {
948 free(res->res_job.RunAfterJob);
950 if (res->res_job.RunAfterFailedJob) {
951 free(res->res_job.RunAfterFailedJob);
953 if (res->res_job.ClientRunBeforeJob) {
954 free(res->res_job.ClientRunBeforeJob);
956 if (res->res_job.ClientRunAfterJob) {
957 free(res->res_job.ClientRunAfterJob);
959 if (res->res_job.run_cmds) {
960 delete res->res_job.run_cmds;
962 if (res->res_job.storage) {
963 delete res->res_job.storage;
/* Messages resource cleanup: command strings, then the shared helper */
967 if (res->res_msgs.mail_cmd) {
968 free(res->res_msgs.mail_cmd);
970 if (res->res_msgs.operator_cmd) {
971 free(res->res_msgs.operator_cmd);
973 free_msgs_res((MSGS *)res); /* free message resource */
/* default case of the (missing) switch */
977 printf("Unknown resource type %d in free_resource.\n", type);
979 /* Common stuff again -- free the resource, recurse to next one */
984 free_resource(nres, type);
989 * Save the new resource by chaining it into the head list for
990 * the resource. If this is pass 2, we update any resource
991 * pointers because they may not have been defined until
/*
 * Save the just-parsed resource (held in the static res_all union).
 * Pass 1: validate required items, allocate a permanent record, copy
 * res_all into it, and link it into the chain for its resource type.
 * Pass 2: look up the permanent record by name and copy in the resource
 * pointers that the store routines resolved during this pass.
 *
 * NOTE(review): this view of the file is missing interior lines
 * (embedded original line numbers are non-contiguous); comments below
 * describe only the visible fragments.
 */
994 void save_resource(int type, RES_ITEM *items, int pass)
/* Index into the resources[] / res_head[] tables for this type */
997 int rindex = type - r_first;
1001 /* Check Job requirements after applying JobDefs */
1002 if (type != R_JOB && type != R_JOBDEFS) {
1004 * Ensure that all required items are present
/* Scan the item table; ITEM_REQUIRED entries must have had their
 * item_present bit set by the corresponding store routine. */
1006 for (i=0; items[i].name; i++) {
1007 if (items[i].flags & ITEM_REQUIRED) {
1008 if (!bit_is_set(i, res_all.res_dir.hdr.item_present)) {
1009 Emsg2(M_ERROR_TERM, 0, "%s item is required in %s resource, but not found.\n",
1010 items[i].name, resources[rindex]);
1013 /* If this triggers, take a look at lib/parse_conf.h */
1014 if (i >= MAX_RES_ITEMS) {
1015 Emsg1(M_ERROR_TERM, 0, "Too many items in %s resource\n", resources[rindex]);
1021 * During pass 2 in each "store" routine, we looked up pointers
1022 * to all the resources referenced in the current resource, now we
1023 * must copy their addresses from the static record to the allocated
1028 /* Resources not containing a resource */
1037 /* Resources containing another resource or alist */
/* Director: copy the resolved Messages pointer into the pass-1 record */
1039 if ((res = (URES *)GetResWithName(R_DIRECTOR, res_all.res_dir.hdr.name)) == NULL) {
1040 Emsg1(M_ERROR_TERM, 0, "Cannot find Director resource %s\n", res_all.res_dir.hdr.name);
1042 res->res_dir.messages = res_all.res_dir.messages;
/* Storage: lookup keys on res_all.res_store.hdr.name ... */
1045 if ((res = (URES *)GetResWithName(type, res_all.res_store.hdr.name)) == NULL) {
1046 Emsg1(M_ERROR_TERM, 0, "Cannot find Storage resource %s\n",
/* NOTE(review): ...but the error message prints res_all.res_dir.hdr.name.
 * res_all is presumably a union so every member shares the same hdr and
 * the names coincide — confirm against the URES declaration. */
1047 res_all.res_dir.hdr.name);
1049 /* we must explicitly copy the device alist pointer */
1050 res->res_store.device = res_all.res_store.device;
/* Job (and JobDefs): copy every resolved resource pointer */
1054 if ((res = (URES *)GetResWithName(type, res_all.res_dir.hdr.name)) == NULL) {
1055 Emsg1(M_ERROR_TERM, 0, "Cannot find Job resource %s\n",
1056 res_all.res_dir.hdr.name);
1058 res->res_job.messages = res_all.res_job.messages;
1059 res->res_job.schedule = res_all.res_job.schedule;
1060 res->res_job.client = res_all.res_job.client;
1061 res->res_job.fileset = res_all.res_job.fileset;
1062 res->res_job.storage = res_all.res_job.storage;
1063 res->res_job.pool = res_all.res_job.pool;
1064 res->res_job.full_pool = res_all.res_job.full_pool;
1065 res->res_job.inc_pool = res_all.res_job.inc_pool;
1066 res->res_job.dif_pool = res_all.res_job.dif_pool;
1067 res->res_job.verify_job = res_all.res_job.verify_job;
1068 res->res_job.jobdefs = res_all.res_job.jobdefs;
1069 res->res_job.run_cmds = res_all.res_job.run_cmds;
/* Counter: copy resolved Catalog and WrapCounter pointers */
1072 if ((res = (URES *)GetResWithName(R_COUNTER, res_all.res_counter.hdr.name)) == NULL) {
1073 Emsg1(M_ERROR_TERM, 0, "Cannot find Counter resource %s\n", res_all.res_counter.hdr.name);
1075 res->res_counter.Catalog = res_all.res_counter.Catalog;
1076 res->res_counter.WrapCounter = res_all.res_counter.WrapCounter;
/* Client: copy resolved Catalog pointer */
1080 if ((res = (URES *)GetResWithName(R_CLIENT, res_all.res_client.hdr.name)) == NULL) {
1081 Emsg1(M_ERROR_TERM, 0, "Cannot find Client resource %s\n", res_all.res_client.hdr.name);
1083 res->res_client.catalog = res_all.res_client.catalog;
1087 * Schedule is a bit different in that it contains a RUN record
1088 * chain which isn't a "named" resource. This chain was linked
1089 * in by run_conf.c during pass 2, so here we jam the pointer
1090 * into the Schedule resource.
/* NOTE(review): the Schedule lookup keys on res_all.res_client.hdr.name;
 * presumably safe because all res_all members share one hdr — confirm. */
1092 if ((res = (URES *)GetResWithName(R_SCHEDULE, res_all.res_client.hdr.name)) == NULL) {
1093 Emsg1(M_ERROR_TERM, 0, "Cannot find Schedule resource %s\n", res_all.res_client.hdr.name);
1095 res->res_sch.run = res_all.res_sch.run;
/* Unknown type on pass 2 is non-fatal (M_ERROR, not M_ERROR_TERM) */
1098 Emsg1(M_ERROR, 0, "Unknown resource type %d in save_resource.\n", type);
1102 /* Note, the resource name was already saved during pass 1,
1103 * so here, we can just release it.
1105 if (res_all.res_dir.hdr.name) {
1106 free(res_all.res_dir.hdr.name);
1107 res_all.res_dir.hdr.name = NULL;
1109 if (res_all.res_dir.hdr.desc) {
1110 free(res_all.res_dir.hdr.desc);
1111 res_all.res_dir.hdr.desc = NULL;
1117 * The following code is only executed during pass 1
/* Pick the allocation size for the permanent record by resource type */
1121 size = sizeof(DIRRES);
1124 size = sizeof(CONRES);
1127 size =sizeof(CLIENT);
1130 size = sizeof(STORE);
1140 size = sizeof(FILESET);
1143 size = sizeof(SCHED);
1146 size = sizeof(POOL);
1149 size = sizeof(MSGS);
1152 size = sizeof(COUNTER);
/* NOTE(review): runtime string contains typo "save_resrouce" — fix
 * would change program output, so flagged rather than edited here. */
1158 printf("Unknown resource type %d in save_resrouce.\n", type);
/* Allocate the permanent record and snapshot the static res_all */
1164 res = (URES *)malloc(size);
1165 memcpy(res, &res_all, size);
1166 if (!res_head[rindex]) {
1167 res_head[rindex] = (RES *)res; /* store first entry */
1168 Dmsg3(900, "Inserting first %s res: %s index=%d\n", res_to_str(type),
1169 res->res_dir.hdr.name, rindex);
1172 /* Add new res to end of chain */
/* Walk to the chain tail, rejecting duplicate resource names on the way */
1173 for (next=res_head[rindex]; next->next; next=next->next) {
1174 if (strcmp(next->name, res->res_dir.hdr.name) == 0) {
1175 Emsg2(M_ERROR_TERM, 0,
1176 _("Attempt to define second %s resource named \"%s\" is not permitted.\n"),
1177 resources[rindex].name, res->res_dir.hdr.name);
1180 next->next = (RES *)res;
1181 Dmsg4(900, "Inserting %s res: %s index=%d pass=%d\n", res_to_str(type),
1182 res->res_dir.hdr.name, rindex, pass);
1188 * Store Device. Note, the resource is created upon the
1189 * first reference. The details of the resource are obtained
1190 * later from the SD.
1192 static void store_device(LEX *lc, RES_ITEM *item, int index, int pass)
/* Chain index and allocation size for the implicitly-created DEVICE */
1196 int rindex = R_DEVICE - r_first;
1197 int size = sizeof(DEVICE);
/* Read the device name token from the config stream */
1201 token = lex_get_token(lc, T_NAME);
/* Empty chain: allocate, zero, name, and install the first DEVICE */
1202 if (!res_head[rindex]) {
1203 res = (URES *)malloc(size);
1204 memset(res, 0, size);
1205 res->res_dev.hdr.name = bstrdup(lc->str);
1206 res_head[rindex] = (RES *)res; /* store first entry */
1207 Dmsg3(900, "Inserting first %s res: %s index=%d\n", res_to_str(R_DEVICE),
/* NOTE(review): prints res_dir.hdr.name for a DEVICE record; presumably
 * every URES member shares the same hdr layout — confirm against URES. */
1208 res->res_dir.hdr.name, rindex);
1211 /* See if it is already defined */
/* Walk existing chain; duplicates are tolerated here (resource is
 * created on first reference, details come later from the SD) */
1212 for (next=res_head[rindex]; next->next; next=next->next) {
1213 if (strcmp(next->name, lc->str) == 0) {
/* Not found: allocate a fresh zeroed DEVICE and append to chain tail */
1219 res = (URES *)malloc(size);
1220 memset(res, 0, size);
1221 res->res_dev.hdr.name = bstrdup(lc->str);
1222 next->next = (RES *)res;
1223 Dmsg4(900, "Inserting %s res: %s index=%d pass=%d\n", res_to_str(R_DEVICE),
1224 res->res_dir.hdr.name, rindex, pass);
/* Mark this item seen, then store it as an alist resource reference */
1229 set_bit(index, res_all.hdr.item_present);
1231 store_alist_res(lc, item, index, pass);
1237 * Store JobType (backup, verify, restore)
/*
 * Parse a Job Type keyword (e.g. backup/verify/restore per the
 * jobtypes[] table) and store the matching enum into item->value.
 * Unrecognized keywords are a fatal scan error.
 */
1240 void store_jobtype(LEX *lc, RES_ITEM *item, int index, int pass)
1244 token = lex_get_token(lc, T_NAME);
1245 /* Store the type both pass 1 and pass 2 */
/* Case-insensitive match against the terminated jobtypes[] table */
1246 for (i=0; jobtypes[i].type_name; i++) {
1247 if (strcasecmp(lc->str, jobtypes[i].type_name) == 0) {
1248 *(int *)(item->value) = jobtypes[i].job_type;
/* Reached on no match (table exhausted) */
1254 scan_err1(lc, "Expected a Job Type keyword, got: %s", lc->str);
/* Record that this item was supplied, for required-item checking */
1257 set_bit(index, res_all.hdr.item_present);
1261 * Store Job Level (Full, Incremental, ...)
/*
 * Parse a Job Level keyword (Full, Incremental, ... per the
 * joblevels[] table) and store the matching level into item->value.
 * Unrecognized keywords are a fatal scan error.
 */
1264 void store_level(LEX *lc, RES_ITEM *item, int index, int pass)
1268 token = lex_get_token(lc, T_NAME);
1269 /* Store the level pass 2 so that type is defined */
/* Case-insensitive match against the terminated joblevels[] table */
1270 for (i=0; joblevels[i].level_name; i++) {
1271 if (strcasecmp(lc->str, joblevels[i].level_name) == 0) {
1272 *(int *)(item->value) = joblevels[i].level;
/* Reached on no match (table exhausted) */
1278 scan_err1(lc, "Expected a Job Level keyword, got: %s", lc->str);
/* Record that this item was supplied, for required-item checking */
1281 set_bit(index, res_all.hdr.item_present);
/*
 * Parse a restore Replace option keyword (per the ReplaceOptions[]
 * table) and store its token value into item->value.
 * Unrecognized keywords are a fatal scan error.
 */
1285 void store_replace(LEX *lc, RES_ITEM *item, int index, int pass)
1288 token = lex_get_token(lc, T_NAME);
1289 /* Scan Replacement options */
/* Case-insensitive match against the terminated ReplaceOptions[] table */
1290 for (i=0; ReplaceOptions[i].name; i++) {
1291 if (strcasecmp(lc->str, ReplaceOptions[i].name) == 0) {
1292 *(int *)(item->value) = ReplaceOptions[i].token;
/* Reached on no match (table exhausted) */
1298 scan_err1(lc, "Expected a Restore replacement option, got: %s", lc->str);
/* Record that this item was supplied, for required-item checking */
1301 set_bit(index, res_all.hdr.item_present);
1305 * Store ACL (access control list)
1308 void store_acl(LEX *lc, RES_ITEM *item, int index, int pass)
1313 token = lex_get_token(lc, T_NAME);
1315 if (((alist **)item->value)[item->code] == NULL) {
1316 ((alist **)item->value)[item->code] = New(alist(10, owned_by_alist));
1317 Dmsg1(900, "Defined new ACL alist at %d\n", item->code);
1319 ((alist **)item->value)[item->code]->append(bstrdup(lc->str));
1320 Dmsg2(900, "Appended to %d %s\n", item->code, lc->str);
1322 token = lex_get_token(lc, T_ALL);
1323 if (token == T_COMMA) {
1324 continue; /* get another ACL */
1328 set_bit(index, res_all.hdr.item_present);