2 * Main configuration file parser for Bacula Directors,
3 * some parts may be split into separate files such as
4 * the schedule configuration (run_config.c).
6 * Note, the configuration file parser consists of three parts
8 * 1. The generic lexical scanner in lib/lex.c and lib/lex.h
10 * 2. The generic config scanner in lib/parse_config.c and
12 * These files contain the parser code, some utility
13 * routines, and the common store routines (name, int,
16 * 3. The daemon specific file, which contains the Resource
17 * definitions as well as any specific store routines
18 * for the resource records.
20 * Kern Sibbald, January MM
25 Copyright (C) 2000-2005 Kern Sibbald
27 This program is free software; you can redistribute it and/or
28 modify it under the terms of the GNU General Public License as
29 published by the Free Software Foundation; either version 2 of
30 the License, or (at your option) any later version.
32 This program is distributed in the hope that it will be useful,
33 but WITHOUT ANY WARRANTY; without even the implied warranty of
34 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
35 General Public License for more details.
37 You should have received a copy of the GNU General Public
38 License along with this program; if not, write to the Free
39 Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
47 /* Define the first and last resource ID record
48 * types. Note, these should be unique for each
49 * daemon though not a requirement.
/* NOTE(review): the leading number on every line is the original file's line
 * number (extraction artifact); gaps mark lines lost in extraction, e.g. the
 * matching "int r_last = R_LAST;" (original line 52) -- verify upstream. */
51 int r_first = R_FIRST;
/* Static array of list heads, one slot per resource type; exported via
 * res_head for the generic parser in lib/parse_conf.c. */
53 static RES *sres_head[R_LAST - R_FIRST + 1];
54 RES **res_head = sres_head;
56 /* Imported subroutines */
/* Store routines defined in other director modules (presumably the schedule
 * and include/exclude config files mentioned in the file header -- verify). */
57 extern void store_run(LEX *lc, RES_ITEM *item, int index, int pass);
58 extern void store_finc(LEX *lc, RES_ITEM *item, int index, int pass);
59 extern void store_inc(LEX *lc, RES_ITEM *item, int index, int pass);
62 /* Forward referenced subroutines */
/* Daemon-specific store routines implemented later in this file; each parses
 * one directive value from the lexer (lc) into the RES_ITEM slot. */
64 void store_jobtype(LEX *lc, RES_ITEM *item, int index, int pass);
65 void store_level(LEX *lc, RES_ITEM *item, int index, int pass);
66 void store_replace(LEX *lc, RES_ITEM *item, int index, int pass);
67 void store_acl(LEX *lc, RES_ITEM *item, int index, int pass);
68 static void store_device(LEX *lc, RES_ITEM *item, int index, int pass);
71 /* We build the current resource here as we are
72 * scanning the resource configuration definition,
73 * then move it to allocated memory when the resource
/* NOTE(review): the declaration of the static staging union "res_all"
 * (original lines ~75-76) was lost in this extraction. */
77 int res_all_size = sizeof(res_all);
80 /* Definition of records permitted within each
81 * resource with the routine to process the record
82 * information. NOTE! quoted names must be in lower case.
87 * name handler value code flags default_value
/* Directives accepted inside a Director {} resource. */
89 static RES_ITEM dir_items[] = {
90 {"name", store_name, ITEM(res_dir.hdr.name), 0, ITEM_REQUIRED, 0},
91 {"description", store_str, ITEM(res_dir.hdr.desc), 0, 0, 0},
92 {"messages", store_res, ITEM(res_dir.messages), R_MSGS, 0, 0},
93 {"dirport", store_addresses_port, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101},
94 {"diraddress", store_addresses_address, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101},
95 {"diraddresses",store_addresses, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101},
96 {"queryfile", store_dir, ITEM(res_dir.query_file), 0, ITEM_REQUIRED, 0},
97 {"workingdirectory", store_dir, ITEM(res_dir.working_directory), 0, ITEM_REQUIRED, 0},
98 {"scriptsdirectory", store_dir, ITEM(res_dir.scripts_directory), 0, 0, 0},
99 {"piddirectory",store_dir, ITEM(res_dir.pid_directory), 0, ITEM_REQUIRED, 0},
100 {"subsysdirectory", store_dir, ITEM(res_dir.subsys_directory), 0, 0, 0},
101 {"requiressl", store_yesno, ITEM(res_dir.require_ssl), 1, ITEM_DEFAULT, 0},
102 {"enablessl", store_yesno, ITEM(res_dir.enable_ssl), 1, ITEM_DEFAULT, 0},
103 {"maximumconcurrentjobs", store_pint, ITEM(res_dir.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
104 {"password", store_password, ITEM(res_dir.password), 0, ITEM_REQUIRED, 0},
/* Timeouts default to 30 minutes (60 * 30 seconds). */
105 {"fdconnecttimeout", store_time,ITEM(res_dir.FDConnectTimeout), 0, ITEM_DEFAULT, 60 * 30},
106 {"sdconnecttimeout", store_time,ITEM(res_dir.SDConnectTimeout), 0, ITEM_DEFAULT, 60 * 30},
107 {NULL, NULL, NULL, 0, 0, 0}
/* NOTE(review): closing "};" (original line ~108) lost in extraction. */
113 * name handler value code flags default_value
/* Directives accepted inside a Console {} resource; the *acl entries all
 * share the ACL_lists array, distinguished by their code column. */
115 static RES_ITEM con_items[] = {
116 {"name", store_name, ITEM(res_con.hdr.name), 0, ITEM_REQUIRED, 0},
117 {"description", store_str, ITEM(res_con.hdr.desc), 0, 0, 0},
118 {"enablessl", store_yesno, ITEM(res_con.enable_ssl), 1, ITEM_DEFAULT, 0},
119 {"password", store_password, ITEM(res_con.password), 0, ITEM_REQUIRED, 0},
120 {"jobacl", store_acl, ITEM(res_con.ACL_lists), Job_ACL, 0, 0},
121 {"clientacl", store_acl, ITEM(res_con.ACL_lists), Client_ACL, 0, 0},
122 {"storageacl", store_acl, ITEM(res_con.ACL_lists), Storage_ACL, 0, 0},
123 {"scheduleacl", store_acl, ITEM(res_con.ACL_lists), Schedule_ACL, 0, 0},
124 {"runacl", store_acl, ITEM(res_con.ACL_lists), Run_ACL, 0, 0},
125 {"poolacl", store_acl, ITEM(res_con.ACL_lists), Pool_ACL, 0, 0},
126 {"commandacl", store_acl, ITEM(res_con.ACL_lists), Command_ACL, 0, 0},
127 {"filesetacl", store_acl, ITEM(res_con.ACL_lists), FileSet_ACL, 0, 0},
128 {"catalogacl", store_acl, ITEM(res_con.ACL_lists), Catalog_ACL, 0, 0},
129 {NULL, NULL, NULL, 0, 0, 0}
/* NOTE(review): closing "};" lost in extraction. */
134 * Client or File daemon resource
136 * name handler value code flags default_value
/* Directives accepted inside a Client {} resource.  The fd* spellings are
 * aliases that store into the same fields as address/password. */
139 static RES_ITEM cli_items[] = {
140 {"name", store_name, ITEM(res_client.hdr.name), 0, ITEM_REQUIRED, 0},
141 {"description", store_str, ITEM(res_client.hdr.desc), 0, 0, 0},
142 {"address", store_str, ITEM(res_client.address), 0, ITEM_REQUIRED, 0},
143 {"fdaddress", store_str, ITEM(res_client.address), 0, 0, 0},
144 {"fdport", store_pint, ITEM(res_client.FDport), 0, ITEM_DEFAULT, 9102},
145 {"password", store_password, ITEM(res_client.password), 0, ITEM_REQUIRED, 0},
146 {"fdpassword", store_password, ITEM(res_client.password), 0, 0, 0},
147 {"catalog", store_res, ITEM(res_client.catalog), R_CATALOG, ITEM_REQUIRED, 0},
/* Retention defaults: files 60 days, jobs 180 days (values in seconds). */
148 {"fileretention", store_time, ITEM(res_client.FileRetention), 0, ITEM_DEFAULT, 60*60*24*60},
149 {"jobretention", store_time, ITEM(res_client.JobRetention), 0, ITEM_DEFAULT, 60*60*24*180},
150 {"autoprune", store_yesno, ITEM(res_client.AutoPrune), 1, ITEM_DEFAULT, 1},
151 {"enablessl", store_yesno, ITEM(res_client.enable_ssl), 1, ITEM_DEFAULT, 0},
152 {"maximumconcurrentjobs", store_pint, ITEM(res_client.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
153 {NULL, NULL, NULL, 0, 0, 0}
/* NOTE(review): closing "};" lost in extraction. */
156 /* Storage daemon resource
158 * name handler value code flags default_value
/* Directives accepted inside a Storage {} resource.  sd* spellings alias
 * the plain address/password fields. */
160 static RES_ITEM store_items[] = {
161 {"name", store_name, ITEM(res_store.hdr.name), 0, ITEM_REQUIRED, 0},
162 {"description", store_str, ITEM(res_store.hdr.desc), 0, 0, 0},
163 {"sdport", store_pint, ITEM(res_store.SDport), 0, ITEM_DEFAULT, 9103},
164 {"address", store_str, ITEM(res_store.address), 0, ITEM_REQUIRED, 0},
165 {"sdaddress", store_str, ITEM(res_store.address), 0, 0, 0},
166 {"password", store_password, ITEM(res_store.password), 0, ITEM_REQUIRED, 0},
167 {"sdpassword", store_password, ITEM(res_store.password), 0, 0, 0},
168 {"device", store_device, ITEM(res_store.device), R_DEVICE, ITEM_REQUIRED, 0},
169 {"mediatype", store_strname, ITEM(res_store.media_type), 0, ITEM_REQUIRED, 0},
170 {"autochanger", store_yesno, ITEM(res_store.autochanger), 1, ITEM_DEFAULT, 0},
171 {"enablessl", store_yesno, ITEM(res_store.enable_ssl), 1, ITEM_DEFAULT, 0},
172 {"maximumconcurrentjobs", store_pint, ITEM(res_store.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
173 {"sddport", store_pint, ITEM(res_store.SDDport), 0, 0, 0}, /* deprecated */
174 {NULL, NULL, NULL, 0, 0, 0}
/* NOTE(review): closing "};" lost in extraction. */
178 * Catalog Resource Directives
180 * name handler value code flags default_value
/* Directives accepted inside a Catalog {} resource (database connection). */
182 static RES_ITEM cat_items[] = {
183 {"name", store_name, ITEM(res_cat.hdr.name), 0, ITEM_REQUIRED, 0},
184 {"description", store_str, ITEM(res_cat.hdr.desc), 0, 0, 0},
185 {"address", store_str, ITEM(res_cat.db_address), 0, 0, 0},
186 {"dbaddress", store_str, ITEM(res_cat.db_address), 0, 0, 0},
187 {"dbport", store_pint, ITEM(res_cat.db_port), 0, 0, 0},
188 /* keep this password as store_str for the moment */
189 {"password", store_str, ITEM(res_cat.db_password), 0, 0, 0},
190 {"dbpassword", store_str, ITEM(res_cat.db_password), 0, 0, 0},
191 {"user", store_str, ITEM(res_cat.db_user), 0, 0, 0},
192 {"dbname", store_str, ITEM(res_cat.db_name), 0, ITEM_REQUIRED, 0},
193 {"dbsocket", store_str, ITEM(res_cat.db_socket), 0, 0, 0},
194 /* Turned off for the moment */
195 {"multipleconnections", store_yesno, ITEM(res_cat.mult_db_connections), 0, 0, 0},
196 {NULL, NULL, NULL, 0, 0, 0}
/* NOTE(review): closing "};" lost in extraction. */
200 * Job Resource Directives
202 * name handler value code flags default_value
/* Directives accepted inside Job {} and JobDefs {} resources (this table is
 * registered for both types in resources[] below), hence non-static. */
204 RES_ITEM job_items[] = {
205 {"name", store_name, ITEM(res_job.hdr.name), 0, ITEM_REQUIRED, 0},
206 {"description", store_str, ITEM(res_job.hdr.desc), 0, 0, 0},
207 {"type", store_jobtype, ITEM(res_job.JobType), 0, ITEM_REQUIRED, 0},
208 {"level", store_level, ITEM(res_job.JobLevel), 0, 0, 0},
209 {"messages", store_res, ITEM(res_job.messages), R_MSGS, ITEM_REQUIRED, 0},
210 {"storage", store_alist_res, ITEM(res_job.storage), R_STORAGE, ITEM_REQUIRED, 0},
211 {"pool", store_res, ITEM(res_job.pool), R_POOL, ITEM_REQUIRED, 0},
/* Level-specific pool overrides (optional). */
212 {"fullbackuppool", store_res, ITEM(res_job.full_pool), R_POOL, 0, 0},
213 {"incrementalbackuppool", store_res, ITEM(res_job.inc_pool), R_POOL, 0, 0},
214 {"differentialbackuppool", store_res, ITEM(res_job.dif_pool), R_POOL, 0, 0},
215 {"client", store_res, ITEM(res_job.client), R_CLIENT, ITEM_REQUIRED, 0},
216 {"fileset", store_res, ITEM(res_job.fileset), R_FILESET, ITEM_REQUIRED, 0},
217 {"schedule", store_res, ITEM(res_job.schedule), R_SCHEDULE, 0, 0},
218 {"verifyjob", store_res, ITEM(res_job.verify_job), R_JOB, 0, 0},
219 {"jobdefs", store_res, ITEM(res_job.jobdefs), R_JOBDEFS, 0, 0},
220 {"run", store_alist_str, ITEM(res_job.run_cmds), 0, 0, 0},
221 {"where", store_dir, ITEM(res_job.RestoreWhere), 0, 0, 0},
222 {"bootstrap",store_dir, ITEM(res_job.RestoreBootstrap), 0, 0, 0},
223 {"writebootstrap",store_dir, ITEM(res_job.WriteBootstrap), 0, 0, 0},
224 {"replace", store_replace, ITEM(res_job.replace), 0, ITEM_DEFAULT, REPLACE_ALWAYS},
225 {"maxruntime", store_time, ITEM(res_job.MaxRunTime), 0, 0, 0},
226 {"fullmaxwaittime", store_time, ITEM(res_job.FullMaxWaitTime), 0, 0, 0},
227 {"incrementalmaxwaittime", store_time, ITEM(res_job.IncMaxWaitTime), 0, 0, 0},
228 {"differentialmaxwaittime", store_time, ITEM(res_job.DiffMaxWaitTime), 0, 0, 0},
229 {"maxwaittime", store_time, ITEM(res_job.MaxWaitTime), 0, 0, 0},
230 {"maxstartdelay",store_time, ITEM(res_job.MaxStartDelay), 0, 0, 0},
231 {"jobretention", store_time, ITEM(res_job.JobRetention), 0, 0, 0},
232 {"prefixlinks", store_yesno, ITEM(res_job.PrefixLinks), 1, ITEM_DEFAULT, 0},
233 {"prunejobs", store_yesno, ITEM(res_job.PruneJobs), 1, ITEM_DEFAULT, 0},
234 {"prunefiles", store_yesno, ITEM(res_job.PruneFiles), 1, ITEM_DEFAULT, 0},
235 {"prunevolumes",store_yesno, ITEM(res_job.PruneVolumes), 1, ITEM_DEFAULT, 0},
236 {"spoolattributes",store_yesno, ITEM(res_job.SpoolAttributes), 1, ITEM_DEFAULT, 0},
237 {"spooldata", store_yesno, ITEM(res_job.spool_data), 1, ITEM_DEFAULT, 0},
238 {"rerunfailedlevels", store_yesno, ITEM(res_job.rerun_failed_levels), 1, ITEM_DEFAULT, 0},
/* Run-script hooks, executed on the director or (client*) the file daemon. */
239 {"runbeforejob", store_str, ITEM(res_job.RunBeforeJob), 0, 0, 0},
240 {"runafterjob", store_str, ITEM(res_job.RunAfterJob), 0, 0, 0},
241 {"runafterfailedjob", store_str, ITEM(res_job.RunAfterFailedJob), 0, 0, 0},
242 {"clientrunbeforejob", store_str, ITEM(res_job.ClientRunBeforeJob), 0, 0, 0},
243 {"clientrunafterjob", store_str, ITEM(res_job.ClientRunAfterJob), 0, 0, 0},
244 {"maximumconcurrentjobs", store_pint, ITEM(res_job.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
245 {"rescheduleonerror", store_yesno, ITEM(res_job.RescheduleOnError), 1, ITEM_DEFAULT, 0},
246 {"rescheduleinterval", store_time, ITEM(res_job.RescheduleInterval), 0, ITEM_DEFAULT, 60 * 30},
247 {"rescheduletimes", store_pint, ITEM(res_job.RescheduleTimes), 0, 0, 0},
248 {"priority", store_pint, ITEM(res_job.Priority), 0, ITEM_DEFAULT, 10},
249 {"writepartafterjob", store_yesno, ITEM(res_job.write_part_after_job), 1, ITEM_DEFAULT, 0},
250 {NULL, NULL, NULL, 0, 0, 0}
/* NOTE(review): closing "};" lost in extraction. */
255 * name handler value code flags default_value
/* Directives accepted inside a FileSet {} resource; include/exclude take
 * brace-delimited sub-blocks, hence ITEM_NO_EQUALS and a NULL value slot
 * (store_inc distinguishes them by the code column: 0=include, 1=exclude). */
257 static RES_ITEM fs_items[] = {
258 {"name", store_name, ITEM(res_fs.hdr.name), 0, ITEM_REQUIRED, 0},
259 {"description", store_str, ITEM(res_fs.hdr.desc), 0, 0, 0},
260 {"include", store_inc, NULL, 0, ITEM_NO_EQUALS, 0},
261 {"exclude", store_inc, NULL, 1, ITEM_NO_EQUALS, 0},
262 {"ignorefilesetchanges", store_yesno, ITEM(res_fs.ignore_fs_changes), 1, ITEM_DEFAULT, 0},
263 {NULL, NULL, NULL, 0, 0, 0}
/* NOTE(review): closing "};" lost in extraction. */
266 /* Schedule -- see run_conf.c */
269 * name handler value code flags default_value
/* Directives accepted inside a Schedule {} resource; "run" lines are parsed
 * by the imported store_run(). */
271 static RES_ITEM sch_items[] = {
272 {"name", store_name, ITEM(res_sch.hdr.name), 0, ITEM_REQUIRED, 0},
273 {"description", store_str, ITEM(res_sch.hdr.desc), 0, 0, 0},
274 {"run", store_run, ITEM(res_sch.run), 0, 0, 0},
275 {NULL, NULL, NULL, 0, 0, 0}
/* NOTE(review): closing "};" lost in extraction. */
280 * name handler value code flags default_value
/* Directives accepted inside a Pool {} resource. */
282 static RES_ITEM pool_items[] = {
283 {"name", store_name, ITEM(res_pool.hdr.name), 0, ITEM_REQUIRED, 0},
284 {"description", store_str, ITEM(res_pool.hdr.desc), 0, 0, 0},
285 {"pooltype", store_strname, ITEM(res_pool.pool_type), 0, ITEM_REQUIRED, 0},
286 {"labelformat", store_strname, ITEM(res_pool.label_format), 0, 0, 0},
287 {"labeltype", store_label, ITEM(res_pool.LabelType), 0, 0, 0},
288 {"cleaningprefix", store_strname, ITEM(res_pool.cleaning_prefix), 0, 0, 0},
289 {"usecatalog", store_yesno, ITEM(res_pool.use_catalog), 1, ITEM_DEFAULT, 1},
290 {"usevolumeonce", store_yesno, ITEM(res_pool.use_volume_once),1, 0, 0},
291 {"purgeoldestvolume", store_yesno, ITEM(res_pool.purge_oldest_volume), 1, 0, 0},
292 {"recycleoldestvolume", store_yesno, ITEM(res_pool.recycle_oldest_volume), 1, 0, 0},
293 {"recyclecurrentvolume", store_yesno, ITEM(res_pool.recycle_current_volume), 1, 0, 0},
294 {"maximumvolumes", store_pint, ITEM(res_pool.max_volumes), 0, 0, 0},
295 {"maximumvolumejobs", store_pint, ITEM(res_pool.MaxVolJobs), 0, 0, 0},
296 {"maximumvolumefiles", store_pint, ITEM(res_pool.MaxVolFiles), 0, 0, 0},
297 {"maximumvolumebytes", store_size, ITEM(res_pool.MaxVolBytes), 0, 0, 0},
298 {"acceptanyvolume", store_yesno, ITEM(res_pool.accept_any_volume), 1, ITEM_DEFAULT, 1},
299 {"catalogfiles", store_yesno, ITEM(res_pool.catalog_files), 1, ITEM_DEFAULT, 1},
/* Volume retention defaults to one year (value in seconds). */
300 {"volumeretention", store_time, ITEM(res_pool.VolRetention), 0, ITEM_DEFAULT, 60*60*24*365},
301 {"volumeuseduration", store_time, ITEM(res_pool.VolUseDuration), 0, 0, 0},
302 {"autoprune", store_yesno, ITEM(res_pool.AutoPrune), 1, ITEM_DEFAULT, 1},
303 {"recycle", store_yesno, ITEM(res_pool.Recycle), 1, ITEM_DEFAULT, 1},
304 {NULL, NULL, NULL, 0, 0, 0}
/* NOTE(review): closing "};" lost in extraction. */
309 * name handler value code flags default_value
/* Directives accepted inside a Counter {} resource. */
311 static RES_ITEM counter_items[] = {
312 {"name", store_name, ITEM(res_counter.hdr.name), 0, ITEM_REQUIRED, 0},
313 {"description", store_str, ITEM(res_counter.hdr.desc), 0, 0, 0},
314 {"minimum", store_int, ITEM(res_counter.MinValue), 0, ITEM_DEFAULT, 0},
315 {"maximum", store_pint, ITEM(res_counter.MaxValue), 0, ITEM_DEFAULT, INT32_MAX},
316 {"wrapcounter", store_res, ITEM(res_counter.WrapCounter), R_COUNTER, 0, 0},
317 {"catalog", store_res, ITEM(res_counter.Catalog), R_CATALOG, 0, 0},
318 {NULL, NULL, NULL, 0, 0, 0}
/* NOTE(review): closing "};" lost in extraction. */
322 /* Message resource */
/* Shared message-resource directive table, defined in the common library. */
323 extern RES_ITEM msgs_items[];
326 * This is the master resource definition.
327 * It must have one item for each of the resources.
329 * NOTE!!! keep it in the same order as the R_codes
330 * or eliminate all resources[rindex].name
332 * name items rcode res_head
/* Maps each resource keyword to its directive table and R_* type code.
 * Note "jobdefs" reuses job_items; "device" has no table because the
 * device info comes from the storage daemon. */
334 RES_TABLE resources[] = {
335 {"director", dir_items, R_DIRECTOR},
336 {"client", cli_items, R_CLIENT},
337 {"job", job_items, R_JOB},
338 {"storage", store_items, R_STORAGE},
339 {"catalog", cat_items, R_CATALOG},
340 {"schedule", sch_items, R_SCHEDULE},
341 {"fileset", fs_items, R_FILESET},
342 {"pool", pool_items, R_POOL},
343 {"messages", msgs_items, R_MSGS},
344 {"counter", counter_items, R_COUNTER},
345 {"console", con_items, R_CONSOLE},
346 {"jobdefs", job_items, R_JOBDEFS},
347 {"device", NULL, R_DEVICE}, /* info obtained from SD */
/* NOTE(review): NULL sentinel entry and closing "};" lost in extraction. */
352 /* Keywords (RHS) permitted in Job Level records
354 * level_name level job_type
/* Table searched by store_level() and level_to_str(); each level is only
 * valid for the job type in the third column. */
356 struct s_jl joblevels[] = {
357 {"Full", L_FULL, JT_BACKUP},
358 {"Base", L_BASE, JT_BACKUP},
359 {"Incremental", L_INCREMENTAL, JT_BACKUP},
360 {"Differential", L_DIFFERENTIAL, JT_BACKUP},
361 {"Since", L_SINCE, JT_BACKUP},
362 {"Catalog", L_VERIFY_CATALOG, JT_VERIFY},
363 {"InitCatalog", L_VERIFY_INIT, JT_VERIFY},
364 {"VolumeToCatalog", L_VERIFY_VOLUME_TO_CATALOG, JT_VERIFY},
365 {"DiskToCatalog", L_VERIFY_DISK_TO_CATALOG, JT_VERIFY},
366 {"Data", L_VERIFY_DATA, JT_VERIFY},
/* Admin and Restore jobs have no meaningful level; blank names. */
367 {" ", L_NONE, JT_ADMIN},
368 {" ", L_NONE, JT_RESTORE},
/* NOTE(review): NULL-name terminator entry and closing "};" (original line
 * ~369) lost in extraction -- level_to_str() depends on that terminator. */
372 /* Keywords (RHS) permitted in Job type records
/* Table mapping the Type= keyword to JT_* codes (used by store_jobtype).
 * NOTE(review): original line 378 (between "backup" and "verify") and the
 * terminator/closing "};" were lost in extraction. */
376 struct s_jt jobtypes[] = {
377 {"backup", JT_BACKUP},
379 {"verify", JT_VERIFY},
380 {"restore", JT_RESTORE},
385 /* Options permitted in Restore replace= */
/* Keyword table for the Replace= directive (used by store_replace). */
386 struct s_kw ReplaceOptions[] = {
387 {"always", REPLACE_ALWAYS},
388 {"ifnewer", REPLACE_IFNEWER},
389 {"ifolder", REPLACE_IFOLDER},
390 {"never", REPLACE_NEVER},
/* NOTE(review): terminator entry and closing "};" lost in extraction. */
394 const char *level_to_str(int level)
397 static char level_no[30];
398 const char *str = level_no;
400 bsnprintf(level_no, sizeof(level_no), "%c (%d)", level, level); /* default if not found */
401 for (i=0; joblevels[i].level_name; i++) {
402 if (level == joblevels[i].level) {
403 str = joblevels[i].level_name;
410 /* Dump contents of resource */
/* Prints a human-readable dump of one resource via the caller-supplied
 * sendit() callback; a negative type dumps without following hdr.next.
 * NOTE(review): this extraction dropped many original lines (opening brace,
 * the recursion flag handling, the switch statement and its case labels,
 * closing braces).  The code below is left byte-identical; the comments
 * mark which resource type each visible section belongs to, as inferred
 * from the sendit() format strings. */
411 void dump_resource(int type, RES *reshdr, void sendit(void *sock, const char *fmt, ...), void *sock)
413 URES *res = (URES *)reshdr;
415 char ed1[100], ed2[100];
/* Emitted when reshdr is NULL; the guard test itself was lost. */
419 sendit(sock, "No %s resource defined\n", res_to_str(type));
/* Negative type: dump this one resource only, don't chain to hdr.next. */
422 if (type < 0) { /* no recursion */
/* -- R_DIRECTOR (case label lost) -- */
428 sendit(sock, "Director: name=%s MaxJobs=%d FDtimeout=%s SDtimeout=%s\n",
429 reshdr->name, res->res_dir.MaxConcurrentJobs,
430 edit_uint64(res->res_dir.FDConnectTimeout, ed1),
431 edit_uint64(res->res_dir.SDConnectTimeout, ed2));
432 if (res->res_dir.query_file) {
433 sendit(sock, " query_file=%s\n", res->res_dir.query_file);
435 if (res->res_dir.messages) {
436 sendit(sock, " --> ");
437 dump_resource(-R_MSGS, (RES *)res->res_dir.messages, sendit, sock);
/* -- R_CONSOLE (case label lost) -- */
441 sendit(sock, "Console: name=%s SSL=%d\n",
442 res->res_con.hdr.name, res->res_con.enable_ssl);
/* -- R_COUNTER (case label lost) -- */
445 if (res->res_counter.WrapCounter) {
446 sendit(sock, "Counter: name=%s min=%d max=%d cur=%d wrapcntr=%s\n",
447 res->res_counter.hdr.name, res->res_counter.MinValue,
448 res->res_counter.MaxValue, res->res_counter.CurrentValue,
449 res->res_counter.WrapCounter->hdr.name);
/* else branch (counter without a wrap counter) -- "} else {" lost. */
451 sendit(sock, "Counter: name=%s min=%d max=%d\n",
452 res->res_counter.hdr.name, res->res_counter.MinValue,
453 res->res_counter.MaxValue);
455 if (res->res_counter.Catalog) {
456 sendit(sock, " --> ");
457 dump_resource(-R_CATALOG, (RES *)res->res_counter.Catalog, sendit, sock);
/* -- R_CLIENT (case label lost) -- */
462 sendit(sock, "Client: name=%s address=%s FDport=%d MaxJobs=%u\n",
463 res->res_client.hdr.name, res->res_client.address, res->res_client.FDport,
464 res->res_client.MaxConcurrentJobs);
465 sendit(sock, " JobRetention=%s FileRetention=%s AutoPrune=%d\n",
466 edit_utime(res->res_client.JobRetention, ed1, sizeof(ed1)),
467 edit_utime(res->res_client.FileRetention, ed2, sizeof(ed2)),
468 res->res_client.AutoPrune);
469 if (res->res_client.catalog) {
470 sendit(sock, " --> ");
471 dump_resource(-R_CATALOG, (RES *)res->res_client.catalog, sendit, sock);
/* -- R_DEVICE (case label and the declaration of "dev" lost) -- */
477 sendit(sock, "Device: name=%s ok=%d num_writers=%d max_writers=%d\n"
478 " reserved=%d open=%d append=%d read=%d labeled=%d offline=%d autochgr=%d\n"
479 " poolid=%s volname=%s MediaType=%s\n",
480 dev->hdr.name, dev->found, dev->num_writers, dev->max_writers,
481 dev->reserved, dev->open, dev->append, dev->read, dev->labeled,
482 dev->offline, dev->autochanger,
483 edit_uint64(dev->PoolId, ed1),
484 dev->VolumeName, dev->MediaType);
/* -- R_STORAGE (case label lost) -- */
487 sendit(sock, "Storage: name=%s address=%s SDport=%d MaxJobs=%u\n"
488 " DeviceName=%s MediaType=%s StorageId=%s\n",
489 res->res_store.hdr.name, res->res_store.address, res->res_store.SDport,
490 res->res_store.MaxConcurrentJobs,
491 res->res_store.dev_name(),
492 res->res_store.media_type,
493 edit_int64(res->res_store.StorageId, ed1));
/* -- R_CATALOG (case label lost) -- */
496 sendit(sock, "Catalog: name=%s address=%s DBport=%d db_name=%s\n"
497 " db_user=%s MutliDBConn=%d\n",
498 res->res_cat.hdr.name, NPRT(res->res_cat.db_address),
499 res->res_cat.db_port, res->res_cat.db_name, NPRT(res->res_cat.db_user),
500 res->res_cat.mult_db_connections);
/* -- R_JOB / R_JOBDEFS (case labels lost) -- */
504 sendit(sock, "%s: name=%s JobType=%d level=%s Priority=%d MaxJobs=%u\n",
505 type == R_JOB ? "Job" : "JobDefs",
506 res->res_job.hdr.name, res->res_job.JobType,
507 level_to_str(res->res_job.JobLevel), res->res_job.Priority,
508 res->res_job.MaxConcurrentJobs);
509 sendit(sock, " Resched=%d Times=%d Interval=%s Spool=%d WritePartAfterJob=%d\n",
510 res->res_job.RescheduleOnError, res->res_job.RescheduleTimes,
511 edit_uint64_with_commas(res->res_job.RescheduleInterval, ed1),
512 res->res_job.spool_data, res->res_job.write_part_after_job);
513 if (res->res_job.client) {
514 sendit(sock, " --> ");
515 dump_resource(-R_CLIENT, (RES *)res->res_job.client, sendit, sock);
517 if (res->res_job.fileset) {
518 sendit(sock, " --> ");
519 dump_resource(-R_FILESET, (RES *)res->res_job.fileset, sendit, sock);
521 if (res->res_job.schedule) {
522 sendit(sock, " --> ");
523 dump_resource(-R_SCHEDULE, (RES *)res->res_job.schedule, sendit, sock);
525 if (res->res_job.RestoreWhere) {
526 sendit(sock, " --> Where=%s\n", NPRT(res->res_job.RestoreWhere));
528 if (res->res_job.RestoreBootstrap) {
529 sendit(sock, " --> Bootstrap=%s\n", NPRT(res->res_job.RestoreBootstrap));
531 if (res->res_job.RunBeforeJob) {
532 sendit(sock, " --> RunBefore=%s\n", NPRT(res->res_job.RunBeforeJob));
534 if (res->res_job.RunAfterJob) {
535 sendit(sock, " --> RunAfter=%s\n", NPRT(res->res_job.RunAfterJob));
537 if (res->res_job.RunAfterFailedJob) {
538 sendit(sock, " --> RunAfterFailed=%s\n", NPRT(res->res_job.RunAfterFailedJob));
540 if (res->res_job.WriteBootstrap) {
541 sendit(sock, " --> WriteBootstrap=%s\n", NPRT(res->res_job.WriteBootstrap));
543 if (res->res_job.storage) {
/* Declaration of the "store" loop variable lost (original line ~544). */
545 foreach_alist(store, res->res_job.storage) {
546 sendit(sock, " --> ");
547 dump_resource(-R_STORAGE, (RES *)store, sendit, sock);
550 if (res->res_job.pool) {
551 sendit(sock, " --> ");
552 dump_resource(-R_POOL, (RES *)res->res_job.pool, sendit, sock);
554 if (res->res_job.full_pool) {
555 sendit(sock, " --> ");
556 dump_resource(-R_POOL, (RES *)res->res_job.full_pool, sendit, sock);
558 if (res->res_job.inc_pool) {
559 sendit(sock, " --> ");
560 dump_resource(-R_POOL, (RES *)res->res_job.inc_pool, sendit, sock);
562 if (res->res_job.dif_pool) {
563 sendit(sock, " --> ");
564 dump_resource(-R_POOL, (RES *)res->res_job.dif_pool, sendit, sock);
566 if (res->res_job.verify_job) {
567 sendit(sock, " --> ");
568 dump_resource(-type, (RES *)res->res_job.verify_job, sendit, sock);
570 if (res->res_job.run_cmds) {
/* Declaration of the "runcmd" loop variable lost (original line ~571). */
572 foreach_alist(runcmd, res->res_job.run_cmds) {
573 sendit(sock, " --> Run=%s\n", runcmd);
576 if (res->res_job.messages) {
577 sendit(sock, " --> ");
578 dump_resource(-R_MSGS, (RES *)res->res_job.messages, sendit, sock);
/* -- R_FILESET (case label and declarations of i, j, k lost) -- */
584 sendit(sock, "FileSet: name=%s\n", res->res_fs.hdr.name);
585 for (i=0; i<res->res_fs.num_includes; i++) {
586 INCEXE *incexe = res->res_fs.include_items[i];
587 for (j=0; j<incexe->num_opts; j++) {
588 FOPTS *fo = incexe->opts_list[j];
589 sendit(sock, " O %s\n", fo->opts);
590 for (k=0; k<fo->regex.size(); k++) {
591 sendit(sock, " R %s\n", fo->regex.get(k));
593 for (k=0; k<fo->regexdir.size(); k++) {
594 sendit(sock, " RD %s\n", fo->regexdir.get(k));
596 for (k=0; k<fo->regexfile.size(); k++) {
597 sendit(sock, " RF %s\n", fo->regexfile.get(k));
599 for (k=0; k<fo->wild.size(); k++) {
600 sendit(sock, " W %s\n", fo->wild.get(k));
602 for (k=0; k<fo->wilddir.size(); k++) {
603 sendit(sock, " WD %s\n", fo->wilddir.get(k));
605 for (k=0; k<fo->wildfile.size(); k++) {
606 sendit(sock, " WF %s\n", fo->wildfile.get(k));
608 for (k=0; k<fo->base.size(); k++) {
609 sendit(sock, " B %s\n", fo->base.get(k));
611 for (k=0; k<fo->fstype.size(); k++) {
612 sendit(sock, " X %s\n", fo->fstype.get(k));
/* Presumably guarded by if (fo->reader) / if (fo->writer) on the lost
 * lines 613-617 -- verify against upstream source. */
615 sendit(sock, " D %s\n", fo->reader);
618 sendit(sock, " T %s\n", fo->writer);
620 sendit(sock, " N\n");
622 for (j=0; j<incexe->name_list.size(); j++) {
623 sendit(sock, " I %s\n", incexe->name_list.get(j));
625 if (incexe->name_list.size()) {
626 sendit(sock, " N\n");
/* Exclude items dumped with the "E" prefix. */
630 for (i=0; i<res->res_fs.num_excludes; i++) {
631 INCEXE *incexe = res->res_fs.exclude_items[i];
632 for (j=0; j<incexe->name_list.size(); j++) {
633 sendit(sock, " E %s\n", incexe->name_list.get(j));
635 if (incexe->name_list.size()) {
636 sendit(sock, " N\n");
/* -- R_SCHEDULE (case label lost) -- */
642 if (res->res_sch.run) {
644 RUN *run = res->res_sch.run;
645 char buf[1000], num[30];
646 sendit(sock, "Schedule: name=%s\n", res->res_sch.hdr.name);
/* Per-RUN loop (header lost): print the level then each time bitmap
 * (hour/mday/month/wday/wom/woy) as a space-separated list of set bits. */
651 sendit(sock, " --> Run Level=%s\n", level_to_str(run->level));
652 bstrncpy(buf, " hour=", sizeof(buf));
653 for (i=0; i<24; i++) {
654 if (bit_is_set(i, run->hour)) {
655 bsnprintf(num, sizeof(num), "%d ", i);
656 bstrncat(buf, num, sizeof(buf));
659 bstrncat(buf, "\n", sizeof(buf));
661 bstrncpy(buf, " mday=", sizeof(buf));
662 for (i=0; i<31; i++) {
663 if (bit_is_set(i, run->mday)) {
664 bsnprintf(num, sizeof(num), "%d ", i);
665 bstrncat(buf, num, sizeof(buf));
668 bstrncat(buf, "\n", sizeof(buf));
670 bstrncpy(buf, " month=", sizeof(buf));
671 for (i=0; i<12; i++) {
672 if (bit_is_set(i, run->month)) {
673 bsnprintf(num, sizeof(num), "%d ", i);
674 bstrncat(buf, num, sizeof(buf));
677 bstrncat(buf, "\n", sizeof(buf));
679 bstrncpy(buf, " wday=", sizeof(buf));
680 for (i=0; i<7; i++) {
681 if (bit_is_set(i, run->wday)) {
682 bsnprintf(num, sizeof(num), "%d ", i);
683 bstrncat(buf, num, sizeof(buf));
686 bstrncat(buf, "\n", sizeof(buf));
688 bstrncpy(buf, " wom=", sizeof(buf));
689 for (i=0; i<5; i++) {
690 if (bit_is_set(i, run->wom)) {
691 bsnprintf(num, sizeof(num), "%d ", i);
692 bstrncat(buf, num, sizeof(buf));
695 bstrncat(buf, "\n", sizeof(buf));
697 bstrncpy(buf, " woy=", sizeof(buf));
698 for (i=0; i<54; i++) {
699 if (bit_is_set(i, run->woy)) {
700 bsnprintf(num, sizeof(num), "%d ", i);
701 bstrncat(buf, num, sizeof(buf));
704 bstrncat(buf, "\n", sizeof(buf));
706 sendit(sock, " mins=%d\n", run->minute);
/* Dump the RUN's per-run overrides (pool/storage/messages); the guarding
 * if-tests were lost in extraction. */
708 sendit(sock, " --> ");
709 dump_resource(-R_POOL, (RES *)run->pool, sendit, sock);
712 sendit(sock, " --> ");
713 dump_resource(-R_STORAGE, (RES *)run->storage, sendit, sock);
716 sendit(sock, " --> ");
717 dump_resource(-R_MSGS, (RES *)run->msgs, sendit, sock);
719 /* If another Run record is chained in, go print it */
/* else branch: schedule with no Run records. */
725 sendit(sock, "Schedule: name=%s\n", res->res_sch.hdr.name);
/* -- R_POOL (case label lost) -- */
729 sendit(sock, "Pool: name=%s PoolType=%s\n", res->res_pool.hdr.name,
730 res->res_pool.pool_type);
731 sendit(sock, " use_cat=%d use_once=%d acpt_any=%d cat_files=%d\n",
732 res->res_pool.use_catalog, res->res_pool.use_volume_once,
733 res->res_pool.accept_any_volume, res->res_pool.catalog_files);
734 sendit(sock, " max_vols=%d auto_prune=%d VolRetention=%s\n",
735 res->res_pool.max_volumes, res->res_pool.AutoPrune,
736 edit_utime(res->res_pool.VolRetention, ed1, sizeof(ed1)));
737 sendit(sock, " VolUse=%s recycle=%d LabelFormat=%s\n",
738 edit_utime(res->res_pool.VolUseDuration, ed1, sizeof(ed1)),
739 res->res_pool.Recycle,
740 NPRT(res->res_pool.label_format));
741 sendit(sock, " CleaningPrefix=%s LabelType=%d\n",
742 NPRT(res->res_pool.cleaning_prefix), res->res_pool.LabelType);
743 sendit(sock, " RecyleOldest=%d PurgeOldest=%d MaxVolJobs=%d MaxVolFiles=%d\n",
744 res->res_pool.recycle_oldest_volume,
745 res->res_pool.purge_oldest_volume,
746 res->res_pool.MaxVolJobs, res->res_pool.MaxVolFiles);
/* -- R_MSGS (case label lost) -- */
749 sendit(sock, "Messages: name=%s\n", res->res_msgs.hdr.name);
750 if (res->res_msgs.mail_cmd)
751 sendit(sock, " mailcmd=%s\n", res->res_msgs.mail_cmd);
752 if (res->res_msgs.operator_cmd)
753 sendit(sock, " opcmd=%s\n", res->res_msgs.operator_cmd);
/* default: unknown resource type. */
756 sendit(sock, "Unknown resource type %d in dump_resource.\n", type);
/* After the switch: follow the hdr.next chain unless recursion was
 * suppressed by a negative type (the "recurse" flag setup was lost). */
759 if (recurse && res->res_dir.hdr.next) {
760 dump_resource(type, res->res_dir.hdr.next, sendit, sock);
765 * Free all the members of an INCEXE structure
767 static void free_incexe(INCEXE *incexe)
769 incexe->name_list.destroy();
770 for (int i=0; i<incexe->num_opts; i++) {
771 FOPTS *fopt = incexe->opts_list[i];
772 fopt->regex.destroy();
773 fopt->regexdir.destroy();
774 fopt->regexfile.destroy();
775 fopt->wild.destroy();
776 fopt->wilddir.destroy();
777 fopt->wildfile.destroy();
778 fopt->base.destroy();
779 fopt->fstype.destroy();
788 if (incexe->opts_list) {
789 free(incexe->opts_list);
795 * Free memory of resource -- called when daemon terminates.
796 * NB, we don't need to worry about freeing any references
797 * to other resources as they will be freed when that
798 * resource chain is traversed. Mainly we worry about freeing
799 * allocated strings (names).
/* NOTE(review): extraction dropped the opening brace, the switch statement
 * and its case labels, break statements and closing braces.  Code is left
 * byte-identical; comments mark which resource type each section frees,
 * inferred from the res->res_* member accessed. */
801 void free_resource(RES *sres, int type)
804 RES *nres; /* next resource if linked */
805 URES *res = (URES *)sres;
/* Save the chain pointer first, then free the header strings shared by
 * all resource types. */
810 /* common stuff -- free the resource name and description */
811 nres = (RES *)res->res_dir.hdr.next;
812 if (res->res_dir.hdr.name) {
813 free(res->res_dir.hdr.name);
815 if (res->res_dir.hdr.desc) {
816 free(res->res_dir.hdr.desc);
/* -- R_DIRECTOR (case label lost) -- */
821 if (res->res_dir.working_directory) {
822 free(res->res_dir.working_directory);
824 if (res->res_dir.scripts_directory) {
825 free((char *)res->res_dir.scripts_directory);
827 if (res->res_dir.pid_directory) {
828 free(res->res_dir.pid_directory);
830 if (res->res_dir.subsys_directory) {
831 free(res->res_dir.subsys_directory);
833 if (res->res_dir.password) {
834 free(res->res_dir.password);
836 if (res->res_dir.query_file) {
837 free(res->res_dir.query_file);
839 if (res->res_dir.DIRaddrs) {
840 free_addresses(res->res_dir.DIRaddrs);
/* -- R_CONSOLE (case label lost) -- */
847 if (res->res_con.password) {
848 free(res->res_con.password);
850 for (int i=0; i<Num_ACL; i++) {
851 if (res->res_con.ACL_lists[i]) {
852 delete res->res_con.ACL_lists[i];
853 res->res_con.ACL_lists[i] = NULL;
/* -- R_CLIENT (case label lost) -- */
858 if (res->res_client.address) {
859 free(res->res_client.address);
861 if (res->res_client.password) {
862 free(res->res_client.password);
/* -- R_STORAGE (case label lost) -- */
866 if (res->res_store.address) {
867 free(res->res_store.address);
869 if (res->res_store.password) {
870 free(res->res_store.password);
872 if (res->res_store.media_type) {
873 free(res->res_store.media_type);
875 if (res->res_store.device) {
876 delete res->res_store.device;
/* -- R_CATALOG (case label lost) -- */
880 if (res->res_cat.db_address) {
881 free(res->res_cat.db_address);
883 if (res->res_cat.db_socket) {
884 free(res->res_cat.db_socket);
886 if (res->res_cat.db_user) {
887 free(res->res_cat.db_user);
889 if (res->res_cat.db_name) {
890 free(res->res_cat.db_name);
892 if (res->res_cat.db_password) {
893 free(res->res_cat.db_password);
/* -- R_FILESET (case label, the "num" declaration and the loop headers
 * iterating num down over the item arrays were lost) -- */
897 if ((num=res->res_fs.num_includes)) {
899 free_incexe(res->res_fs.include_items[num]);
901 free(res->res_fs.include_items);
903 res->res_fs.num_includes = 0;
904 if ((num=res->res_fs.num_excludes)) {
906 free_incexe(res->res_fs.exclude_items[num]);
908 free(res->res_fs.exclude_items);
910 res->res_fs.num_excludes = 0;
/* -- R_POOL (case label lost) -- */
913 if (res->res_pool.pool_type) {
914 free(res->res_pool.pool_type);
916 if (res->res_pool.label_format) {
917 free(res->res_pool.label_format);
919 if (res->res_pool.cleaning_prefix) {
920 free(res->res_pool.cleaning_prefix);
/* -- R_SCHEDULE (case label lost); presumably walks and frees the RUN
 * chain starting at nrun -- loop body lost, verify upstream. -- */
924 if (res->res_sch.run) {
926 nrun = res->res_sch.run;
/* -- R_JOB / R_JOBDEFS (case labels lost) -- */
936 if (res->res_job.RestoreWhere) {
937 free(res->res_job.RestoreWhere);
939 if (res->res_job.RestoreBootstrap) {
940 free(res->res_job.RestoreBootstrap);
942 if (res->res_job.WriteBootstrap) {
943 free(res->res_job.WriteBootstrap);
945 if (res->res_job.RunBeforeJob) {
946 free(res->res_job.RunBeforeJob);
948 if (res->res_job.RunAfterJob) {
949 free(res->res_job.RunAfterJob);
951 if (res->res_job.RunAfterFailedJob) {
952 free(res->res_job.RunAfterFailedJob);
954 if (res->res_job.ClientRunBeforeJob) {
955 free(res->res_job.ClientRunBeforeJob);
957 if (res->res_job.ClientRunAfterJob) {
958 free(res->res_job.ClientRunAfterJob);
960 if (res->res_job.run_cmds) {
961 delete res->res_job.run_cmds;
963 if (res->res_job.storage) {
964 delete res->res_job.storage;
/* -- R_MSGS (case label lost) -- */
968 if (res->res_msgs.mail_cmd) {
969 free(res->res_msgs.mail_cmd);
971 if (res->res_msgs.operator_cmd) {
972 free(res->res_msgs.operator_cmd);
974 free_msgs_res((MSGS *)res); /* free message resource */
/* default: unknown resource type. */
978 printf("Unknown resource type %d in free_resource.\n", type);
980 /* Common stuff again -- free the resource, recurse to next one */
985 free_resource(nres, type);
990 * Save the new resource by chaining it into the head list for
991 * the resource. If this is pass 2, we update any resource
992 * pointers because they may not have been defined until
995 void save_resource(int type, RES_ITEM *items, int pass)
/* NOTE(review): this extract is elided -- the file line numbers fused into
 * each line show gaps where code (braces, case labels, breaks) was dropped.
 * Only comments are added below; every code token is left untouched. */
998 int rindex = type - r_first;   /* index into resources[] / res_head[] tables */
1002 /* Check Job requirements after applying JobDefs */
1003 if (type != R_JOB && type != R_JOBDEFS) {
1005 * Ensure that all required items are present
1007 for (i=0; items[i].name; i++) {
1008 if (items[i].flags & ITEM_REQUIRED) {
1009 if (!bit_is_set(i, res_all.res_dir.hdr.item_present)) {
/* A required item never appeared during the scan -- fatal config error */
1010 Emsg2(M_ERROR_TERM, 0, "%s item is required in %s resource, but not found.\n",
1011 items[i].name, resources[rindex]);
1014 /* If this triggers, take a look at lib/parse_conf.h */
1015 if (i >= MAX_RES_ITEMS) {
1016 Emsg1(M_ERROR_TERM, 0, "Too many items in %s resource\n", resources[rindex]);
1022 * During pass 2 in each "store" routine, we looked up pointers
1023 * to all the resources referrenced in the current resource, now we
1024 * must copy their addresses from the static record to the allocated
1029 /* Resources not containing a resource */
1038 /* Resources containing another resource or alist */
/* Director: re-find the permanent record by name, then copy the pass-2
 * resource pointers from the static res_all into it */
1040 if ((res = (URES *)GetResWithName(R_DIRECTOR, res_all.res_dir.hdr.name)) == NULL) {
1041 Emsg1(M_ERROR_TERM, 0, "Cannot find Director resource %s\n", res_all.res_dir.hdr.name);
1043 res->res_dir.messages = res_all.res_dir.messages;
/* NOTE(review): lookups below mix res_dir/res_store/res_client hdr.name --
 * this is only correct if res_all is a union so all hdr fields alias;
 * confirm against the URES declaration in dird_conf.h */
1046 if ((res = (URES *)GetResWithName(type, res_all.res_store.hdr.name)) == NULL) {
1047 Emsg1(M_ERROR_TERM, 0, "Cannot find Storage resource %s\n",
1048 res_all.res_dir.hdr.name);
1050 /* we must explicitly copy the device alist pointer */
1051 res->res_store.device = res_all.res_store.device;
/* Job (and JobDefs): copy every resource pointer resolved during pass 2 */
1055 if ((res = (URES *)GetResWithName(type, res_all.res_dir.hdr.name)) == NULL) {
1056 Emsg1(M_ERROR_TERM, 0, "Cannot find Job resource %s\n",
1057 res_all.res_dir.hdr.name);
1059 res->res_job.messages = res_all.res_job.messages;
1060 res->res_job.schedule = res_all.res_job.schedule;
1061 res->res_job.client = res_all.res_job.client;
1062 res->res_job.fileset = res_all.res_job.fileset;
1063 res->res_job.storage = res_all.res_job.storage;
1064 res->res_job.pool = res_all.res_job.pool;
1065 res->res_job.full_pool = res_all.res_job.full_pool;
1066 res->res_job.inc_pool = res_all.res_job.inc_pool;
1067 res->res_job.dif_pool = res_all.res_job.dif_pool;
1068 res->res_job.verify_job = res_all.res_job.verify_job;
1069 res->res_job.jobdefs = res_all.res_job.jobdefs;
1070 res->res_job.run_cmds = res_all.res_job.run_cmds;
1073 if ((res = (URES *)GetResWithName(R_COUNTER, res_all.res_counter.hdr.name)) == NULL) {
1074 Emsg1(M_ERROR_TERM, 0, "Cannot find Counter resource %s\n", res_all.res_counter.hdr.name);
1076 res->res_counter.Catalog = res_all.res_counter.Catalog;
1077 res->res_counter.WrapCounter = res_all.res_counter.WrapCounter;
1081 if ((res = (URES *)GetResWithName(R_CLIENT, res_all.res_client.hdr.name)) == NULL) {
1082 Emsg1(M_ERROR_TERM, 0, "Cannot find Client resource %s\n", res_all.res_client.hdr.name);
1084 res->res_client.catalog = res_all.res_client.catalog;
1088 * Schedule is a bit different in that it contains a RUN record
1089 * chain which isn't a "named" resource. This chain was linked
1090 * in by run_conf.c during pass 2, so here we jam the pointer
1091 * into the Schedule resource.
/* NOTE(review): lookup keyed on res_all.res_client.hdr.name for a Schedule
 * resource -- harmless only under the union-aliasing assumption above */
1093 if ((res = (URES *)GetResWithName(R_SCHEDULE, res_all.res_client.hdr.name)) == NULL) {
1094 Emsg1(M_ERROR_TERM, 0, "Cannot find Schedule resource %s\n", res_all.res_client.hdr.name);
1096 res->res_sch.run = res_all.res_sch.run;
1099 Emsg1(M_ERROR, 0, "Unknown resource type %d in save_resource.\n", type);
1103 /* Note, the resource name was already saved during pass 1,
1104 * so here, we can just release it.
1106 if (res_all.res_dir.hdr.name) {
1107 free(res_all.res_dir.hdr.name);
1108 res_all.res_dir.hdr.name = NULL;
1110 if (res_all.res_dir.hdr.desc) {
1111 free(res_all.res_dir.hdr.desc);
1112 res_all.res_dir.hdr.desc = NULL;
1118 * The following code is only executed during pass 1
/* Pick the allocation size for the concrete resource struct */
1122 size = sizeof(DIRRES);
1125 size = sizeof(CONRES);
1128 size =sizeof(CLIENT);
1131 size = sizeof(STORE);
1141 size = sizeof(FILESET);
1144 size = sizeof(SCHED);
1147 size = sizeof(POOL);
1150 size = sizeof(MSGS);
1153 size = sizeof(COUNTER);
/* NOTE(review): typo "save_resrouce" in the message text below -- left
 * unchanged here since a doc-only update may not alter string literals */
1159 printf("Unknown resource type %d in save_resrouce.\n", type);
/* Snapshot the fully-parsed static record into a permanent heap copy */
1165 res = (URES *)malloc(size);
1166 memcpy(res, &res_all, size);
1167 if (!res_head[rindex]) {
1168 res_head[rindex] = (RES *)res; /* store first entry */
1169 Dmsg3(900, "Inserting first %s res: %s index=%d\n", res_to_str(type),
1170 res->res_dir.hdr.name, rindex);
1173 /* Add new res to end of chain */
/* Walk to the tail, rejecting duplicate names along the way */
1174 for (next=res_head[rindex]; next->next; next=next->next) {
1175 if (strcmp(next->name, res->res_dir.hdr.name) == 0) {
1176 Emsg2(M_ERROR_TERM, 0,
1177 _("Attempt to define second %s resource named \"%s\" is not permitted.\n"),
1178 resources[rindex].name, res->res_dir.hdr.name);
1181 next->next = (RES *)res;
1182 Dmsg4(900, "Inserting %s res: %s index=%d pass=%d\n", res_to_str(type),
1183 res->res_dir.hdr.name, rindex, pass);
1189 * Store Device. Note, the resource is created upon the
1190 * first reference. The details of the resource are obtained
1191 * later from the SD.
1193 static void store_device(LEX *lc, RES_ITEM *item, int index, int pass)
/* NOTE(review): elided extract (fused line numbers show gaps) -- comments
 * only added, code tokens untouched. Creates a skeleton Device resource on
 * first reference; details are filled in later from the SD (per the header
 * comment above this function). */
1197 int rindex = R_DEVICE - r_first;
1198 int size = sizeof(DEVICE);
/* Read the device name token from the config stream */
1202 token = lex_get_token(lc, T_NAME);
1203 if (!res_head[rindex]) {
/* First Device ever referenced: allocate a zeroed record and make it
 * the head of the R_DEVICE chain */
1204 res = (URES *)malloc(size);
1205 memset(res, 0, size);
1206 res->res_dev.hdr.name = bstrdup(lc->str);
1207 res_head[rindex] = (RES *)res; /* store first entry */
1208 Dmsg3(900, "Inserting first %s res: %s index=%d\n", res_to_str(R_DEVICE),
/* NOTE(review): prints res_dir.hdr.name for a DEVICE record -- relies on
 * hdr aliasing across the URES union; confirm in dird_conf.h */
1209 res->res_dir.hdr.name, rindex);
1212 /* See if it is already defined */
1213 for (next=res_head[rindex]; next->next; next=next->next) {
1214 if (strcmp(next->name, lc->str) == 0) {
/* Not found: append a new zeroed Device record at the chain tail */
1220 res = (URES *)malloc(size);
1221 memset(res, 0, size);
1222 res->res_dev.hdr.name = bstrdup(lc->str);
1223 next->next = (RES *)res;
1224 Dmsg4(900, "Inserting %s res: %s index=%d pass=%d\n", res_to_str(R_DEVICE),
1225 res->res_dir.hdr.name, rindex, pass);
/* Mark this item as seen so the ITEM_REQUIRED check passes */
1230 set_bit(index, res_all.hdr.item_present);
/* Pass 2 (presumably -- elided branch): resolve the reference list */
1232 store_alist_res(lc, item, index, pass);
1238 * Store JobType (backup, verify, restore)
1241 void store_jobtype(LEX *lc, RES_ITEM *item, int index, int pass)
/* Store a JobType keyword (e.g. backup/verify/restore) as its integer
 * code in *item->value. Unknown keywords are a fatal scan error.
 * NOTE(review): elided extract -- comments only, code untouched. */
1245 token = lex_get_token(lc, T_NAME);
1246 /* Store the type both pass 1 and pass 2 */
/* Case-insensitive search of the jobtypes[] keyword table (NULL-terminated) */
1247 for (i=0; jobtypes[i].type_name; i++) {
1248 if (strcasecmp(lc->str, jobtypes[i].type_name) == 0) {
1249 *(int *)(item->value) = jobtypes[i].job_type;
/* Reached only when no table entry matched (elided flag/branch above) */
1255 scan_err1(lc, "Expected a Job Type keyword, got: %s", lc->str);
/* Record that this item was supplied, for the ITEM_REQUIRED check */
1258 set_bit(index, res_all.hdr.item_present);
1262 * Store Job Level (Full, Incremental, ...)
1265 void store_level(LEX *lc, RES_ITEM *item, int index, int pass)
/* Store a Job Level keyword (Full, Incremental, ...) as its integer
 * code in *item->value. Unknown keywords are a fatal scan error.
 * NOTE(review): elided extract -- comments only, code untouched. */
1269 token = lex_get_token(lc, T_NAME);
1270 /* Store the level pass 2 so that type is defined */
/* Case-insensitive search of the joblevels[] keyword table (NULL-terminated) */
1271 for (i=0; joblevels[i].level_name; i++) {
1272 if (strcasecmp(lc->str, joblevels[i].level_name) == 0) {
1273 *(int *)(item->value) = joblevels[i].level;
/* Reached only when no table entry matched (elided flag/branch above) */
1279 scan_err1(lc, "Expected a Job Level keyword, got: %s", lc->str);
/* Record that this item was supplied, for the ITEM_REQUIRED check */
1282 set_bit(index, res_all.hdr.item_present);
1286 void store_replace(LEX *lc, RES_ITEM *item, int index, int pass)
/* Store a restore Replace option keyword as its token value in
 * *item->value. Unknown keywords are a fatal scan error.
 * NOTE(review): elided extract -- comments only, code untouched. */
1289 token = lex_get_token(lc, T_NAME);
1290 /* Scan Replacement options */
/* Case-insensitive search of the ReplaceOptions[] table (NULL-terminated) */
1291 for (i=0; ReplaceOptions[i].name; i++) {
1292 if (strcasecmp(lc->str, ReplaceOptions[i].name) == 0) {
1293 *(int *)(item->value) = ReplaceOptions[i].token;
/* Reached only when no table entry matched (elided flag/branch above) */
1299 scan_err1(lc, "Expected a Restore replacement option, got: %s", lc->str);
/* Record that this item was supplied, for the ITEM_REQUIRED check */
1302 set_bit(index, res_all.hdr.item_present);
1306 * Store ACL (access control list)
1309 void store_acl(LEX *lc, RES_ITEM *item, int index, int pass)
1314 token = lex_get_token(lc, T_NAME);
1316 if (((alist **)item->value)[item->code] == NULL) {
1317 ((alist **)item->value)[item->code] = New(alist(10, owned_by_alist));
1318 Dmsg1(900, "Defined new ACL alist at %d\n", item->code);
1320 ((alist **)item->value)[item->code]->append(bstrdup(lc->str));
1321 Dmsg2(900, "Appended to %d %s\n", item->code, lc->str);
1323 token = lex_get_token(lc, T_ALL);
1324 if (token == T_COMMA) {
1325 continue; /* get another ACL */
1329 set_bit(index, res_all.hdr.item_present);