2 * Main configuration file parser for Bacula Directors,
3 * some parts may be split into separate files such as
4 * the schedule configuration (run_config.c).
6 * Note, the configuration file parser consists of three parts
8 * 1. The generic lexical scanner in lib/lex.c and lib/lex.h
10 * 2. The generic config scanner in lib/parse_config.c and
12 * These files contain the parser code, some utility
13 * routines, and the common store routines (name, int,
16 * 3. The daemon specific file, which contains the Resource
17 * definitions as well as any specific store routines
18 * for the resource records.
20 * Kern Sibbald, January MM
25 Copyright (C) 2000-2005 Kern Sibbald
27 This program is free software; you can redistribute it and/or
28 modify it under the terms of the GNU General Public License as
29 published by the Free Software Foundation; either version 2 of
30 the License, or (at your option) any later version.
32 This program is distributed in the hope that it will be useful,
33 but WITHOUT ANY WARRANTY; without even the implied warranty of
34 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
35 General Public License for more details.
37 You should have received a copy of the GNU General Public
38 License along with this program; if not, write to the Free
39 Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
/*
 * Resource-ID bounds and the static array of resource chain heads.
 * parse_config.c walks res_head[] from r_first..r_last when loading
 * the configuration; sres_head holds one chain head per resource type.
 * NOTE(review): this excerpt is a lossy numbered listing; the matching
 * "int r_last = R_LAST;" (original line 52) is not visible here --
 * confirm against the upstream file before editing.
 */
47 /* Define the first and last resource ID record
48 * types. Note, these should be unique for each
49 * daemon though not a requirement.
51 int r_first = R_FIRST;
53 static RES *sres_head[R_LAST - R_FIRST + 1];
54 RES **res_head = sres_head;
/*
 * Store routines imported from run_conf.c / inc_conf.c (store_run,
 * store_finc, store_inc), forward declarations for the daemon-specific
 * store routines defined later in this file, and the size of the static
 * scratch resource used while parsing.
 * NOTE(review): the definition of the scratch resource itself
 * ("static URES res_all;", around original lines 75-76) is missing from
 * this excerpt, though res_all_size below clearly refers to it --
 * verify against the upstream file.
 */
56 /* Imported subroutines */
57 extern void store_run(LEX *lc, RES_ITEM *item, int index, int pass);
58 extern void store_finc(LEX *lc, RES_ITEM *item, int index, int pass);
59 extern void store_inc(LEX *lc, RES_ITEM *item, int index, int pass);
62 /* Forward referenced subroutines */
64 void store_jobtype(LEX *lc, RES_ITEM *item, int index, int pass);
65 void store_level(LEX *lc, RES_ITEM *item, int index, int pass);
66 void store_replace(LEX *lc, RES_ITEM *item, int index, int pass);
67 void store_acl(LEX *lc, RES_ITEM *item, int index, int pass);
68 static void store_device(LEX *lc, RES_ITEM *item, int index, int pass);
71 /* We build the current resource here as we are
72 * scanning the resource configuration definition,
73 * then move it to allocated memory when the resource
/* Size in bytes of the scratch resource, exported for the generic parser. */
77 int res_all_size = sizeof(res_all);
/*
 * Directives accepted inside a Director{} resource.
 * Columns: keyword (lower case), store routine, destination field in
 * res_all (via ITEM macro), code, flags (ITEM_REQUIRED/ITEM_DEFAULT),
 * default value.  The all-NULL row is the table terminator.
 * NOTE(review): the closing "};" of this array (original line ~108) is
 * not visible in this excerpt.
 */
80 /* Definition of records permitted within each
81 * resource with the routine to process the record
82 * information. NOTE! quoted names must be in lower case.
87 * name handler value code flags default_value
89 static RES_ITEM dir_items[] = {
90 {"name", store_name, ITEM(res_dir.hdr.name), 0, ITEM_REQUIRED, 0},
91 {"description", store_str, ITEM(res_dir.hdr.desc), 0, 0, 0},
92 {"messages", store_res, ITEM(res_dir.messages), R_MSGS, 0, 0},
93 {"dirport", store_addresses_port, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101},
94 {"diraddress", store_addresses_address, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101},
95 {"diraddresses",store_addresses, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101},
96 {"queryfile", store_dir, ITEM(res_dir.query_file), 0, ITEM_REQUIRED, 0},
97 {"workingdirectory", store_dir, ITEM(res_dir.working_directory), 0, ITEM_REQUIRED, 0},
98 {"scriptsdirectory", store_dir, ITEM(res_dir.scripts_directory), 0, 0, 0},
99 {"piddirectory",store_dir, ITEM(res_dir.pid_directory), 0, ITEM_REQUIRED, 0},
100 {"subsysdirectory", store_dir, ITEM(res_dir.subsys_directory), 0, 0, 0},
101 {"requiressl", store_yesno, ITEM(res_dir.require_ssl), 1, ITEM_DEFAULT, 0},
102 {"enablessl", store_yesno, ITEM(res_dir.enable_ssl), 1, ITEM_DEFAULT, 0},
103 {"maximumconcurrentjobs", store_pint, ITEM(res_dir.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
104 {"password", store_password, ITEM(res_dir.password), 0, ITEM_REQUIRED, 0},
/* Connect timeouts default to 30 minutes (60 * 30 seconds). */
105 {"fdconnecttimeout", store_time,ITEM(res_dir.FDConnectTimeout), 0, ITEM_DEFAULT, 60 * 30},
106 {"sdconnecttimeout", store_time,ITEM(res_dir.SDConnectTimeout), 0, ITEM_DEFAULT, 60 * 30},
107 {NULL, NULL, NULL, 0, 0, 0}
/*
 * Directives accepted inside a Console{} resource, including one ACL
 * directive per ACL class (the ACL class index is carried in the
 * "code" column).  Terminated by the all-NULL row; the closing "};"
 * is outside this excerpt.
 */
113 * name handler value code flags default_value
115 static RES_ITEM con_items[] = {
116 {"name", store_name, ITEM(res_con.hdr.name), 0, ITEM_REQUIRED, 0},
117 {"description", store_str, ITEM(res_con.hdr.desc), 0, 0, 0},
118 {"enablessl", store_yesno, ITEM(res_con.enable_ssl), 1, ITEM_DEFAULT, 0},
119 {"password", store_password, ITEM(res_con.password), 0, ITEM_REQUIRED, 0},
120 {"jobacl", store_acl, ITEM(res_con.ACL_lists), Job_ACL, 0, 0},
121 {"clientacl", store_acl, ITEM(res_con.ACL_lists), Client_ACL, 0, 0},
122 {"storageacl", store_acl, ITEM(res_con.ACL_lists), Storage_ACL, 0, 0},
123 {"scheduleacl", store_acl, ITEM(res_con.ACL_lists), Schedule_ACL, 0, 0},
124 {"runacl", store_acl, ITEM(res_con.ACL_lists), Run_ACL, 0, 0},
125 {"poolacl", store_acl, ITEM(res_con.ACL_lists), Pool_ACL, 0, 0},
126 {"commandacl", store_acl, ITEM(res_con.ACL_lists), Command_ACL, 0, 0},
127 {"filesetacl", store_acl, ITEM(res_con.ACL_lists), FileSet_ACL, 0, 0},
128 {"catalogacl", store_acl, ITEM(res_con.ACL_lists), Catalog_ACL, 0, 0},
129 {NULL, NULL, NULL, 0, 0, 0}
/*
 * Directives accepted inside a Client{} (File daemon) resource.
 * "fdaddress"/"fdpassword" are aliases writing the same fields as
 * "address"/"password".  Retention defaults: files 60 days, jobs 180
 * days (expressed in seconds).  Closing "};" is outside this excerpt.
 */
134 * Client or File daemon resource
136 * name handler value code flags default_value
139 static RES_ITEM cli_items[] = {
140 {"name", store_name, ITEM(res_client.hdr.name), 0, ITEM_REQUIRED, 0},
141 {"description", store_str, ITEM(res_client.hdr.desc), 0, 0, 0},
142 {"address", store_str, ITEM(res_client.address), 0, ITEM_REQUIRED, 0},
143 {"fdaddress", store_str, ITEM(res_client.address), 0, 0, 0},
144 {"fdport", store_pint, ITEM(res_client.FDport), 0, ITEM_DEFAULT, 9102},
145 {"password", store_password, ITEM(res_client.password), 0, ITEM_REQUIRED, 0},
146 {"fdpassword", store_password, ITEM(res_client.password), 0, 0, 0},
147 {"catalog", store_res, ITEM(res_client.catalog), R_CATALOG, ITEM_REQUIRED, 0},
148 {"fileretention", store_time, ITEM(res_client.FileRetention), 0, ITEM_DEFAULT, 60*60*24*60},
149 {"jobretention", store_time, ITEM(res_client.JobRetention), 0, ITEM_DEFAULT, 60*60*24*180},
150 {"autoprune", store_yesno, ITEM(res_client.AutoPrune), 1, ITEM_DEFAULT, 1},
151 {"enablessl", store_yesno, ITEM(res_client.enable_ssl), 1, ITEM_DEFAULT, 0},
152 {"maximumconcurrentjobs", store_pint, ITEM(res_client.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
153 {NULL, NULL, NULL, 0, 0, 0}
/*
 * Directives accepted inside a Storage{} (Storage daemon) resource.
 * "sdaddress"/"sdpassword" alias "address"/"password"; "sddport" is
 * kept only for backward compatibility.  Closing "};" is outside this
 * excerpt.
 */
156 /* Storage daemon resource
158 * name handler value code flags default_value
160 static RES_ITEM store_items[] = {
161 {"name", store_name, ITEM(res_store.hdr.name), 0, ITEM_REQUIRED, 0},
162 {"description", store_str, ITEM(res_store.hdr.desc), 0, 0, 0},
163 {"sdport", store_pint, ITEM(res_store.SDport), 0, ITEM_DEFAULT, 9103},
164 {"address", store_str, ITEM(res_store.address), 0, ITEM_REQUIRED, 0},
165 {"sdaddress", store_str, ITEM(res_store.address), 0, 0, 0},
166 {"password", store_password, ITEM(res_store.password), 0, ITEM_REQUIRED, 0},
167 {"sdpassword", store_password, ITEM(res_store.password), 0, 0, 0},
168 {"device", store_device, ITEM(res_store.device), R_DEVICE, ITEM_REQUIRED, 0},
169 {"mediatype", store_strname, ITEM(res_store.media_type), 0, ITEM_REQUIRED, 0},
170 {"autochanger", store_yesno, ITEM(res_store.autochanger), 1, ITEM_DEFAULT, 0},
171 {"enablessl", store_yesno, ITEM(res_store.enable_ssl), 1, ITEM_DEFAULT, 0},
172 {"maximumconcurrentjobs", store_pint, ITEM(res_store.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
173 {"sddport", store_pint, ITEM(res_store.SDDport), 0, 0, 0}, /* deprecated */
174 {NULL, NULL, NULL, 0, 0, 0}
/*
 * Directives accepted inside a Catalog{} (database) resource.
 * "dbaddress"/"dbpassword" alias "address"/"password".  The password
 * deliberately uses store_str rather than store_password (see the
 * in-line comment below).  Closing "};" is outside this excerpt.
 */
178 * Catalog Resource Directives
180 * name handler value code flags default_value
182 static RES_ITEM cat_items[] = {
183 {"name", store_name, ITEM(res_cat.hdr.name), 0, ITEM_REQUIRED, 0},
184 {"description", store_str, ITEM(res_cat.hdr.desc), 0, 0, 0},
185 {"address", store_str, ITEM(res_cat.db_address), 0, 0, 0},
186 {"dbaddress", store_str, ITEM(res_cat.db_address), 0, 0, 0},
187 {"dbport", store_pint, ITEM(res_cat.db_port), 0, 0, 0},
188 /* keep this password as store_str for the moment */
189 {"password", store_str, ITEM(res_cat.db_password), 0, 0, 0},
190 {"dbpassword", store_str, ITEM(res_cat.db_password), 0, 0, 0},
191 {"user", store_str, ITEM(res_cat.db_user), 0, 0, 0},
192 {"dbname", store_str, ITEM(res_cat.db_name), 0, ITEM_REQUIRED, 0},
193 {"dbsocket", store_str, ITEM(res_cat.db_socket), 0, 0, 0},
194 {"multipleconnections", store_yesno, ITEM(res_cat.mult_db_connections), 1, 0, 0},
195 {NULL, NULL, NULL, 0, 0, 0}
/*
 * Directives accepted inside a Job{} resource.  This table is shared
 * with the JobDefs{} resource (see the resources[] master table), which
 * is why it is not static.  ITEM_REQUIRED entries are checked in
 * save_resource() only after JobDefs defaults are applied.  Closing
 * "};" is outside this excerpt.
 */
199 * Job Resource Directives
201 * name handler value code flags default_value
203 RES_ITEM job_items[] = {
204 {"name", store_name, ITEM(res_job.hdr.name), 0, ITEM_REQUIRED, 0},
205 {"description", store_str, ITEM(res_job.hdr.desc), 0, 0, 0},
206 {"type", store_jobtype, ITEM(res_job.JobType), 0, ITEM_REQUIRED, 0},
207 {"level", store_level, ITEM(res_job.JobLevel), 0, 0, 0},
208 {"messages", store_res, ITEM(res_job.messages), R_MSGS, ITEM_REQUIRED, 0},
209 {"storage", store_alist_res, ITEM(res_job.storage), R_STORAGE, ITEM_REQUIRED, 0},
210 {"pool", store_res, ITEM(res_job.pool), R_POOL, ITEM_REQUIRED, 0},
211 {"fullbackuppool", store_res, ITEM(res_job.full_pool), R_POOL, 0, 0},
212 {"incrementalbackuppool", store_res, ITEM(res_job.inc_pool), R_POOL, 0, 0},
213 {"differentialbackuppool", store_res, ITEM(res_job.dif_pool), R_POOL, 0, 0},
214 {"client", store_res, ITEM(res_job.client), R_CLIENT, ITEM_REQUIRED, 0},
215 {"fileset", store_res, ITEM(res_job.fileset), R_FILESET, ITEM_REQUIRED, 0},
216 {"schedule", store_res, ITEM(res_job.schedule), R_SCHEDULE, 0, 0},
217 {"verifyjob", store_res, ITEM(res_job.verify_job), R_JOB, 0, 0},
218 {"jobdefs", store_res, ITEM(res_job.jobdefs), R_JOBDEFS, 0, 0},
219 {"where", store_dir, ITEM(res_job.RestoreWhere), 0, 0, 0},
220 {"bootstrap",store_dir, ITEM(res_job.RestoreBootstrap), 0, 0, 0},
221 {"writebootstrap",store_dir, ITEM(res_job.WriteBootstrap), 0, 0, 0},
222 {"replace", store_replace, ITEM(res_job.replace), 0, ITEM_DEFAULT, REPLACE_ALWAYS},
223 {"maxruntime", store_time, ITEM(res_job.MaxRunTime), 0, 0, 0},
224 {"fullmaxwaittime", store_time, ITEM(res_job.FullMaxWaitTime), 0, 0, 0},
225 {"incrementalmaxwaittime", store_time, ITEM(res_job.IncMaxWaitTime), 0, 0, 0},
226 {"differentialmaxwaittime", store_time, ITEM(res_job.DiffMaxWaitTime), 0, 0, 0},
227 {"maxwaittime", store_time, ITEM(res_job.MaxWaitTime), 0, 0, 0},
228 {"maxstartdelay",store_time, ITEM(res_job.MaxStartDelay), 0, 0, 0},
229 {"jobretention", store_time, ITEM(res_job.JobRetention), 0, 0, 0},
230 {"prefixlinks", store_yesno, ITEM(res_job.PrefixLinks), 1, ITEM_DEFAULT, 0},
231 {"prunejobs", store_yesno, ITEM(res_job.PruneJobs), 1, ITEM_DEFAULT, 0},
232 {"prunefiles", store_yesno, ITEM(res_job.PruneFiles), 1, ITEM_DEFAULT, 0},
233 {"prunevolumes",store_yesno, ITEM(res_job.PruneVolumes), 1, ITEM_DEFAULT, 0},
234 {"spoolattributes",store_yesno, ITEM(res_job.SpoolAttributes), 1, ITEM_DEFAULT, 0},
235 {"spooldata", store_yesno, ITEM(res_job.spool_data), 1, ITEM_DEFAULT, 0},
236 {"rerunfailedlevels", store_yesno, ITEM(res_job.rerun_failed_levels), 1, ITEM_DEFAULT, 0},
237 {"runbeforejob", store_str, ITEM(res_job.RunBeforeJob), 0, 0, 0},
238 {"runafterjob", store_str, ITEM(res_job.RunAfterJob), 0, 0, 0},
239 {"runafterfailedjob", store_str, ITEM(res_job.RunAfterFailedJob), 0, 0, 0},
240 {"clientrunbeforejob", store_str, ITEM(res_job.ClientRunBeforeJob), 0, 0, 0},
241 {"clientrunafterjob", store_str, ITEM(res_job.ClientRunAfterJob), 0, 0, 0},
242 {"maximumconcurrentjobs", store_pint, ITEM(res_job.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
243 {"rescheduleonerror", store_yesno, ITEM(res_job.RescheduleOnError), 1, ITEM_DEFAULT, 0},
/* Reschedule interval default: 30 minutes (60 * 30 seconds). */
244 {"rescheduleinterval", store_time, ITEM(res_job.RescheduleInterval), 0, ITEM_DEFAULT, 60 * 30},
245 {"rescheduletimes", store_pint, ITEM(res_job.RescheduleTimes), 0, 0, 0},
246 {"priority", store_pint, ITEM(res_job.Priority), 0, ITEM_DEFAULT, 10},
247 {"writepartafterjob", store_yesno, ITEM(res_job.write_part_after_job), 1, ITEM_DEFAULT, 0},
248 {NULL, NULL, NULL, 0, 0, 0}
/*
 * Directives accepted inside a FileSet{} resource.  Include/Exclude
 * take no "=" (ITEM_NO_EQUALS) and are parsed by store_inc; the code
 * column (0/1) distinguishes include from exclude.  Closing "};" is
 * outside this excerpt.
 */
253 * name handler value code flags default_value
255 static RES_ITEM fs_items[] = {
256 {"name", store_name, ITEM(res_fs.hdr.name), 0, ITEM_REQUIRED, 0},
257 {"description", store_str, ITEM(res_fs.hdr.desc), 0, 0, 0},
258 {"include", store_inc, NULL, 0, ITEM_NO_EQUALS, 0},
259 {"exclude", store_inc, NULL, 1, ITEM_NO_EQUALS, 0},
260 {"ignorefilesetchanges", store_yesno, ITEM(res_fs.ignore_fs_changes), 1, ITEM_DEFAULT, 0},
261 {NULL, NULL, NULL, 0, 0, 0}
/*
 * Directives accepted inside a Schedule{} resource.  The "run"
 * directive is handled by store_run() in run_conf.c.  Closing "};" is
 * outside this excerpt.
 */
264 /* Schedule -- see run_conf.c */
267 * name handler value code flags default_value
269 static RES_ITEM sch_items[] = {
270 {"name", store_name, ITEM(res_sch.hdr.name), 0, ITEM_REQUIRED, 0},
271 {"description", store_str, ITEM(res_sch.hdr.desc), 0, 0, 0},
272 {"run", store_run, ITEM(res_sch.run), 0, 0, 0},
273 {NULL, NULL, NULL, 0, 0, 0}
/*
 * Directives accepted inside a Pool{} resource.  VolumeRetention
 * defaults to one year (60*60*24*365 seconds).  Closing "};" is
 * outside this excerpt.
 */
278 * name handler value code flags default_value
280 static RES_ITEM pool_items[] = {
281 {"name", store_name, ITEM(res_pool.hdr.name), 0, ITEM_REQUIRED, 0},
282 {"description", store_str, ITEM(res_pool.hdr.desc), 0, 0, 0},
283 {"pooltype", store_strname, ITEM(res_pool.pool_type), 0, ITEM_REQUIRED, 0},
284 {"labelformat", store_strname, ITEM(res_pool.label_format), 0, 0, 0},
285 {"labeltype", store_label, ITEM(res_pool.LabelType), 0, 0, 0},
286 {"cleaningprefix", store_strname, ITEM(res_pool.cleaning_prefix), 0, 0, 0},
287 {"usecatalog", store_yesno, ITEM(res_pool.use_catalog), 1, ITEM_DEFAULT, 1},
288 {"usevolumeonce", store_yesno, ITEM(res_pool.use_volume_once),1, 0, 0},
289 {"purgeoldestvolume", store_yesno, ITEM(res_pool.purge_oldest_volume), 1, 0, 0},
290 {"recycleoldestvolume", store_yesno, ITEM(res_pool.recycle_oldest_volume), 1, 0, 0},
291 {"recyclecurrentvolume", store_yesno, ITEM(res_pool.recycle_current_volume), 1, 0, 0},
292 {"maximumvolumes", store_pint, ITEM(res_pool.max_volumes), 0, 0, 0},
293 {"maximumvolumejobs", store_pint, ITEM(res_pool.MaxVolJobs), 0, 0, 0},
294 {"maximumvolumefiles", store_pint, ITEM(res_pool.MaxVolFiles), 0, 0, 0},
295 {"maximumvolumebytes", store_size, ITEM(res_pool.MaxVolBytes), 0, 0, 0},
296 {"acceptanyvolume", store_yesno, ITEM(res_pool.accept_any_volume), 1, ITEM_DEFAULT, 1},
297 {"catalogfiles", store_yesno, ITEM(res_pool.catalog_files), 1, ITEM_DEFAULT, 1},
298 {"volumeretention", store_time, ITEM(res_pool.VolRetention), 0, ITEM_DEFAULT, 60*60*24*365},
299 {"volumeuseduration", store_time, ITEM(res_pool.VolUseDuration), 0, 0, 0},
300 {"autoprune", store_yesno, ITEM(res_pool.AutoPrune), 1, ITEM_DEFAULT, 1},
301 {"recycle", store_yesno, ITEM(res_pool.Recycle), 1, ITEM_DEFAULT, 1},
302 {NULL, NULL, NULL, 0, 0, 0}
/*
 * Directives accepted inside a Counter{} resource.  "maximum" defaults
 * to INT32_MAX; wrapcounter/catalog are references to other resources.
 * Closing "};" is outside this excerpt.
 */
307 * name handler value code flags default_value
309 static RES_ITEM counter_items[] = {
310 {"name", store_name, ITEM(res_counter.hdr.name), 0, ITEM_REQUIRED, 0},
311 {"description", store_str, ITEM(res_counter.hdr.desc), 0, 0, 0},
312 {"minimum", store_int, ITEM(res_counter.MinValue), 0, ITEM_DEFAULT, 0},
313 {"maximum", store_pint, ITEM(res_counter.MaxValue), 0, ITEM_DEFAULT, INT32_MAX},
314 {"wrapcounter", store_res, ITEM(res_counter.WrapCounter), R_COUNTER, 0, 0},
315 {"catalog", store_res, ITEM(res_counter.Catalog), R_CATALOG, 0, 0},
316 {NULL, NULL, NULL, 0, 0, 0}
/*
 * Master resource table: maps each resource keyword to its directive
 * table and R_* code.  Per the warning below, the order MUST match the
 * R_* enumeration.  "jobdefs" reuses job_items so Job and JobDefs
 * accept identical directives; "device" has no table because its
 * contents come from the Storage daemon at runtime.
 * NOTE(review): the terminating {NULL, NULL, 0} row and closing "};"
 * (original lines ~346-347) are not visible in this excerpt.
 */
320 /* Message resource */
321 extern RES_ITEM msgs_items[];
324 * This is the master resource definition.
325 * It must have one item for each of the resources.
327 * NOTE!!! keep it in the same order as the R_codes
328 * or eliminate all resources[rindex].name
330 * name items rcode res_head
332 RES_TABLE resources[] = {
333 {"director", dir_items, R_DIRECTOR},
334 {"client", cli_items, R_CLIENT},
335 {"job", job_items, R_JOB},
336 {"storage", store_items, R_STORAGE},
337 {"catalog", cat_items, R_CATALOG},
338 {"schedule", sch_items, R_SCHEDULE},
339 {"fileset", fs_items, R_FILESET},
340 {"pool", pool_items, R_POOL},
341 {"messages", msgs_items, R_MSGS},
342 {"counter", counter_items, R_COUNTER},
343 {"console", con_items, R_CONSOLE},
344 {"jobdefs", job_items, R_JOBDEFS},
345 {"device", NULL, R_DEVICE}, /* info obtained from SD */
/*
 * Keywords permitted on the right-hand side of "Level =", with the
 * L_* code and the job type each level applies to.  The " " entries
 * let Admin and Restore jobs carry no level.
 * NOTE(review): the terminating {NULL, 0, 0} row and closing "};"
 * (original lines ~367-368) are not visible in this excerpt.
 */
350 /* Keywords (RHS) permitted in Job Level records
352 * level_name level job_type
354 struct s_jl joblevels[] = {
355 {"Full", L_FULL, JT_BACKUP},
356 {"Base", L_BASE, JT_BACKUP},
357 {"Incremental", L_INCREMENTAL, JT_BACKUP},
358 {"Differential", L_DIFFERENTIAL, JT_BACKUP},
359 {"Since", L_SINCE, JT_BACKUP},
360 {"Catalog", L_VERIFY_CATALOG, JT_VERIFY},
361 {"InitCatalog", L_VERIFY_INIT, JT_VERIFY},
362 {"VolumeToCatalog", L_VERIFY_VOLUME_TO_CATALOG, JT_VERIFY},
363 {"DiskToCatalog", L_VERIFY_DISK_TO_CATALOG, JT_VERIFY},
364 {"Data", L_VERIFY_DATA, JT_VERIFY},
365 {" ", L_NONE, JT_ADMIN},
366 {" ", L_NONE, JT_RESTORE},
/*
 * Keywords permitted on the right-hand side of "Type =".
 * NOTE(review): the original line numbers show gaps here (line 376 is
 * missing -- presumably the "admin" entry -- as are the NULL terminator
 * and closing "};").  Confirm against the upstream file.
 */
370 /* Keywords (RHS) permitted in Job type records
374 struct s_jt jobtypes[] = {
375 {"backup", JT_BACKUP},
377 {"verify", JT_VERIFY},
378 {"restore", JT_RESTORE},
/*
 * Keywords permitted on the right-hand side of "Replace =" in restore
 * jobs, mapped to REPLACE_* codes (see job_items "replace" directive,
 * which defaults to REPLACE_ALWAYS).
 * NOTE(review): the NULL terminator row and closing "};" are not
 * visible in this excerpt.
 */
383 /* Options permitted in Restore replace= */
384 struct s_kw ReplaceOptions[] = {
385 {"always", REPLACE_ALWAYS},
386 {"ifnewer", REPLACE_IFNEWER},
387 {"ifolder", REPLACE_IFOLDER},
388 {"never", REPLACE_NEVER},
/*
 * Map a numeric L_* job level to its keyword from joblevels[]; if the
 * level is unknown, the decimal value formatted into the static buffer
 * is returned instead.  Not thread-safe for unknown levels (static
 * buffer).
 * NOTE(review): this excerpt omits the opening brace, the declaration
 * of loop index i, the loop's break/closing braces and the final
 * "return str;" -- do not edit without the complete upstream body.
 */
392 const char *level_to_str(int level)
395 static char level_no[30];
396 const char *str = level_no;
398 bsnprintf(level_no, sizeof(level_no), "%d", level); /* default if not found */
399 for (i=0; joblevels[i].level_name; i++) {
400 if (level == joblevels[i].level) {
401 str = joblevels[i].level_name;
/*
 * Print a human-readable dump of one resource (and, unless type is
 * negative, the rest of its chain) through the caller-supplied sendit()
 * callback.  A negative type means "dump this one resource only" and is
 * used for the recursive "-->" sub-dumps of referenced resources.
 * NOTE(review): this excerpt is a lossy numbered listing -- the
 * switch(type) statement, its case labels, many closing braces and the
 * "recurse" setup are missing.  The visible lines are grouped below by
 * the resource type they evidently belong to; do not edit this function
 * without the complete upstream body.
 */
408 /* Dump contents of resource */
409 void dump_resource(int type, RES *reshdr, void sendit(void *sock, const char *fmt, ...), void *sock)
411 URES *res = (URES *)reshdr;
413 char ed1[100], ed2[100];
417 sendit(sock, "No %s resource defined\n", res_to_str(type));
420 if (type < 0) { /* no recursion */
/* --- Director resource dump --- */
426 sendit(sock, "Director: name=%s MaxJobs=%d FDtimeout=%s SDtimeout=%s\n",
427 reshdr->name, res->res_dir.MaxConcurrentJobs,
428 edit_uint64(res->res_dir.FDConnectTimeout, ed1),
429 edit_uint64(res->res_dir.SDConnectTimeout, ed2));
430 if (res->res_dir.query_file) {
431 sendit(sock, " query_file=%s\n", res->res_dir.query_file);
433 if (res->res_dir.messages) {
434 sendit(sock, " --> ");
435 dump_resource(-R_MSGS, (RES *)res->res_dir.messages, sendit, sock);
/* --- Console resource dump --- */
439 sendit(sock, "Console: name=%s SSL=%d\n",
440 res->res_con.hdr.name, res->res_con.enable_ssl);
/* --- Counter resource dump (with/without a wrap counter) --- */
443 if (res->res_counter.WrapCounter) {
444 sendit(sock, "Counter: name=%s min=%d max=%d cur=%d wrapcntr=%s\n",
445 res->res_counter.hdr.name, res->res_counter.MinValue,
446 res->res_counter.MaxValue, res->res_counter.CurrentValue,
447 res->res_counter.WrapCounter->hdr.name);
449 sendit(sock, "Counter: name=%s min=%d max=%d\n",
450 res->res_counter.hdr.name, res->res_counter.MinValue,
451 res->res_counter.MaxValue);
453 if (res->res_counter.Catalog) {
454 sendit(sock, " --> ");
455 dump_resource(-R_CATALOG, (RES *)res->res_counter.Catalog, sendit, sock);
/* --- Client resource dump --- */
460 sendit(sock, "Client: name=%s address=%s FDport=%d MaxJobs=%u\n",
461 res->res_client.hdr.name, res->res_client.address, res->res_client.FDport,
462 res->res_client.MaxConcurrentJobs);
463 sendit(sock, " JobRetention=%s FileRetention=%s AutoPrune=%d\n",
464 edit_utime(res->res_client.JobRetention, ed1, sizeof(ed1)),
465 edit_utime(res->res_client.FileRetention, ed2, sizeof(ed2)),
466 res->res_client.AutoPrune);
467 if (res->res_client.catalog) {
468 sendit(sock, " --> ");
469 dump_resource(-R_CATALOG, (RES *)res->res_client.catalog, sendit, sock);
/* --- Device resource dump (dev presumably bound in missing lines) --- */
474 sendit(sock, "Device: name=%s ok=%d num_writers=%d num_waiting=%d\n"
475 " use_cnt=%d open=%d append=%d read=%d labeled=%d offline=%d autochgr=%d\n"
476 " volname=%s MediaType=%s\n",
477 dev->hdr.name, dev->found, dev->num_writers, dev->num_waiting,
478 dev->use_count, dev->open, dev->append, dev->read, dev->labeled,
479 dev->offline, dev->autochanger,
480 dev->VolumeName, dev->MediaType);
/* --- Storage resource dump --- */
483 sendit(sock, "Storage: name=%s address=%s SDport=%d MaxJobs=%u\n"
484 " DeviceName=%s MediaType=%s StorageId=%s\n",
485 res->res_store.hdr.name, res->res_store.address, res->res_store.SDport,
486 res->res_store.MaxConcurrentJobs,
487 res->res_store.dev_name(),
488 res->res_store.media_type,
489 edit_int64(res->res_store.StorageId, ed1));
/* --- Catalog resource dump --- */
492 sendit(sock, "Catalog: name=%s address=%s DBport=%d db_name=%s\n"
493 " db_user=%s MutliDBConn=%d\n",
494 res->res_cat.hdr.name, NPRT(res->res_cat.db_address),
495 res->res_cat.db_port, res->res_cat.db_name, NPRT(res->res_cat.db_user),
496 res->res_cat.mult_db_connections);
/* --- Job / JobDefs resource dump (shared code path) --- */
500 sendit(sock, "%s: name=%s JobType=%d level=%s Priority=%d MaxJobs=%u\n",
501 type == R_JOB ? "Job" : "JobDefs",
502 res->res_job.hdr.name, res->res_job.JobType,
503 level_to_str(res->res_job.JobLevel), res->res_job.Priority,
504 res->res_job.MaxConcurrentJobs);
505 sendit(sock, " Resched=%d Times=%d Interval=%s Spool=%d WritePartAfterJob=%d\n",
506 res->res_job.RescheduleOnError, res->res_job.RescheduleTimes,
507 edit_uint64_with_commas(res->res_job.RescheduleInterval, ed1),
508 res->res_job.spool_data, res->res_job.write_part_after_job);
509 if (res->res_job.client) {
510 sendit(sock, " --> ");
511 dump_resource(-R_CLIENT, (RES *)res->res_job.client, sendit, sock);
513 if (res->res_job.fileset) {
514 sendit(sock, " --> ");
515 dump_resource(-R_FILESET, (RES *)res->res_job.fileset, sendit, sock);
517 if (res->res_job.schedule) {
518 sendit(sock, " --> ");
519 dump_resource(-R_SCHEDULE, (RES *)res->res_job.schedule, sendit, sock);
521 if (res->res_job.RestoreWhere) {
522 sendit(sock, " --> Where=%s\n", NPRT(res->res_job.RestoreWhere));
524 if (res->res_job.RestoreBootstrap) {
525 sendit(sock, " --> Bootstrap=%s\n", NPRT(res->res_job.RestoreBootstrap));
527 if (res->res_job.RunBeforeJob) {
528 sendit(sock, " --> RunBefore=%s\n", NPRT(res->res_job.RunBeforeJob));
530 if (res->res_job.RunAfterJob) {
531 sendit(sock, " --> RunAfter=%s\n", NPRT(res->res_job.RunAfterJob));
533 if (res->res_job.RunAfterFailedJob) {
534 sendit(sock, " --> RunAfterFailed=%s\n", NPRT(res->res_job.RunAfterFailedJob));
536 if (res->res_job.WriteBootstrap) {
537 sendit(sock, " --> WriteBootstrap=%s\n", NPRT(res->res_job.WriteBootstrap));
539 if (res->res_job.storage) {
541 foreach_alist(store, res->res_job.storage) {
542 sendit(sock, " --> ");
543 dump_resource(-R_STORAGE, (RES *)store, sendit, sock);
546 if (res->res_job.pool) {
547 sendit(sock, " --> ");
548 dump_resource(-R_POOL, (RES *)res->res_job.pool, sendit, sock);
550 if (res->res_job.full_pool) {
551 sendit(sock, " --> ");
552 dump_resource(-R_POOL, (RES *)res->res_job.full_pool, sendit, sock);
554 if (res->res_job.inc_pool) {
555 sendit(sock, " --> ");
556 dump_resource(-R_POOL, (RES *)res->res_job.inc_pool, sendit, sock);
558 if (res->res_job.dif_pool) {
559 sendit(sock, " --> ");
560 dump_resource(-R_POOL, (RES *)res->res_job.dif_pool, sendit, sock);
562 if (res->res_job.verify_job) {
563 sendit(sock, " --> ");
564 dump_resource(-type, (RES *)res->res_job.verify_job, sendit, sock);
567 if (res->res_job.messages) {
568 sendit(sock, " --> ");
569 dump_resource(-R_MSGS, (RES *)res->res_job.messages, sendit, sock);
/* --- FileSet resource dump: one-letter prefixes per item kind
 *     (O=options, R*=regex, W*=wild, B=base, X=fstype, I/E=include/
 *     exclude names, N=end-of-list marker) --- */
575 sendit(sock, "FileSet: name=%s\n", res->res_fs.hdr.name);
576 for (i=0; i<res->res_fs.num_includes; i++) {
577 INCEXE *incexe = res->res_fs.include_items[i];
578 for (j=0; j<incexe->num_opts; j++) {
579 FOPTS *fo = incexe->opts_list[j];
580 sendit(sock, " O %s\n", fo->opts);
581 for (k=0; k<fo->regex.size(); k++) {
582 sendit(sock, " R %s\n", fo->regex.get(k));
584 for (k=0; k<fo->regexdir.size(); k++) {
585 sendit(sock, " RD %s\n", fo->regexdir.get(k));
587 for (k=0; k<fo->regexfile.size(); k++) {
588 sendit(sock, " RF %s\n", fo->regexfile.get(k));
590 for (k=0; k<fo->wild.size(); k++) {
591 sendit(sock, " W %s\n", fo->wild.get(k));
593 for (k=0; k<fo->wilddir.size(); k++) {
594 sendit(sock, " WD %s\n", fo->wilddir.get(k));
596 for (k=0; k<fo->wildfile.size(); k++) {
597 sendit(sock, " WF %s\n", fo->wildfile.get(k));
599 for (k=0; k<fo->base.size(); k++) {
600 sendit(sock, " B %s\n", fo->base.get(k));
602 for (k=0; k<fo->fstype.size(); k++) {
603 sendit(sock, " X %s\n", fo->fstype.get(k));
606 sendit(sock, " D %s\n", fo->reader);
609 sendit(sock, " T %s\n", fo->writer);
611 sendit(sock, " N\n");
613 for (j=0; j<incexe->name_list.size(); j++) {
614 sendit(sock, " I %s\n", incexe->name_list.get(j));
616 if (incexe->name_list.size()) {
617 sendit(sock, " N\n");
621 for (i=0; i<res->res_fs.num_excludes; i++) {
622 INCEXE *incexe = res->res_fs.exclude_items[i];
623 for (j=0; j<incexe->name_list.size(); j++) {
624 sendit(sock, " E %s\n", incexe->name_list.get(j));
626 if (incexe->name_list.size()) {
627 sendit(sock, " N\n");
/* --- Schedule resource dump: one block per chained RUN record,
 *     rendering each hour/mday/month/wday/wom/woy bitmap as a list --- */
633 if (res->res_sch.run) {
635 RUN *run = res->res_sch.run;
636 char buf[1000], num[30];
637 sendit(sock, "Schedule: name=%s\n", res->res_sch.hdr.name);
642 sendit(sock, " --> Run Level=%s\n", level_to_str(run->level));
643 bstrncpy(buf, " hour=", sizeof(buf));
644 for (i=0; i<24; i++) {
645 if (bit_is_set(i, run->hour)) {
646 bsnprintf(num, sizeof(num), "%d ", i);
647 bstrncat(buf, num, sizeof(buf));
650 bstrncat(buf, "\n", sizeof(buf));
652 bstrncpy(buf, " mday=", sizeof(buf));
653 for (i=0; i<31; i++) {
654 if (bit_is_set(i, run->mday)) {
655 bsnprintf(num, sizeof(num), "%d ", i);
656 bstrncat(buf, num, sizeof(buf));
659 bstrncat(buf, "\n", sizeof(buf));
661 bstrncpy(buf, " month=", sizeof(buf));
662 for (i=0; i<12; i++) {
663 if (bit_is_set(i, run->month)) {
664 bsnprintf(num, sizeof(num), "%d ", i);
665 bstrncat(buf, num, sizeof(buf));
668 bstrncat(buf, "\n", sizeof(buf));
670 bstrncpy(buf, " wday=", sizeof(buf));
671 for (i=0; i<7; i++) {
672 if (bit_is_set(i, run->wday)) {
673 bsnprintf(num, sizeof(num), "%d ", i);
674 bstrncat(buf, num, sizeof(buf));
677 bstrncat(buf, "\n", sizeof(buf));
679 bstrncpy(buf, " wom=", sizeof(buf));
680 for (i=0; i<5; i++) {
681 if (bit_is_set(i, run->wom)) {
682 bsnprintf(num, sizeof(num), "%d ", i);
683 bstrncat(buf, num, sizeof(buf));
686 bstrncat(buf, "\n", sizeof(buf));
688 bstrncpy(buf, " woy=", sizeof(buf));
689 for (i=0; i<54; i++) {
690 if (bit_is_set(i, run->woy)) {
691 bsnprintf(num, sizeof(num), "%d ", i);
692 bstrncat(buf, num, sizeof(buf));
695 bstrncat(buf, "\n", sizeof(buf));
697 sendit(sock, " mins=%d\n", run->minute);
699 sendit(sock, " --> ");
700 dump_resource(-R_POOL, (RES *)run->pool, sendit, sock);
703 sendit(sock, " --> ");
704 dump_resource(-R_STORAGE, (RES *)run->storage, sendit, sock);
707 sendit(sock, " --> ");
708 dump_resource(-R_MSGS, (RES *)run->msgs, sendit, sock);
710 /* If another Run record is chained in, go print it */
716 sendit(sock, "Schedule: name=%s\n", res->res_sch.hdr.name);
/* --- Pool resource dump --- */
720 sendit(sock, "Pool: name=%s PoolType=%s\n", res->res_pool.hdr.name,
721 res->res_pool.pool_type);
722 sendit(sock, " use_cat=%d use_once=%d acpt_any=%d cat_files=%d\n",
723 res->res_pool.use_catalog, res->res_pool.use_volume_once,
724 res->res_pool.accept_any_volume, res->res_pool.catalog_files);
725 sendit(sock, " max_vols=%d auto_prune=%d VolRetention=%s\n",
726 res->res_pool.max_volumes, res->res_pool.AutoPrune,
727 edit_utime(res->res_pool.VolRetention, ed1, sizeof(ed1)));
728 sendit(sock, " VolUse=%s recycle=%d LabelFormat=%s\n",
729 edit_utime(res->res_pool.VolUseDuration, ed1, sizeof(ed1)),
730 res->res_pool.Recycle,
731 NPRT(res->res_pool.label_format));
732 sendit(sock, " CleaningPrefix=%s LabelType=%d\n",
733 NPRT(res->res_pool.cleaning_prefix), res->res_pool.LabelType);
734 sendit(sock, " RecyleOldest=%d PurgeOldest=%d MaxVolJobs=%d MaxVolFiles=%d\n",
735 res->res_pool.recycle_oldest_volume,
736 res->res_pool.purge_oldest_volume,
737 res->res_pool.MaxVolJobs, res->res_pool.MaxVolFiles);
/* --- Messages resource dump --- */
740 sendit(sock, "Messages: name=%s\n", res->res_msgs.hdr.name);
741 if (res->res_msgs.mail_cmd)
742 sendit(sock, " mailcmd=%s\n", res->res_msgs.mail_cmd);
743 if (res->res_msgs.operator_cmd)
744 sendit(sock, " opcmd=%s\n", res->res_msgs.operator_cmd);
/* default case: unrecognized resource type */
747 sendit(sock, "Unknown resource type %d in dump_resource.\n", type);
/* Tail-recurse down the chain when recursion was requested. */
750 if (recurse && res->res_dir.hdr.next) {
751 dump_resource(type, res->res_dir.hdr.next, sendit, sock);
/*
 * Release everything owned by one INCEXE (FileSet include/exclude)
 * structure: the file-name list plus each FOPTS entry's pattern lists.
 * NOTE(review): this excerpt omits the tail of the function (original
 * lines ~771-783), which presumably frees fopt->reader/writer, each
 * fopt itself, the opts_list closing, and incexe -- confirm upstream
 * before editing.
 */
756 * Free all the members of an INCEXE structure
758 static void free_incexe(INCEXE *incexe)
760 incexe->name_list.destroy();
761 for (int i=0; i<incexe->num_opts; i++) {
762 FOPTS *fopt = incexe->opts_list[i];
763 fopt->regex.destroy();
764 fopt->regexdir.destroy();
765 fopt->regexfile.destroy();
766 fopt->wild.destroy();
767 fopt->wilddir.destroy();
768 fopt->wildfile.destroy();
769 fopt->base.destroy();
770 fopt->fstype.destroy();
779 if (incexe->opts_list) {
780 free(incexe->opts_list);
/*
 * Free one resource record of the given type, then recurse down the
 * chain via hdr.next.  Only owned strings/lists are freed here;
 * cross-resource pointers are left for their own chains (see the
 * original comment below).
 * NOTE(review): this excerpt is a lossy numbered listing -- the
 * switch(type), its case labels, the NULL check, the loop freeing the
 * Schedule RUN chain, and the final free(res) are missing.  The visible
 * lines are grouped by resource type; do not edit without the complete
 * upstream body.
 */
786 * Free memory of resource -- called when daemon terminates.
787 * NB, we don't need to worry about freeing any references
788 * to other resources as they will be freed when that
789 * resource chain is traversed. Mainly we worry about freeing
790 * allocated strings (names).
792 void free_resource(RES *sres, int type)
795 RES *nres; /* next resource if linked */
796 URES *res = (URES *)sres;
801 /* common stuff -- free the resource name and description */
802 nres = (RES *)res->res_dir.hdr.next;
803 if (res->res_dir.hdr.name) {
804 free(res->res_dir.hdr.name);
806 if (res->res_dir.hdr.desc) {
807 free(res->res_dir.hdr.desc);
/* --- Director resource --- */
812 if (res->res_dir.working_directory) {
813 free(res->res_dir.working_directory);
815 if (res->res_dir.scripts_directory) {
816 free((char *)res->res_dir.scripts_directory);
818 if (res->res_dir.pid_directory) {
819 free(res->res_dir.pid_directory);
821 if (res->res_dir.subsys_directory) {
822 free(res->res_dir.subsys_directory);
824 if (res->res_dir.password) {
825 free(res->res_dir.password);
827 if (res->res_dir.query_file) {
828 free(res->res_dir.query_file);
830 if (res->res_dir.DIRaddrs) {
831 free_addresses(res->res_dir.DIRaddrs);
/* --- Console resource: password plus per-class ACL lists --- */
838 if (res->res_con.password) {
839 free(res->res_con.password);
841 for (int i=0; i<Num_ACL; i++) {
842 if (res->res_con.ACL_lists[i]) {
843 delete res->res_con.ACL_lists[i];
844 res->res_con.ACL_lists[i] = NULL;
/* --- Client resource --- */
849 if (res->res_client.address) {
850 free(res->res_client.address);
852 if (res->res_client.password) {
853 free(res->res_client.password);
/* --- Storage resource --- */
857 if (res->res_store.address) {
858 free(res->res_store.address);
860 if (res->res_store.password) {
861 free(res->res_store.password);
863 if (res->res_store.media_type) {
864 free(res->res_store.media_type);
866 if (res->res_store.device) {
867 delete res->res_store.device;
/* --- Catalog resource --- */
871 if (res->res_cat.db_address) {
872 free(res->res_cat.db_address);
874 if (res->res_cat.db_socket) {
875 free(res->res_cat.db_socket);
877 if (res->res_cat.db_user) {
878 free(res->res_cat.db_user);
880 if (res->res_cat.db_name) {
881 free(res->res_cat.db_name);
883 if (res->res_cat.db_password) {
884 free(res->res_cat.db_password);
/* --- FileSet resource: free every include/exclude INCEXE --- */
888 if ((num=res->res_fs.num_includes)) {
890 free_incexe(res->res_fs.include_items[num]);
892 free(res->res_fs.include_items);
894 res->res_fs.num_includes = 0;
895 if ((num=res->res_fs.num_excludes)) {
897 free_incexe(res->res_fs.exclude_items[num]);
899 free(res->res_fs.exclude_items);
901 res->res_fs.num_excludes = 0;
/* --- Pool resource --- */
904 if (res->res_pool.pool_type) {
905 free(res->res_pool.pool_type);
907 if (res->res_pool.label_format) {
908 free(res->res_pool.label_format);
910 if (res->res_pool.cleaning_prefix) {
911 free(res->res_pool.cleaning_prefix);
/* --- Schedule resource: RUN-chain walk (loop body missing here) --- */
915 if (res->res_sch.run) {
917 nrun = res->res_sch.run;
/* --- Job / JobDefs resource --- */
927 if (res->res_job.RestoreWhere) {
928 free(res->res_job.RestoreWhere);
930 if (res->res_job.RestoreBootstrap) {
931 free(res->res_job.RestoreBootstrap);
933 if (res->res_job.WriteBootstrap) {
934 free(res->res_job.WriteBootstrap);
936 if (res->res_job.RunBeforeJob) {
937 free(res->res_job.RunBeforeJob);
939 if (res->res_job.RunAfterJob) {
940 free(res->res_job.RunAfterJob);
942 if (res->res_job.RunAfterFailedJob) {
943 free(res->res_job.RunAfterFailedJob);
945 if (res->res_job.ClientRunBeforeJob) {
946 free(res->res_job.ClientRunBeforeJob);
948 if (res->res_job.ClientRunAfterJob) {
949 free(res->res_job.ClientRunAfterJob);
951 if (res->res_job.storage) {
952 delete res->res_job.storage;
/* --- Messages resource --- */
956 if (res->res_msgs.mail_cmd) {
957 free(res->res_msgs.mail_cmd);
959 if (res->res_msgs.operator_cmd) {
960 free(res->res_msgs.operator_cmd);
962 free_msgs_res((MSGS *)res); /* free message resource */
/* default case: unrecognized resource type */
966 printf("Unknown resource type %d in free_resource.\n", type);
968 /* Common stuff again -- free the resource, recurse to next one */
973 free_resource(nres, type);
978  * Save the new resource by chaining it into the head list for
979  * the resource. If this is pass 2, we update any resource
980  * pointers because they may not have been defined until now.
/*
 * save_resource -- called by the generic config parser once a complete
 * resource definition has been scanned.  On pass 1 a copy of the static
 * res_all record is malloc'ed and chained into res_head[]; on pass 2
 * pointers to other (now defined) resources are copied from the static
 * record into the record allocated during pass 1.
 *
 *   type  -- resource type code (R_DIRECTOR, R_JOB, ...)
 *   items -- RES_ITEM directive table for this resource type
 *   pass  -- parser pass (1 = create records, 2 = resolve references)
 *
 * NOTE(review): this chunk is an elided extract -- intervening lines
 * (braces, case labels, break statements) are missing from the visible
 * text, so comments below annotate only what is visible.
 */
983 void save_resource(int type, RES_ITEM *items, int pass)
986 int rindex = type - r_first;
990 /* Check Job requirements after applying JobDefs */
991 if (type != R_JOB && type != R_JOBDEFS) {
993 * Ensure that all required items are present
995 for (i=0; items[i].name; i++) {
996 if (items[i].flags & ITEM_REQUIRED) {
997 if (!bit_is_set(i, res_all.res_dir.hdr.item_present)) {
998 Emsg2(M_ERROR_TERM, 0, "%s item is required in %s resource, but not found.\n",
999 items[i].name, resources[rindex]);
1002 /* If this triggers, take a look at lib/parse_conf.h */
1003 if (i >= MAX_RES_ITEMS) {
1004 Emsg1(M_ERROR_TERM, 0, "Too many items in %s resource\n", resources[rindex]);
/*
 * Pass 2: resolve cross-resource references.
 */
1010 * During pass 2 in each "store" routine, we looked up pointers
1011 * to all the resources referenced in the current resource, now we
1012 * must copy their addresses from the static record to the allocated
1017 /* Resources not containing a resource */
1026 /* Resources containing another resource or alist */
/* Director: hook up its Messages resource pointer */
1028 if ((res = (URES *)GetResWithName(R_DIRECTOR, res_all.res_dir.hdr.name)) == NULL) {
1029 Emsg1(M_ERROR_TERM, 0, "Cannot find Director resource %s\n", res_all.res_dir.hdr.name);
1031 res->res_dir.messages = res_all.res_dir.messages;
/* Storage: the hdr is the first member of the URES union, so
 * res_dir.hdr.name and res_store.hdr.name alias the same storage --
 * presumably why the mixed member access below still works; verify. */
1034 if ((res = (URES *)GetResWithName(type, res_all.res_store.hdr.name)) == NULL) {
1035 Emsg1(M_ERROR_TERM, 0, "Cannot find Storage resource %s\n",
1036 res_all.res_dir.hdr.name);
1038 /* we must explicitly copy the device alist pointer */
1039 res->res_store.device = res_all.res_store.device;
/* Job: copy every resolved resource pointer into the saved record */
1043 if ((res = (URES *)GetResWithName(type, res_all.res_dir.hdr.name)) == NULL) {
1044 Emsg1(M_ERROR_TERM, 0, "Cannot find Job resource %s\n",
1045 res_all.res_dir.hdr.name);
1047 res->res_job.messages = res_all.res_job.messages;
1048 res->res_job.schedule = res_all.res_job.schedule;
1049 res->res_job.client = res_all.res_job.client;
1050 res->res_job.fileset = res_all.res_job.fileset;
1051 res->res_job.storage = res_all.res_job.storage;
1052 res->res_job.pool = res_all.res_job.pool;
1053 res->res_job.full_pool = res_all.res_job.full_pool;
1054 res->res_job.inc_pool = res_all.res_job.inc_pool;
1055 res->res_job.dif_pool = res_all.res_job.dif_pool;
1056 res->res_job.verify_job = res_all.res_job.verify_job;
1057 res->res_job.jobdefs = res_all.res_job.jobdefs;
/* Counter: hook up Catalog and WrapCounter resource pointers */
1060 if ((res = (URES *)GetResWithName(R_COUNTER, res_all.res_counter.hdr.name)) == NULL) {
1061 Emsg1(M_ERROR_TERM, 0, "Cannot find Counter resource %s\n", res_all.res_counter.hdr.name);
1063 res->res_counter.Catalog = res_all.res_counter.Catalog;
1064 res->res_counter.WrapCounter = res_all.res_counter.WrapCounter;
/* Client: hook up its Catalog resource pointer */
1068 if ((res = (URES *)GetResWithName(R_CLIENT, res_all.res_client.hdr.name)) == NULL) {
1069 Emsg1(M_ERROR_TERM, 0, "Cannot find Client resource %s\n", res_all.res_client.hdr.name);
1071 res->res_client.catalog = res_all.res_client.catalog;
1075 * Schedule is a bit different in that it contains a RUN record
1076 * chain which isn't a "named" resource. This chain was linked
1077 * in by run_conf.c during pass 2, so here we jam the pointer
1078 * into the Schedule resource.
1080 if ((res = (URES *)GetResWithName(R_SCHEDULE, res_all.res_client.hdr.name)) == NULL) {
1081 Emsg1(M_ERROR_TERM, 0, "Cannot find Schedule resource %s\n", res_all.res_client.hdr.name);
1083 res->res_sch.run = res_all.res_sch.run;
1086 Emsg1(M_ERROR, 0, "Unknown resource type %d in save_resource.\n", type);
1090 /* Note, the resource name was already saved during pass 1,
1091 * so here, we can just release it.
1093 if (res_all.res_dir.hdr.name) {
1094 free(res_all.res_dir.hdr.name);
1095 res_all.res_dir.hdr.name = NULL;
1097 if (res_all.res_dir.hdr.desc) {
1098 free(res_all.res_dir.hdr.desc);
1099 res_all.res_dir.hdr.desc = NULL;
1105 * The following code is only executed during pass 1
/* Pass 1: choose the allocation size by resource type */
1109 size = sizeof(DIRRES);
1112 size = sizeof(CONRES);
1115 size =sizeof(CLIENT);
1118 size = sizeof(STORE);
1128 size = sizeof(FILESET);
1131 size = sizeof(SCHED);
1134 size = sizeof(POOL);
1137 size = sizeof(MSGS);
1140 size = sizeof(COUNTER);
/* NOTE(review): "save_resrouce" is a typo in the message string below;
 * fixing it requires a code change (string literal), not a comment. */
1146 printf("Unknown resource type %d in save_resrouce.\n", type);
/* Copy the accumulated static record into a fresh allocation and
 * chain it into the list for this resource type */
1152 res = (URES *)malloc(size);
1153 memcpy(res, &res_all, size);
1154 if (!res_head[rindex]) {
1155 res_head[rindex] = (RES *)res; /* store first entry */
1156 Dmsg3(900, "Inserting first %s res: %s index=%d\n", res_to_str(type),
1157 res->res_dir.hdr.name, rindex);
1160 /* Add new res to end of chain */
/* NOTE(review): the loop condition stops at the last chain element
 * without comparing its name, so the final resource appears to escape
 * the duplicate-name check -- verify against the full source. */
1161 for (next=res_head[rindex]; next->next; next=next->next) {
1162 if (strcmp(next->name, res->res_dir.hdr.name) == 0) {
1163 Emsg2(M_ERROR_TERM, 0,
1164 _("Attempt to define second %s resource named \"%s\" is not permitted.\n"),
1165 resources[rindex].name, res->res_dir.hdr.name);
1168 next->next = (RES *)res;
1169 Dmsg4(900, "Inserting %s res: %s index=%d pass=%d\n", res_to_str(type),
1170 res->res_dir.hdr.name, rindex, pass);
1176 * Store Device. Note, the resource is created upon the
1177 * first reference. The details of the resource are obtained
1178 * later from the SD.
/*
 * store_device -- store a Device directive inside a Storage resource.
 * The DEVICE resource is created here upon the first reference; its
 * details are obtained later from the SD (see comment above in the
 * full source).  Finally the directive is stored as an alist entry.
 *
 * NOTE(review): elided extract -- some intervening lines (braces,
 * else branches, loop breaks) are missing from the visible text.
 */
1180 static void store_device(LEX *lc, RES_ITEM *item, int index, int pass)
1184 int rindex = R_DEVICE - r_first;
1185 int size = sizeof(DEVICE);
/* Read the device name token from the config stream */
1189 token = lex_get_token(lc, T_NAME);
/* No DEVICE resources yet: create the first one and start the chain */
1190 if (!res_head[rindex]) {
1191 res = (URES *)malloc(size);
1192 memset(res, 0, size);
1193 res->res_dev.hdr.name = bstrdup(lc->str);
1194 res_head[rindex] = (RES *)res; /* store first entry */
1195 Dmsg3(900, "Inserting first %s res: %s index=%d\n", res_to_str(R_DEVICE),
1196 res->res_dir.hdr.name, rindex);
1199 /* See if it is already defined */
1200 for (next=res_head[rindex]; next->next; next=next->next) {
1201 if (strcmp(next->name, lc->str) == 0) {
/* Not previously defined: create a zeroed DEVICE record and append */
1207 res = (URES *)malloc(size);
1208 memset(res, 0, size);
1209 res->res_dev.hdr.name = bstrdup(lc->str);
1210 next->next = (RES *)res;
1211 Dmsg4(900, "Inserting %s res: %s index=%d pass=%d\n", res_to_str(R_DEVICE),
1212 res->res_dir.hdr.name, rindex, pass);
/* Mark the directive present, then store the resource alist entry */
1217 set_bit(index, res_all.hdr.item_present);
1219 store_alist_res(lc, item, index, pass);
1225 * Store JobType (backup, verify, restore)
/*
 * store_jobtype -- parse and store the JobType directive
 * (backup, verify, restore).  The keyword is matched
 * case-insensitively against the jobtypes[] table and the numeric
 * job_type code is written through item->value.
 */
1228 void store_jobtype(LEX *lc, RES_ITEM *item, int index, int pass)
1232 token = lex_get_token(lc, T_NAME);
1233 /* Store the type both pass 1 and pass 2 */
1234 for (i=0; jobtypes[i].type_name; i++) {
1235 if (strcasecmp(lc->str, jobtypes[i].type_name) == 0) {
1236 *(int *)(item->value) = jobtypes[i].job_type;
/* No table entry matched the keyword: fatal parse error */
1242 scan_err1(lc, "Expected a Job Type keyword, got: %s", lc->str);
/* Record the directive as present for the ITEM_REQUIRED check */
1245 set_bit(index, res_all.hdr.item_present);
1249 * Store Job Level (Full, Incremental, ...)
/*
 * store_level -- parse and store the Job Level directive
 * (Full, Incremental, ...).  The keyword is matched case-insensitively
 * against the joblevels[] table and the numeric level code is written
 * through item->value.
 */
1252 void store_level(LEX *lc, RES_ITEM *item, int index, int pass)
1256 token = lex_get_token(lc, T_NAME);
1257 /* Store the level pass 2 so that type is defined */
1258 for (i=0; joblevels[i].level_name; i++) {
1259 if (strcasecmp(lc->str, joblevels[i].level_name) == 0) {
1260 *(int *)(item->value) = joblevels[i].level;
/* No table entry matched the keyword: fatal parse error */
1266 scan_err1(lc, "Expected a Job Level keyword, got: %s", lc->str);
/* Record the directive as present for the ITEM_REQUIRED check */
1269 set_bit(index, res_all.hdr.item_present);
/*
 * store_replace -- parse and store the Replace directive for restore
 * jobs.  The keyword is matched case-insensitively against the
 * ReplaceOptions[] table and the option's token value is written
 * through item->value.
 */
1273 void store_replace(LEX *lc, RES_ITEM *item, int index, int pass)
1276 token = lex_get_token(lc, T_NAME);
1277 /* Scan Replacement options */
1278 for (i=0; ReplaceOptions[i].name; i++) {
1279 if (strcasecmp(lc->str, ReplaceOptions[i].name) == 0) {
1280 *(int *)(item->value) = ReplaceOptions[i].token;
/* No table entry matched the keyword: fatal parse error */
1286 scan_err1(lc, "Expected a Restore replacement option, got: %s", lc->str);
/* Record the directive as present for the ITEM_REQUIRED check */
1289 set_bit(index, res_all.hdr.item_present);
1293 * Store ACL (access control list)
/*
 * store_acl -- store an Access Control List directive.  Accepts a
 * comma-separated list of names; each name is duplicated and appended
 * to the alist selected by item->code (the alist is created lazily on
 * first use and owns its strings, per owned_by_alist).
 *
 * NOTE(review): elided extract -- the enclosing loop construct and
 * terminating lines are missing from the visible text.
 */
1296 void store_acl(LEX *lc, RES_ITEM *item, int index, int pass)
1301 token = lex_get_token(lc, T_NAME);
/* Lazily create the alist for this ACL code on first use */
1303 if (((alist **)item->value)[item->code] == NULL) {
1304 ((alist **)item->value)[item->code] = New(alist(10, owned_by_alist));
1305 Dmsg1(900, "Defined new ACL alist at %d\n", item->code);
/* Append a private copy of the scanned name to the list */
1307 ((alist **)item->value)[item->code]->append(bstrdup(lc->str));
1308 Dmsg2(900, "Appended to %d %s\n", item->code, lc->str);
/* A comma means another ACL entry follows on the same directive */
1310 token = lex_get_token(lc, T_ALL);
1311 if (token == T_COMMA) {
1312 continue; /* get another ACL */
/* Record the directive as present for the ITEM_REQUIRED check */
1316 set_bit(index, res_all.hdr.item_present);