2 * Main configuration file parser for Bacula Directors,
3 * some parts may be split into separate files such as
4 * the schedule configuration (run_config.c).
6 * Note, the configuration file parser consists of three parts
8 * 1. The generic lexical scanner in lib/lex.c and lib/lex.h
10 * 2. The generic config scanner in lib/parse_config.c and
12 * These files contain the parser code, some utility
13 * routines, and the common store routines (name, int,
16 * 3. The daemon specific file, which contains the Resource
17 * definitions as well as any specific store routines
18 * for the resource records.
20 * Kern Sibbald, January MM
25 Copyright (C) 2000-2005 Kern Sibbald
27 This program is free software; you can redistribute it and/or
28 modify it under the terms of the GNU General Public License as
29 published by the Free Software Foundation; either version 2 of
30 the License, or (at your option) any later version.
32 This program is distributed in the hope that it will be useful,
33 but WITHOUT ANY WARRANTY; without even the implied warranty of
34 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
35 General Public License for more details.
37 You should have received a copy of the GNU General Public
38 License along with this program; if not, write to the Free
39 Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
47 /* Define the first and last resource ID record
48 * types. Note, these should be unique for each
49 * daemon though not a requirement.
/* First resource type id used by this daemon; exported so the generic
 * parser in lib/parse_conf.c can index the resource chains. */
int r_first = R_FIRST;
/* One chain head per resource type; private storage, published via res_head. */
static RES *sres_head[R_LAST - R_FIRST + 1];
RES **res_head = sres_head;      /* exported view of the chain-head table */
56 /* Imported subroutines */
/* Imported subroutines -- store handlers implemented in other files
 * (schedule Run records in run_conf.c, FileSet Include/Exclude handling).
 * All store routines share the signature (lexer, item table entry,
 * item index, parser pass number). */
extern void store_run(LEX *lc, RES_ITEM *item, int index, int pass);
extern void store_finc(LEX *lc, RES_ITEM *item, int index, int pass);
extern void store_inc(LEX *lc, RES_ITEM *item, int index, int pass);
62 /* Forward referenced subroutines */
/* Forward referenced subroutines -- daemon-specific store routines
 * defined later in this file and referenced by the item tables below. */
void store_jobtype(LEX *lc, RES_ITEM *item, int index, int pass);
void store_level(LEX *lc, RES_ITEM *item, int index, int pass);
void store_replace(LEX *lc, RES_ITEM *item, int index, int pass);
void store_acl(LEX *lc, RES_ITEM *item, int index, int pass);
70 /* We build the current resource here as we are
71 * scanning the resource configuration definition,
72 * then move it to allocated memory when the resource
76 int res_all_size = sizeof(res_all);
79 /* Definition of records permitted within each
80 * resource with the routine to process the record
81 * information. NOTE! quoted names must be in lower case.
86 * name handler value code flags default_value
/* Director resource directives.
 * Columns: keyword (must be lower case), store handler, field address
 * in res_all (via ITEM), code, flags, default value. */
static RES_ITEM dir_items[] = {
   {"name", store_name, ITEM(res_dir.hdr.name), 0, ITEM_REQUIRED, 0},
   {"description", store_str, ITEM(res_dir.hdr.desc), 0, 0, 0},
   {"messages", store_res, ITEM(res_dir.messages), R_MSGS, 0, 0},
   {"dirport", store_addresses_port, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101},
   {"diraddress", store_addresses_address, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101},
   {"diraddresses",store_addresses, ITEM(res_dir.DIRaddrs), 0, ITEM_DEFAULT, 9101},
   {"queryfile", store_dir, ITEM(res_dir.query_file), 0, ITEM_REQUIRED, 0},
   {"workingdirectory", store_dir, ITEM(res_dir.working_directory), 0, ITEM_REQUIRED, 0},
   {"scriptsdirectory", store_dir, ITEM(res_dir.scripts_directory), 0, 0, 0},
   {"piddirectory",store_dir, ITEM(res_dir.pid_directory), 0, ITEM_REQUIRED, 0},
   {"subsysdirectory", store_dir, ITEM(res_dir.subsys_directory), 0, 0, 0},
   {"requiressl", store_yesno, ITEM(res_dir.require_ssl), 1, ITEM_DEFAULT, 0},
   {"enablessl", store_yesno, ITEM(res_dir.enable_ssl), 1, ITEM_DEFAULT, 0},
   {"maximumconcurrentjobs", store_pint, ITEM(res_dir.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
   {"password", store_password, ITEM(res_dir.password), 0, ITEM_REQUIRED, 0},
   /* Connect timeouts default to 30 minutes (expressed in seconds) */
   {"fdconnecttimeout", store_time,ITEM(res_dir.FDConnectTimeout), 0, ITEM_DEFAULT, 60 * 30},
   {"sdconnecttimeout", store_time,ITEM(res_dir.SDConnectTimeout), 0, ITEM_DEFAULT, 60 * 30},
   {NULL, NULL, NULL, 0, 0, 0}   /* end-of-table sentinel */
112 * name handler value code flags default_value
/* Console resource directives.
 * The *acl entries all share one handler and one field (ACL_lists);
 * the code column selects which ACL list the names are appended to. */
static RES_ITEM con_items[] = {
   {"name", store_name, ITEM(res_con.hdr.name), 0, ITEM_REQUIRED, 0},
   {"description", store_str, ITEM(res_con.hdr.desc), 0, 0, 0},
   {"enablessl", store_yesno, ITEM(res_con.enable_ssl), 1, ITEM_DEFAULT, 0},
   {"password", store_password, ITEM(res_con.password), 0, ITEM_REQUIRED, 0},
   {"jobacl", store_acl, ITEM(res_con.ACL_lists), Job_ACL, 0, 0},
   {"clientacl", store_acl, ITEM(res_con.ACL_lists), Client_ACL, 0, 0},
   {"storageacl", store_acl, ITEM(res_con.ACL_lists), Storage_ACL, 0, 0},
   {"scheduleacl", store_acl, ITEM(res_con.ACL_lists), Schedule_ACL, 0, 0},
   {"runacl", store_acl, ITEM(res_con.ACL_lists), Run_ACL, 0, 0},
   {"poolacl", store_acl, ITEM(res_con.ACL_lists), Pool_ACL, 0, 0},
   {"commandacl", store_acl, ITEM(res_con.ACL_lists), Command_ACL, 0, 0},
   {"filesetacl", store_acl, ITEM(res_con.ACL_lists), FileSet_ACL, 0, 0},
   {"catalogacl", store_acl, ITEM(res_con.ACL_lists), Catalog_ACL, 0, 0},
   {NULL, NULL, NULL, 0, 0, 0}   /* end-of-table sentinel */
133 * Client or File daemon resource
135 * name handler value code flags default_value
/* Client (File daemon) resource directives.
 * "fdaddress"/"fdpassword" are aliases storing into the same fields as
 * "address"/"password" but without the REQUIRED flag. */
static RES_ITEM cli_items[] = {
   {"name", store_name, ITEM(res_client.hdr.name), 0, ITEM_REQUIRED, 0},
   {"description", store_str, ITEM(res_client.hdr.desc), 0, 0, 0},
   {"address", store_str, ITEM(res_client.address), 0, ITEM_REQUIRED, 0},
   {"fdaddress", store_str, ITEM(res_client.address), 0, 0, 0},
   {"fdport", store_pint, ITEM(res_client.FDport), 0, ITEM_DEFAULT, 9102},
   {"password", store_password, ITEM(res_client.password), 0, ITEM_REQUIRED, 0},
   {"fdpassword", store_password, ITEM(res_client.password), 0, 0, 0},
   {"catalog", store_res, ITEM(res_client.catalog), R_CATALOG, ITEM_REQUIRED, 0},
   /* Retention defaults in seconds: 60 days for files, 180 days for jobs */
   {"fileretention", store_time, ITEM(res_client.FileRetention), 0, ITEM_DEFAULT, 60*60*24*60},
   {"jobretention", store_time, ITEM(res_client.JobRetention), 0, ITEM_DEFAULT, 60*60*24*180},
   {"autoprune", store_yesno, ITEM(res_client.AutoPrune), 1, ITEM_DEFAULT, 1},
   {"enablessl", store_yesno, ITEM(res_client.enable_ssl), 1, ITEM_DEFAULT, 0},
   {"maximumconcurrentjobs", store_pint, ITEM(res_client.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
   {NULL, NULL, NULL, 0, 0, 0}   /* end-of-table sentinel */
155 /* Storage daemon resource
157 * name handler value code flags default_value
/* Storage daemon resource directives.
 * "sdaddress"/"sdpassword"/"sddevicename" are optional aliases for the
 * corresponding required directives. */
static RES_ITEM store_items[] = {
   {"name", store_name, ITEM(res_store.hdr.name), 0, ITEM_REQUIRED, 0},
   {"description", store_str, ITEM(res_store.hdr.desc), 0, 0, 0},
   {"sdport", store_pint, ITEM(res_store.SDport), 0, ITEM_DEFAULT, 9103},
   {"address", store_str, ITEM(res_store.address), 0, ITEM_REQUIRED, 0},
   {"sdaddress", store_str, ITEM(res_store.address), 0, 0, 0},
   {"password", store_password, ITEM(res_store.password), 0, ITEM_REQUIRED, 0},
   {"sdpassword", store_password, ITEM(res_store.password), 0, 0, 0},
   {"device", store_strname, ITEM(res_store.dev_name), 0, ITEM_REQUIRED, 0},
   {"sddevicename", store_strname, ITEM(res_store.dev_name), 0, 0, 0},
   {"mediatype", store_strname, ITEM(res_store.media_type), 0, ITEM_REQUIRED, 0},
   {"autochanger", store_yesno, ITEM(res_store.autochanger), 1, ITEM_DEFAULT, 0},
   {"enablessl", store_yesno, ITEM(res_store.enable_ssl), 1, ITEM_DEFAULT, 0},
   {"maximumconcurrentjobs", store_pint, ITEM(res_store.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
   {"sddport", store_pint, ITEM(res_store.SDDport), 0, 0, 0}, /* deprecated */
   {NULL, NULL, NULL, 0, 0, 0}   /* end-of-table sentinel */
178 * Catalog Resource Directives
180 * name handler value code flags default_value
/* Catalog resource directives -- database connection parameters. */
static RES_ITEM cat_items[] = {
   {"name", store_name, ITEM(res_cat.hdr.name), 0, ITEM_REQUIRED, 0},
   {"description", store_str, ITEM(res_cat.hdr.desc), 0, 0, 0},
   {"address", store_str, ITEM(res_cat.db_address), 0, 0, 0},
   {"dbaddress", store_str, ITEM(res_cat.db_address), 0, 0, 0},
   {"dbport", store_pint, ITEM(res_cat.db_port), 0, 0, 0},
   /* keep this password as store_str for the moment */
   {"password", store_str, ITEM(res_cat.db_password), 0, 0, 0},
   {"dbpassword", store_str, ITEM(res_cat.db_password), 0, 0, 0},
   {"user", store_str, ITEM(res_cat.db_user), 0, 0, 0},
   {"dbname", store_str, ITEM(res_cat.db_name), 0, ITEM_REQUIRED, 0},
   {"dbsocket", store_str, ITEM(res_cat.db_socket), 0, 0, 0},
   {"multipleconnections", store_yesno, ITEM(res_cat.mult_db_connections), 1, 0, 0},
   {NULL, NULL, NULL, 0, 0, 0}   /* end-of-table sentinel */
199 * Job Resource Directives
201 * name handler value code flags default_value
/* Job resource directives.
 * Non-static: this table is shared with the JobDefs resource entry in
 * the resources[] master table below. */
RES_ITEM job_items[] = {
   {"name", store_name, ITEM(res_job.hdr.name), 0, ITEM_REQUIRED, 0},
   {"description", store_str, ITEM(res_job.hdr.desc), 0, 0, 0},
   {"type", store_jobtype, ITEM(res_job.JobType), 0, ITEM_REQUIRED, 0},
   {"level", store_level, ITEM(res_job.JobLevel), 0, 0, 0},
   {"messages", store_res, ITEM(res_job.messages), R_MSGS, ITEM_REQUIRED, 0},
   /* default_value here is the list capacity, not a literal default */
   {"storage", store_alist_res, ITEM(res_job.storage), R_STORAGE, ITEM_REQUIRED, MAX_STORE},
   {"pool", store_res, ITEM(res_job.pool), R_POOL, ITEM_REQUIRED, 0},
   {"fullbackuppool", store_res, ITEM(res_job.full_pool), R_POOL, 0, 0},
   {"incrementalbackuppool", store_res, ITEM(res_job.inc_pool), R_POOL, 0, 0},
   {"differentialbackuppool", store_res, ITEM(res_job.dif_pool), R_POOL, 0, 0},
   {"client", store_res, ITEM(res_job.client), R_CLIENT, ITEM_REQUIRED, 0},
   {"fileset", store_res, ITEM(res_job.fileset), R_FILESET, ITEM_REQUIRED, 0},
   {"schedule", store_res, ITEM(res_job.schedule), R_SCHEDULE, 0, 0},
   {"verifyjob", store_res, ITEM(res_job.verify_job), R_JOB, 0, 0},
   {"jobdefs", store_res, ITEM(res_job.jobdefs), R_JOBDEFS, 0, 0},
   {"where", store_dir, ITEM(res_job.RestoreWhere), 0, 0, 0},
   {"bootstrap",store_dir, ITEM(res_job.RestoreBootstrap), 0, 0, 0},
   {"writebootstrap",store_dir, ITEM(res_job.WriteBootstrap), 0, 0, 0},
   {"replace", store_replace, ITEM(res_job.replace), 0, ITEM_DEFAULT, REPLACE_ALWAYS},
   {"maxruntime", store_time, ITEM(res_job.MaxRunTime), 0, 0, 0},
   {"maxwaittime", store_time, ITEM(res_job.MaxWaitTime), 0, 0, 0},
   {"maxstartdelay",store_time, ITEM(res_job.MaxStartDelay), 0, 0, 0},
   {"jobretention", store_time, ITEM(res_job.JobRetention), 0, 0, 0},
   {"prefixlinks", store_yesno, ITEM(res_job.PrefixLinks), 1, ITEM_DEFAULT, 0},
   {"prunejobs", store_yesno, ITEM(res_job.PruneJobs), 1, ITEM_DEFAULT, 0},
   {"prunefiles", store_yesno, ITEM(res_job.PruneFiles), 1, ITEM_DEFAULT, 0},
   {"prunevolumes",store_yesno, ITEM(res_job.PruneVolumes), 1, ITEM_DEFAULT, 0},
   {"spoolattributes",store_yesno, ITEM(res_job.SpoolAttributes), 1, ITEM_DEFAULT, 0},
   {"spooldata", store_yesno, ITEM(res_job.spool_data), 1, ITEM_DEFAULT, 0},
   {"rerunfailedlevels", store_yesno, ITEM(res_job.rerun_failed_levels), 1, ITEM_DEFAULT, 0},
   {"runbeforejob", store_str, ITEM(res_job.RunBeforeJob), 0, 0, 0},
   {"runafterjob", store_str, ITEM(res_job.RunAfterJob), 0, 0, 0},
   {"runafterfailedjob", store_str, ITEM(res_job.RunAfterFailedJob), 0, 0, 0},
   {"clientrunbeforejob", store_str, ITEM(res_job.ClientRunBeforeJob), 0, 0, 0},
   {"clientrunafterjob", store_str, ITEM(res_job.ClientRunAfterJob), 0, 0, 0},
   {"maximumconcurrentjobs", store_pint, ITEM(res_job.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1},
   {"rescheduleonerror", store_yesno, ITEM(res_job.RescheduleOnError), 1, ITEM_DEFAULT, 0},
   {"rescheduleinterval", store_time, ITEM(res_job.RescheduleInterval), 0, ITEM_DEFAULT, 60 * 30},
   {"rescheduletimes", store_pint, ITEM(res_job.RescheduleTimes), 0, 0, 0},
   {"priority", store_pint, ITEM(res_job.Priority), 0, ITEM_DEFAULT, 10},
   {"writepartafterjob", store_yesno, ITEM(res_job.write_part_after_job), 1, ITEM_DEFAULT, 0},
   {NULL, NULL, NULL, 0, 0, 0}   /* end-of-table sentinel */
250 * name handler value code flags default_value
/* FileSet resource directives.
 * Include/Exclude use the same handler; code 0 = include, 1 = exclude.
 * ITEM_NO_EQUALS: these directives open a sub-block, no "=" follows. */
static RES_ITEM fs_items[] = {
   {"name", store_name, ITEM(res_fs.hdr.name), 0, ITEM_REQUIRED, 0},
   {"description", store_str, ITEM(res_fs.hdr.desc), 0, 0, 0},
   {"include", store_inc, NULL, 0, ITEM_NO_EQUALS, 0},
   {"exclude", store_inc, NULL, 1, ITEM_NO_EQUALS, 0},
   {"ignorefilesetchanges", store_yesno, ITEM(res_fs.ignore_fs_changes), 1, ITEM_DEFAULT, 0},
   {NULL, NULL, NULL, 0, 0, 0}   /* end-of-table sentinel */
261 /* Schedule -- see run_conf.c */
264 * name handler value code flags default_value
/* Schedule resource directives; the Run record parser lives in run_conf.c. */
static RES_ITEM sch_items[] = {
   {"name", store_name, ITEM(res_sch.hdr.name), 0, ITEM_REQUIRED, 0},
   {"description", store_str, ITEM(res_sch.hdr.desc), 0, 0, 0},
   {"run", store_run, ITEM(res_sch.run), 0, 0, 0},
   {NULL, NULL, NULL, 0, 0, 0}   /* end-of-table sentinel */
275 * name handler value code flags default_value
/* Pool resource directives -- volume selection, recycling and pruning. */
static RES_ITEM pool_items[] = {
   {"name", store_name, ITEM(res_pool.hdr.name), 0, ITEM_REQUIRED, 0},
   {"description", store_str, ITEM(res_pool.hdr.desc), 0, 0, 0},
   {"pooltype", store_strname, ITEM(res_pool.pool_type), 0, ITEM_REQUIRED, 0},
   {"labelformat", store_strname, ITEM(res_pool.label_format), 0, 0, 0},
   {"cleaningprefix", store_strname, ITEM(res_pool.cleaning_prefix), 0, 0, 0},
   {"usecatalog", store_yesno, ITEM(res_pool.use_catalog), 1, ITEM_DEFAULT, 1},
   {"usevolumeonce", store_yesno, ITEM(res_pool.use_volume_once), 1, 0, 0},
   {"purgeoldestvolume", store_yesno, ITEM(res_pool.purge_oldest_volume), 1, 0, 0},
   {"recycleoldestvolume", store_yesno, ITEM(res_pool.recycle_oldest_volume), 1, 0, 0},
   {"recyclecurrentvolume", store_yesno, ITEM(res_pool.recycle_current_volume), 1, 0, 0},
   {"maximumvolumes", store_pint, ITEM(res_pool.max_volumes), 0, 0, 0},
   {"maximumvolumejobs", store_pint, ITEM(res_pool.MaxVolJobs), 0, 0, 0},
   {"maximumvolumefiles", store_pint, ITEM(res_pool.MaxVolFiles), 0, 0, 0},
   {"maximumvolumebytes", store_size, ITEM(res_pool.MaxVolBytes), 0, 0, 0},
   {"acceptanyvolume", store_yesno, ITEM(res_pool.accept_any_volume), 1, ITEM_DEFAULT, 1},
   {"catalogfiles", store_yesno, ITEM(res_pool.catalog_files), 1, ITEM_DEFAULT, 1},
   /* Volume retention defaults to one year (in seconds) */
   {"volumeretention", store_time, ITEM(res_pool.VolRetention), 0, ITEM_DEFAULT, 60*60*24*365},
   {"volumeuseduration", store_time, ITEM(res_pool.VolUseDuration),0, 0, 0},
   {"autoprune", store_yesno, ITEM(res_pool.AutoPrune), 1, ITEM_DEFAULT, 1},
   {"recycle", store_yesno, ITEM(res_pool.Recycle), 1, ITEM_DEFAULT, 1},
   {NULL, NULL, NULL, 0, 0, 0}   /* end-of-table sentinel */
303 * name handler value code flags default_value
/* Counter resource directives. */
static RES_ITEM counter_items[] = {
   {"name", store_name, ITEM(res_counter.hdr.name), 0, ITEM_REQUIRED, 0},
   {"description", store_str, ITEM(res_counter.hdr.desc), 0, 0, 0},
   {"minimum", store_int, ITEM(res_counter.MinValue), 0, ITEM_DEFAULT, 0},
   {"maximum", store_pint, ITEM(res_counter.MaxValue), 0, ITEM_DEFAULT, INT32_MAX},
   {"wrapcounter", store_res, ITEM(res_counter.WrapCounter), R_COUNTER, 0, 0},
   {"catalog", store_res, ITEM(res_counter.Catalog), R_CATALOG, 0, 0},
   {NULL, NULL, NULL, 0, 0, 0}   /* end-of-table sentinel */
316 /* Message resource */
317 extern RES_ITEM msgs_items[];
320 * This is the master resource definition.
321 * It must have one item for each of the resources.
323 * NOTE!!! keep it in the same order as the R_codes
324 * or eliminate all resources[rindex].name
326 * name items rcode res_head
/* Master resource table: one entry per resource type.
 * NOTE: entries must stay in the same order as the R_* codes, since the
 * parser indexes this table by (type - r_first).
 * JobDefs deliberately reuses job_items so it accepts the same directives. */
RES_TABLE resources[] = {
   {"director", dir_items, R_DIRECTOR},
   {"client", cli_items, R_CLIENT},
   {"job", job_items, R_JOB},
   {"storage", store_items, R_STORAGE},
   {"catalog", cat_items, R_CATALOG},
   {"schedule", sch_items, R_SCHEDULE},
   {"fileset", fs_items, R_FILESET},
   {"pool", pool_items, R_POOL},
   {"messages", msgs_items, R_MSGS},
   {"counter", counter_items, R_COUNTER},
   {"console", con_items, R_CONSOLE},
   {"jobdefs", job_items, R_JOBDEFS},
345 /* Keywords (RHS) permitted in Job Level records
347 * level_name level job_type
/* Keywords permitted on the right-hand side of Job "Level =" records,
 * with the level code and the job type that level applies to.
 * The " " entries give Admin/Restore jobs a placeholder with no level. */
struct s_jl joblevels[] = {
   {"Full", L_FULL, JT_BACKUP},
   {"Base", L_BASE, JT_BACKUP},
   {"Incremental", L_INCREMENTAL, JT_BACKUP},
   {"Differential", L_DIFFERENTIAL, JT_BACKUP},
   {"Since", L_SINCE, JT_BACKUP},
   {"Catalog", L_VERIFY_CATALOG, JT_VERIFY},
   {"InitCatalog", L_VERIFY_INIT, JT_VERIFY},
   {"VolumeToCatalog", L_VERIFY_VOLUME_TO_CATALOG, JT_VERIFY},
   {"DiskToCatalog", L_VERIFY_DISK_TO_CATALOG, JT_VERIFY},
   {"Data", L_VERIFY_DATA, JT_VERIFY},
   {" ", L_NONE, JT_ADMIN},
   {" ", L_NONE, JT_RESTORE},
365 /* Keywords (RHS) permitted in Job type records
/* Keywords permitted on the right-hand side of Job "Type =" records. */
struct s_jt jobtypes[] = {
   {"backup", JT_BACKUP},
   {"verify", JT_VERIFY},
   {"restore", JT_RESTORE},
377 #ifdef old_deprecated_code
379 /* Keywords (RHS) permitted in Backup and Verify records */
380 static struct s_kw BakVerFields[] = {
387 /* Keywords (RHS) permitted in Restore records */
388 static struct s_kw RestoreFields[] = {
391 {"jobid", 'J'}, /* JobId to restore */
392 {"where", 'W'}, /* root of restore */
393 {"replace", 'R'}, /* replacement options */
394 {"bootstrap", 'B'}, /* bootstrap file */
399 /* Options permitted in Restore replace= */
/* Options permitted in Restore "replace =" records, mapping keyword to
 * the REPLACE_* code stored by store_replace(). */
struct s_kw ReplaceOptions[] = {
   {"always", REPLACE_ALWAYS},
   {"ifnewer", REPLACE_IFNEWER},
   {"ifolder", REPLACE_IFOLDER},
   {"never", REPLACE_NEVER},
408 const char *level_to_str(int level)
411 static char level_no[30];
412 const char *str = level_no;
414 bsnprintf(level_no, sizeof(level_no), "%d", level); /* default if not found */
415 for (i=0; joblevels[i].level_name; i++) {
416 if (level == joblevels[i].level) {
417 str = joblevels[i].level_name;
424 /* Dump contents of resource */
425 void dump_resource(int type, RES *reshdr, void sendit(void *sock, const char *fmt, ...), void *sock)
427 URES *res = (URES *)reshdr;
429 char ed1[100], ed2[100];
432 sendit(sock, "No %s resource defined\n", res_to_str(type));
435 if (type < 0) { /* no recursion */
441 sendit(sock, "Director: name=%s MaxJobs=%d FDtimeout=%s SDtimeout=%s\n",
442 reshdr->name, res->res_dir.MaxConcurrentJobs,
443 edit_uint64(res->res_dir.FDConnectTimeout, ed1),
444 edit_uint64(res->res_dir.SDConnectTimeout, ed2));
445 if (res->res_dir.query_file) {
446 sendit(sock, " query_file=%s\n", res->res_dir.query_file);
448 if (res->res_dir.messages) {
449 sendit(sock, " --> ");
450 dump_resource(-R_MSGS, (RES *)res->res_dir.messages, sendit, sock);
454 sendit(sock, "Console: name=%s SSL=%d\n",
455 res->res_con.hdr.name, res->res_con.enable_ssl);
458 if (res->res_counter.WrapCounter) {
459 sendit(sock, "Counter: name=%s min=%d max=%d cur=%d wrapcntr=%s\n",
460 res->res_counter.hdr.name, res->res_counter.MinValue,
461 res->res_counter.MaxValue, res->res_counter.CurrentValue,
462 res->res_counter.WrapCounter->hdr.name);
464 sendit(sock, "Counter: name=%s min=%d max=%d\n",
465 res->res_counter.hdr.name, res->res_counter.MinValue,
466 res->res_counter.MaxValue);
468 if (res->res_counter.Catalog) {
469 sendit(sock, " --> ");
470 dump_resource(-R_CATALOG, (RES *)res->res_counter.Catalog, sendit, sock);
475 sendit(sock, "Client: name=%s address=%s FDport=%d MaxJobs=%u\n",
476 res->res_client.hdr.name, res->res_client.address, res->res_client.FDport,
477 res->res_client.MaxConcurrentJobs);
478 sendit(sock, " JobRetention=%s FileRetention=%s AutoPrune=%d\n",
479 edit_utime(res->res_client.JobRetention, ed1, sizeof(ed1)),
480 edit_utime(res->res_client.FileRetention, ed2, sizeof(ed2)),
481 res->res_client.AutoPrune);
482 if (res->res_client.catalog) {
483 sendit(sock, " --> ");
484 dump_resource(-R_CATALOG, (RES *)res->res_client.catalog, sendit, sock);
488 sendit(sock, "Storage: name=%s address=%s SDport=%d MaxJobs=%u\n"
489 " DeviceName=%s MediaType=%s\n",
490 res->res_store.hdr.name, res->res_store.address, res->res_store.SDport,
491 res->res_store.MaxConcurrentJobs,
492 res->res_store.dev_name, res->res_store.media_type);
495 sendit(sock, "Catalog: name=%s address=%s DBport=%d db_name=%s\n"
496 " db_user=%s MutliDBConn=%d\n",
497 res->res_cat.hdr.name, NPRT(res->res_cat.db_address),
498 res->res_cat.db_port, res->res_cat.db_name, NPRT(res->res_cat.db_user),
499 res->res_cat.mult_db_connections);
503 sendit(sock, "%s: name=%s JobType=%d level=%s Priority=%d MaxJobs=%u\n",
504 type == R_JOB ? "Job" : "JobDefs",
505 res->res_job.hdr.name, res->res_job.JobType,
506 level_to_str(res->res_job.JobLevel), res->res_job.Priority,
507 res->res_job.MaxConcurrentJobs);
508 sendit(sock, " Resched=%d Times=%d Interval=%s Spool=%d WritePartAfterJob=%d\n",
509 res->res_job.RescheduleOnError, res->res_job.RescheduleTimes,
510 edit_uint64_with_commas(res->res_job.RescheduleInterval, ed1),
511 res->res_job.spool_data, res->res_job.write_part_after_job);
512 if (res->res_job.client) {
513 sendit(sock, " --> ");
514 dump_resource(-R_CLIENT, (RES *)res->res_job.client, sendit, sock);
516 if (res->res_job.fileset) {
517 sendit(sock, " --> ");
518 dump_resource(-R_FILESET, (RES *)res->res_job.fileset, sendit, sock);
520 if (res->res_job.schedule) {
521 sendit(sock, " --> ");
522 dump_resource(-R_SCHEDULE, (RES *)res->res_job.schedule, sendit, sock);
524 if (res->res_job.RestoreWhere) {
525 sendit(sock, " --> Where=%s\n", NPRT(res->res_job.RestoreWhere));
527 if (res->res_job.RestoreBootstrap) {
528 sendit(sock, " --> Bootstrap=%s\n", NPRT(res->res_job.RestoreBootstrap));
530 if (res->res_job.RunBeforeJob) {
531 sendit(sock, " --> RunBefore=%s\n", NPRT(res->res_job.RunBeforeJob));
533 if (res->res_job.RunAfterJob) {
534 sendit(sock, " --> RunAfter=%s\n", NPRT(res->res_job.RunAfterJob));
536 if (res->res_job.RunAfterFailedJob) {
537 sendit(sock, " --> RunAfterFailed=%s\n", NPRT(res->res_job.RunAfterFailedJob));
539 if (res->res_job.WriteBootstrap) {
540 sendit(sock, " --> WriteBootstrap=%s\n", NPRT(res->res_job.WriteBootstrap));
542 if (res->res_job.storage[0]) {
543 sendit(sock, " --> ");
545 // dump_resource(-R_STORAGE, (RES *)res->res_job.storage, sendit, sock);
547 if (res->res_job.pool) {
548 sendit(sock, " --> ");
549 dump_resource(-R_POOL, (RES *)res->res_job.pool, sendit, sock);
551 if (res->res_job.full_pool) {
552 sendit(sock, " --> ");
553 dump_resource(-R_POOL, (RES *)res->res_job.full_pool, sendit, sock);
555 if (res->res_job.inc_pool) {
556 sendit(sock, " --> ");
557 dump_resource(-R_POOL, (RES *)res->res_job.inc_pool, sendit, sock);
559 if (res->res_job.dif_pool) {
560 sendit(sock, " --> ");
561 dump_resource(-R_POOL, (RES *)res->res_job.dif_pool, sendit, sock);
563 if (res->res_job.verify_job) {
564 sendit(sock, " --> ");
565 dump_resource(-type, (RES *)res->res_job.verify_job, sendit, sock);
568 if (res->res_job.messages) {
569 sendit(sock, " --> ");
570 dump_resource(-R_MSGS, (RES *)res->res_job.messages, sendit, sock);
576 sendit(sock, "FileSet: name=%s\n", res->res_fs.hdr.name);
577 for (i=0; i<res->res_fs.num_includes; i++) {
578 INCEXE *incexe = res->res_fs.include_items[i];
579 for (j=0; j<incexe->num_opts; j++) {
580 FOPTS *fo = incexe->opts_list[j];
581 sendit(sock, " O %s\n", fo->opts);
582 for (k=0; k<fo->regex.size(); k++) {
583 sendit(sock, " R %s\n", fo->regex.get(k));
585 for (k=0; k<fo->regexdir.size(); k++) {
586 sendit(sock, " RD %s\n", fo->regexdir.get(k));
588 for (k=0; k<fo->regexfile.size(); k++) {
589 sendit(sock, " RF %s\n", fo->regexfile.get(k));
591 for (k=0; k<fo->wild.size(); k++) {
592 sendit(sock, " W %s\n", fo->wild.get(k));
594 for (k=0; k<fo->wilddir.size(); k++) {
595 sendit(sock, " WD %s\n", fo->wilddir.get(k));
597 for (k=0; k<fo->wildfile.size(); k++) {
598 sendit(sock, " WF %s\n", fo->wildfile.get(k));
600 for (k=0; k<fo->base.size(); k++) {
601 sendit(sock, " B %s\n", fo->base.get(k));
603 for (k=0; k<fo->fstype.size(); k++) {
604 sendit(sock, " X %s\n", fo->fstype.get(k));
607 sendit(sock, " D %s\n", fo->reader);
610 sendit(sock, " T %s\n", fo->writer);
612 sendit(sock, " N\n");
614 for (j=0; j<incexe->name_list.size(); j++) {
615 sendit(sock, " I %s\n", incexe->name_list.get(j));
617 if (incexe->name_list.size()) {
618 sendit(sock, " N\n");
622 for (i=0; i<res->res_fs.num_excludes; i++) {
623 INCEXE *incexe = res->res_fs.exclude_items[i];
624 for (j=0; j<incexe->name_list.size(); j++) {
625 sendit(sock, " E %s\n", incexe->name_list.get(j));
627 if (incexe->name_list.size()) {
628 sendit(sock, " N\n");
634 if (res->res_sch.run) {
636 RUN *run = res->res_sch.run;
637 char buf[1000], num[30];
638 sendit(sock, "Schedule: name=%s\n", res->res_sch.hdr.name);
643 sendit(sock, " --> Run Level=%s\n", level_to_str(run->level));
644 bstrncpy(buf, " hour=", sizeof(buf));
645 for (i=0; i<24; i++) {
646 if (bit_is_set(i, run->hour)) {
647 bsnprintf(num, sizeof(num), "%d ", i);
648 bstrncat(buf, num, sizeof(buf));
651 bstrncat(buf, "\n", sizeof(buf));
653 bstrncpy(buf, " mday=", sizeof(buf));
654 for (i=0; i<31; i++) {
655 if (bit_is_set(i, run->mday)) {
656 bsnprintf(num, sizeof(num), "%d ", i);
657 bstrncat(buf, num, sizeof(buf));
660 bstrncat(buf, "\n", sizeof(buf));
662 bstrncpy(buf, " month=", sizeof(buf));
663 for (i=0; i<12; i++) {
664 if (bit_is_set(i, run->month)) {
665 bsnprintf(num, sizeof(num), "%d ", i);
666 bstrncat(buf, num, sizeof(buf));
669 bstrncat(buf, "\n", sizeof(buf));
671 bstrncpy(buf, " wday=", sizeof(buf));
672 for (i=0; i<7; i++) {
673 if (bit_is_set(i, run->wday)) {
674 bsnprintf(num, sizeof(num), "%d ", i);
675 bstrncat(buf, num, sizeof(buf));
678 bstrncat(buf, "\n", sizeof(buf));
680 bstrncpy(buf, " wom=", sizeof(buf));
681 for (i=0; i<5; i++) {
682 if (bit_is_set(i, run->wom)) {
683 bsnprintf(num, sizeof(num), "%d ", i);
684 bstrncat(buf, num, sizeof(buf));
687 bstrncat(buf, "\n", sizeof(buf));
689 bstrncpy(buf, " woy=", sizeof(buf));
690 for (i=0; i<54; i++) {
691 if (bit_is_set(i, run->woy)) {
692 bsnprintf(num, sizeof(num), "%d ", i);
693 bstrncat(buf, num, sizeof(buf));
696 bstrncat(buf, "\n", sizeof(buf));
698 sendit(sock, " mins=%d\n", run->minute);
700 sendit(sock, " --> ");
701 dump_resource(-R_POOL, (RES *)run->pool, sendit, sock);
704 sendit(sock, " --> ");
705 dump_resource(-R_STORAGE, (RES *)run->storage, sendit, sock);
708 sendit(sock, " --> ");
709 dump_resource(-R_MSGS, (RES *)run->msgs, sendit, sock);
711 /* If another Run record is chained in, go print it */
717 sendit(sock, "Schedule: name=%s\n", res->res_sch.hdr.name);
721 sendit(sock, "Pool: name=%s PoolType=%s\n", res->res_pool.hdr.name,
722 res->res_pool.pool_type);
723 sendit(sock, " use_cat=%d use_once=%d acpt_any=%d cat_files=%d\n",
724 res->res_pool.use_catalog, res->res_pool.use_volume_once,
725 res->res_pool.accept_any_volume, res->res_pool.catalog_files);
726 sendit(sock, " max_vols=%d auto_prune=%d VolRetention=%s\n",
727 res->res_pool.max_volumes, res->res_pool.AutoPrune,
728 edit_utime(res->res_pool.VolRetention, ed1, sizeof(ed1)));
729 sendit(sock, " VolUse=%s recycle=%d LabelFormat=%s\n",
730 edit_utime(res->res_pool.VolUseDuration, ed1, sizeof(ed1)),
731 res->res_pool.Recycle,
732 NPRT(res->res_pool.label_format));
733 sendit(sock, " CleaningPrefix=%s\n",
734 NPRT(res->res_pool.cleaning_prefix));
735 sendit(sock, " RecyleOldest=%d PurgeOldest=%d MaxVolJobs=%d MaxVolFiles=%d\n",
736 res->res_pool.recycle_oldest_volume,
737 res->res_pool.purge_oldest_volume,
738 res->res_pool.MaxVolJobs, res->res_pool.MaxVolFiles);
741 sendit(sock, "Messages: name=%s\n", res->res_msgs.hdr.name);
742 if (res->res_msgs.mail_cmd)
743 sendit(sock, " mailcmd=%s\n", res->res_msgs.mail_cmd);
744 if (res->res_msgs.operator_cmd)
745 sendit(sock, " opcmd=%s\n", res->res_msgs.operator_cmd);
748 sendit(sock, "Unknown resource type %d in dump_resource.\n", type);
751 if (recurse && res->res_dir.hdr.next) {
752 dump_resource(type, res->res_dir.hdr.next, sendit, sock);
757 * Free all the members of an INCEXE structure
759 static void free_incexe(INCEXE *incexe)
761 incexe->name_list.destroy();
762 for (int i=0; i<incexe->num_opts; i++) {
763 FOPTS *fopt = incexe->opts_list[i];
764 fopt->regex.destroy();
765 fopt->regexdir.destroy();
766 fopt->regexfile.destroy();
767 fopt->wild.destroy();
768 fopt->wilddir.destroy();
769 fopt->wildfile.destroy();
770 fopt->base.destroy();
771 fopt->fstype.destroy();
780 if (incexe->opts_list) {
781 free(incexe->opts_list);
787 * Free memory of resource -- called when daemon terminates.
788 * NB, we don't need to worry about freeing any references
789 * to other resources as they will be freed when that
790 * resource chain is traversed. Mainly we worry about freeing
791 * allocated strings (names).
793 void free_resource(RES *sres, int type)
796 RES *nres; /* next resource if linked */
797 URES *res = (URES *)sres;
802 /* common stuff -- free the resource name and description */
803 nres = (RES *)res->res_dir.hdr.next;
804 if (res->res_dir.hdr.name) {
805 free(res->res_dir.hdr.name);
807 if (res->res_dir.hdr.desc) {
808 free(res->res_dir.hdr.desc);
813 if (res->res_dir.working_directory) {
814 free(res->res_dir.working_directory);
816 if (res->res_dir.scripts_directory) {
817 free((char *)res->res_dir.scripts_directory);
819 if (res->res_dir.pid_directory) {
820 free(res->res_dir.pid_directory);
822 if (res->res_dir.subsys_directory) {
823 free(res->res_dir.subsys_directory);
825 if (res->res_dir.password) {
826 free(res->res_dir.password);
828 if (res->res_dir.query_file) {
829 free(res->res_dir.query_file);
831 if (res->res_dir.DIRaddrs) {
832 free_addresses(res->res_dir.DIRaddrs);
838 if (res->res_con.password) {
839 free(res->res_con.password);
841 for (int i=0; i<Num_ACL; i++) {
842 if (res->res_con.ACL_lists[i]) {
843 delete res->res_con.ACL_lists[i];
844 res->res_con.ACL_lists[i] = NULL;
849 if (res->res_client.address) {
850 free(res->res_client.address);
852 if (res->res_client.password) {
853 free(res->res_client.password);
857 if (res->res_store.address) {
858 free(res->res_store.address);
860 if (res->res_store.password) {
861 free(res->res_store.password);
863 if (res->res_store.media_type) {
864 free(res->res_store.media_type);
866 if (res->res_store.dev_name) {
867 free(res->res_store.dev_name);
871 if (res->res_cat.db_address) {
872 free(res->res_cat.db_address);
874 if (res->res_cat.db_socket) {
875 free(res->res_cat.db_socket);
877 if (res->res_cat.db_user) {
878 free(res->res_cat.db_user);
880 if (res->res_cat.db_name) {
881 free(res->res_cat.db_name);
883 if (res->res_cat.db_password) {
884 free(res->res_cat.db_password);
888 if ((num=res->res_fs.num_includes)) {
890 free_incexe(res->res_fs.include_items[num]);
892 free(res->res_fs.include_items);
894 res->res_fs.num_includes = 0;
895 if ((num=res->res_fs.num_excludes)) {
897 free_incexe(res->res_fs.exclude_items[num]);
899 free(res->res_fs.exclude_items);
901 res->res_fs.num_excludes = 0;
904 if (res->res_pool.pool_type) {
905 free(res->res_pool.pool_type);
907 if (res->res_pool.label_format) {
908 free(res->res_pool.label_format);
910 if (res->res_pool.cleaning_prefix) {
911 free(res->res_pool.cleaning_prefix);
915 if (res->res_sch.run) {
917 nrun = res->res_sch.run;
927 if (res->res_job.RestoreWhere) {
928 free(res->res_job.RestoreWhere);
930 if (res->res_job.RestoreBootstrap) {
931 free(res->res_job.RestoreBootstrap);
933 if (res->res_job.WriteBootstrap) {
934 free(res->res_job.WriteBootstrap);
936 if (res->res_job.RunBeforeJob) {
937 free(res->res_job.RunBeforeJob);
939 if (res->res_job.RunAfterJob) {
940 free(res->res_job.RunAfterJob);
942 if (res->res_job.RunAfterFailedJob) {
943 free(res->res_job.RunAfterFailedJob);
945 if (res->res_job.ClientRunBeforeJob) {
946 free(res->res_job.ClientRunBeforeJob);
948 if (res->res_job.ClientRunAfterJob) {
949 free(res->res_job.ClientRunAfterJob);
951 for (int i=0; i < MAX_STORE; i++) {
952 if (res->res_job.storage[i]) {
953 delete (alist *)res->res_job.storage[i];
958 if (res->res_msgs.mail_cmd) {
959 free(res->res_msgs.mail_cmd);
961 if (res->res_msgs.operator_cmd) {
962 free(res->res_msgs.operator_cmd);
964 free_msgs_res((MSGS *)res); /* free message resource */
968 printf("Unknown resource type %d in free_resource.\n", type);
970 /* Common stuff again -- free the resource, recurse to next one */
975 free_resource(nres, type);
980 * Save the new resource by chaining it into the head list for
981 * the resource. If this is pass 2, we update any resource
982 * pointers because they may not have been defined until
/*
 * NOTE(review): this listing is garbled -- interior lines (braces,
 * declarations, case labels, break/return statements) were dropped by
 * the extraction, so only comments are added here.  Confirm the full
 * structure against the upstream file before editing further.
 */
985 void save_resource(int type, RES_ITEM *items, int pass)
/* rindex maps the resource type code to a 0-based index into
 * resources[] and res_head[] (r_first == R_FIRST, see head of file). */
988 int rindex = type - r_first;
992 /* Check Job requirements after applying JobDefs */
/* Jobs/JobDefs are exempt here: their required items may be supplied
 * later by an applied JobDefs resource, so they are validated elsewhere. */
993 if (type != R_JOB && type != R_JOBDEFS) {
995 * Ensure that all required items are present
997 for (i=0; items[i].name; i++) {
998 if (items[i].flags & ITEM_REQUIRED) {
999 if (!bit_is_set(i, res_all.res_dir.hdr.item_present)) {
/* Fatal: M_ERROR_TERM terminates the daemon. */
1000 Emsg2(M_ERROR_TERM, 0, "%s item is required in %s resource, but not found.\n",
1001 items[i].name, resources[rindex]);
1004 /* If this triggers, take a look at lib/parse_conf.h */
1005 if (i >= MAX_RES_ITEMS) {
1006 Emsg1(M_ERROR_TERM, 0, "Too many items in %s resource\n", resources[rindex]);
/*
 * Pass 2 (presumably wrapped in an elided "if (pass == 2) { switch (type)"
 * frame -- TODO confirm): copy resource pointers gathered by the store
 * routines from the static res_all record into the permanent record,
 * which is found by name.
 */
1012 * During pass 2 in each "store" routine, we looked up pointers
1013 * to all the resources referrenced in the current resource, now we
1014 * must copy their addresses from the static record to the allocated
1019 /* Resources not containing a resource */
1028 /* Resources containing another resource */
/* Director: only the messages pointer needs fixing up. */
1030 if ((res = (URES *)GetResWithName(R_DIRECTOR, res_all.res_dir.hdr.name)) == NULL) {
1031 Emsg1(M_ERROR_TERM, 0, "Cannot find Director resource %s\n", res_all.res_dir.hdr.name);
1033 res->res_dir.messages = res_all.res_dir.messages;
/* Job/JobDefs: fix up every contained-resource pointer (note the
 * lookup uses "type" so it serves both resource kinds). */
1037 if ((res = (URES *)GetResWithName(type, res_all.res_dir.hdr.name)) == NULL) {
1038 Emsg1(M_ERROR_TERM, 0, "Cannot find Job resource %s\n",
1039 res_all.res_dir.hdr.name);
1041 res->res_job.messages = res_all.res_job.messages;
1042 res->res_job.schedule = res_all.res_job.schedule;
1043 res->res_job.client = res_all.res_job.client;
1044 res->res_job.fileset = res_all.res_job.fileset;
1045 for (int i=0; i < MAX_STORE; i++) {
1046 res->res_job.storage[i] = res_all.res_job.storage[i];
1048 res->res_job.pool = res_all.res_job.pool;
1049 res->res_job.full_pool = res_all.res_job.full_pool;
1050 res->res_job.inc_pool = res_all.res_job.inc_pool;
1051 res->res_job.dif_pool = res_all.res_job.dif_pool;
1052 res->res_job.verify_job = res_all.res_job.verify_job;
1053 res->res_job.jobdefs = res_all.res_job.jobdefs;
/* Counter: fix up Catalog and WrapCounter pointers. */
1056 if ((res = (URES *)GetResWithName(R_COUNTER, res_all.res_counter.hdr.name)) == NULL) {
1057 Emsg1(M_ERROR_TERM, 0, "Cannot find Counter resource %s\n", res_all.res_counter.hdr.name);
1059 res->res_counter.Catalog = res_all.res_counter.Catalog;
1060 res->res_counter.WrapCounter = res_all.res_counter.WrapCounter;
/* Client: fix up the catalog pointer. */
1064 if ((res = (URES *)GetResWithName(R_CLIENT, res_all.res_client.hdr.name)) == NULL) {
1065 Emsg1(M_ERROR_TERM, 0, "Cannot find Client resource %s\n", res_all.res_client.hdr.name);
1067 res->res_client.catalog = res_all.res_client.catalog;
1071 * Schedule is a bit different in that it contains a RUN record
1072 * chain which isn't a "named" resource. This chain was linked
1073 * in by run_conf.c during pass 2, so here we jam the pointer
1074 * into the Schedule resource.
1076 if ((res = (URES *)GetResWithName(R_SCHEDULE, res_all.res_client.hdr.name)) == NULL) {
1077 Emsg1(M_ERROR_TERM, 0, "Cannot find Schedule resource %s\n", res_all.res_client.hdr.name);
1079 res->res_sch.run = res_all.res_sch.run;
/* Default: non-fatal M_ERROR (unlike the lookups above), parsing continues. */
1082 Emsg1(M_ERROR, 0, "Unknown resource type %d in save_resource.\n", type);
1086 /* Note, the resource name was already saved during pass 1,
1087 * so here, we can just release it.
1089 if (res_all.res_dir.hdr.name) {
1090 free(res_all.res_dir.hdr.name);
1091 res_all.res_dir.hdr.name = NULL;
1093 if (res_all.res_dir.hdr.desc) {
1094 free(res_all.res_dir.hdr.desc);
1095 res_all.res_dir.hdr.desc = NULL;
/*
 * Pass 1: pick the size of the daemon-specific resource struct
 * (an elided switch on type), then allocate and chain it in below.
 */
1101 * The following code is only executed during pass 1
1105 size = sizeof(DIRRES);
1108 size = sizeof(CONRES);
1111 size =sizeof(CLIENT);
1114 size = sizeof(STORE);
1124 size = sizeof(FILESET);
1127 size = sizeof(SCHED);
1130 size = sizeof(POOL);
1133 size = sizeof(MSGS);
1136 size = sizeof(COUNTER);
/* NOTE(review): the "save_resrouce" typo is in the original message text;
 * left untouched since this is a comment-only pass. */
1139 printf("Unknown resource type %d in save_resrouce.\n", type);
/* Make the permanent copy: duplicate the static res_all onto the heap. */
1146 res = (URES *)malloc(size);
1147 memcpy(res, &res_all, size);
1148 if (!res_head[rindex]) {
1149 res_head[rindex] = (RES *)res; /* store first entry */
1150 Dmsg3(900, "Inserting first %s res: %s index=%d\n", res_to_str(type),
1151 res->res_dir.hdr.name, rindex);
1154 /* Add new res to end of chain */
/* Walk to the tail, rejecting duplicate resource names on the way. */
1155 for (next=res_head[rindex]; next->next; next=next->next) {
1156 if (strcmp(next->name, res->res_dir.hdr.name) == 0) {
1157 Emsg2(M_ERROR_TERM, 0,
1158 _("Attempt to define second %s resource named \"%s\" is not permitted.\n"),
1159 resources[rindex].name, res->res_dir.hdr.name);
1162 next->next = (RES *)res;
1163 Dmsg4(900, "Inserting %s res: %s index=%d pass=%d\n", res_to_str(type),
1164 res->res_dir.hdr.name, rindex, pass);
1170 * Store JobType (backup, verify, restore)
1173 void store_jobtype(LEX *lc, RES_ITEM *item, int index, int pass)
1177 token = lex_get_token(lc, T_NAME);
1178 /* Store the type both pass 1 and pass 2 */
1179 for (i=0; jobtypes[i].type_name; i++) {
1180 if (strcasecmp(lc->str, jobtypes[i].type_name) == 0) {
1181 *(int *)(item->value) = jobtypes[i].job_type;
1187 scan_err1(lc, "Expected a Job Type keyword, got: %s", lc->str);
1190 set_bit(index, res_all.hdr.item_present);
1194 * Store Job Level (Full, Incremental, ...)
1197 void store_level(LEX *lc, RES_ITEM *item, int index, int pass)
1201 token = lex_get_token(lc, T_NAME);
1202 /* Store the level pass 2 so that type is defined */
1203 for (i=0; joblevels[i].level_name; i++) {
1204 if (strcasecmp(lc->str, joblevels[i].level_name) == 0) {
1205 *(int *)(item->value) = joblevels[i].level;
1211 scan_err1(lc, "Expected a Job Level keyword, got: %s", lc->str);
1214 set_bit(index, res_all.hdr.item_present);
1217 void store_replace(LEX *lc, RES_ITEM *item, int index, int pass)
1220 token = lex_get_token(lc, T_NAME);
1221 /* Scan Replacement options */
1222 for (i=0; ReplaceOptions[i].name; i++) {
1223 if (strcasecmp(lc->str, ReplaceOptions[i].name) == 0) {
1224 *(int *)(item->value) = ReplaceOptions[i].token;
1230 scan_err1(lc, "Expected a Restore replacement option, got: %s", lc->str);
1233 set_bit(index, res_all.hdr.item_present);
1237 * Store ACL (access control list)
1240 void store_acl(LEX *lc, RES_ITEM *item, int index, int pass)
1245 token = lex_get_token(lc, T_NAME);
1247 if (((alist **)item->value)[item->code] == NULL) {
1248 ((alist **)item->value)[item->code] = New(alist(10, owned_by_alist));
1249 Dmsg1(900, "Defined new ACL alist at %d\n", item->code);
1251 ((alist **)item->value)[item->code]->append(bstrdup(lc->str));
1252 Dmsg2(900, "Appended to %d %s\n", item->code, lc->str);
1254 token = lex_get_token(lc, T_ALL);
1255 if (token == T_COMMA) {
1256 continue; /* get another ACL */
1260 set_bit(index, res_all.hdr.item_present);