From d90c27623a8e65d7467df28c5faff0be8fbddbed Mon Sep 17 00:00:00 2001 From: Kern Sibbald Date: Wed, 18 Jun 2008 20:09:20 +0000 Subject: [PATCH] 18Jun08 kes Eliminate ints from conf files and structures to avoid word alignment problems 17Jun08 kes Fix bug reported by Scott Barninger where the bacula script refers to scripts in the wrong directory. Needed to meet the requirements of recent FHS location changes. git-svn-id: https://bacula.svn.sourceforge.net/svnroot/bacula/trunk@7166 91ce42f0-d328-0410-95d8-f526ca767f89 --- bacula/scripts/bacula.in | 22 +++++------ bacula/src/console/console_conf.c | 2 +- bacula/src/console/console_conf.h | 2 +- bacula/src/dird/dird.c | 10 ++--- bacula/src/dird/dird_conf.c | 60 ++++++++++++++--------------- bacula/src/dird/dird_conf.h | 62 +++++++++++++++--------------- bacula/src/filed/filed_conf.c | 4 +- bacula/src/filed/filed_conf.h | 2 +- bacula/src/jcr.h | 63 +++++++++++++++---------------- bacula/src/lib/parse_conf.c | 22 +++++------ bacula/src/lib/parse_conf.h | 32 ++++++++-------- bacula/src/stored/stored_conf.c | 16 ++++---- bacula/src/stored/stored_conf.h | 6 +-- bacula/src/version.h | 6 +-- bacula/technotes-2.5 | 6 +++ 15 files changed, 159 insertions(+), 156 deletions(-) diff --git a/bacula/scripts/bacula.in b/bacula/scripts/bacula.in index 0382ab8f20..8010035a59 100755 --- a/bacula/scripts/bacula.in +++ b/bacula/scripts/bacula.in @@ -13,22 +13,20 @@ # easier to "steal" this code for the development # environment where they are different. # -BACFDCFG=@sysconfdir@ -BACSDCFG=@sysconfdir@ -BACDIRCFG=@sysconfdir@ +SCRIPTDIR=@scriptdir@ case "$1" in start) - [ -x ${BACSDCFG}/bacula-ctl-sd ] && ${BACSDCFG}/bacula-ctl-sd $1 $2 - [ -x ${BACFDCFG}/bacula-ctl-fd ] && ${BACFDCFG}/bacula-ctl-fd $1 $2 - [ -x ${BACDIRCFG}/bacula-ctl-dir ] && ${BACDIRCFG}/bacula-ctl-dir $1 $2 + [ -x ${SCRIPTDIR}/bacula-ctl-sd ] && ${SCRIPTDIR}/bacula-ctl-sd $1 $2 + [ -x ${SCRIPTDIR}/bacula-ctl-fd ] && ${SCRIPTDIR}/bacula-ctl-fd $1 $2 + [ -x ${SCRIPTDIR}/bacula-ctl-dir ] && ${SCRIPTDIR}/bacula-ctl-dir $1 $2 ;; stop) # Stop the FD first so that SD will fail jobs and update catalog - [ -x ${BACFDCFG}/bacula-ctl-fd ] && ${BACFDCFG}/bacula-ctl-fd $1 $2 - [ -x ${BACSDCFG}/bacula-ctl-sd ] && ${BACSDCFG}/bacula-ctl-sd $1 $2 - [ -x ${BACDIRCFG}/bacula-ctl-dir ] && ${BACDIRCFG}/bacula-ctl-dir $1 $2 + [ -x ${SCRIPTDIR}/bacula-ctl-fd ] && ${SCRIPTDIR}/bacula-ctl-fd $1 $2 + [ -x ${SCRIPTDIR}/bacula-ctl-sd ] && ${SCRIPTDIR}/bacula-ctl-sd $1 $2 + [ -x ${SCRIPTDIR}/bacula-ctl-dir ] && ${SCRIPTDIR}/bacula-ctl-dir $1 $2 echo sleep 6 ;; @@ -39,9 +37,9 @@ case "$1" in ;; status) - [ -x ${BACSDCFG}/bacula-ctl-sd ] && ${BACSDCFG}/bacula-ctl-sd status - [ -x ${BACFDCFG}/bacula-ctl-fd ] && ${BACFDCFG}/bacula-ctl-fd status - [ -x ${BACDIRCFG}/bacula-ctl-dir ] && ${BACDIRCFG}/bacula-ctl-dir status + [ -x ${SCRIPTDIR}/bacula-ctl-sd ] && ${SCRIPTDIR}/bacula-ctl-sd status + [ -x ${SCRIPTDIR}/bacula-ctl-fd ] && ${SCRIPTDIR}/bacula-ctl-fd status + [ -x ${SCRIPTDIR}/bacula-ctl-dir ] && ${SCRIPTDIR}/bacula-ctl-dir status ;; *) diff --git a/bacula/src/console/console_conf.c b/bacula/src/console/console_conf.c index 1bdda9357e..73165e828f 100644 --- a/bacula/src/console/console_conf.c +++ b/bacula/src/console/console_conf.c @@ -105,7 +105,7 @@ static RES_ITEM cons_items[] = { static RES_ITEM dir_items[] = { {"name", store_name, ITEM(res_dir.hdr.name), 0, ITEM_REQUIRED, 0}, {"description", store_str, ITEM(res_dir.hdr.desc), 0, 0, 0}, - {"dirport", store_int, ITEM(res_dir.DIRport), 0, ITEM_DEFAULT, 9101}, + 
{"dirport", store_pint32, ITEM(res_dir.DIRport), 0, ITEM_DEFAULT, 9101}, {"address", store_str, ITEM(res_dir.address), 0, 0, 0}, {"password", store_password, ITEM(res_dir.password), 0, ITEM_REQUIRED, 0}, {"tlsauthenticate",store_bool, ITEM(res_dir.tls_enable), 0, 0, 0}, diff --git a/bacula/src/console/console_conf.h b/bacula/src/console/console_conf.h index d786161b3d..ad116c68e4 100644 --- a/bacula/src/console/console_conf.h +++ b/bacula/src/console/console_conf.h @@ -80,7 +80,7 @@ struct CONRES { /* Director */ struct DIRRES { RES hdr; - int DIRport; /* UA server port */ + uint32_t DIRport; /* UA server port */ char *address; /* UA server address */ char *password; /* UA server password */ bool tls_authenticate; /* Authenticate with TLS */ diff --git a/bacula/src/dird/dird.c b/bacula/src/dird/dird.c index cba298e56c..9a0326e7dd 100644 --- a/bacula/src/dird/dird.c +++ b/bacula/src/dird/dird.c @@ -663,7 +663,7 @@ static bool check_resources() /* Transfer default items from JobDefs Resource */ for (i=0; job_items[i].name; i++) { char **def_svalue, **svalue; /* string value */ - int *def_ivalue, *ivalue; /* integer value */ + uint32_t *def_ivalue, *ivalue; /* integer value */ bool *def_bvalue, *bvalue; /* bool value */ int64_t *def_lvalue, *lvalue; /* 64 bit values */ uint32_t offset; @@ -717,16 +717,16 @@ static bool check_resources() * Note, our store_bit does not handle bitmaped fields */ } else if (job_items[i].handler == store_bit || - job_items[i].handler == store_pint || + job_items[i].handler == store_pint32 || job_items[i].handler == store_jobtype || job_items[i].handler == store_level || - job_items[i].handler == store_pint || + job_items[i].handler == store_int32 || job_items[i].handler == store_migtype || job_items[i].handler == store_replace) { - def_ivalue = (int *)((char *)(job->jobdefs) + offset); + def_ivalue = (uint32_t *)((char *)(job->jobdefs) + offset); Dmsg5(400, "Job \"%s\", field \"%s\" def_ivalue=%d item %d offset=%u\n", job->name(), job_items[i].name, *def_ivalue, i, offset); - ivalue = (int *)((char *)job + offset); + ivalue = (uint32_t *)((char *)job + offset); *ivalue = *def_ivalue; set_bit(i, job->hdr.item_present); /* diff --git a/bacula/src/dird/dird_conf.c b/bacula/src/dird/dird_conf.c index a446b83a33..d4649d4ad7 100644 --- a/bacula/src/dird/dird_conf.c +++ b/bacula/src/dird/dird_conf.c @@ -118,7 +118,7 @@ static RES_ITEM dir_items[] = { {"scriptsdirectory", store_dir, ITEM(res_dir.scripts_directory), 0, 0, 0}, {"piddirectory",store_dir, ITEM(res_dir.pid_directory), 0, ITEM_REQUIRED, 0}, {"subsysdirectory", store_dir, ITEM(res_dir.subsys_directory), 0, 0, 0}, - {"maximumconcurrentjobs", store_pint, ITEM(res_dir.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1}, + {"maximumconcurrentjobs", store_pint32, ITEM(res_dir.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1}, {"password", store_password, ITEM(res_dir.password), 0, ITEM_REQUIRED, 0}, {"fdconnecttimeout", store_time,ITEM(res_dir.FDConnectTimeout), 0, ITEM_DEFAULT, 60 * 30}, {"sdconnecttimeout", store_time,ITEM(res_dir.SDConnectTimeout), 0, ITEM_DEFAULT, 60 * 30}, @@ -182,15 +182,15 @@ static RES_ITEM cli_items[] = { {"description", store_str, ITEM(res_client.hdr.desc), 0, 0, 0}, {"address", store_str, ITEM(res_client.address), 0, ITEM_REQUIRED, 0}, {"fdaddress", store_str, ITEM(res_client.address), 0, 0, 0}, - {"fdport", store_pint, ITEM(res_client.FDport), 0, ITEM_DEFAULT, 9102}, + {"fdport", store_pint32, ITEM(res_client.FDport), 0, ITEM_DEFAULT, 9102}, {"password", store_password, ITEM(res_client.password), 0, 
ITEM_REQUIRED, 0}, - {"fdpassword", store_password, ITEM(res_client.password), 0, 0, 0}, + {"fdpassword", store_password, ITEM(res_client.password), 0, 0, 0}, {"catalog", store_res, ITEM(res_client.catalog), R_CATALOG, ITEM_REQUIRED, 0}, {"fileretention", store_time, ITEM(res_client.FileRetention), 0, ITEM_DEFAULT, 60*60*24*60}, {"jobretention", store_time, ITEM(res_client.JobRetention), 0, ITEM_DEFAULT, 60*60*24*180}, {"heartbeatinterval", store_time, ITEM(res_client.heartbeat_interval), 0, ITEM_DEFAULT, 0}, {"autoprune", store_bool, ITEM(res_client.AutoPrune), 0, ITEM_DEFAULT, true}, - {"maximumconcurrentjobs", store_pint, ITEM(res_client.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1}, + {"maximumconcurrentjobs", store_pint32, ITEM(res_client.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1}, {"tlsauthenticate", store_bool, ITEM(res_client.tls_authenticate), 0, 0, 0}, {"tlsenable", store_bool, ITEM(res_client.tls_enable), 0, 0, 0}, {"tlsrequire", store_bool, ITEM(res_client.tls_require), 0, 0, 0}, @@ -209,7 +209,7 @@ static RES_ITEM cli_items[] = { static RES_ITEM store_items[] = { {"name", store_name, ITEM(res_store.hdr.name), 0, ITEM_REQUIRED, 0}, {"description", store_str, ITEM(res_store.hdr.desc), 0, 0, 0}, - {"sdport", store_pint, ITEM(res_store.SDport), 0, ITEM_DEFAULT, 9103}, + {"sdport", store_pint32, ITEM(res_store.SDport), 0, ITEM_DEFAULT, 9103}, {"address", store_str, ITEM(res_store.address), 0, ITEM_REQUIRED, 0}, {"sdaddress", store_str, ITEM(res_store.address), 0, 0, 0}, {"password", store_password, ITEM(res_store.password), 0, ITEM_REQUIRED, 0}, @@ -219,8 +219,8 @@ static RES_ITEM store_items[] = { {"autochanger", store_bool, ITEM(res_store.autochanger), 0, ITEM_DEFAULT, 0}, {"enabled", store_bool, ITEM(res_store.enabled), 0, ITEM_DEFAULT, true}, {"heartbeatinterval", store_time, ITEM(res_store.heartbeat_interval), 0, ITEM_DEFAULT, 0}, - {"maximumconcurrentjobs", store_pint, ITEM(res_store.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1}, - {"sddport", store_pint, ITEM(res_store.SDDport), 0, 0, 0}, /* deprecated */ + {"maximumconcurrentjobs", store_pint32, ITEM(res_store.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1}, + {"sddport", store_pint32, ITEM(res_store.SDDport), 0, 0, 0}, /* deprecated */ {"tlsauthenticate", store_bool, ITEM(res_store.tls_authenticate), 0, 0, 0}, {"tlsenable", store_bool, ITEM(res_store.tls_enable), 0, 0, 0}, {"tlsrequire", store_bool, ITEM(res_store.tls_require), 0, 0, 0}, @@ -241,7 +241,7 @@ static RES_ITEM cat_items[] = { {"description", store_str, ITEM(res_cat.hdr.desc), 0, 0, 0}, {"address", store_str, ITEM(res_cat.db_address), 0, 0, 0}, {"dbaddress", store_str, ITEM(res_cat.db_address), 0, 0, 0}, - {"dbport", store_pint, ITEM(res_cat.db_port), 0, 0, 0}, + {"dbport", store_pint32, ITEM(res_cat.db_port), 0, 0, 0}, /* keep this password as store_str for the moment */ {"password", store_str, ITEM(res_cat.db_password), 0, 0, 0}, {"dbpassword", store_str, ITEM(res_cat.db_password), 0, 0, 0}, @@ -279,25 +279,25 @@ RES_ITEM job_items[] = { {"run", store_alist_str, ITEM(res_job.run_cmds), 0, 0, 0}, /* Root of where to restore files */ {"where", store_dir, ITEM(res_job.RestoreWhere), 0, 0, 0}, - {"regexwhere", store_str, ITEM(res_job.RegexWhere), 0, 0, 0}, - {"stripprefix", store_str, ITEM(res_job.strip_prefix), 0, 0, 0}, + {"regexwhere", store_str, ITEM(res_job.RegexWhere), 0, 0, 0}, + {"stripprefix", store_str, ITEM(res_job.strip_prefix), 0, 0, 0}, {"addprefix", store_str, ITEM(res_job.add_prefix), 0, 0, 0}, {"addsuffix", store_str, ITEM(res_job.add_suffix), 0, 0, 0}, /* Where to 
find bootstrap during restore */ {"bootstrap",store_dir, ITEM(res_job.RestoreBootstrap), 0, 0, 0}, /* Where to write bootstrap file during backup */ {"writebootstrap",store_dir, ITEM(res_job.WriteBootstrap), 0, 0, 0}, - {"writeverifylist",store_dir, ITEM(res_job.WriteVerifyList), 0, 0, 0}, + {"writeverifylist",store_dir,ITEM(res_job.WriteVerifyList), 0, 0, 0}, {"replace", store_replace, ITEM(res_job.replace), 0, ITEM_DEFAULT, REPLACE_ALWAYS}, {"maxrunschedtime", store_time, ITEM(res_job.MaxRunSchedTime), 0, 0, 0}, {"maxruntime", store_time, ITEM(res_job.MaxRunTime), 0, 0, 0}, /* xxxMaxWaitTime are deprecated */ {"fullmaxwaittime", store_time, ITEM(res_job.FullMaxRunTime), 0, 0, 0}, {"incrementalmaxwaittime", store_time, ITEM(res_job.IncMaxRunTime), 0, 0, 0}, - {"differentialmaxwaittime", store_time, ITEM(res_job.DiffMaxRunTime), 0, 0, 0}, + {"differentialmaxwaittime", store_time, ITEM(res_job.DiffMaxRunTime), 0, 0, 0}, {"fullmaxruntime", store_time, ITEM(res_job.FullMaxRunTime), 0, 0, 0}, {"incrementalmaxruntime", store_time, ITEM(res_job.IncMaxRunTime), 0, 0, 0}, - {"differentialmaxruntime", store_time, ITEM(res_job.DiffMaxRunTime), 0, 0, 0}, + {"differentialmaxruntime", store_time, ITEM(res_job.DiffMaxRunTime), 0, 0, 0}, {"maxwaittime", store_time, ITEM(res_job.MaxWaitTime), 0, 0, 0}, {"maxstartdelay",store_time, ITEM(res_job.MaxStartDelay), 0, 0, 0}, {"maxfullinterval", store_time, ITEM(res_job.MaxFullInterval), 0, 0, 0}, @@ -319,11 +319,11 @@ RES_ITEM job_items[] = { {"runafterfailedjob", store_short_runscript, ITEM(res_job.RunScripts), 0, 0, 0}, {"clientrunbeforejob", store_short_runscript, ITEM(res_job.RunScripts), 0, 0, 0}, {"clientrunafterjob", store_short_runscript, ITEM(res_job.RunScripts), 0, 0, 0}, - {"maximumconcurrentjobs", store_pint, ITEM(res_job.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1}, + {"maximumconcurrentjobs", store_pint32, ITEM(res_job.MaxConcurrentJobs), 0, ITEM_DEFAULT, 1}, {"rescheduleonerror", store_bool, ITEM(res_job.RescheduleOnError), 0, ITEM_DEFAULT, false}, {"rescheduleinterval", store_time, ITEM(res_job.RescheduleInterval), 0, ITEM_DEFAULT, 60 * 30}, - {"rescheduletimes", store_pint, ITEM(res_job.RescheduleTimes), 0, 0, 0}, - {"priority", store_pint, ITEM(res_job.Priority), 0, ITEM_DEFAULT, 10}, + {"rescheduletimes", store_pint32, ITEM(res_job.RescheduleTimes), 0, 0, 0}, + {"priority", store_pint32, ITEM(res_job.Priority), 0, ITEM_DEFAULT, 10}, {"writepartafterjob", store_bool, ITEM(res_job.write_part_after_job), 0, ITEM_DEFAULT, true}, {"selectionpattern", store_str, ITEM(res_job.selection_pattern), 0, 0, 0}, {"runscript", store_runscript, ITEM(res_job.RunScripts), 0, ITEM_NO_EQUALS, 0}, @@ -380,9 +380,9 @@ static RES_ITEM pool_items[] = { {"purgeoldestvolume", store_bool, ITEM(res_pool.purge_oldest_volume), 0, 0, 0}, {"recycleoldestvolume", store_bool, ITEM(res_pool.recycle_oldest_volume), 0, 0, 0}, {"recyclecurrentvolume", store_bool, ITEM(res_pool.recycle_current_volume), 0, 0, 0}, - {"maximumvolumes", store_pint, ITEM(res_pool.max_volumes), 0, 0, 0}, - {"maximumvolumejobs", store_pint, ITEM(res_pool.MaxVolJobs), 0, 0, 0}, - {"maximumvolumefiles", store_pint, ITEM(res_pool.MaxVolFiles), 0, 0, 0}, + {"maximumvolumes", store_pint32, ITEM(res_pool.max_volumes), 0, 0, 0}, + {"maximumvolumejobs", store_pint32, ITEM(res_pool.MaxVolJobs), 0, 0, 0}, + {"maximumvolumefiles", store_pint32, ITEM(res_pool.MaxVolFiles), 0, 0, 0}, {"maximumvolumebytes", store_size, ITEM(res_pool.MaxVolBytes), 0, 0, 0}, {"catalogfiles", store_bool, ITEM(res_pool.catalog_files), 0, 
ITEM_DEFAULT, true}, {"volumeretention", store_time, ITEM(res_pool.VolRetention), 0, ITEM_DEFAULT, 60*60*24*365}, @@ -407,8 +407,8 @@ static RES_ITEM pool_items[] = { static RES_ITEM counter_items[] = { {"name", store_name, ITEM(res_counter.hdr.name), 0, ITEM_REQUIRED, 0}, {"description", store_str, ITEM(res_counter.hdr.desc), 0, 0, 0}, - {"minimum", store_int, ITEM(res_counter.MinValue), 0, ITEM_DEFAULT, 0}, - {"maximum", store_pint, ITEM(res_counter.MaxValue), 0, ITEM_DEFAULT, INT32_MAX}, + {"minimum", store_int32, ITEM(res_counter.MinValue), 0, ITEM_DEFAULT, 0}, + {"maximum", store_pint32, ITEM(res_counter.MaxValue), 0, ITEM_DEFAULT, INT32_MAX}, {"wrapcounter", store_res, ITEM(res_counter.WrapCounter), R_COUNTER, 0, 0}, {"catalog", store_res, ITEM(res_counter.Catalog), R_CATALOG, 0, 0}, {NULL, NULL, {0}, 0, 0, 0} @@ -515,7 +515,7 @@ const char *level_to_str(int level) bsnprintf(level_no, sizeof(level_no), "%c (%d)", level, level); /* default if not found */ for (i=0; joblevels[i].level_name; i++) { - if (level == joblevels[i].level) { + if (level == (int)joblevels[i].level) { str = joblevels[i].level_name; break; } @@ -1627,7 +1627,7 @@ void store_migtype(LEX *lc, RES_ITEM *item, int index, int pass) /* Store the type both pass 1 and pass 2 */ for (i=0; migtypes[i].type_name; i++) { if (strcasecmp(lc->str, migtypes[i].type_name) == 0) { - *(int *)(item->value) = migtypes[i].job_type; + *(uint32_t *)(item->value) = migtypes[i].job_type; i = 0; break; } @@ -1653,7 +1653,7 @@ void store_jobtype(LEX *lc, RES_ITEM *item, int index, int pass) /* Store the type both pass 1 and pass 2 */ for (i=0; jobtypes[i].type_name; i++) { if (strcasecmp(lc->str, jobtypes[i].type_name) == 0) { - *(int *)(item->value) = jobtypes[i].job_type; + *(uint32_t *)(item->value) = jobtypes[i].job_type; i = 0; break; } @@ -1677,7 +1677,7 @@ void store_level(LEX *lc, RES_ITEM *item, int index, int pass) /* Store the level pass 2 so that type is defined */ for (i=0; joblevels[i].level_name; i++) { if (strcasecmp(lc->str, joblevels[i].level_name) == 0) { - *(int *)(item->value) = joblevels[i].level; + *(uint32_t *)(item->value) = joblevels[i].level; i = 0; break; } @@ -1697,7 +1697,7 @@ void store_replace(LEX *lc, RES_ITEM *item, int index, int pass) /* Scan Replacement options */ for (i=0; ReplaceOptions[i].name; i++) { if (strcasecmp(lc->str, ReplaceOptions[i].name) == 0) { - *(int *)(item->value) = ReplaceOptions[i].token; + *(uint32_t *)(item->value) = ReplaceOptions[i].token; i = 0; break; } @@ -1745,13 +1745,13 @@ static void store_runscript_when(LEX *lc, RES_ITEM *item, int index, int pass) lex_get_token(lc, T_NAME); if (strcasecmp(lc->str, "before") == 0) { - *(int *)(item->value) = SCRIPT_Before ; + *(uint32_t *)(item->value) = SCRIPT_Before ; } else if (strcasecmp(lc->str, "after") == 0) { - *(int *)(item->value) = SCRIPT_After; + *(uint32_t *)(item->value) = SCRIPT_After; } else if (strcasecmp(lc->str, "aftervss") == 0) { - *(int *)(item->value) = SCRIPT_AfterVSS; + *(uint32_t *)(item->value) = SCRIPT_AfterVSS; } else if (strcasecmp(lc->str, "always") == 0) { - *(int *)(item->value) = SCRIPT_Any; + *(uint32_t *)(item->value) = SCRIPT_Any; } else { scan_err2(lc, _("Expect %s, got: %s"), "Before, After, AfterVSS or Always", lc->str); } diff --git a/bacula/src/dird/dird_conf.h b/bacula/src/dird/dird_conf.h index 031f81eff8..8693318a7b 100644 --- a/bacula/src/dird/dird_conf.h +++ b/bacula/src/dird/dird_conf.h @@ -72,20 +72,20 @@ enum { /* Used for certain KeyWord tables */ struct s_kw { const char *name; - int 
token; + uint32_t token; }; /* Job Level keyword structure */ struct s_jl { - const char *level_name; /* level keyword */ - int level; /* level */ - int job_type; /* JobType permitting this level */ + const char *level_name; /* level keyword */ + uint32_t level; /* level */ + uint32_t job_type; /* JobType permitting this level */ }; /* Job Type keyword structure */ struct s_jt { const char *type_name; - int job_type; + uint32_t job_type; }; /* Definition of the contents of each Resource */ @@ -125,11 +125,11 @@ public: char *tls_dhfile; /* TLS Diffie-Hellman Parameters */ alist *tls_allowed_cns; /* TLS Allowed Clients */ TLS_CONTEXT *tls_ctx; /* Shared TLS Context */ + utime_t stats_retention; /* Stats retention period in seconds */ bool tls_authenticate; /* Authenticated with TLS */ bool tls_enable; /* Enable TLS */ bool tls_require; /* Require TLS */ bool tls_verify_peer; /* TLS Verify Client Certificate */ - utime_t stats_retention; /* Stats retention period in seconds */ /* Methods */ char *name() const; @@ -150,10 +150,10 @@ public: RES hdr; bool found; /* found with SD */ - int num_writers; /* number of writers */ - int max_writers; /* = 1 for files */ - int reserved; /* number of reserves */ - int num_drives; /* for autochanger */ + int32_t num_writers; /* number of writers */ + int32_t max_writers; /* = 1 for files */ + int32_t reserved; /* number of reserves */ + int32_t num_drives; /* for autochanger */ bool autochanger; /* set if device is autochanger */ bool open; /* drive open */ bool append; /* in append mode */ @@ -225,14 +225,14 @@ class CAT { public: RES hdr; - int db_port; /* Port */ + uint32_t db_port; /* Port */ char *db_address; /* host name for remote access */ char *db_socket; /* Socket for local access */ char *db_password; char *db_user; char *db_name; char *db_driver; /* Select appropriate driver */ - int mult_db_connections; /* set if multiple connections wanted */ + uint32_t mult_db_connections; /* set if multiple connections wanted */ /* Methods */ char *name() const; @@ -249,7 +249,7 @@ class CLIENT { public: RES hdr; - int FDport; /* Where File daemon listens */ + uint32_t FDport; /* Where File daemon listens */ utime_t FileRetention; /* file retention period in seconds */ utime_t JobRetention; /* job retention period in seconds */ utime_t heartbeat_interval; /* Interval to send heartbeats */ @@ -284,8 +284,8 @@ class STORE { public: RES hdr; - int SDport; /* port where Directors connect */ - int SDDport; /* data port for File daemon */ + uint32_t SDport; /* port where Directors connect */ + uint32_t SDDport; /* data port for File daemon */ char *address; char *password; char *media_type; @@ -305,7 +305,7 @@ public: bool autochanger; /* set if autochanger */ int64_t StorageId; /* Set from Storage DB record */ utime_t heartbeat_interval; /* Interval to send heartbeats */ - int drives; /* number of drives in autochanger */ + uint32_t drives; /* number of drives in autochanger */ /* Methods */ char *dev_name() const; @@ -363,13 +363,13 @@ class JOB { public: RES hdr; - int JobType; /* job type (backup, verify, restore */ - int JobLevel; /* default backup/verify level */ - int Priority; /* Job priority */ - int RestoreJobId; /* What -- JobId to restore */ - int RescheduleTimes; /* Number of times to reschedule job */ - int replace; /* How (overwrite, ..) 
*/ - int selection_type; + uint32_t JobType; /* job type (backup, verify, restore */ + uint32_t JobLevel; /* default backup/verify level */ + int32_t Priority; /* Job priority */ + uint32_t RestoreJobId; /* What -- JobId to restore */ + int32_t RescheduleTimes; /* Number of times to reschedule job */ + uint32_t replace; /* How (overwrite, ..) */ + uint32_t selection_type; char *RestoreWhere; /* Where on disk to restore -- directory */ char *RegexWhere; /* RegexWhere option */ @@ -469,7 +469,7 @@ struct FOPTS { struct INCEXE { FOPTS *current_opts; /* points to current options structure */ FOPTS **opts_list; /* options list */ - int num_opts; /* number of options items */ + int32_t num_opts; /* number of options items */ alist name_list; /* filename list -- holds char * */ alist plugin_list; /* filename list for plugins */ }; @@ -484,9 +484,9 @@ public: bool new_include; /* Set if new include used */ INCEXE **include_items; /* array of incexe structures */ - int num_includes; /* number in array */ + int32_t num_includes; /* number in array */ INCEXE **exclude_items; - int num_excludes; + int32_t num_excludes; bool have_MD5; /* set if MD5 initialized */ struct MD5Context md5c; /* MD5 of include/exclude */ char MD5[30]; /* base 64 representation of MD5 */ @@ -598,9 +598,9 @@ union URES { class RUN { public: RUN *next; /* points to next run record */ - int level; /* level override */ - int Priority; /* priority override */ - int job_type; + uint32_t level; /* level override */ + int32_t Priority; /* priority override */ + uint32_t job_type; bool spool_data; /* Data spooling override */ bool spool_data_set; /* Data spooling override given */ bool write_part_after_job; /* Write part after job override */ @@ -613,8 +613,8 @@ public: STORE *storage; /* Storage override */ MSGS *msgs; /* Messages override */ char *since; - int level_no; - int minute; /* minute to run job */ + uint32_t level_no; + uint32_t minute; /* minute to run job */ time_t last_run; /* last time run */ time_t next_run; /* next time to run */ char hour[nbytes_for_bits(24)]; /* bit set for each hour */ diff --git a/bacula/src/filed/filed_conf.c b/bacula/src/filed/filed_conf.c index 18f68d7605..4641e9d6a4 100644 --- a/bacula/src/filed/filed_conf.c +++ b/bacula/src/filed/filed_conf.c @@ -97,11 +97,11 @@ static RES_ITEM cli_items[] = { {"subsysdirectory", store_dir, ITEM(res_client.subsys_directory), 0, 0, 0}, {"plugindirectory", store_dir, ITEM(res_client.plugin_directory), 0, 0, 0}, {"scriptsdirectory", store_dir, ITEM(res_client.scripts_directory), 0, 0, 0}, - {"maximumconcurrentjobs", store_pint, ITEM(res_client.MaxConcurrentJobs), 0, ITEM_DEFAULT, 20}, + {"maximumconcurrentjobs", store_pint32, ITEM(res_client.MaxConcurrentJobs), 0, ITEM_DEFAULT, 20}, {"messages", store_res, ITEM(res_client.messages), R_MSGS, 0, 0}, {"sdconnecttimeout", store_time,ITEM(res_client.SDConnectTimeout), 0, ITEM_DEFAULT, 60 * 30}, {"heartbeatinterval", store_time, ITEM(res_client.heartbeat_interval), 0, ITEM_DEFAULT, 0}, - {"maximumnetworkbuffersize", store_pint, ITEM(res_client.max_network_buffer_size), 0, 0, 0}, + {"maximumnetworkbuffersize", store_pint32, ITEM(res_client.max_network_buffer_size), 0, 0, 0}, #ifdef DATA_ENCRYPTION {"pkisignatures", store_bool, ITEM(res_client.pki_sign), 0, ITEM_DEFAULT, 0}, {"pkiencryption", store_bool, ITEM(res_client.pki_encrypt), 0, ITEM_DEFAULT, 0}, diff --git a/bacula/src/filed/filed_conf.h b/bacula/src/filed/filed_conf.h index 776a9e0c7f..6cfba0f683 100644 --- a/bacula/src/filed/filed_conf.h +++ 
b/bacula/src/filed/filed_conf.h @@ -82,7 +82,7 @@ struct CLIENT { char *plugin_directory; /* Plugin directory */ char *scripts_directory; MSGS *messages; /* daemon message handler */ - int MaxConcurrentJobs; + uint32_t MaxConcurrentJobs; utime_t SDConnectTimeout; /* timeout in seconds */ utime_t heartbeat_interval; /* Interval to send heartbeats */ uint32_t max_network_buffer_size; /* max network buf size */ diff --git a/bacula/src/jcr.h b/bacula/src/jcr.h index 2ad419f19a..7d4c0cb8f6 100644 --- a/bacula/src/jcr.h +++ b/bacula/src/jcr.h @@ -111,7 +111,7 @@ enum { #define job_waiting(jcr) \ (jcr->JobStatus == JS_WaitFD || \ - jcr->JobStatus == JS_WaitSD || \ + jcr->JobStatus == JS_WaitSD || \ jcr->JobStatus == JS_WaitMedia || \ jcr->JobStatus == JS_WaitMount || \ jcr->JobStatus == JS_WaitStoreRes || \ @@ -164,13 +164,13 @@ typedef void (JCR_free_HANDLER)(JCR *jcr); class JCR { private: pthread_mutex_t mutex; /* jcr mutex */ - volatile int _use_count; /* use count */ + volatile int32_t _use_count; /* use count */ public: void lock() {P(mutex); }; void unlock() {V(mutex); }; void inc_use_count(void) {lock(); _use_count++; unlock(); }; void dec_use_count(void) {lock(); _use_count--; unlock(); }; - int use_count() { return _use_count; }; + int32_t use_count() { return _use_count; }; void init_mutex(void) {pthread_mutex_init(&mutex, NULL); }; void destroy_mutex(void) {pthread_mutex_destroy(&mutex); }; bool is_job_canceled() {return job_canceled(this); }; @@ -197,10 +197,10 @@ public: uint64_t JobBytes; /* Number of bytes processed this job */ uint64_t ReadBytes; /* Bytes read -- before compression */ uint32_t Errors; /* Number of non-fatal errors */ - volatile int JobStatus; /* ready, running, blocked, terminated */ - int JobType; /* backup, restore, verify ... */ - int JobLevel; /* Job level */ - int JobPriority; /* Job priority */ + volatile int32_t JobStatus; /* ready, running, blocked, terminated */ + int32_t JobType; /* backup, restore, verify ... */ + int32_t JobLevel; /* Job level */ + int32_t JobPriority; /* Job priority */ time_t sched_time; /* job schedule time, i.e. 
when it should start */ time_t start_time; /* when job actually started */ time_t run_time; /* used for computing speed */ @@ -215,7 +215,7 @@ public: char *where; /* prefix to restore files to */ char *RegexWhere; /* file relocation in restore */ alist *where_bregexp; /* BREGEXP alist for path manipulation */ - int cached_pnl; /* cached path length */ + int32_t cached_pnl; /* cached path length */ POOLMEM *cached_path; /* cached path */ bool prefix_links; /* Prefix links with Where path */ bool gui; /* set if gui using console */ @@ -269,8 +269,8 @@ public: uint32_t SDJobFiles; /* Number of files written, this job */ uint64_t SDJobBytes; /* Number of bytes processed this job */ uint32_t SDErrors; /* Number of non-fatal errors */ - volatile int SDJobStatus; /* Storage Job Status */ - volatile int FDJobStatus; /* File daemon Job Status */ + volatile int32_t SDJobStatus; /* Storage Job Status */ + volatile int32_t FDJobStatus; /* File daemon Job Status */ uint32_t ExpectedFiles; /* Expected restore files */ uint32_t MediaId; /* DB record IDs associated with this job */ FileId_t FileId; /* Last file id inserted */ @@ -292,10 +292,10 @@ public: POOLMEM *rstore_source; /* Where read storage came from */ POOLMEM *wstore_source; /* Where write storage came from */ POOLMEM *catalog_source; /* Where catalog came from */ - int replace; /* Replace option */ - int NumVols; /* Number of Volume used in pool */ - int reschedule_count; /* Number of times rescheduled */ - int FDVersion; /* File daemon version number */ + uint32_t replace; /* Replace option */ + int32_t NumVols; /* Number of Volume used in pool */ + int32_t reschedule_count; /* Number of times rescheduled */ + int32_t FDVersion; /* File daemon version number */ int64_t spool_size; /* Spool size for this job */ bool spool_data; /* Spool data in SD */ bool acquired_resource_locks; /* set if resource locks acquired */ @@ -316,7 +316,7 @@ public: uint32_t num_files_examined; /* files examined this job */ POOLMEM *last_fname; /* last file saved/verified */ POOLMEM *acl_text; /* text of ACL for backup */ - int last_type; /* type of last file saved/verified */ + int32_t last_type; /* type of last file saved/verified */ /*********FIXME********* add missing files and files to be retried */ int incremental; /* set if incremental for SINCE */ time_t mtime; /* begin time for SINCE */ @@ -326,8 +326,8 @@ public: POOLMEM *compress_buf; /* Compression buffer */ int32_t compress_buf_size; /* Length of compression buffer */ void *pZLIB_compress_workset; /* zlib compression session data */ - int replace; /* Replace options */ - int buf_size; /* length of buffer */ + int32_t replace; /* Replace options */ + int32_t buf_size; /* length of buffer */ FF_PKT *ff; /* Find Files packet */ char stored_addr[MAX_NAME_LENGTH]; /* storage daemon address */ uint32_t StartFile; @@ -342,7 +342,7 @@ public: DIRRES* director; /* Director resource */ bool VSS; /* VSS used by FD */ #ifdef USE_TCADB - TCADB *file_list; /* Previous file list (accurate mode) */ + TCADB *file_list; /* Previous file list (accurate mode) */ POOLMEM *hash_name; #else htable *file_list; /* Previous file list (accurate mode) */ @@ -356,7 +356,7 @@ public: JCR *prev_dev; /* previous JCR attached to device */ char *dir_auth_key; /* Dir auth key */ pthread_cond_t job_start_wait; /* Wait for FD to start Job */ - int type; + int32_t type; DCR *read_dcr; /* device context for reading */ DCR *dcr; /* device context record */ alist *dcrs; /* list of dcrs open */ @@ -367,7 +367,7 @@ public: int32_t 
NumWriteVolumes; /* number of volumes written */ int32_t NumReadVolumes; /* total number of volumes to read */ int32_t CurReadVolume; /* current read volume number */ - int label_errors; /* count of label errors */ + int32_t label_errors; /* count of label errors */ bool session_opened; long Ticket; /* ticket for this job */ bool ignore_label_errors; /* ignore Volume label errors */ @@ -375,7 +375,7 @@ public: bool no_attributes; /* set if no attributes wanted */ int64_t spool_size; /* Spool size for this job */ bool spool_data; /* set to spool data */ - int CurVol; /* Current Volume count */ + int32_t CurVol; /* Current Volume count */ DIRRES* director; /* Director resource */ alist *write_store; /* list of write storage devices sent by DIR */ alist *read_store; /* list of read devices sent by DIR */ @@ -395,13 +395,12 @@ public: uint32_t read_StartBlock; uint32_t read_EndBlock; /* Device wait times */ - int min_wait; - int max_wait; - int max_num_wait; - int wait_sec; - int rem_wait_sec; - int num_wait; - + int32_t min_wait; + int32_t max_wait; + int32_t max_num_wait; + int32_t wait_sec; + int32_t rem_wait_sec; + int32_t num_wait; #endif /* STORAGE_DAEMON */ }; @@ -412,10 +411,10 @@ public: */ struct s_last_job { dlink link; - int Errors; /* FD/SD errors */ - int JobType; - int JobStatus; - int JobLevel; + int32_t Errors; /* FD/SD errors */ + int32_t JobType; + int32_t JobStatus; + int32_t JobLevel; uint32_t JobId; uint32_t VolSessionId; uint32_t VolSessionTime; diff --git a/bacula/src/lib/parse_conf.c b/bacula/src/lib/parse_conf.c index 85cb77d831..1811a5ade4 100644 --- a/bacula/src/lib/parse_conf.c +++ b/bacula/src/lib/parse_conf.c @@ -212,12 +212,12 @@ static void init_resource(CONFIG *config, int type, RES_ITEM *items, int pass) items[i].default_value); if (items[i].flags & ITEM_DEFAULT && items[i].default_value != 0) { if (items[i].handler == store_bit) { - *(int *)(items[i].value) |= items[i].code; + *(uint32_t *)(items[i].value) |= items[i].code; } else if (items[i].handler == store_bool) { *(bool *)(items[i].value) = items[i].default_value != 0; - } else if (items[i].handler == store_pint || - items[i].handler == store_int) { - *(int *)(items[i].value) = items[i].default_value; + } else if (items[i].handler == store_pint32 || + items[i].handler == store_int32) { + *(uint32_t *)(items[i].value) = items[i].default_value; } else if (items[i].handler == store_int64) { *(int64_t *)(items[i].value) = items[i].default_value; } else if (items[i].handler == store_size) { @@ -600,19 +600,19 @@ void store_defs(LEX *lc, RES_ITEM *item, int index, int pass) /* Store an integer at specified address */ -void store_int(LEX *lc, RES_ITEM *item, int index, int pass) +void store_int32(LEX *lc, RES_ITEM *item, int index, int pass) { lex_get_token(lc, T_INT32); - *(int *)(item->value) = lc->int32_val; + *(uint32_t *)(item->value) = lc->int32_val; scan_to_eol(lc); set_bit(index, res_all.hdr.item_present); } /* Store a positive integer at specified address */ -void store_pint(LEX *lc, RES_ITEM *item, int index, int pass) +void store_pint32(LEX *lc, RES_ITEM *item, int index, int pass) { lex_get_token(lc, T_PINT32); - *(int *)(item->value) = lc->pint32_val; + *(uint32_t *)(item->value) = lc->pint32_val; scan_to_eol(lc); set_bit(index, res_all.hdr.item_present); } @@ -716,9 +716,9 @@ void store_bit(LEX *lc, RES_ITEM *item, int index, int pass) { lex_get_token(lc, T_NAME); if (strcasecmp(lc->str, "yes") == 0 || strcasecmp(lc->str, "true") == 0) { - *(int *)(item->value) |= item->code; + *(uint32_t 
*)(item->value) |= item->code; } else if (strcasecmp(lc->str, "no") == 0 || strcasecmp(lc->str, "false") == 0) { - *(int *)(item->value) &= ~(item->code); + *(uint32_t *)(item->value) &= ~(item->code); } else { scan_err2(lc, _("Expect %s, got: %s"), "YES, NO, TRUE, or FALSE", lc->str); /* YES and NO must not be translated */ } @@ -754,7 +754,7 @@ void store_label(LEX *lc, RES_ITEM *item, int index, int pass) /* Store the label pass 2 so that type is defined */ for (i=0; tapelabels[i].name; i++) { if (strcasecmp(lc->str, tapelabels[i].name) == 0) { - *(int *)(item->value) = tapelabels[i].token; + *(uint32_t *)(item->value) = tapelabels[i].token; i = 0; break; } diff --git a/bacula/src/lib/parse_conf.h b/bacula/src/lib/parse_conf.h index 6ce5586669..898c916193 100644 --- a/bacula/src/lib/parse_conf.h +++ b/bacula/src/lib/parse_conf.h @@ -58,9 +58,9 @@ struct RES_ITEM { RES *resvalue; RES **presvalue; }; - int code; /* item code/additional info */ - int flags; /* flags: default, required, ... */ - int default_value; /* default value */ + int32_t code; /* item code/additional info */ + uint32_t flags; /* flags: default, required, ... */ + int32_t default_value; /* default value */ }; /* For storing name_addr items in res_items table */ @@ -77,8 +77,8 @@ public: RES *next; /* pointer to next resource of this type */ char *name; /* resource name */ char *desc; /* resource description */ - int rcode; /* resource id or type */ - int refcnt; /* reference count for releasing */ + uint32_t rcode; /* resource id or type */ + int32_t refcnt; /* reference count for releasing */ char item_present[MAX_RES_ITEMS]; /* set if item is present in conf file */ }; @@ -91,7 +91,7 @@ public: struct RES_TABLE { const char *name; /* resource name */ RES_ITEM *items; /* list of resource keywords */ - int rcode; /* code if needed */ + uint32_t rcode; /* code if needed */ }; @@ -135,12 +135,12 @@ class CONFIG { public: const char *m_cf; /* config file */ LEX_ERROR_HANDLER *m_scan_error; /* error handler if non-null */ - int m_err_type; /* the way to terminate on failure */ + int32_t m_err_type; /* the way to terminate on failure */ void *m_res_all; /* pointer to res_all buffer */ - int m_res_all_size; /* length of buffer */ + int32_t m_res_all_size; /* length of buffer */ /* The below are not yet implemented */ - int m_r_first; /* first daemon resource type */ - int m_r_last; /* last daemon resource type */ + int32_t m_r_first; /* first daemon resource type */ + int32_t m_r_last; /* last daemon resource type */ RES_TABLE *m_resources; /* pointer to table of permitted resources */ RES **m_res_head; /* pointer to defined resources */ brwlock_t m_res_lock; /* resource lock */ @@ -149,11 +149,11 @@ public: void init( const char *cf, LEX_ERROR_HANDLER *scan_error, - int err_type, + int32_t err_type, void *vres_all, - int res_all_size, - int r_first, - int r_last, + int32_t res_all_size, + int32_t r_first, + int32_t r_last, RES_TABLE *resources, RES **res_head); @@ -198,8 +198,8 @@ void store_strname(LEX *lc, RES_ITEM *item, int index, int pass); void store_res(LEX *lc, RES_ITEM *item, int index, int pass); void store_alist_res(LEX *lc, RES_ITEM *item, int index, int pass); void store_alist_str(LEX *lc, RES_ITEM *item, int index, int pass); -void store_int(LEX *lc, RES_ITEM *item, int index, int pass); -void store_pint(LEX *lc, RES_ITEM *item, int index, int pass); +void store_int32(LEX *lc, RES_ITEM *item, int index, int pass); +void store_pint32(LEX *lc, RES_ITEM *item, int index, int pass); void store_msgs(LEX *lc, 
RES_ITEM *item, int index, int pass); void store_int64(LEX *lc, RES_ITEM *item, int index, int pass); void store_bit(LEX *lc, RES_ITEM *item, int index, int pass); diff --git a/bacula/src/stored/stored_conf.c b/bacula/src/stored/stored_conf.c index 15818e8bd5..dc141317af 100644 --- a/bacula/src/stored/stored_conf.c +++ b/bacula/src/stored/stored_conf.c @@ -76,7 +76,7 @@ static RES_ITEM store_items[] = { {"subsysdirectory", store_dir, ITEM(res_store.subsys_directory), 0, 0, 0}, {"plugindirectory", store_dir, ITEM(res_store.plugin_directory), 0, 0, 0}, {"scriptsdirectory", store_dir, ITEM(res_store.scripts_directory), 0, 0, 0}, - {"maximumconcurrentjobs", store_pint, ITEM(res_store.max_concurrent_jobs), 0, ITEM_DEFAULT, 20}, + {"maximumconcurrentjobs", store_pint32, ITEM(res_store.max_concurrent_jobs), 0, ITEM_DEFAULT, 20}, {"heartbeatinterval", store_time, ITEM(res_store.heartbeat_interval), 0, ITEM_DEFAULT, 0}, {"tlsauthenticate", store_bool, ITEM(res_store.tls_authenticate), 0, 0, 0}, {"tlsenable", store_bool, ITEM(res_store.tls_enable), 0, 0, 0}, @@ -146,19 +146,19 @@ static RES_ITEM dev_items[] = { {"alertcommand", store_strname,ITEM(res_dev.alert_command), 0, 0, 0}, {"maximumchangerwait", store_time, ITEM(res_dev.max_changer_wait), 0, ITEM_DEFAULT, 5 * 60}, {"maximumopenwait", store_time, ITEM(res_dev.max_open_wait), 0, ITEM_DEFAULT, 5 * 60}, - {"maximumopenvolumes", store_pint, ITEM(res_dev.max_open_vols), 0, ITEM_DEFAULT, 1}, - {"maximumnetworkbuffersize", store_pint, ITEM(res_dev.max_network_buffer_size), 0, 0, 0}, + {"maximumopenvolumes", store_pint32, ITEM(res_dev.max_open_vols), 0, ITEM_DEFAULT, 1}, + {"maximumnetworkbuffersize", store_pint32, ITEM(res_dev.max_network_buffer_size), 0, 0, 0}, {"volumepollinterval", store_time, ITEM(res_dev.vol_poll_interval), 0, 0, 0}, {"maximumrewindwait", store_time, ITEM(res_dev.max_rewind_wait), 0, ITEM_DEFAULT, 5 * 60}, - {"minimumblocksize", store_pint, ITEM(res_dev.min_block_size), 0, 0, 0}, - {"maximumblocksize", store_pint, ITEM(res_dev.max_block_size), 0, 0, 0}, + {"minimumblocksize", store_pint32, ITEM(res_dev.min_block_size), 0, 0, 0}, + {"maximumblocksize", store_pint32, ITEM(res_dev.max_block_size), 0, 0, 0}, {"maximumvolumesize", store_size, ITEM(res_dev.max_volume_size), 0, 0, 0}, {"maximumfilesize", store_size, ITEM(res_dev.max_file_size), 0, ITEM_DEFAULT, 1000000000}, {"volumecapacity", store_size, ITEM(res_dev.volume_capacity), 0, 0, 0}, {"spooldirectory", store_dir, ITEM(res_dev.spool_directory), 0, 0, 0}, {"maximumspoolsize", store_size, ITEM(res_dev.max_spool_size), 0, 0, 0}, {"maximumjobspoolsize", store_size, ITEM(res_dev.max_job_spool_size), 0, 0, 0}, - {"driveindex", store_pint, ITEM(res_dev.drive_index), 0, 0, 0}, + {"driveindex", store_pint32, ITEM(res_dev.drive_index), 0, 0, 0}, {"maximumpartsize", store_size, ITEM(res_dev.max_part_size), 0, ITEM_DEFAULT, 0}, {"mountpoint", store_strname,ITEM(res_dev.mount_point), 0, 0, 0}, {"mountcommand", store_strname,ITEM(res_dev.mount_command), 0, 0, 0}, @@ -204,7 +204,7 @@ RES_TABLE resources[] = { */ struct s_kw { const char *name; - int token; + int32_t token; }; static s_kw dev_types[] = { @@ -230,7 +230,7 @@ static void store_devtype(LEX *lc, RES_ITEM *item, int index, int pass) /* Store the label pass 2 so that type is defined */ for (i=0; dev_types[i].name; i++) { if (strcasecmp(lc->str, dev_types[i].name) == 0) { - *(int *)(item->value) = dev_types[i].token; + *(uint32_t *)(item->value) = dev_types[i].token; i = 0; break; } diff --git a/bacula/src/stored/stored_conf.h 
b/bacula/src/stored/stored_conf.h index 679ccfb58f..21323de09a 100644 --- a/bacula/src/stored/stored_conf.h +++ b/bacula/src/stored/stored_conf.h @@ -124,9 +124,9 @@ public: char *changer_command; /* Changer command -- external program */ char *alert_command; /* Alert command -- external program */ char *spool_directory; /* Spool file directory */ - int dev_type; /* device type */ - int label_type; /* label type */ - int autoselect; /* Automatically select from AutoChanger */ + uint32_t dev_type; /* device type */ + uint32_t label_type; /* label type */ + uint32_t autoselect; /* Automatically select from AutoChanger */ uint32_t drive_index; /* Autochanger drive index */ uint32_t cap_bits; /* Capabilities of this device */ utime_t max_changer_wait; /* Changer timeout */ diff --git a/bacula/src/version.h b/bacula/src/version.h index 03027f7d72..43612bdb48 100644 --- a/bacula/src/version.h +++ b/bacula/src/version.h @@ -3,9 +3,9 @@ */ #undef VERSION -#define VERSION "2.5.0" -#define BDATE "04 June 2008" -#define LSMDATE "04Jun08" +#define VERSION "2.5.1" +#define BDATE "18 June 2008" +#define LSMDATE "18Jun08" #define PROG_COPYRIGHT "Copyright (C) %d-2008 Free Software Foundation Europe e.V.\n" #define BYEAR "2008" /* year for copyright messages in progs */ diff --git a/bacula/technotes-2.5 b/bacula/technotes-2.5 index 81e624a049..5c5b884c2e 100644 --- a/bacula/technotes-2.5 +++ b/bacula/technotes-2.5 @@ -25,7 +25,13 @@ vtape driver General: +18Jun08 +kes Eliminate ints from conf files and structures to avoid + word alignment problems 17Jun08 +kes Fix bug reported by Scott Barninger where the bacula script + refers to scripts in the wrong directory. Needed to meet the + requirements of recent FHS location changes. ebl Disable vtape for FreeBSD. 15Jun08 ebl Modify disk-changer to check if slot contains something before -- 2.39.5
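
A minimal standalone sketch of the pattern this patch standardizes on: resource-item tables record a byte offset into a resource struct, and a generic handler (analogous to store_pint32 in parse_conf.c above) writes a fixed-width value through that offset, as init_resource() does for ITEM_DEFAULT entries. The struct, table, and helper names below are invented for illustration only and are not Bacula code. The point is that when the handler and the struct member both use uint32_t/int32_t, the slot has the same size and alignment on every platform, whereas a plain "int" member may not match what the handler writes -- which is what the commit message means by word alignment problems.

/* demo.c -- illustrative only; names are hypothetical, not Bacula APIs */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct demo_res {
   char     name[16];
   uint32_t port;            /* fixed 32-bit slot, same layout everywhere */
   uint32_t max_jobs;
};

struct demo_item {
   const char *keyword;       /* config keyword */
   size_t      offset;        /* byte offset of the member inside demo_res */
   uint32_t    default_value; /* value applied when the keyword is absent */
};

static struct demo_item demo_items[] = {
   { "dirport",               offsetof(struct demo_res, port),     9101 },
   { "maximumconcurrentjobs", offsetof(struct demo_res, max_jobs), 1    },
   { NULL, 0, 0 }
};

/* Generic "store an unsigned 32-bit integer" handler: writes through the
 * recorded offset, so it must agree with the member's exact width. */
static void store_u32(struct demo_res *res, const struct demo_item *item, uint32_t val)
{
   *(uint32_t *)((char *)res + item->offset) = val;
}

int main(void)
{
   struct demo_res res = { "Director", 0, 0 };

   /* apply defaults, the way init_resource() handles ITEM_DEFAULT */
   for (int i = 0; demo_items[i].keyword; i++) {
      store_u32(&res, &demo_items[i], demo_items[i].default_value);
   }
   printf("port=%u max_jobs=%u\n", (unsigned)res.port, (unsigned)res.max_jobs);
   return 0;
}

Built with any C99 compiler (e.g. gcc -std=c99 demo.c), this prints "port=9101 max_jobs=1"; swapping either the member or the handler back to a bare int is exactly the size/alignment mismatch the patch removes from the real RES_ITEM tables.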