Cleanup new timeout code for bconsole

diff --git a/bacula/src/dird/dird_conf.c b/bacula/src/dird/dird_conf.c
index 5afe768a6d2ca188867bf727ad1a3a8ccbd5dd75..0517e4a9abad84bb61d7b7b1ca5e9cce972abce3 100644
@@ -112,6 +112,7 @@ static RES_ITEM dir_items[] = {
    {"dirport",     store_addresses_port,    ITEM(res_dir.DIRaddrs),  0, ITEM_DEFAULT, 9101},
    {"diraddress",  store_addresses_address, ITEM(res_dir.DIRaddrs),  0, ITEM_DEFAULT, 9101},
    {"diraddresses",store_addresses,         ITEM(res_dir.DIRaddrs),  0, ITEM_DEFAULT, 9101},
+   {"dirsourceaddress",store_addresses_address, ITEM(res_dir.DIRsrc_addr),  0, ITEM_DEFAULT, 0},
    {"queryfile",   store_dir,      ITEM(res_dir.query_file), 0, ITEM_REQUIRED, 0},
    {"workingdirectory", store_dir, ITEM(res_dir.working_directory), 0, ITEM_REQUIRED, 0},
    {"plugindirectory",  store_dir, ITEM(res_dir.plugin_directory),  0, 0, 0},
@@ -313,7 +314,7 @@ RES_ITEM job_items[] = {
    {"enabled",     store_bool, ITEM(res_job.enabled), 0, ITEM_DEFAULT, true},
    {"spoolattributes",store_bool, ITEM(res_job.SpoolAttributes), 0, ITEM_DEFAULT, false},
    {"spooldata",   store_bool, ITEM(res_job.spool_data), 0, ITEM_DEFAULT, false},
-   {"spoolsize",   store_size, ITEM(res_job.spool_size), 0, 0, 0},
+   {"spoolsize",   store_size64, ITEM(res_job.spool_size), 0, 0, 0},
    {"rerunfailedlevels",   store_bool, ITEM(res_job.rerun_failed_levels), 0, ITEM_DEFAULT, false},
    {"prefermountedvolumes", store_bool, ITEM(res_job.PreferMountedVolumes), 0, ITEM_DEFAULT, true},
    {"runbeforejob", store_short_runscript,  ITEM(res_job.RunScripts),  0, 0, 0},
@@ -334,7 +335,7 @@ RES_ITEM job_items[] = {
    {"accurate",           store_bool, ITEM(res_job.accurate), 0,0,0},
    {"allowduplicatejobs", store_bool, ITEM(res_job.AllowDuplicateJobs), 0, ITEM_DEFAULT, false},
    {"allowhigherduplicates",   store_bool, ITEM(res_job.AllowHigherDuplicates), 0, ITEM_DEFAULT, true},
-   {"cancelqueuedduplicates",  store_bool, ITEM(res_job.CancelQueuedDuplicates), 0, ITEM_DEFAULT, true},
+   {"cancelqueuedduplicates",  store_bool, ITEM(res_job.CancelQueuedDuplicates), 0, ITEM_DEFAULT, false},
    {"cancelrunningduplicates", store_bool, ITEM(res_job.CancelRunningDuplicates), 0, ITEM_DEFAULT, false},
    {"pluginoptions", store_str, ITEM(res_job.PluginOptions), 0, 0, 0},
    {NULL, NULL, {0}, 0, 0, 0}
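
The other job_items change flips the default for cancelqueuedduplicates from true to false, so queued duplicate jobs are now left in the queue unless the administrator opts in. A hedged bacula-dir.conf excerpt (directive names come straight from the table above; the Job resource is trimmed to the duplicate-control lines and the name is a placeholder):

    Job {
      Name = "nightly-backup"
      AllowDuplicateJobs = no
      CancelQueuedDuplicates = yes    # restores the pre-change default
      CancelRunningDuplicates = no    # unchanged default
    }
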
@@ -385,13 +386,13 @@ static RES_ITEM pool_items[] = {
    {"maximumvolumes",  store_pint32,    ITEM(res_pool.max_volumes),   0, 0,        0},
    {"maximumvolumejobs", store_pint32,  ITEM(res_pool.MaxVolJobs),    0, 0,       0},
    {"maximumvolumefiles", store_pint32, ITEM(res_pool.MaxVolFiles),   0, 0,       0},
-   {"maximumvolumebytes", store_size, ITEM(res_pool.MaxVolBytes),   0, 0,       0},
+   {"maximumvolumebytes", store_size64, ITEM(res_pool.MaxVolBytes),   0, 0,       0},
    {"catalogfiles",    store_bool,    ITEM(res_pool.catalog_files),  0, ITEM_DEFAULT, true},
    {"volumeretention", store_time,    ITEM(res_pool.VolRetention),   0, ITEM_DEFAULT, 60*60*24*365},
    {"volumeuseduration", store_time,  ITEM(res_pool.VolUseDuration), 0, 0, 0},
    {"migrationtime",  store_time,     ITEM(res_pool.MigrationTime), 0, 0, 0},
-   {"migrationhighbytes", store_size, ITEM(res_pool.MigrationHighBytes), 0, 0, 0},
-   {"migrationlowbytes", store_size,  ITEM(res_pool.MigrationLowBytes), 0, 0, 0},
+   {"migrationhighbytes", store_size64, ITEM(res_pool.MigrationHighBytes), 0, 0, 0},
+   {"migrationlowbytes", store_size64,  ITEM(res_pool.MigrationLowBytes), 0, 0, 0},
    {"nextpool",      store_res,       ITEM(res_pool.NextPool), R_POOL, 0, 0},
    {"storage",       store_alist_res, ITEM(res_pool.storage),  R_STORAGE, 0, 0},
    {"autoprune",     store_bool,      ITEM(res_pool.AutoPrune), 0, ITEM_DEFAULT, true},
@@ -805,6 +806,9 @@ void dump_resource(int type, RES *reshdr, void sendit(void *sock, const char *fm
             }
             sendit(sock, "      N\n");
          }
+         if (incexe->ignoredir) {
+            sendit(sock, "      Z %s\n", incexe->ignoredir);
+         }
          for (j=0; j<incexe->name_list.size(); j++) {
             sendit(sock, "      I %s\n", incexe->name_list.get(j));
          }
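
dump_resource now emits the include/exclude block's ignore-dir marker as a Z line, next to the existing N and I lines. For an Include block that backs up /home and sets a marker file such as .nobackup, the dumped FileSet would contain something like (illustrative output):

          N
          Z .nobackup
          I /home
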
@@ -941,7 +945,7 @@ next_run:
       sendit(sock, _("      MaxVolJobs=%d MaxVolFiles=%d MaxVolBytes=%s\n"),
               res->res_pool.MaxVolJobs, 
               res->res_pool.MaxVolFiles,
-              edit_uint64(res->res_pool.MaxVolFiles, ed1));
+              edit_uint64(res->res_pool.MaxVolBytes, ed1));
       sendit(sock, _("      MigTime=%s MigHiBytes=%s MigLoBytes=%s\n"),
               edit_utime(res->res_pool.MigrationTime, ed1, sizeof(ed1)),
               edit_uint64(res->res_pool.MigrationHighBytes, ed2),
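
The one-line fix above corrects a copy-paste bug: MaxVolBytes was formatted from MaxVolFiles, so the printed byte limit merely repeated the file count. With the fix, a pool capped at 50G would report something like (illustrative values; edit_uint64 renders plain digits):

          MaxVolJobs=10 MaxVolFiles=0 MaxVolBytes=53687091200
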
@@ -1025,6 +1029,9 @@ static void free_incexe(INCEXE *incexe)
    if (incexe->opts_list) {
       free(incexe->opts_list);
    }
+   if (incexe->ignoredir) {
+      free(incexe->ignoredir);
+   }
    free(incexe);
 }
 
@@ -1079,6 +1086,9 @@ void free_resource(RES *sres, int type)
       if (res->res_dir.DIRaddrs) {
          free_addresses(res->res_dir.DIRaddrs);
       }
+      if (res->res_dir.DIRsrc_addr) {
+         free_addresses(res->res_dir.DIRsrc_addr);
+      }
       if (res->res_dir.tls_ctx) { 
          free_tls_context(res->res_dir.tls_ctx);
       }
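
Both teardown hunks, in free_incexe and free_resource, follow the usual ownership rule: whatever the parser allocated is released exactly once in the matching free path, with DIRsrc_addr getting the same free_addresses() call as DIRaddrs. The plain free() of ignoredir implies the parser stores a heap copy of the marker string; a standalone sketch of that pairing (plain C, not Bacula source, which would use bstrdup rather than strdup):

    #include <stdlib.h>
    #include <string.h>

    struct incexe_like {
       char *ignoredir;                 /* NULL until the directive is parsed */
    };

    static void parse_marker(struct incexe_like *ie, const char *tok)
    {
       ie->ignoredir = strdup(tok);     /* parser takes a heap copy */
    }

    static void free_incexe_like(struct incexe_like *ie)
    {
       if (ie->ignoredir) {             /* release exactly once */
          free(ie->ignoredir);
          ie->ignoredir = NULL;
       }
    }

    int main(void)
    {
       struct incexe_like ie = { 0 };
       parse_marker(&ie, ".nobackup");
       free_incexe_like(&ie);
       return 0;
    }
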
@@ -1978,7 +1988,7 @@ static void store_runscript(LEX *lc, RES_ITEM *item, int index, int pass)
        */
       res_runscript.set_job_code_callback(job_code_callback_filesetname);
       while ((c=(char*)res_runscript.commands->pop()) != NULL) {
-         t = (long)res_runscript.commands->pop();
+         t = (intptr_t)res_runscript.commands->pop();
          RUNSCRIPT *script = new_runscript();
          memcpy(script, &res_runscript, sizeof(RUNSCRIPT));
          script->command = c;
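
The last hunk replaces a (long) cast with (intptr_t) when popping a value that was pushed onto the commands alist through its void * slots. On LP64 systems the two types are the same width, but on LLP64 targets such as 64-bit Windows, long is 32 bits while pointers are 64, so round-tripping through long can truncate. A standalone check in plain C (not Bacula code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
       int anchor = 0;
       void *p = &anchor;
       intptr_t wide = (intptr_t)p;     /* guaranteed wide enough for any void * */
       printf("sizeof(long)=%zu sizeof(void *)=%zu sizeof(intptr_t)=%zu\n",
              sizeof(long), sizeof(void *), sizeof(intptr_t));
       printf("round-trip intact: %s\n", ((void *)wide == p) ? "yes" : "no");
       return 0;
    }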