/* sql_update.c */
bool db_update_job_start_record(JCR *jcr, B_DB *db, JOB_DBR *jr);
-int db_update_job_end_record(JCR *jcr, B_DB *db, JOB_DBR *jr, bool stats_enabled);
+int db_update_job_end_record(JCR *jcr, B_DB *db, JOB_DBR *jr);
int db_update_client_record(JCR *jcr, B_DB *mdb, CLIENT_DBR *cr);
int db_update_pool_record(JCR *jcr, B_DB *db, POOL_DBR *pr);
bool db_update_storage_record(JCR *jcr, B_DB *mdb, STORAGE_DBR *sr);
int db_add_digest_to_file_record(JCR *jcr, B_DB *mdb, FileId_t FileId, char *digest, int type);
int db_mark_file_record(JCR *jcr, B_DB *mdb, FileId_t FileId, JobId_t JobId);
void db_make_inchanger_unique(JCR *jcr, B_DB *mdb, MEDIA_DBR *mr);
+int db_update_stats(JCR *jcr, B_DB *mdb, time_t age);
#endif /* __SQL_PROTOS_H */
return stat;
}
+/*
+ * db_update_stats -- copy finished Job records into the JobStat
+ * table for long term statistics.
+ *
+ * Only jobs whose JobStatus is 'T', 'f', 'A' or 'E', that ended
+ * before (now - age) seconds, and that are not already present in
+ * JobStat are copied.
+ *
+ * Returns the number of rows affected by the INSERT (i.e. the
+ * number of job records copied).
+ */
+int
+db_update_stats(JCR *jcr, B_DB *mdb, time_t age)
+{
+ char ed1[30];
+ utime_t now = (utime_t)time(NULL);
+ edit_uint64(now - age, ed1);  /* cutoff JobTDate as a string for the query */
+
+ Mmsg(mdb->cmd,
+ "INSERT INTO JobStat "
+ "SELECT * "
+ "FROM Job "
+ "WHERE JobStatus IN ('T', 'f', 'A', 'E') "
+ "AND JobId NOT IN (SELECT JobId FROM JobStat) "
+ "AND JobTDate < %s ", ed1);
+ QUERY_DB(jcr, mdb, mdb->cmd); /* TODO: get a message ? */
+ return sql_affected_rows(mdb);
+}
+
/*
* Given an incoming integer, set the string buffer to either NULL or the value
*
bsnprintf(s, n, id ? "%s" : "NULL", edit_int64(id, ed1));
}
-
/*
* Update the Job record at end of Job
*
* 1 on success
*/
int
-db_update_job_end_record(JCR *jcr, B_DB *mdb, JOB_DBR *jr, bool stats_enabled)
+db_update_job_end_record(JCR *jcr, B_DB *mdb, JOB_DBR *jr)
{
char dt[MAX_TIME_LENGTH];
char rdt[MAX_TIME_LENGTH];
stat = UPDATE_DB(jcr, mdb, mdb->cmd);
- if (stat && stats_enabled) {
- Mmsg(mdb->cmd,
- "INSERT INTO JobStat (SELECT * FROM Job WHERE JobId=%s)",
- edit_int64(jr->JobId, ed3));
- INSERT_DB(jcr, mdb, mdb->cmd); /* TODO: get a message ? */
- }
db_unlock(mdb);
return stat;
}
{
int stat;
char ed1[50];
-
db_lock(mdb);
Mmsg(mdb->cmd, "UPDATE Storage SET AutoChanger=%d WHERE StorageId=%s",
sr->AutoChanger, edit_int64(sr->StorageId, ed1));
{"selectionpattern", store_str, ITEM(res_job.selection_pattern), 0, 0, 0},
{"runscript", store_runscript, ITEM(res_job.RunScripts), 0, ITEM_NO_EQUALS, 0},
{"selectiontype", store_migtype, ITEM(res_job.selection_type), 0, 0, 0},
- {"usestatistics", store_bool, ITEM(res_job.stats_enabled), 0, 0, 0},
{"accurate", store_bool, ITEM(res_job.accurate), 0,0,0},
{"allowduplicatejobs", store_bool, ITEM(res_job.AllowDuplicateJobs), 0, ITEM_DEFAULT, false},
{"allowhigherduplicates", store_bool, ITEM(res_job.AllowHigherDuplicates), 0, ITEM_DEFAULT, true},
if (res->res_job.spool_size) {
sendit(sock, _(" SpoolSize=%s\n"), edit_uint64(res->res_job.spool_size, ed1));
}
- if (res->res_job.stats_enabled) {
- sendit(sock, _(" StatsEnabled=%d\n"), res->res_job.stats_enabled);
- }
if (res->res_job.JobType == JT_BACKUP) {
sendit(sock, _(" Accurate=%d\n"), res->res_job.accurate);
}
bool write_part_after_job; /* Set to write part after job in SD */
bool enabled; /* Set if job enabled */
bool OptimizeJobScheduling; /* Set if we should optimize Job scheduling */
- bool stats_enabled; /* Keep job records in a table for long term statistics */
bool accurate; /* Set if it is an accurate backup job */
bool AllowDuplicateJobs; /* Allow duplicate jobs */
bool AllowHigherDuplicates; /* Permit Higher Level */
jcr->jr.VolSessionId = jcr->VolSessionId;
jcr->jr.VolSessionTime = jcr->VolSessionTime;
jcr->jr.JobErrors = jcr->Errors;
- if (!db_update_job_end_record(jcr, jcr->db, &jcr->jr, jcr->job->stats_enabled)) {
+ if (!db_update_job_end_record(jcr, jcr->db, &jcr->jr)) {
Jmsg(jcr, M_WARNING, 0, _("Error updating job record. %s"),
db_strerror(jcr->db));
}
BSOCK *user = ua->UA_sock;
- Dmsg1(900, "Command: %s\n", ua->UA_sock->msg);
+ Dmsg1(900, "Command: %s\n", ua->argk[0]);
if (ua->argc == 0) {
return false;
}
static int update_volume(UAContext *ua);
static bool update_pool(UAContext *ua);
static bool update_job(UAContext *ua);
+static bool update_stats(UAContext *ua);
/*
* Update a Pool Record in the database.
* changes pool info for volume
* update slots [scan=...]
* updates autochanger slots
+ * update stats [days=...]
+ * updates long term statistics
*/
int update_cmd(UAContext *ua, const char *cmd)
{
NT_("pool"), /* 2 */
NT_("slots"), /* 3 */
NT_("jobid"), /* 4 */
+ NT_("stats"), /* 5 */
NULL};
if (!open_client_db(ua)) {
case 4:
update_job(ua);
return 1;
+ case 5:
+ update_stats(ua);
+ return 1;
default:
break;
}
add_prompt(ua, _("Volume parameters"));
add_prompt(ua, _("Pool from resource"));
add_prompt(ua, _("Slots from autochanger"));
+ add_prompt(ua, _("Long term statistics"));
switch (do_prompt(ua, _("item"), _("Choose catalog item to update"), NULL, 0)) {
case 0:
update_volume(ua);
case 2:
update_slots(ua);
break;
+ case 3:
+ update_stats(ua);
+ break;
default:
break;
}
return 1;
}
+/*
+ * Update long term statistics -- console command
+ *   "update stats [days=...]"
+ *
+ * Converts the optional days= argument to an age in seconds and
+ * delegates the copy of Job records into JobStat to
+ * db_update_stats().  With no days= argument, since stays 0, so
+ * every eligible finished job is considered.
+ */
+static bool update_stats(UAContext *ua)
+{
+ int i = find_arg_with_value(ua, NT_("days"));
+ utime_t since=0;
+
+ if (i >= 0) {
+ since = atoi(ua->argv[i]) * 24*60*60; /* days -> seconds; NOTE(review): atoi silently yields 0 on bad input -- confirm acceptable */
+ }
+
+ int nb = db_update_stats(ua->jcr, ua->db, since);
+ ua->info_msg(_("Updating %i job(s).\n"), nb);
+
+ return true;
+}
+
/*
* Update pool record -- pull info from current POOL resource
*/
jr.VolSessionTime = mjcr->VolSessionTime;
jr.JobTDate = (utime_t)mjcr->start_time;
jr.ClientId = mjcr->ClientId;
- if (!db_update_job_end_record(bjcr, db, &jr, false)) {
+ if (!db_update_job_end_record(bjcr, db, &jr)) {
Pmsg1(0, _("Could not update job record. ERR=%s\n"), db_strerror(db));
}
mjcr->read_dcr = NULL;
return 1;
}
- if (!db_update_job_end_record(bjcr, db, jr, false)) {
+ if (!db_update_job_end_record(bjcr, db, jr)) {
Pmsg2(0, _("Could not update JobId=%u record. ERR=%s\n"), jr->JobId, db_strerror(db));
free_jcr(mjcr);
return 0;
remove reader/writer in FOPTS????
General:
+09Sep08
+ebl Change the new statistics implementation. Remove the UseStatistics
+ directive and add an "update stats [days=...]" command. You can
+ now decide when to copy job records from the Job table to JobStat.
+ Statistics are much more accurate with this.
08Sep08
kes Fix SQL case problem that may cause the failure of DiskToCatalog
in bug #1149.