const char *cnt_JobMedia = "SELECT count(*) FROM JobMedia WHERE MediaId=%s";
const char *sel_JobMedia = "SELECT JobId FROM JobMedia WHERE MediaId=%s";
+/* Count Jobs selected for File deletion. */
+const char *count_select_job =
+ "SELECT count(*) from Job "
+ "WHERE JobTDate<%s "
+ "AND ClientId=%s "
+ "AND PurgedFiles=0";
+
+
/* Select JobIds for File deletion. */
const char *select_job =
"SELECT JobId from Job "
* for use when inserting individual files into the tree.
*/
const char *uar_jobid_fileindex =
- "SELECT Job.JobId, File.FileIndex FROM Job,File,Path,Filename,Client "
+ "SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client "
"WHERE Job.JobId=File.JobId "
- "AND Job.StartTime<'%s' "
+ "AND Job.StartTime<='%s' "
"AND Path.Path='%s' "
"AND Filename.Name='%s' "
"AND Client.Name='%s' "
"ORDER BY Job.StartTime DESC LIMIT 1";
const char *uar_jobids_fileindex =
- "SELECT Job.JobId, File.FileIndex FROM Job,File,Path,Filename,Client "
+ "SELECT Job.JobId,File.FileIndex FROM Job,File,Path,Filename,Client "
"WHERE Job.JobId IN (%s) "
"AND Job.JobId=File.JobId "
- "AND Job.StartTime<'%s' "
+ "AND Job.StartTime<='%s' "
"AND Path.Path='%s' "
"AND Filename.Name='%s' "
"AND Client.Name='%s' "
extern const char CATS_IMP_EXP *select_admin_del;
extern const char CATS_IMP_EXP *select_migrate_del;
extern const char CATS_IMP_EXP *select_job;
+extern const char CATS_IMP_EXP *count_select_job;
extern const char CATS_IMP_EXP *del_File;
extern const char CATS_IMP_EXP *cnt_File;
extern const char CATS_IMP_EXP *cnt_DelCand;
void purge_files_from_job(UAContext *ua, JobId_t JobId);
void purge_job_from_catalog(UAContext *ua, JobId_t JobId);
void purge_job_records_from_catalog(UAContext *ua, JobId_t JobId);
+void purge_jobs_from_catalog(UAContext *ua, char *jobs);
/* ua_run.c */
}
-/*
- * Called here to count the number of Jobs to be pruned
- */
-static int file_count_handler(void *ctx, int num_fields, char **row)
-{
- struct s_file_del_ctx *del = (struct s_file_del_ctx *)ctx;
- del->tot_ids++;
- return 0;
-}
-
-
/*
 * Called here to build an in-memory list of JobIds to be
 * deleted and their associated PurgedFiles flags.
int prune_files(UAContext *ua, CLIENT *client)
{
struct s_file_del_ctx del;
+ struct s_count_ctx cnt;
POOLMEM *query = get_pool_memory(PM_MESSAGE);
int i;
utime_t now, period;
now = (utime_t)time(NULL);
/* Select Jobs -- for counting */
- Mmsg(query, select_job, edit_uint64(now - period, ed1),
+ Mmsg(query, count_select_job, edit_uint64(now - period, ed1),
edit_int64(cr.ClientId, ed2));
Dmsg3(050, "select now=%u period=%u sql=%s\n", (uint32_t)now, (uint32_t)period, query);
- if (!db_sql_query(ua->db, query, file_count_handler, (void *)&del)) {
- if (ua->verbose) {
- bsendmsg(ua, "%s", db_strerror(ua->db));
- }
+ cnt.count = 0;
+ if (!db_sql_query(ua->db, query, count_handler, (void *)&cnt)) {
+ bsendmsg(ua, "%s", db_strerror(ua->db));
Dmsg0(050, "Count failed\n");
goto bail_out;
}
- if (del.tot_ids == 0) {
+ if (cnt.count == 0) {
if (ua->verbose) {
bsendmsg(ua, _("No Files found to prune.\n"));
}
goto bail_out;
}
- if (del.tot_ids < MAX_DEL_LIST_LEN) {
- del.max_ids = del.tot_ids + 1;
+ if (cnt.count < MAX_DEL_LIST_LEN) {
+ del.max_ids = cnt.count + 1;
} else {
del.max_ids = MAX_DEL_LIST_LEN;
}
del.JobId = (JobId_t *)malloc(sizeof(JobId_t) * del.max_ids);
/* Now process same set but making a delete list */
+ Mmsg(query, select_job, edit_uint64(now - period, ed1),
+ edit_int64(cr.ClientId, ed2));
db_sql_query(ua->db, query, file_delete_handler, (void *)&del);
for (i=0; i < del.num_ids; i++) {
}
/*
- * OK, now we have the list of JobId's to be pruned, first check
- * if the Files have been purged, if not, purge (delete) them.
- * Then delete the Job entry, and finally and JobMedia records.
+    * OK, now we have the list of JobIds to be pruned; send them
+    * off to be deleted in batches of 1000.
*/
-   for (i=0; i < del.num_ids; i++) {
-      /* Don't prune current job */
-      if (ua->jcr->JobId != del.JobId[i]) {
-         if (!del.PurgedFiles[i]) {
-            purge_files_from_job(ua, del.JobId[i]);
+
+   for (i=0; del.num_ids; ) {
+      /* Build a comma-separated list of up to 1000 JobIds per batch */
+      pm_strcpy(query, "");
+      for (int j=0; j<1000 && del.num_ids; j++) {
+         del.num_ids--;
+         /* Don't prune the current job */
+         if (ua->jcr->JobId == del.JobId[i]) {
+            i++;
+            continue;
          }
-         purge_job_from_catalog(ua, del.JobId[i]);
+         if (*query != 0) {
+            pm_strcat(query, ",");
+         }
+         pm_strcat(query, edit_int64(del.JobId[i++], ed1));
          del.num_del++;
       }
+      if (*query != 0) {
+         purge_jobs_from_catalog(ua, query);
+      }
    }
bsendmsg(ua, _("Pruned %d %s for client %s from catalog.\n"), del.num_del,
del.num_del==1?_("Job"):_("Jobs"), client->name());
}
-
/*
* Remove File records for a particular Job.
*/
db_sql_query(ua->db, query.c_str(), NULL, (void *)NULL);
}
+void purge_jobs_from_catalog(UAContext *ua, char *jobs)
+{
+ POOL_MEM query(PM_MESSAGE);
+
+ /* Delete (or purge) records associated with the job */
+ Mmsg(query, "DELETE FROM File WHERE JobId IN (%s)", jobs);
+ db_sql_query(ua->db, query.c_str(), NULL, (void *)NULL);
+ Dmsg1(050, "Delete File sql=%s\n", query.c_str());
+
+ Mmsg(query, "DELETE FROM JobMedia WHERE JobId IN (%s)", jobs);
+ db_sql_query(ua->db, query.c_str(), NULL, (void *)NULL);
+ Dmsg1(050, "Delete JobMedia sql=%s\n", query.c_str());
+
+ Mmsg(query, "DELETE FROM Log WHERE JobId IN (%s)", jobs);
+ db_sql_query(ua->db, query.c_str(), NULL, (void *)NULL);
+ Dmsg1(050, "Delete Log sql=%s\n", query.c_str());
+
+ /* Now remove the Job record itself */
+ Mmsg(query, "DELETE FROM Job WHERE JobId IN (%s)", jobs);
+ db_sql_query(ua->db, query.c_str(), NULL, (void *)NULL);
+ Dmsg1(050, "Delete Job sql=%s\n", query.c_str());
+
+ /*
+ * Now mark Job as having files purged. This is necessary to
+ * avoid having too many Jobs to process in future prunings. If
+ * we don't do this, the number of JobId's in our in memory list
+ * could grow very large.
+ */
+ Mmsg(query, "UPDATE Job SET PurgedFiles=1 WHERE JobId IN (%s)", jobs);
+ db_sql_query(ua->db, query.c_str(), NULL, (void *)NULL);
+}
+
+
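The 22Mar07 note below remarks that the same batching technique can be applied to other prune/purge subroutines. The following is a minimal sketch of how such a reusable helper might look; the helper name and the 1000-per-batch constant are illustrative assumptions, while purge_jobs_from_catalog(), the pool-memory routines, and edit_int64() come from the code above.

/*
 * Illustrative sketch only -- not part of this change.
 * Purge an arbitrary list of JobIds in batches of up to 1000,
 * reusing the comma-separated IN-list form expected by
 * purge_jobs_from_catalog().
 */
static void purge_job_list_in_batches(UAContext *ua, JobId_t *jobids, int num_ids)
{
   POOLMEM *jobs = get_pool_memory(PM_MESSAGE);
   char ed1[50];
   int i = 0;

   while (i < num_ids) {
      pm_strcpy(jobs, "");                    /* start a new batch */
      for (int j = 0; j < 1000 && i < num_ids; j++, i++) {
         if (*jobs != 0) {
            pm_strcat(jobs, ",");             /* comma-separate the JobIds */
         }
         pm_strcat(jobs, edit_int64(jobids[i], ed1));
      }
      if (*jobs != 0) {
         purge_jobs_from_catalog(ua, jobs);   /* delete this batch */
      }
   }
   free_pool_memory(jobs);
}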
void purge_files_from_volume(UAContext *ua, MEDIA_DBR *mr )
{} /* ***FIXME*** implement */
}
if (!rx->found) {
ua->error_msg(_("No database record found for: %s\n"), file);
+// ua->error_msg("Query=%s\n", rx->query);
return true;
}
return true;
if (dev->num_writers == 0) {
memcpy(&dev->VolCatInfo, &dcr->VolCatInfo, sizeof(dev->VolCatInfo));
}
+
+ /*
+ * Insanity check
+ *
+ * Check to see if the tape position as defined by the OS is
+ * the same as our concept. If it is not, we bail out, because
+ * it means the user has probably manually rewound the tape.
+ * Note, we check only if num_writers == 0, but this code will
+ * also work fine for any number of writers. If num_writers > 0,
+ * we probably should cancel all jobs using this device, or
+ * perhaps even abort the SD, or at a minimum, mark the tape
+ * in error. Another strategy with num_writers == 0 would be
+ * to rewind the tape and issue a new eod() request.
+ */
+ if (dev->is_tape() && dev->num_writers == 0) {
+ int32_t file = dev->get_os_tape_file();
+ if (file >= 0 && file != (int32_t)dev->get_file()) {
+ Jmsg(jcr, M_FATAL, 0, _("Invalid tape position on volume \"%s\""
+ " on device %s. Expected %d, got %d\n"),
+ dev->VolHdr.VolumeName, dev->print_name(), dev->get_file(), file);
+ goto get_out;
+ }
+ }
}
} else {
/* Not already in append mode, so mount the device */
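For background on the insanity check above: get_os_tape_file() is assumed to ask the operating system for the drive's current file position, which on Linux-style systems is typically read with the MTIOCGET ioctl. A minimal sketch under that assumption (the helper name is hypothetical):

#include <sys/ioctl.h>
#include <sys/mtio.h>
#include <stdint.h>

/*
 * Illustrative sketch only: return the file number the OS reports
 * for the tape open on fd, or -1 if the position cannot be determined.
 */
static int32_t os_tape_file_sketch(int fd)
{
   struct mtget mt_stat;

   if (ioctl(fd, MTIOCGET, (char *)&mt_stat) < 0) {
      return -1;                     /* position unknown */
   }
   return (int32_t)mt_stat.mt_fileno;
}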
Technical notes on version 2.1
General:
+22Mar07
+kes Implement new pruning code that prunes up to 1000 jobs at
+ the same time. The same technique can be applied to a number
+ of other prune/purge subroutines.
+kes Add an insanity check when starting a new tape job to ensure
+    that the tape position has not been changed. If it has, fail
+    the job.
+kes Fix Win32 build for dlls to add the following:
+ src/lib/bsock.c -- new file with new entry points
+ src/lib/dlist.c -- new entry points
+ src/cats/sql_create.c -- new entry point
+ src/cats/sql_cmds.c -- new DATA item exported
21Mar07
ebl Batch mode cleanup; bscan-test is OK now.
kes Implement SD code to check length of disk volume before appending.