if ( bdb->bi_txn_cp_task ) {
struct re_s *re = bdb->bi_txn_cp_task;
bdb->bi_txn_cp_task = NULL;
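+ /* stop the checkpoint task if it is running and remove it; runqueue access is serialized by rq_mutex */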
+ ldap_pvt_thread_mutex_lock( &slapd_rq.rq_mutex );
if ( ldap_pvt_runqueue_isrunning( &slapd_rq, re ) )
ldap_pvt_runqueue_stoptask( &slapd_rq, re );
ldap_pvt_runqueue_remove( &slapd_rq, re );
+ ldap_pvt_thread_mutex_unlock( &slapd_rq.rq_mutex );
}
bdb->bi_txn_cp = 0;
break;
c->log );
return 1;
}
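+ /* (re)schedule the checkpoint task at bi_txn_cp_min minutes; insert while holding rq_mutex */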
+ ldap_pvt_thread_mutex_lock( &slapd_rq.rq_mutex );
bdb->bi_txn_cp_task = ldap_pvt_runqueue_insert( &slapd_rq,
bdb->bi_txn_cp_min * 60, bdb_checkpoint, bdb,
LDAP_XSTRING(bdb_checkpoint), c->be->be_suffix[0].bv_val );
+ ldap_pvt_thread_mutex_unlock( &slapd_rq.rq_mutex );
}
}
} break;
c->log );
return 1;
}
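+ /* queue the online indexing task (36000s = 10 hours) while holding rq_mutex */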
+ ldap_pvt_thread_mutex_lock( &slapd_rq.rq_mutex );
bdb->bi_index_task = ldap_pvt_runqueue_insert( &slapd_rq, 36000,
bdb_online_index, c->be,
LDAP_XSTRING(bdb_online_index), c->be->be_suffix[0].bv_val );
+ ldap_pvt_thread_mutex_unlock( &slapd_rq.rq_mutex );
}
break;
if ( li->li_task ) {
struct re_s *re = li->li_task;
li->li_task = NULL;
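+ /* stop the purge task if it is running and remove it, all under rq_mutex */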
+ ldap_pvt_thread_mutex_lock( &slapd_rq.rq_mutex );
if ( ldap_pvt_runqueue_isrunning( &slapd_rq, re ))
ldap_pvt_runqueue_stoptask( &slapd_rq, re );
ldap_pvt_runqueue_remove( &slapd_rq, re );
+ ldap_pvt_thread_mutex_unlock( &slapd_rq.rq_mutex );
}
li->li_age = 0;
li->li_cycle = 0;
struct re_s *re = li->li_task;
if ( re )
re->interval.tv_sec = li->li_cycle;
- else
+ else {
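+ /* no purge task yet: schedule accesslog_purge at li_cycle seconds, under rq_mutex */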
+ ldap_pvt_thread_mutex_lock( &slapd_rq.rq_mutex );
li->li_task = ldap_pvt_runqueue_insert( &slapd_rq,
li->li_cycle, accesslog_purge, li,
"accesslog_purge", li->li_db ?
li->li_db->be_suffix[0].bv_val :
c->be->be_suffix[0].bv_val );
+ ldap_pvt_thread_mutex_unlock( &slapd_rq.rq_mutex );
+ }
}
}
break;
ber_dupbv( &li->li_db->be_rootndn, li->li_db->be_nsuffix );
}
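+ /* arrange for accesslog_db_root to create the log DB root entry; insert under rq_mutex */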
+ ldap_pvt_thread_mutex_lock( &slapd_rq.rq_mutex );
ldap_pvt_runqueue_insert( &slapd_rq, 3600, accesslog_db_root, on,
"accesslog_db_root", li->li_db->be_suffix[0].bv_val );
+ ldap_pvt_thread_mutex_unlock( &slapd_rq.rq_mutex );
return 0;
}
}
/* re-fetch it, in case it was already removed */
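+ /* keep rq_mutex held across find/stop/remove so the entry cannot be rescheduled in between */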
+ ldap_pvt_thread_mutex_lock( &slapd_rq.rq_mutex );
sie->si_re = ldap_pvt_runqueue_find( &slapd_rq, do_syncrepl, sie );
if ( sie->si_re ) {
if ( ldap_pvt_runqueue_isrunning( &slapd_rq, sie->si_re ) )
ldap_pvt_runqueue_stoptask( &slapd_rq, sie->si_re );
ldap_pvt_runqueue_remove( &slapd_rq, sie->si_re );
}
+ ldap_pvt_thread_mutex_unlock( &slapd_rq.rq_mutex );
ldap_pvt_thread_mutex_destroy( &sie->si_mutex );
bindconf_free( &sie->si_bindconf );
if ( !isMe ) {
init_syncrepl( si );
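+ /* schedule do_syncrepl at si_interval seconds; the runqueue insert happens under rq_mutex */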
+ ldap_pvt_thread_mutex_lock( &slapd_rq.rq_mutex );
si->si_re = ldap_pvt_runqueue_insert( &slapd_rq,
si->si_interval, do_syncrepl, si, "do_syncrepl",
si->si_ridtxt );
+ ldap_pvt_thread_mutex_unlock( &slapd_rq.rq_mutex );
if ( si->si_re )
rc = config_sync_shadow( c ) ? -1 : 0;
else
for ( sip = &c->be->be_syncinfo, i=0; *sip; i++ ) {
si = *sip;
if ( c->valx == -1 || i == c->valx ) {
+ int isrunning = 0;
*sip = si->si_next;
/* If the task is currently active, we have to leave
* it running. It will exit on its own. This will only
* happen when running on the cn=config DB.
*/
- if ( si->si_re &&
- ldap_pvt_runqueue_isrunning( &slapd_rq, si->si_re ) ) {
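+ /* sample the running state under rq_mutex; the result is used after the lock is dropped */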
+ if ( si->si_re ) {
+ ldap_pvt_thread_mutex_lock( &slapd_rq.rq_mutex );
+ isrunning = ldap_pvt_runqueue_isrunning( &slapd_rq, si->si_re );
+ ldap_pvt_thread_mutex_unlock( &slapd_rq.rq_mutex );
+ }
+ if ( si->si_re && isrunning ) {
si->si_ctype = 0;
} else {
syncinfo_free( si, 0 );