/* syncprov.c - syncrepl provider */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
*
- * Copyright 2004-2014 The OpenLDAP Foundation.
+ * Copyright 2004-2017 The OpenLDAP Foundation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include "config.h"
#include "ldap_rq.h"
+#ifdef LDAP_DEVEL
#define CHECK_CSN 1
+#endif
/* A modify request on a particular entry */
typedef struct modinst {
/* Record of a persistent search */
typedef struct syncops {
struct syncops *s_next;
+ struct syncprov_info_t *s_si;
struct berval s_base; /* ndn of search base */
ID s_eid; /* entryID of search base */
Operation *s_op; /* search op */
int s_inuse; /* reference count */
struct syncres *s_res;
struct syncres *s_restail;
+ void *s_pool_cookie;
ldap_pvt_thread_mutex_t s_mutex;
} syncops;
static void free_resinfo( syncres *sr )
{
syncres **st;
+ int freeit = 0;
ldap_pvt_thread_mutex_lock( &sr->s_info->ri_mutex );
for (st = &sr->s_info->ri_list; *st; st = &(*st)->s_rilist) {
if (*st == sr) {
break;
}
}
+ if ( !sr->s_info->ri_list )
+ freeit = 1;
ldap_pvt_thread_mutex_unlock( &sr->s_info->ri_mutex );
- if ( !sr->s_info->ri_list ) {
+ if ( freeit ) {
ldap_pvt_thread_mutex_destroy( &sr->s_info->ri_mutex );
if ( sr->s_info->ri_e )
entry_free( sr->s_info->ri_e );
}
}
+#define FS_UNLINK 1
+#define FS_LOCK 2
+
static int
-syncprov_free_syncop( syncops *so )
+syncprov_free_syncop( syncops *so, int flags )
{
syncres *sr, *srnext;
GroupAssertion *ga, *gnext;
- ldap_pvt_thread_mutex_lock( &so->s_mutex );
+ if ( flags & FS_LOCK )
+ ldap_pvt_thread_mutex_lock( &so->s_mutex );
/* already being freed, or still in use */
if ( !so->s_inuse || --so->s_inuse > 0 ) {
- ldap_pvt_thread_mutex_unlock( &so->s_mutex );
+ if ( flags & FS_LOCK )
+ ldap_pvt_thread_mutex_unlock( &so->s_mutex );
return 0;
}
ldap_pvt_thread_mutex_unlock( &so->s_mutex );
+ if (( flags & FS_UNLINK ) && so->s_si ) {
+ syncops **sop;
+ ldap_pvt_thread_mutex_lock( &so->s_si->si_ops_mutex );
+ for ( sop = &so->s_si->si_ops; *sop; sop = &(*sop)->s_next ) {
+ if ( *sop == so ) {
+ *sop = so->s_next;
+ break;
+ }
+ }
+ ldap_pvt_thread_mutex_unlock( &so->s_si->si_ops_mutex );
+ }
if ( so->s_flags & PS_IS_DETACHED ) {
filter_free( so->s_op->ors_filter );
for ( ga = so->s_op->o_groups; ga; ga=gnext ) {
static int
syncprov_qplay( Operation *op, syncops *so )
{
- slap_overinst *on = LDAP_SLIST_FIRST(&so->s_op->o_extra)->oe_key;
syncres *sr;
int rc = 0;
if ( rc == 0 && so->s_res ) {
syncprov_qstart( so );
- } else {
- so->s_flags ^= PS_TASK_QUEUED;
}
- ldap_pvt_thread_mutex_unlock( &so->s_mutex );
return rc;
}
rc = syncprov_qplay( op, so );
+ /* if an error occurred, or no responses left, task is no longer queued */
+ if ( !rc && !so->s_res )
+ rc = 1;
+
/* decrement use count... */
- syncprov_free_syncop( so );
+ if ( !syncprov_free_syncop( so, FS_UNLINK )) {
+ if ( rc )
+			/* syncop wasn't freed (refcount still held); if the task is
+			 * no longer queued, clear the queued flag */
+ so->s_flags ^= PS_TASK_QUEUED;
+ ldap_pvt_thread_mutex_unlock( &so->s_mutex );
+ }
return NULL;
}
{
so->s_flags |= PS_TASK_QUEUED;
so->s_inuse++;
- ldap_pvt_thread_pool_submit( &connection_pool,
- syncprov_qtask, so );
+ ldap_pvt_thread_pool_submit2( &connection_pool,
+ syncprov_qtask, so, &so->s_pool_cookie );
}
/* Queue a persistent search response */
a = attr_find( opc->se->e_attrs, slap_schema.si_ad_entryUUID );
if ( a )
ri->ri_uuid = a->a_nvals[0];
+ else
+ ri->ri_uuid.bv_len = 0;
if ( csn.bv_len ) {
ri->ri_csn.bv_val = (char *)(ri + 1);
ri->ri_csn.bv_len = csn.bv_len;
ri->ri_csn.bv_val = ri->ri_uuid.bv_val + ri->ri_uuid.bv_len;
memcpy( ri->ri_csn.bv_val, csn.bv_val, csn.bv_len );
ri->ri_csn.bv_val[csn.bv_len] = '\0';
+ } else {
+ ri->ri_csn.bv_val = NULL;
}
}
ri->ri_list = &opc->ssres;
if ( lock )
ldap_pvt_thread_mutex_unlock( &so->s_op->o_conn->c_mutex );
}
- return syncprov_free_syncop( so );
+ return syncprov_free_syncop( so, FS_LOCK );
}
static int
{
slap_overinst *on = (slap_overinst *)op->o_bd->bd_info;
syncprov_info_t *si = on->on_bi.bi_private;
- syncops *so, *soprev;
+ syncops *so, **sop;
ldap_pvt_thread_mutex_lock( &si->si_ops_mutex );
- for ( so=si->si_ops, soprev = (syncops *)&si->si_ops; so;
- soprev=so, so=so->s_next ) {
+ for ( sop=&si->si_ops; (so = *sop); sop = &(*sop)->s_next ) {
if ( so->s_op->o_connid == op->o_connid &&
so->s_op->o_msgid == op->orn_msgid ) {
so->s_op->o_abandon = 1;
- soprev->s_next = so->s_next;
+ *sop = so->s_next;
break;
}
}
* with saveit == TRUE
*/
snext = ss->s_next;
- if ( syncprov_free_syncop( ss ) ) {
+ if ( syncprov_free_syncop( ss, FS_LOCK ) ) {
*pss = snext;
gonext = 0;
}
for (sm = opc->smatches; sm; sm=snext) {
snext = sm->sm_next;
- syncprov_free_syncop( sm->sm_op );
+ syncprov_free_syncop( sm->sm_op, FS_LOCK|FS_UNLINK );
op->o_tmpfree( sm, op->o_tmpmemctx );
}
/* Remove op from lock table */
mt = opc->smt;
if ( mt ) {
+ modinst *mi = (modinst *)(opc+1), **m2;
ldap_pvt_thread_mutex_lock( &mt->mt_mutex );
- mt->mt_mods = mt->mt_mods->mi_next;
+ for (m2 = &mt->mt_mods; ; m2 = &(*m2)->mi_next) {
+ if ( *m2 == mi ) {
+ *m2 = mi->mi_next;
+ if ( mt->mt_tail == mi )
+ mt->mt_tail = ( m2 == &mt->mt_mods ) ? NULL : (modinst *)m2;
+ break;
+ }
+ }
/* If there are more, promote the next one */
if ( mt->mt_mods ) {
ldap_pvt_thread_mutex_unlock( &mt->mt_mutex );
opm.o_bd->bd_info = on->on_info->oi_orig;
opm.o_managedsait = SLAP_CONTROL_NONCRITICAL;
opm.o_no_schema_check = 1;
+ opm.o_opid = -1;
opm.o_bd->be_modify( &opm, &rsm );
if ( rsm.sr_err == LDAP_NO_SUCH_OBJECT &&
}
/* Allocate a record. UUIDs are not NUL-terminated. */
- se = ch_malloc( sizeof( slog_entry ) + opc->suuid.bv_len +
+ se = ch_malloc( sizeof( slog_entry ) + opc->suuid.bv_len +
op->o_csn.bv_len + 1 );
se->se_next = NULL;
se->se_tag = op->o_tag;
}
sl->sl_num++;
while ( sl->sl_num > sl->sl_size ) {
- int i, j;
+ int i;
se = sl->sl_head;
sl->sl_head = se->se_next;
for ( i=0; i<sl->sl_numcsns; i++ )
} else {
ldap_pvt_thread_rdwr_wunlock( &si->si_csn_rwlock );
}
+ if ( csn_changed )
+ si->si_numops++;
goto leave;
}
- si->si_numops++;
+ if ( csn_changed )
+ si->si_numops++;
if ( si->si_chkops || si->si_chktime ) {
/* Never checkpoint adding the context entry,
* it will deadlock
continue;
syncprov_qresp( opc, sm->sm_op, LDAP_SYNC_DELETE );
}
+ if ( opc->ssres.s_info )
+ free_resinfo( &opc->ssres );
break;
}
}
/* See if we're already modifying this entry... */
mtdummy.mt_dn = op->o_req_ndn;
+retry:
ldap_pvt_thread_mutex_lock( &si->si_mods_mutex );
mt = avl_find( si->si_mods, &mtdummy, sp_avl_cmp );
if ( mt ) {
ldap_pvt_thread_mutex_lock( &mt->mt_mutex );
if ( mt->mt_mods == NULL ) {
/* Cannot reuse this mt, as another thread is about
- * to release it in syncprov_op_cleanup.
+ * to release it in syncprov_op_cleanup. Wait for them
+ * to finish; our own insert is required to succeed.
*/
ldap_pvt_thread_mutex_unlock( &mt->mt_mutex );
- mt = NULL;
+ ldap_pvt_thread_mutex_unlock( &si->si_mods_mutex );
+ ldap_pvt_thread_yield();
+ goto retry;
}
}
if ( mt ) {
- ldap_pvt_thread_mutex_unlock( &si->si_mods_mutex );
mt->mt_tail->mi_next = mi;
mt->mt_tail = mi;
+ ldap_pvt_thread_mutex_unlock( &si->si_mods_mutex );
/* wait for this op to get to head of list */
while ( mt->mt_mods != mi ) {
+ modinst *m2;
+ /* don't wait on other mods from the same thread */
+ for ( m2 = mt->mt_mods; m2; m2 = m2->mi_next ) {
+ if ( m2->mi_op->o_threadctx == op->o_threadctx ) {
+ break;
+ }
+ }
+ if ( m2 )
+ break;
+
ldap_pvt_thread_mutex_unlock( &mt->mt_mutex );
/* FIXME: if dynamic config can delete overlays or
* databases we'll have to check for cleanup here.
/* clean up if the caller is giving up */
if ( op->o_abandon ) {
- modinst *m2;
- for ( m2 = mt->mt_mods; m2 && m2->mi_next != mi;
- m2 = m2->mi_next );
- if ( m2 ) {
- m2->mi_next = mi->mi_next;
- if ( mt->mt_tail == mi ) mt->mt_tail = m2;
+ modinst **m2;
+ slap_callback **sc;
+ for (m2 = &mt->mt_mods; ; m2 = &(*m2)->mi_next) {
+ if ( *m2 == mi ) {
+ *m2 = mi->mi_next;
+ if ( mt->mt_tail == mi )
+ mt->mt_tail = ( m2 == &mt->mt_mods ) ? NULL : (modinst *)m2;
+ break;
+ }
+ }
+ for (sc = &op->o_callback; ; sc = &(*sc)->sc_next) {
+ if ( *sc == cb ) {
+ *sc = cb->sc_next;
+ break;
+ }
}
op->o_tmpfree( cb, op->o_tmpmemctx );
ldap_pvt_thread_mutex_unlock( &mt->mt_mutex );
} else {
/* It's RefreshAndPersist, transition to Persist phase */
syncprov_sendinfo( op, rs, ( ss->ss_flags & SS_PRESENT ) ?
- LDAP_TAG_SYNC_REFRESH_PRESENT : LDAP_TAG_SYNC_REFRESH_DELETE,
+ LDAP_TAG_SYNC_REFRESH_PRESENT : LDAP_TAG_SYNC_REFRESH_DELETE,
( ss->ss_flags & SS_CHANGED ) ? &cookie : NULL,
1, NULL, 0 );
if ( !BER_BVISNULL( &cookie ))
syncops so = {0};
fbase_cookie fc;
opcookie opc;
- slap_callback sc;
+ slap_callback sc = {0};
fc.fss = &so;
fc.fbase = 0;
}
sop = ch_malloc( sizeof( syncops ));
*sop = so;
- ldap_pvt_thread_mutex_init( &sop->s_mutex );
sop->s_rid = srs->sr_state.rid;
sop->s_sid = srs->sr_state.sid;
/* set refcount=2 to prevent being freed out from under us
ldap_pvt_thread_yield();
ldap_pvt_thread_mutex_lock( &si->si_ops_mutex );
}
+ if ( op->o_abandon ) {
+ ldap_pvt_thread_mutex_unlock( &si->si_ops_mutex );
+ ch_free( sop );
+ return SLAPD_ABANDON;
+ }
+ ldap_pvt_thread_mutex_init( &sop->s_mutex );
sop->s_next = si->si_ops;
+ sop->s_si = si;
si->si_ops = sop;
ldap_pvt_thread_mutex_unlock( &si->si_ops_mutex );
}
- /* snapshot the ctxcsn */
+ /* snapshot the ctxcsn
+ * Note: this must not be done before the psearch setup. (ITS#8365)
+ */
ldap_pvt_thread_rdwr_rlock( &si->si_csn_rwlock );
numcsns = si->si_numcsns;
if ( numcsns ) {
}
dirty = si->si_dirty;
ldap_pvt_thread_rdwr_runlock( &si->si_csn_rwlock );
-
+
/* If we have a cookie, handle the PRESENT lookups */
if ( srs->sr_state.ctxcsn ) {
sessionlog *sl;
int i, j;
- /* If we don't have any CSN of our own yet, pretend nothing
- * has changed.
+ /* If we don't have any CSN of our own yet, bail out.
*/
- if ( !numcsns )
- goto no_change;
+ if ( !numcsns ) {
+ rs->sr_err = LDAP_UNWILLING_TO_PERFORM;
+ rs->sr_text = "consumer has state info but provider doesn't!";
+ goto bailout;
+ }
if ( !si->si_nopres )
do_present = SS_PRESENT;
/* our state is older, complain to consumer */
rs->sr_err = LDAP_UNWILLING_TO_PERFORM;
rs->sr_text = "consumer state is newer than provider!";
+ Log4( LDAP_DEBUG_SYNC, ldap_syslog_level,
+ "consumer %d state %s is newer than provider %d state %s\n",
+ sids[i], srs->sr_state.ctxcsn[i].bv_val, sids[j], /* == slap_serverID */
+ ctxcsn[j].bv_val);
bailout:
if ( sop ) {
syncops **sp = &si->si_ops;
send_ldap_result( op, rs );
return rs->sr_err;
}
- }
+ }
if ( BER_BVISEMPTY( &mincsn )) {
mincsn = maxcsn;
minsid = maxsid;
/* If nothing has changed, shortcut it */
if ( !changed && !dirty ) {
do_present = 0;
-no_change: if ( !(op->o_sync_mode & SLAP_SYNC_PERSIST) ) {
+no_change: if ( !(op->o_sync_mode & SLAP_SYNC_PERSIST) ) {
LDAPControl *ctrls[2];
ctrls[0] = NULL;
}
}
} else {
+		/* Consumer sent no cookie and we have no CSNs yet:
+		 * nothing needs to be sent. */
+ if (!numcsns)
+ goto no_change;
/* No consumer state, assume something has changed */
changed = SS_CHANGED;
}
char csnbuf[ LDAP_PVT_CSNSTR_BUFSIZE ];
struct berval csn;
- if ( SLAP_SYNC_SHADOW( op->o_bd )) {
+ if ( slap_serverID || SLAP_SYNC_SHADOW( op->o_bd )) {
/* If we're also a consumer, then don't generate anything.
* Wait for our provider to send it to us, or for a local
* modify if we have multimaster.
ConfigReply *cr
)
{
- slap_overinst *on = (slap_overinst *) be->bd_info;
- syncprov_info_t *si = (syncprov_info_t *)on->on_bi.bi_private;
+ slap_overinst *on = (slap_overinst *) be->bd_info;
+ syncprov_info_t *si = (syncprov_info_t *)on->on_bi.bi_private;
#ifdef SLAP_CONFIG_DELETE
syncops *so, *sonext;
#endif /* SLAP_CONFIG_DELETE */
rs.sr_err = LDAP_UNAVAILABLE;
send_ldap_result( so->s_op, &rs );
sonext=so->s_next;
- syncprov_drop_psearch( so, 0);
+ if ( so->s_flags & PS_TASK_QUEUED )
+ ldap_pvt_thread_pool_retract( so->s_pool_cookie );
+ if ( !syncprov_drop_psearch( so, 0 ))
+ so->s_si = NULL;
}
si->si_ops=NULL;
ldap_pvt_thread_mutex_unlock( &si->si_ops_mutex );
overlay_unregister_control( be, LDAP_CONTROL_SYNC );
#endif /* SLAP_CONFIG_DELETE */
- return 0;
+ return 0;
}
static int