ei->bei_kids = NULL;
ei->bei_lruprev = NULL;
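+ /* freelist reuse is disabled for now; just release the EntryInfo */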
+#if 0
ldap_pvt_thread_mutex_lock( &cache->c_eifree_mutex );
ei->bei_lrunext = cache->c_eifree;
cache->c_eifree = ei;
ldap_pvt_thread_mutex_unlock( &cache->c_eifree_mutex );
+#else
+ ch_free( ei );
+#endif
}
#define LRU_DEL( c, e ) do { \
ei2 = bdb_cache_entryinfo_new( &bdb->bi_cache );
- ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
bdb_cache_entryinfo_lock( ei->bei_parent );
+ ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
ei2->bei_id = ei->bei_id;
ei2->bei_parent = ei->bei_parent;
bdb->bi_cache.c_leaves++;
rc = avl_insert( &ei->bei_parent->bei_kids, ei2, bdb_rdn_cmp,
avl_dup_error );
- if ( rc ) {
- /* This should never happen; entry cache is corrupt */
- bdb->bi_dbenv->log_flush( bdb->bi_dbenv, NULL );
- assert( !rc );
- }
#ifdef BDB_HIER
- ei->bei_parent->bei_ckids++;
+ /* it's possible for hdb_cache_find_parent to beat us to the insert; only count the kid if ours succeeded */
+ if ( !rc ) {
+ ei->bei_parent->bei_ckids++;
+ }
#endif
}
ei.bei_nrdn.bv_len = ndn->bv_len -
(ei.bei_nrdn.bv_val - ndn->bv_val);
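+ /* pin eip so it can't be purged while we search with its lock released */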
+ eip->bei_finders++;
bdb_cache_entryinfo_unlock( eip );
BDB_LOG_PRINTF( bdb->bi_dbenv, NULL, "slapd Reading %s",
rc = bdb_dn2id( op, &ei.bei_nrdn, &ei, txn, &lock );
if (rc) {
bdb_cache_entryinfo_lock( eip );
+ eip->bei_finders--;
bdb_cache_entry_db_unlock( bdb, &lock );
*res = eip;
return rc;
ei.bei_nrdn.bv_len = len;
rc = bdb_entryinfo_add_internal( bdb, &ei, &ei2 );
/* add_internal left eip and c_rwlock locked */
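+ /* search is done; drop our pin on eip */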
+ eip->bei_finders--;
ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
bdb_cache_entry_db_unlock( bdb, &lock );
if ( rc ) {
*res = eip;
return DB_NOTFOUND;
}
- bdb_cache_entryinfo_unlock( eip );
bdb_cache_entryinfo_lock( ei2 );
+ bdb_cache_entryinfo_unlock( eip );
eip = ei2;
{
struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
EntryInfo ei, eip, *ei2 = NULL, *ein = NULL, *eir = NULL;
- int rc;
+ int rc, add;
ei.bei_id = id;
ei.bei_kids = NULL;
ein->bei_bdb = bdb;
#endif
ei.bei_ckids = 0;
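+ /* assume we'll be adding a new node until we find an existing one */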
+ add = 1;
/* This node is not fully connected yet */
ein->bei_state |= CACHE_ENTRY_NOT_LINKED;
+ /* If this is the first time, save this node
+ * to be returned later.
+ */
+ if ( eir == NULL ) {
+ eir = ein;
+ ein->bei_finders++;
+ }
+
+again:
/* Insert this node into the ID tree */
ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
if ( avl_insert( &bdb->bi_cache.c_idtree, (caddr_t)ein,
bdb_id_cmp, bdb_id_dup_err ) ) {
EntryInfo *eix = ein->bei_lrunext;
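+ /* if the existing node is being purged or deleted, let that finish and retry */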
+ if ( eix->bei_state & (CACHE_ENTRY_PURGED|CACHE_ENTRY_DELETED) ) {
+ ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
+ ldap_pvt_thread_yield();
+ goto again;
+ }
+ ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
+
/* Someone else created this node just before us.
* Free our new copy and use the existing one.
*/
bdb_cache_entryinfo_free( &bdb->bi_cache, ein );
- ein = eix;
-
- /* Link in any kids we've already processed */
- if ( ei2 ) {
- bdb_cache_entryinfo_lock( ein );
- avl_insert( &ein->bei_kids, (caddr_t)ei2,
- bdb_rdn_cmp, avl_dup_error );
- ein->bei_ckids++;
- bdb_cache_entryinfo_unlock( ein );
+
+ /* if it was the node we were looking for, just return it */
+ if ( eir == ein ) {
+ *res = eix;
+ rc = 0;
+ bdb_cache_entryinfo_lock( eix );
+ break;
}
- }
- /* If this is the first time, save this node
- * to be returned later.
- */
- if ( eir == NULL ) eir = ein;
+ ein = ei2;
+ ei2 = eix;
+ add = 0;
+
+ /* otherwise, link up what we have and return */
+ goto gotparent;
+ }
/* If there was a previous node, link it to this one */
if ( ei2 ) ei2->bei_parent = ein;
if ( eip.bei_id ) {
ei2 = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
(caddr_t) &eip, bdb_id_cmp );
+ if ( ei2 && ( ei2->bei_state & ( CACHE_ENTRY_PURGED|CACHE_ENTRY_DELETED ))) {
+ ei2 = NULL;
+ }
} else {
ei2 = &bdb->bi_cache.c_dntree;
}
- bdb->bi_cache.c_eiused++;
- if ( ei2 && ( ei2->bei_kids || !ei2->bei_id ))
+ if ( add ) {
+ bdb->bi_cache.c_eiused++;
+ if ( ei2 && ( ei2->bei_kids || !ei2->bei_id ))
bdb->bi_cache.c_leaves++;
+ }
ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
+gotparent:
/* Got the parent, link in and we're done. */
if ( ei2 ) {
- bdb_cache_entryinfo_lock( eir );
bdb_cache_entryinfo_lock( ei2 );
+ bdb_cache_entryinfo_lock( eir );
ein->bei_parent = ei2;
- avl_insert( &ei2->bei_kids, (caddr_t)ein, bdb_rdn_cmp,
- avl_dup_error);
- ei2->bei_ckids++;
+ if ( avl_insert( &ei2->bei_kids, (caddr_t)ein, bdb_rdn_cmp,
+ avl_dup_error) == 0 )
+ ei2->bei_ckids++;
/* Reset all the state info */
for (ein = eir; ein != ei2; ein=ein->bei_parent)
* or this node is being deleted, skip it.
*/
if (( elru->bei_state & ( CACHE_ENTRY_NOT_LINKED |
- CACHE_ENTRY_DELETED | CACHE_ENTRY_LOADING )) ||
+ CACHE_ENTRY_DELETED | CACHE_ENTRY_LOADING |
+ CACHE_ENTRY_ONELEVEL )) ||
elru->bei_finders > 0 ) {
bdb_cache_entryinfo_unlock( elru );
goto bottom;
}
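+ /* we also need the parent's lock; if we can't get it without blocking, skip this entry */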
+ if ( bdb_cache_entryinfo_trylock( elru->bei_parent )) {
+ bdb_cache_entryinfo_unlock( elru );
+ goto bottom;
+ }
+
- /* entryinfo is locked */
+ /* entryinfo and its parent are locked */
islocked = 1;
}
next:
- if ( islocked )
+ if ( islocked ) {
bdb_cache_entryinfo_unlock( elru );
+ bdb_cache_entryinfo_unlock( elru->bei_parent );
+ }
- if ( count >= efree && eicount >= eifree ) {
- if ( count || ecount > bdb->bi_cache.c_cursize ) {
- ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_count_mutex );
- /* HACK: we seem to be losing track, fix up now */
- if ( ecount > bdb->bi_cache.c_cursize )
- bdb->bi_cache.c_cursize = ecount;
- bdb->bi_cache.c_cursize -= count;
- ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_count_mutex );
- }
+ if ( count >= efree && eicount >= eifree )
break;
- }
bottom:
if ( elnext == bdb->bi_cache.c_lruhead )
break;
#endif
}
+ if ( count || ecount > bdb->bi_cache.c_cursize ) {
+ ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_count_mutex );
+ /* HACK: c_cursize can drift out of sync with the real count; resync it here */
+ if ( ecount > bdb->bi_cache.c_cursize )
+ bdb->bi_cache.c_cursize = ecount;
+ bdb->bi_cache.c_cursize -= count;
+ ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_count_mutex );
+ }
bdb->bi_cache.c_lruhead = elnext;
ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_lru_mutex );
bdb->bi_cache.c_purging = 0;
}
-EntryInfo *
-bdb_cache_find_info(
- struct bdb_info *bdb,
- ID id )
-{
- EntryInfo ei = { 0 },
- *ei2;
-
- ei.bei_id = id;
-
- ldap_pvt_thread_rdwr_rlock( &bdb->bi_cache.c_rwlock );
- ei2 = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
- (caddr_t) &ei, bdb_id_cmp );
- ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
- return ei2;
-}
-
/*
* cache_find_id - find an entry in the cache, given id.
* The entry is locked for Read upon return. Call with flag ID_LOCKED if
if ( *eip ) {
/* If the lock attempt fails, the info is in use */
if ( bdb_cache_entryinfo_trylock( *eip )) {
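+ /* note the DELETED bit now, in case *eip becomes invalid once c_rwlock is released */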
+ int del = (*eip)->bei_state & CACHE_ENTRY_DELETED;
ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
/* If this node is being deleted, treat
* as if the delete has already finished
*/
- if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
+ if ( del ) {
return DB_NOTFOUND;
}
/* otherwise, wait for the info to free up */
} else {
(*eip)->bei_finders++;
(*eip)->bei_state |= CACHE_ENTRY_REFERENCED;
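+ /* the caller only wants the EntryInfo, not the entry itself */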
+ if ( flag & ID_NOENTRY ) {
+ bdb_cache_entryinfo_unlock( *eip );
+ return 0;
+ }
/* Make sure only one thread tries to load the entry */
load1:
#ifdef SLAP_ZONE_ALLOC
if ( rc == 0 ) {
ep->e_private = *eip;
#ifdef BDB_HIER
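+ /* wait until the parent chain is fully linked before fixing up the DN */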
+ while ( (*eip)->bei_state & CACHE_ENTRY_NOT_LINKED )
+ ldap_pvt_thread_yield();
bdb_fix_dn( ep, 0 );
#endif
(*eip)->bei_e = ep;
bdb->bi_cache.c_leaves > bdb->bi_cache.c_eimax ) {
ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_count_mutex );
if ( !bdb->bi_cache.c_purging ) {
- if ( !( flag & ID_NOCACHE )) {
+ if ( load && !( flag & ID_NOCACHE )) {
bdb->bi_cache.c_cursize++;
if ( bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize ) {
purge = 1;
if ( eip->bei_dkids ) eip->bei_dkids++;
#endif
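+ /* the new entry gives eip's parent a grandkid; clear its flag under that node's own lock */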
+ if (eip->bei_parent) {
+ bdb_cache_entryinfo_lock( eip->bei_parent );
+ eip->bei_parent->bei_state &= ~CACHE_ENTRY_NO_GRANDKIDS;
+ bdb_cache_entryinfo_unlock( eip->bei_parent );
+ }
+
rc = bdb_entryinfo_add_internal( bdb, &ei, &new );
/* bdb_csn_commit can cause this when adding the database root entry */
if ( new->bei_e ) {
e->e_private = new;
new->bei_state |= CACHE_ENTRY_NO_KIDS | CACHE_ENTRY_NO_GRANDKIDS;
eip->bei_state &= ~CACHE_ENTRY_NO_KIDS;
- if (eip->bei_parent) {
- eip->bei_parent->bei_state &= ~CACHE_ENTRY_NO_GRANDKIDS;
- }
bdb_cache_entryinfo_unlock( eip );
ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
/* Get write lock on the data */
rc = bdb_cache_entry_db_relock( bdb, txn, ei, 1, 0, lock );
if ( rc ) {
+ bdb_cache_entryinfo_lock( ei );
/* couldn't lock, undo and give up */
ei->bei_state ^= CACHE_ENTRY_DELETED;
+ bdb_cache_entryinfo_unlock( ei );
return rc;
}
/* set lru mutex */
ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_lru_mutex );
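+ /* lock parent then child; delete_internal releases the parent lock itself */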
+ bdb_cache_entryinfo_lock( ei->bei_parent );
+ bdb_cache_entryinfo_lock( ei );
rc = bdb_cache_delete_internal( &bdb->bi_cache, e->e_private, 1 );
+ bdb_cache_entryinfo_unlock( ei );
/* free lru mutex */
ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_lru_mutex );
int decr_leaf = 0;
/* already freed? */
- if ( !e->bei_parent )
+ if ( !e->bei_parent ) {
+ assert(0);
return -1;
+ }
- /* Lock the parent's kids tree */
- bdb_cache_entryinfo_lock( e->bei_parent );
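+ /* mark the node so concurrent lookups see it is being purged */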
+ e->bei_state |= CACHE_ENTRY_PURGED;
#ifdef BDB_HIER
e->bei_parent->bei_ckids--;
== NULL )
{
rc = -1;
+ assert(0);
}
if ( e->bei_parent->bei_kids )
decr_leaf = 1;
- bdb_cache_entryinfo_unlock( e->bei_parent );
-
ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
/* id tree */
if ( avl_delete( &cache->c_idtree, (caddr_t) e, bdb_id_cmp )) {
cache->c_leaves--;
} else {
rc = -1;
+ assert(0);
}
ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
+ bdb_cache_entryinfo_unlock( e->bei_parent );
if ( rc == 0 ){
/* lru */