X-Git-Url: https://git.sur5r.net/?a=blobdiff_plain;f=servers%2Fslapd%2Fback-bdb%2Fcache.c;h=b470762ea91438b6f9042d601f68b8b7f2481755;hb=cfc8c67523837451fe2810755bf5e5cbf791ddac;hp=f68f89e4f01d4759183ca5646ac47ed7049ccb9a;hpb=f696f7ab55cbd526bc6e008aa3c89f41bf2c8fb9;p=openldap

diff --git a/servers/slapd/back-bdb/cache.c b/servers/slapd/back-bdb/cache.c
index f68f89e4f0..b470762ea9 100644
--- a/servers/slapd/back-bdb/cache.c
+++ b/servers/slapd/back-bdb/cache.c
@@ -2,7 +2,7 @@
 /* $OpenLDAP$ */
 /* This work is part of OpenLDAP Software <http://www.OpenLDAP.org/>.
  *
- * Copyright 2000-2004 The OpenLDAP Foundation.
+ * Copyright 2000-2008 The OpenLDAP Foundation.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -26,10 +26,19 @@
 
 #include "back-bdb.h"
 
-static int	bdb_cache_delete_internal(Cache *cache, EntryInfo *e);
+#include "ldap_rq.h"
+
+#ifdef BDB_HIER
+#define bdb_cache_lru_add	hdb_cache_lru_add
+#endif
+static void bdb_cache_lru_add( struct bdb_info *bdb, EntryInfo *ei );
+
+static int	bdb_cache_delete_internal(Cache *cache, EntryInfo *e, int decr);
 #ifdef LDAP_DEBUG
+#ifdef SLAPD_UNUSED
 static void	bdb_lru_print(Cache *cache);
 #endif
+#endif
 
 static EntryInfo *
 bdb_cache_entryinfo_new( Cache *cache )
@@ -86,16 +95,10 @@ bdb_cache_entry_db_relock(
 	rc = env->lock_vec(env, locker, tryOnly ? DB_LOCK_NOWAIT : 0,
 		list, 2, NULL );
 
-	if (rc) {
-#ifdef NEW_LOGGING
-		LDAP_LOG( CACHE, DETAIL1,
-			"bdb_cache_entry_db_relock: entry %ld, rw %d, rc %d\n",
-			ei->bei_id, rw, rc );
-#else
+	if (rc && !tryOnly) {
 		Debug( LDAP_DEBUG_TRACE,
 			"bdb_cache_entry_db_relock: entry %ld, rw %d, rc %d\n",
 			ei->bei_id, rw, rc );
-#endif
 	} else {
 		*lock = list[1].lock;
 	}
@@ -126,16 +129,10 @@ bdb_cache_entry_db_lock( DB_ENV *env, u_int32_t locker, EntryInfo *ei,
 	rc = LOCK_GET(env, locker, tryOnly ? DB_LOCK_NOWAIT : 0,
 		&lockobj, db_rw, lock);
 
-	if (rc) {
-#ifdef NEW_LOGGING
-		LDAP_LOG( CACHE, DETAIL1,
-			"bdb_cache_entry_db_lock: entry %ld, rw %d, rc %d\n",
-			ei->bei_id, rw, rc );
-#else
+	if (rc && !tryOnly) {
 		Debug( LDAP_DEBUG_TRACE,
 			"bdb_cache_entry_db_lock: entry %ld, rw %d, rc %d\n",
 			ei->bei_id, rw, rc );
-#endif
 	}
 	return rc;
 #endif /* NO_THREADS */
@@ -149,7 +146,7 @@ bdb_cache_entry_db_unlock ( DB_ENV *env, DB_LOCK *lock )
 #else
 	int rc;
 
-	if ( !lock ) return 0;
+	if ( !lock || lock->mode == DB_LOCK_NG ) return 0;
 
 	rc = LOCK_PUT ( env, lock );
 	return rc;
@@ -179,6 +176,7 @@ bdb_cache_entryinfo_destroy( EntryInfo *e )
 	} else { \
 		(cache)->c_lrutail = (ei)->bei_lruprev; \
 	} \
+	(ei)->bei_lrunext = (ei)->bei_lruprev = NULL; \
 } while(0)
 
 #define LRU_ADD( cache, ei ) do { \
@@ -188,8 +186,10 @@ bdb_cache_entryinfo_destroy( EntryInfo *e )
 	} \
 	(cache)->c_lruhead = (ei); \
 	(ei)->bei_lruprev = NULL; \
-	if ( (cache)->c_lrutail == NULL ) { \
-		(cache)->c_lrutail = (ei); \
+	if ( !ldap_pvt_thread_mutex_trylock( &(cache)->lru_tail_mutex )) { \
+		if ( (cache)->c_lrutail == NULL ) \
+			(cache)->c_lrutail = (ei); \
+		ldap_pvt_thread_mutex_unlock( &(cache)->lru_tail_mutex ); \
 	} \
 } while(0)
 
@@ -235,6 +235,9 @@ bdb_entryinfo_add_internal(
 #ifdef BDB_HIER
 		ei2->bei_rdn = ei->bei_rdn;
 #endif
+#ifdef SLAP_ZONE_ALLOC
+		ei2->bei_bdb = bdb;
+#endif
 
 	/* Add to cache ID tree */
 	if (avl_insert( &bdb->bi_cache.c_idtree, ei2, bdb_id_cmp, avl_dup_error )) {
@@ -249,9 +252,24 @@ bdb_entryinfo_add_internal(
 			ei->bei_rdn.bv_val = NULL;
 #endif
 	} else {
+		int rc;
+
+		bdb->bi_cache.c_eiused++;
 		ber_dupbv( &ei2->bei_nrdn, &ei->bei_nrdn );
-		avl_insert( &ei->bei_parent->bei_kids, ei2, bdb_rdn_cmp,
+
+		/* This is a new leaf node. But if parent had no kids, then it was
+		 * a leaf and we would be decrementing that. So, only increment if
+		 * the parent already has kids.
+		 */
+		if ( ei->bei_parent->bei_kids || !ei->bei_parent->bei_id )
+			bdb->bi_cache.c_leaves++;
+		rc = avl_insert( &ei->bei_parent->bei_kids, ei2, bdb_rdn_cmp,
 			avl_dup_error );
+		if ( rc ) {
+			/* This should never happen; entry cache is corrupt */
+			bdb->bi_dbenv->log_flush( bdb->bi_dbenv, NULL );
+			assert( !rc );
+		}
#ifdef BDB_HIER
 		ei->bei_parent->bei_ckids++;
 #endif
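
The LRU_DELETE/LRU_ADD hunks above split the cache's old single lru_mutex into a lru_head_mutex/lru_tail_mutex pair: threads adding fresh entries contend only on the head, the eviction scan owns the tail, and LRU_ADD repairs an empty tail pointer only when it can take the tail mutex without blocking. What follows is a minimal standalone sketch of that scheme, not the patch's code: it uses plain pthreads in place of the ldap_pvt_thread wrappers, and the node/lru_list types are invented stand-ins for EntryInfo and Cache.

#include <pthread.h>
#include <stddef.h>

typedef struct node { struct node *lrunext, *lruprev; } node;

typedef struct lru_list {
	node *head, *tail;
	pthread_mutex_t head_mutex, tail_mutex;
} lru_list;

/* caller holds l->head_mutex, mirroring LRU_ADD above */
void lru_add( lru_list *l, node *n )
{
	n->lrunext = l->head;
	if ( l->head ) l->head->lruprev = n;
	l->head = n;
	n->lruprev = NULL;
	/* never block behind an evictor scanning from the tail */
	if ( pthread_mutex_trylock( &l->tail_mutex ) == 0 ) {
		if ( l->tail == NULL ) l->tail = n;
		pthread_mutex_unlock( &l->tail_mutex );
	}
}

/* caller serializes both ends: the patch holds lru_tail_mutex here,
 * and head_mutex as well on the re-add path in bdb_cache_find_id */
void lru_delete( lru_list *l, node *n )
{
	if ( n->lruprev ) n->lruprev->lrunext = n->lrunext;
	else l->head = n->lrunext;
	if ( n->lrunext ) n->lrunext->lruprev = n->lruprev;
	else l->tail = n->lruprev;
	/* null the links so "is this node on the list" tests stay
	 * accurate, as the new last line of LRU_DELETE does */
	n->lrunext = n->lruprev = NULL;
}

If the trylock fails because an evictor currently holds the tail, the add simply does not block; an empty tail pointer is left for a later add or scan to repair, which is the trade the macro accepts.
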
@@ -270,7 +288,7 @@
 int
 bdb_cache_find_ndn(
 	Operation	*op,
-	DB_TXN		*txn,
+	u_int32_t	locker,
 	struct berval	*ndn,
 	EntryInfo	**res )
 {
@@ -307,15 +325,23 @@
 		ei.bei_parent = eip;
 		ei2 = (EntryInfo *)avl_find( eip->bei_kids, &ei, bdb_rdn_cmp );
 		if ( !ei2 ) {
+			DB_LOCK lock;
 			int len = ei.bei_nrdn.bv_len;
 
+			if ( BER_BVISEMPTY( ndn )) {
+				*res = eip;
+				return LDAP_SUCCESS;
+			}
+
 			ei.bei_nrdn.bv_len = ndn->bv_len -
 				(ei.bei_nrdn.bv_val - ndn->bv_val);
 			bdb_cache_entryinfo_unlock( eip );
 
-			rc = bdb_dn2id( op, txn, &ei.bei_nrdn, &ei );
+			lock.mode = DB_LOCK_NG;
+			rc = bdb_dn2id( op, &ei.bei_nrdn, &ei, locker, &lock );
 			if (rc) {
 				bdb_cache_entryinfo_lock( eip );
+				bdb_cache_entry_db_unlock( bdb->bi_dbenv, &lock );
 				*res = eip;
 				return rc;
 			}
@@ -325,6 +351,7 @@
 			rc = bdb_entryinfo_add_internal( bdb, &ei, &ei2 );
 			/* add_internal left eip and c_rwlock locked */
 			ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
+			bdb_cache_entry_db_unlock( bdb->bi_dbenv, &lock );
 			if ( rc ) {
 				*res = eip;
 				return rc;
@@ -365,25 +392,24 @@
 /* Walk up the tree from a child node, looking for an ID that's already
  * been linked into the cache.
  */
-static int
+int
 hdb_cache_find_parent(
 	Operation *op,
-	DB_TXN *txn,
+	u_int32_t	locker,
 	ID id,
 	EntryInfo **res )
 {
 	struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
 	EntryInfo ei, eip, *ei2 = NULL, *ein = NULL, *eir = NULL;
-	char ndn[SLAP_LDAPDN_MAXLEN];
-	ID parent;
 	int rc;
-	int addlru = 1;
+	int addlru = 0;
 
 	ei.bei_id = id;
 	ei.bei_kids = NULL;
+	ei.bei_ckids = 0;
 
 	for (;;) {
-		rc = hdb_dn2id_parent( op, txn, &ei, &eip.bei_id );
+		rc = hdb_dn2id_parent( op, locker, &ei, &eip.bei_id );
 		if ( rc ) break;
 
 		/* Save the previous node, if any */
@@ -395,6 +421,11 @@
 		ein->bei_kids = ei.bei_kids;
 		ein->bei_nrdn = ei.bei_nrdn;
 		ein->bei_rdn = ei.bei_rdn;
+		ein->bei_ckids = ei.bei_ckids;
+#ifdef SLAP_ZONE_ALLOC
+		ein->bei_bdb = bdb;
+#endif
+		ei.bei_ckids = 0;
 
 		/* This node is not fully connected yet */
 		ein->bei_state = CACHE_ENTRY_NOT_LINKED;
@@ -416,12 +447,11 @@
 			bdb_cache_entryinfo_lock( ein );
 			avl_insert( &ein->bei_kids, (caddr_t)ei2,
 				bdb_rdn_cmp, avl_dup_error );
+			ein->bei_ckids++;
 			bdb_cache_entryinfo_unlock( ein );
 		}
+		addlru = 0;
 
-		if ( !eir ) {
-			addlru = 0;
-		}
 	}
 
 	/* If this is the first time, save this node
@@ -439,14 +469,24 @@
 	} else {
 		ei2 = &bdb->bi_cache.c_dntree;
 	}
+	bdb->bi_cache.c_eiused++;
+	if ( ei2 && ( ei2->bei_kids || !ei2->bei_id ))
+		bdb->bi_cache.c_leaves++;
 	ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
 
+	if ( addlru ) {
+		ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_head_mutex );
+		bdb_cache_lru_add( bdb, ein );
+	}
+	addlru = 1;
+
 	/* Got the parent, link in and we're done. */
 	if ( ei2 ) {
 		bdb_cache_entryinfo_lock( ei2 );
 		ein->bei_parent = ei2;
 		avl_insert( &ei2->bei_kids, (caddr_t)ein, bdb_rdn_cmp,
 			avl_dup_error);
+		ei2->bei_ckids++;
 		bdb_cache_entryinfo_unlock( ei2 );
 		bdb_cache_entryinfo_lock( eir );
 
@@ -458,6 +498,7 @@
 		}
 		ei.bei_kids = NULL;
 		ei.bei_id = eip.bei_id;
+		ei.bei_ckids = 1;
 		avl_insert( &ei.bei_kids, (caddr_t)ein, bdb_rdn_cmp,
 			avl_dup_error );
 	}
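
hdb_cache_find_parent above walks up from an uncached entry, collecting a chain of not-yet-linked EntryInfos until it meets an ID that is already in the cache, then links the chain back down. A schematic of just that control flow, with locking omitted; all four helpers are hypothetical stand-ins for hdb_dn2id_parent, the ID-tree avl_find, bdb_cache_entryinfo_new, and the kids-tree insert.

#include <stddef.h>

typedef unsigned long ID;

typedef struct info {
	ID id;
	struct info *down;	/* next collected node, toward the target */
} info;

info *cache_lookup( ID id );		/* NULL if id is not cached yet */
int   db_parent_id( ID id, ID *pid );	/* hdb_dn2id_parent() stand-in */
info *info_new( ID id );
void  cache_link( info *parent, info *child );

info *find_parent( ID id )
{
	info *chain = NULL, *anc;

	for (;;) {
		ID pid;
		info *n;

		if ( db_parent_id( id, &pid ))
			return NULL;		/* id does not exist at all */
		n = info_new( id );
		n->down = chain;		/* remember the node below us */
		chain = n;
		anc = cache_lookup( pid );
		if ( anc ) break;		/* reached a cached ancestor */
		id = pid;
	}
	/* splice the collected chain in, top-down from the ancestor */
	for ( ; chain; chain = chain->down ) {
		cache_link( anc, chain );
		anc = chain;
	}
	return anc;				/* the info for the original id */
}

The real function additionally flags each collected node CACHE_ENTRY_NOT_LINKED until the splice completes, which is why the eviction scan below skips nodes in that state.
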
@@ -500,61 +541,94 @@ int hdb_cache_load(
 }
 #endif
 
-/* caller must have lru_mutex locked. mutex
+/* caller must have lru_head_mutex locked. mutex
  * will be unlocked on return.
  */
 static void
 bdb_cache_lru_add(
 	struct bdb_info *bdb,
-	u_int32_t	locker,
 	EntryInfo *ei )
 {
 	DB_LOCK		lock, *lockp;
+	EntryInfo *elru, *elprev;
+	int count = 0;
+
+	LRU_ADD( &bdb->bi_cache, ei );
+	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );
 
-	if ( locker ) {
+	/* See if we're above the cache size limit */
+	if ( bdb->bi_cache.c_cursize <= bdb->bi_cache.c_maxsize )
+		return;
+
+	if ( bdb->bi_cache.c_locker ) {
 		lockp = &lock;
 	} else {
 		lockp = NULL;
 	}
 
-	/* See if we're above the cache size limit */
-	if ( bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize ) {
-		EntryInfo *elru, *elprev;
-		int i = 0;
+	/* Don't bother if we can't get the lock */
+	if ( ldap_pvt_thread_mutex_trylock( &bdb->bi_cache.lru_tail_mutex ) )
+		return;
 
-		/* Look for an unused entry to remove */
-		for (elru = bdb->bi_cache.c_lrutail; elru; elru = elprev, i++ ) {
-			elprev = elru->bei_lruprev;
+	/* Look for an unused entry to remove */
+	for (elru = bdb->bi_cache.c_lrutail; elru; elru = elprev ) {
+		elprev = elru->bei_lruprev;
+
+		/* If we can successfully writelock it, then
+		 * the object is idle.
+		 */
+		if ( bdb_cache_entry_db_lock( bdb->bi_dbenv,
+				bdb->bi_cache.c_locker, elru, 1, 1, lockp ) == 0 ) {
 
-			/* Too many probes, not enough idle, give up */
-			if (i > 10) break;
-			/* If we can successfully writelock it, then
-			 * the object is idle.
+			/* If this node is in the process of linking into the cache,
+			 * or this node is being deleted, skip it.
 			 */
-			if ( bdb_cache_entry_db_lock( bdb->bi_dbenv, bdb->bi_cache.c_locker, elru, 1, 1,
-				lockp ) == 0 ) {
-				/* If there's no entry, or this node is in
-				 * the process of linking into the cache,
-				 * skip it.
-				 */
-				if ( !elru->bei_e || (elru->bei_state & CACHE_ENTRY_NOT_LINKED) ) {
-					bdb_cache_entry_db_unlock( bdb->bi_dbenv, lockp );
-					continue;
-				}
-				LRU_DELETE( &bdb->bi_cache, elru );
+			if ( elru->bei_state &
+				( CACHE_ENTRY_NOT_LINKED | CACHE_ENTRY_DELETED )) {
+				bdb_cache_entry_db_unlock( bdb->bi_dbenv, lockp );
+				continue;
+			}
+			/* Free entry for this node if it's present */
+			if ( elru->bei_e ) {
 				elru->bei_e->e_private = NULL;
+#ifdef SLAP_ZONE_ALLOC
+				bdb_entry_return( bdb, elru->bei_e, elru->bei_zseq );
+#else
 				bdb_entry_return( elru->bei_e );
+#endif
 				elru->bei_e = NULL;
-				bdb_cache_entry_db_unlock( bdb->bi_dbenv, lockp );
-				--bdb->bi_cache.c_cursize;
-				if (bdb->bi_cache.c_cursize < bdb->bi_cache.c_maxsize)
-					break;
+				count++;
+			}
+			/* ITS#4010 if we're in slapcat, and this node is a leaf
+			 * node, free it.
+			 *
+			 * FIXME: we need to do this for slapd as well, (which is
+			 * why we compute bi_cache.c_leaves now) but at the moment
+			 * we can't because it causes unresolvable deadlocks. */
+			if ( slapMode & SLAP_TOOL_READONLY ) {
+				if ( !elru->bei_kids ) {
+					/* This does LRU_DELETE for us */
+					bdb_cache_delete_internal( &bdb->bi_cache, elru, 0 );
+					bdb_cache_delete_cleanup( &bdb->bi_cache, elru );
+				}
+				/* Leave node on LRU list for a future pass */
+			} else {
+				LRU_DELETE( &bdb->bi_cache, elru );
+			}
+			bdb_cache_entry_db_unlock( bdb->bi_dbenv, lockp );
+
+			if ( count >= bdb->bi_cache.c_minfree ) {
+				ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
+				bdb->bi_cache.c_cursize -= count;
+				ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
+				break;
 			}
 		}
 	}
-	LRU_ADD( &bdb->bi_cache, ei );
-	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_mutex );
+
+	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_tail_mutex );
 }
 
 EntryInfo *
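
The rewritten bdb_cache_lru_add above turns eviction into a non-blocking probe: an entry whose write lock can be taken with DB_LOCK_NOWAIT is provably idle, so its Entry payload is freed, and the scan stops once c_minfree entries have been reclaimed. A compilable sketch of that probe loop, with pthread_rwlock_trywrlock standing in for the BDB trylock, free() for bdb_entry_return(), an unstable flag for the NOT_LINKED/DELETED state bits, and invented types throughout:

#include <pthread.h>
#include <stdlib.h>

typedef struct centry {
	struct centry *lruprev;	/* toward the head of the LRU list */
	pthread_rwlock_t lock;
	void *payload;		/* the cached Entry, NULL once evicted */
	int unstable;		/* being linked in or deleted: skip */
} centry;

/* caller holds the tail mutex; returns how many payloads were freed */
int evict_from_tail( centry *tail, int minfree )
{
	centry *e, *prev;
	int freed = 0;

	for ( e = tail; e && freed < minfree; e = prev ) {
		prev = e->lruprev;
		/* a failed trylock means someone is using this entry */
		if ( pthread_rwlock_trywrlock( &e->lock ) != 0 )
			continue;
		if ( !e->unstable && e->payload ) {
			free( e->payload );
			e->payload = NULL;
			freed++;
		}
		pthread_rwlock_unlock( &e->lock );
	}
	return freed;	/* the caller subtracts this from c_cursize */
}

Note that the patch adjusts c_cursize once, under c_rwlock, after the scan; the old code decremented it per entry while still holding the single LRU mutex.
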
+ */ + if ( slapMode & SLAP_TOOL_READONLY ) { + if ( !elru->bei_kids ) { + /* This does LRU_DELETE for us */ + bdb_cache_delete_internal( &bdb->bi_cache, elru, 0 ); + bdb_cache_delete_cleanup( &bdb->bi_cache, elru ); + } + /* Leave node on LRU list for a future pass */ + } else { + LRU_DELETE( &bdb->bi_cache, elru ); + } + bdb_cache_entry_db_unlock( bdb->bi_dbenv, lockp ); + + if ( count >= bdb->bi_cache.c_minfree ) { + ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock ); + bdb->bi_cache.c_cursize -= count; + ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock ); + break; } } } - LRU_ADD( &bdb->bi_cache, ei ); - ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_mutex ); + + ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_tail_mutex ); } EntryInfo * @@ -562,7 +636,8 @@ bdb_cache_find_info( struct bdb_info *bdb, ID id ) { - EntryInfo ei, *ei2; + EntryInfo ei = { 0 }, + *ei2; ei.bei_id = id; @@ -591,11 +666,14 @@ bdb_cache_find_id( { struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private; Entry *ep = NULL; - int rc = 0; - EntryInfo ei; + int rc = 0, load = 0; + EntryInfo ei = { 0 }; ei.bei_id = id; +#ifdef SLAP_ZONE_ALLOC + slap_zh_rlock(bdb->bi_cache.c_zctx); +#endif /* If we weren't given any info, see if we have it already cached */ if ( !*eip ) { again: ldap_pvt_thread_rdwr_rlock( &bdb->bi_cache.c_rwlock ); @@ -633,60 +711,95 @@ again: ldap_pvt_thread_rdwr_rlock( &bdb->bi_cache.c_rwlock ); /* See if the ID exists in the database; add it to the cache if so */ if ( !*eip ) { #ifndef BDB_HIER - rc = bdb_id2entry( op->o_bd, tid, id, &ep ); + rc = bdb_id2entry( op->o_bd, tid, locker, id, &ep ); if ( rc == 0 ) { - rc = bdb_cache_find_ndn( op, tid, + rc = bdb_cache_find_ndn( op, locker, &ep->e_nname, eip ); if ( *eip ) islocked = 1; if ( rc ) { + ep->e_private = NULL; +#ifdef SLAP_ZONE_ALLOC + bdb_entry_return( bdb, ep, (*eip)->bei_zseq ); +#else bdb_entry_return( ep ); +#endif ep = NULL; } } #else - rc = hdb_cache_find_parent(op, tid, id, eip ); - if ( rc == 0 && *eip ) islocked = 1; + rc = hdb_cache_find_parent(op, locker, id, eip ); + if ( rc == 0 ) islocked = 1; #endif } /* Ok, we found the info, do we have the entry? 
-	if ( *eip && rc == 0 ) {
+	if ( rc == 0 ) {
 		if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
 			rc = DB_NOTFOUND;
 		} else {
-			rc = bdb_cache_entry_db_lock( bdb->bi_dbenv, locker, *eip, 0, 0, lock );
-			/* entry is protected now, we don't need to hold the entryinfo */
+			/* Make sure only one thread tries to load the entry */
+load1:
+#ifdef SLAP_ZONE_ALLOC
+			if ((*eip)->bei_e && !slap_zn_validate(
+					bdb->bi_cache.c_zctx, (*eip)->bei_e, (*eip)->bei_zseq)) {
+				(*eip)->bei_e = NULL;
+				(*eip)->bei_zseq = 0;
+			}
+#endif
+			if ( !(*eip)->bei_e && !((*eip)->bei_state & CACHE_ENTRY_LOADING)) {
+				load = 1;
+				(*eip)->bei_state |= CACHE_ENTRY_LOADING;
+			}
 			if ( islocked ) {
 				bdb_cache_entryinfo_unlock( *eip );
 				islocked = 0;
 			}
-			if ( rc == 0 ) {
-				if ( !(*eip)->bei_e ) {
-					if (!ep) {
-						rc = bdb_id2entry( op->o_bd, tid, id, &ep );
+			rc = bdb_cache_entry_db_lock( bdb->bi_dbenv, locker, *eip, 0, 0, lock );
+			if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
+				rc = DB_NOTFOUND;
+				bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
+			} else if ( rc == 0 ) {
+				if ( load ) {
+					/* Give up original read lock, obtain write lock
+					 */
+					if ( rc == 0 ) {
+						rc = bdb_cache_entry_db_relock( bdb->bi_dbenv, locker,
+							*eip, 1, 0, lock );
+					}
+					if ( rc == 0 && !ep) {
+						rc = bdb_id2entry( op->o_bd, tid, locker, id, &ep );
 					}
 					if ( rc == 0 ) {
-						bdb_cache_entry_db_relock( bdb->bi_dbenv, locker,
-							*eip, 1, 0, lock );
-						/* Make sure no other modifier beat us to it */
-						if ( (*eip)->bei_e ) {
-							bdb_entry_return( ep );
-							ep = NULL;
+						ep->e_private = *eip;
 #ifdef BDB_HIER
-							/* Check for subtree renames */
-							rc = bdb_fix_dn( (*eip)->bei_e, 1 );
-							if ( rc ) rc = bdb_fix_dn( (*eip)->bei_e, 2 );
+						bdb_fix_dn( ep, 0 );
 #endif
-						} else {
-							ep->e_private = *eip;
-#ifdef BDB_HIER
-							bdb_fix_dn( ep, 0 );
+						(*eip)->bei_e = ep;
+#ifdef SLAP_ZONE_ALLOC
+						(*eip)->bei_zseq = *((ber_len_t *)ep - 2);
 #endif
-							(*eip)->bei_e = ep;
-						}
-						bdb_cache_entry_db_relock( bdb->bi_dbenv, locker,
+						ep = NULL;
+					}
+					bdb_cache_entryinfo_lock( *eip );
+					(*eip)->bei_state ^= CACHE_ENTRY_LOADING;
+					bdb_cache_entryinfo_unlock( *eip );
+					if ( rc == 0 ) {
+						/* If we succeeded, downgrade back to a readlock. */
+						rc = bdb_cache_entry_db_relock( bdb->bi_dbenv, locker,
 							*eip, 0, 0, lock );
+					} else {
+						/* Otherwise, release the lock. */
+						bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
 					}
+				} else if ( !(*eip)->bei_e ) {
+					/* Some other thread is trying to load the entry,
+					 * give it a chance to finish. */
+					bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
+					ldap_pvt_thread_yield();
+					bdb_cache_entryinfo_lock( *eip );
+					islocked = 1;
+					goto load1;
 #ifdef BDB_HIER
 				} else {
 					/* Check for subtree renames
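
The load1 section above makes exactly one reader responsible for pulling a missing entry off disk: the first thread to find bei_e empty sets CACHE_ENTRY_LOADING and loads, while every other thread drops its lock, yields, and retries until the flag clears. The same handshake in miniature, with a mutex plus flag as hypothetical stand-ins for the entryinfo lock and the bei_state bit, and expensive_fetch() for bdb_id2entry():

#include <pthread.h>
#include <sched.h>

typedef struct slot {
	pthread_mutex_t mtx;
	int loading;		/* CACHE_ENTRY_LOADING stand-in */
	void *payload;
} slot;

void *expensive_fetch( void );	/* the disk read, done unlocked */

void *slot_get( slot *s )
{
	for (;;) {
		pthread_mutex_lock( &s->mtx );
		if ( s->payload ) {		/* already loaded: done */
			void *p = s->payload;
			pthread_mutex_unlock( &s->mtx );
			return p;
		}
		if ( !s->loading ) {		/* we won: become the loader */
			void *p;
			s->loading = 1;
			pthread_mutex_unlock( &s->mtx );
			p = expensive_fetch();
			pthread_mutex_lock( &s->mtx );
			s->payload = p;
			s->loading = 0;
			pthread_mutex_unlock( &s->mtx );
			return p;
		}
		/* someone else is loading: give them a chance to finish */
		pthread_mutex_unlock( &s->mtx );
		sched_yield();
	}
}

The patch's version layers BDB lock transitions on top of this: the loader trades its read lock for a write lock before filling bei_e, then downgrades back, so readers blocked on the entry lock wake to a fully populated entry.
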
+ */ + bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock ); + ldap_pvt_thread_yield(); + bdb_cache_entryinfo_lock( *eip ); + islocked = 1; + goto load1; #ifdef BDB_HIER } else { /* Check for subtree renames @@ -707,23 +820,51 @@ again: ldap_pvt_thread_rdwr_rlock( &bdb->bi_cache.c_rwlock ); } } } + if ( islocked ) { + bdb_cache_entryinfo_unlock( *eip ); + } + if ( ep ) { + ep->e_private = NULL; +#ifdef SLAP_ZONE_ALLOC + bdb_entry_return( bdb, ep, (*eip)->bei_zseq ); +#else + bdb_entry_return( ep ); +#endif + } if ( rc == 0 ) { - /* set lru mutex */ - ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_mutex ); - /* if entry is on LRU list, remove from old spot */ - if ( (*eip)->bei_lrunext || (*eip)->bei_lruprev ) { - LRU_DELETE( &bdb->bi_cache, *eip ); - } else { - /* if entry is new, bump cache size */ + + if ( load ) { + ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock ); bdb->bi_cache.c_cursize++; + ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock ); + } + + ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_head_mutex ); + + /* If the LRU list has only one entry and this is it, it + * doesn't need to be added again. + */ + if ( bdb->bi_cache.c_lruhead == bdb->bi_cache.c_lrutail && + bdb->bi_cache.c_lruhead == *eip ) { + ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex ); + } else { + /* if entry is on LRU list, remove from old spot */ + if ( (*eip)->bei_lrunext || (*eip)->bei_lruprev ) { + ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_tail_mutex ); + LRU_DELETE( &bdb->bi_cache, *eip ); + ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_tail_mutex ); + } + /* lru_head_mutex is unlocked for us */ + bdb_cache_lru_add( bdb, *eip ); } - /* lru_mutex is unlocked for us */ - bdb_cache_lru_add( bdb, locker, *eip ); } - if ( islocked ) { - bdb_cache_entryinfo_unlock( *eip ); +#ifdef SLAP_ZONE_ALLOC + if (rc == 0 && (*eip)->bei_e) { + slap_zn_rlock(bdb->bi_cache.c_zctx, (*eip)->bei_e); } + slap_zh_runlock(bdb->bi_cache.c_zctx); +#endif return rc; } @@ -758,9 +899,11 @@ bdb_cache_add( u_int32_t locker ) { EntryInfo *new, ei; - struct berval rdn = e->e_name; DB_LOCK lock; int rc; +#ifdef BDB_HIER + struct berval rdn = e->e_name; +#endif ei.bei_id = e->e_id; ei.bei_parent = eip; @@ -778,7 +921,8 @@ bdb_cache_add( #ifdef BDB_HIER if ( nrdn->bv_len != e->e_nname.bv_len ) { - char *ptr = strchr( rdn.bv_val, ',' ); + char *ptr = ber_bvchr( &rdn, ',' ); + assert( ptr != NULL ); rdn.bv_len = ptr - rdn.bv_val; } ber_dupbv( &ei.bei_rdn, &rdn ); @@ -789,7 +933,11 @@ bdb_cache_add( /* bdb_csn_commit can cause this when adding the database root entry */ if ( new->bei_e ) { new->bei_e->e_private = NULL; +#ifdef SLAP_ZONE_ALLOC + bdb_entry_return( bdb, new->bei_e, new->bei_zseq ); +#else bdb_entry_return( new->bei_e ); +#endif } new->bei_e = e; e->e_private = new; @@ -798,15 +946,17 @@ bdb_cache_add( if (eip->bei_parent) { eip->bei_parent->bei_state &= ~CACHE_ENTRY_NO_GRANDKIDS; } + bdb_cache_entryinfo_unlock( eip ); - /* set lru mutex */ - ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_mutex ); ++bdb->bi_cache.c_cursize; - /* lru_mutex is unlocked for us */ - bdb_cache_lru_add( bdb, locker, new ); - - bdb_cache_entryinfo_unlock( eip ); ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock ); + + /* set lru mutex */ + ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_head_mutex ); + + /* lru_head_mutex is unlocked for us */ + bdb_cache_lru_add( bdb, new ); + return rc; } @@ -840,20 +990,22 @@ bdb_cache_modify( */ int bdb_cache_modrdn( + struct bdb_info *bdb, Entry *e, struct berval *nrdn, Entry *new, 
@@ -840,20 +990,22 @@
  */
 int
 bdb_cache_modrdn(
+	struct bdb_info *bdb,
 	Entry *e,
 	struct berval *nrdn,
 	Entry *new,
 	EntryInfo *ein,
-	DB_ENV *env,
 	u_int32_t locker,
 	DB_LOCK *lock )
 {
 	EntryInfo *ei = BEI(e), *pei;
-	struct berval rdn;
 	int rc;
+#ifdef BDB_HIER
+	struct berval rdn;
+#endif
 
 	/* Get write lock on data */
-	rc = bdb_cache_entry_db_relock( env, locker, ei, 1, 0, lock );
+	rc = bdb_cache_entry_db_relock( bdb->bi_dbenv, locker, ei, 1, 0, lock );
 	if ( rc ) return rc;
 
 	/* If we've done repeated mods on a cached entry, then e_attrs
@@ -878,15 +1030,22 @@
 	avl_delete( &pei->bei_kids, (caddr_t) ei, bdb_rdn_cmp );
 	free( ei->bei_nrdn.bv_val );
 	ber_dupbv( &ei->bei_nrdn, nrdn );
+
+	if ( !pei->bei_kids )
+		pei->bei_state |= CACHE_ENTRY_NO_KIDS | CACHE_ENTRY_NO_GRANDKIDS;
+
 #ifdef BDB_HIER
 	free( ei->bei_rdn.bv_val );
 
 	rdn = e->e_name;
 	if ( nrdn->bv_len != e->e_nname.bv_len ) {
-		char *ptr = strchr(rdn.bv_val, ',');
+		char *ptr = ber_bvchr(&rdn, ',');
+		assert( ptr != NULL );
 		rdn.bv_len = ptr - rdn.bv_val;
 	}
 	ber_dupbv( &ei->bei_rdn, &rdn );
+
+	pei->bei_ckids--;
+	if ( pei->bei_dkids ) pei->bei_dkids--;
 #endif
 
 	if (!ein) {
@@ -896,15 +1055,24 @@
 		bdb_cache_entryinfo_unlock( pei );
 		bdb_cache_entryinfo_lock( ein );
 	}
+	/* parent now has kids */
+	if ( ein->bei_state & CACHE_ENTRY_NO_KIDS )
+		ein->bei_state ^= CACHE_ENTRY_NO_KIDS;
 #ifdef BDB_HIER
+	/* parent might now have grandkids */
+	if ( ein->bei_state & CACHE_ENTRY_NO_GRANDKIDS &&
+		!(ei->bei_state & (CACHE_ENTRY_NO_KIDS)))
+		ein->bei_state ^= CACHE_ENTRY_NO_GRANDKIDS;
+
 	{
-		int max = ei->bei_modrdns;
 		/* Record the generation number of this change */
-		for ( pei = ein; pei->bei_parent; pei = pei->bei_parent ) {
-			if ( pei->bei_modrdns > max ) max = pei->bei_modrdns;
-		}
-		ei->bei_modrdns = max + 1;
+		ldap_pvt_thread_mutex_lock( &bdb->bi_modrdns_mutex );
+		bdb->bi_modrdns++;
+		ei->bei_modrdns = bdb->bi_modrdns;
+		ldap_pvt_thread_mutex_unlock( &bdb->bi_modrdns_mutex );
 	}
+	ein->bei_ckids++;
+	if ( ein->bei_dkids ) ein->bei_dkids++;
 #endif
 	avl_insert( &ein->bei_kids, ei, bdb_rdn_cmp, avl_dup_error );
 	bdb_cache_entryinfo_unlock( ein );
@@ -928,7 +1096,7 @@
 	EntryInfo *ei = BEI(e);
 	int	rc;
 
-	assert( e->e_private );
+	assert( e->e_private != NULL );
 
 	/* Set this early, warn off any queriers */
 	ei->bei_state |= CACHE_ENTRY_DELETED;
@@ -945,29 +1113,22 @@
 		return rc;
 	}
 
-	/* set cache write lock */
-	ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
-
-	/* Lock the parent's kids tree */
-	bdb_cache_entryinfo_lock( ei->bei_parent );
-
-#ifdef NEW_LOGGING
-	LDAP_LOG( CACHE, ENTRY,
-		"bdb_cache_delete: delete %ld.\n", e->e_id, 0, 0 );
-#else
 	Debug( LDAP_DEBUG_TRACE, "====> bdb_cache_delete( %ld )\n",
 		e->e_id, 0, 0 );
-#endif
 
 	/* set lru mutex */
-	ldap_pvt_thread_mutex_lock( &cache->lru_mutex );
-	rc = bdb_cache_delete_internal( cache, e->e_private );
-	/* free lru mutex */
-	ldap_pvt_thread_mutex_unlock( &cache->lru_mutex );
+	ldap_pvt_thread_mutex_lock( &cache->lru_tail_mutex );
+
+	/* set cache write lock */
+	ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
+
+	rc = bdb_cache_delete_internal( cache, e->e_private, 1 );
 
 	/* free cache write lock */
 	ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
-	bdb_cache_entryinfo_unlock( ei->bei_parent );
+
+	/* free lru mutex */
+	ldap_pvt_thread_mutex_unlock( &cache->lru_tail_mutex );
 
 	/* Leave entry info locked */
 
@@ -977,13 +1138,17 @@
 void
 bdb_cache_delete_cleanup(
 	Cache *cache,
-	Entry *e )
+	EntryInfo *ei )
 {
-	EntryInfo *ei = BEI(e);
-
-	ei->bei_e = NULL;
-	e->e_private = NULL;
-	bdb_entry_return( e );
+	if ( ei->bei_e ) {
+		ei->bei_e->e_private = NULL;
+#ifdef SLAP_ZONE_ALLOC
+		bdb_entry_return( ei->bei_bdb, ei->bei_e, ei->bei_zseq );
+#else
+		bdb_entry_return( ei->bei_e );
+#endif
+		ei->bei_e = NULL;
+	}
 
 	free( ei->bei_nrdn.bv_val );
 	ei->bei_nrdn.bv_val = NULL;
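
Deletion above is now two-phase: bdb_cache_delete_internal unlinks the EntryInfo from the parent's kids tree and the ID tree (taking the parent's entryinfo lock itself, see the next hunk) and fixes the counters, while bdb_cache_delete_cleanup, which now takes the EntryInfo directly, frees the cached Entry afterwards. A minimal sketch of the split, with invented types and tree_remove() standing in for avl_delete():

#include <pthread.h>
#include <stdlib.h>

typedef struct cnode {
	struct cnode *parent;
	void *kids;			/* stand-in for the kids AVL tree */
	pthread_mutex_t kids_mutex;	/* the entryinfo lock */
	void *payload;			/* the cached Entry */
} cnode;

int tree_remove( void **tree, cnode *n );	/* avl_delete() stand-in */

/* phase 1: cache-wide locks held by the caller; unlink only */
int delete_internal( cnode *n )
{
	int rc;

	pthread_mutex_lock( &n->parent->kids_mutex );
	rc = tree_remove( &n->parent->kids, n ) ? 0 : -1;
	pthread_mutex_unlock( &n->parent->kids_mutex );
	return rc;
}

/* phase 2: frees the payload once the unlink has been published */
void delete_cleanup( cnode *n )
{
	if ( n->payload ) {
		free( n->payload );	/* bdb_entry_return() in the patch */
		n->payload = NULL;
	}
}

This is the same pair the ITS#4010 branch of the eviction scan calls to prune idle leaf nodes when running under slapcat.
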
@@ -1008,13 +1173,17 @@
 static int
 bdb_cache_delete_internal(
     Cache	*cache,
-    EntryInfo		*e )
+    EntryInfo		*e,
+    int		decr )
 {
 	int rc = 0;	/* return code */
 
+	/* Lock the parent's kids tree */
+	bdb_cache_entryinfo_lock( e->bei_parent );
+
 #ifdef BDB_HIER
 	e->bei_parent->bei_ckids--;
-	if ( e->bei_parent->bei_dkids ) e->bei_parent->bei_dkids--;
+	if ( decr && e->bei_parent->bei_dkids ) e->bei_parent->bei_dkids--;
 #endif
 	/* dn tree */
 	if ( avl_delete( &e->bei_parent->bei_kids, (caddr_t) e, bdb_rdn_cmp )
@@ -1022,26 +1191,25 @@
 	{
 		rc = -1;
 	}
+	if ( e->bei_parent->bei_kids )
+		cache->c_leaves--;
 
 	/* id tree */
 	if ( avl_delete( &cache->c_idtree, (caddr_t) e, bdb_id_cmp ) == NULL ) {
 		rc = -1;
 	}
 
-	if (rc != 0) {
-		return rc;
-	}
+	if ( rc == 0 ){
+		cache->c_eiused--;
 
-	/* lru */
-	LRU_DELETE( cache, e );
-	cache->c_cursize--;
+		/* lru */
+		LRU_DELETE( cache, e );
+		if ( e->bei_e ) cache->c_cursize--;
+	}
 
-	/*
-	 * flag entry to be freed later by a call to cache_return_entry()
-	 */
-	e->bei_state |= CACHE_ENTRY_DELETED;
+	bdb_cache_entryinfo_unlock( e->bei_parent );
 
-	return( 0 );
+	return( rc );
 }
 
 static void
@@ -1053,7 +1221,11 @@
 	}
 	if ( ei->bei_e ) {
 		ei->bei_e->e_private = NULL;
+#ifdef SLAP_ZONE_ALLOC
+		bdb_entry_return( ei->bei_bdb, ei->bei_e, ei->bei_zseq );
+#else
 		bdb_entry_return( ei->bei_e );
+#endif
 	}
 	bdb_cache_entryinfo_destroy( ei );
 }
@@ -1064,26 +1236,32 @@
 	/* set cache write lock */
 	ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
 	/* set lru mutex */
-	ldap_pvt_thread_mutex_lock( &cache->lru_mutex );
+	ldap_pvt_thread_mutex_lock( &cache->lru_tail_mutex );
 
-#ifdef NEW_LOGGING
-	LDAP_LOG( CACHE, ENTRY, "bdb_cache_release_all: enter\n", 0, 0, 0 );
-#else
 	Debug( LDAP_DEBUG_TRACE, "====> bdb_cache_release_all\n", 0, 0, 0 );
-#endif
 
 	avl_free( cache->c_dntree.bei_kids, NULL );
 	avl_free( cache->c_idtree, bdb_entryinfo_release );
+	for (;cache->c_eifree;cache->c_eifree = cache->c_lruhead) {
+		cache->c_lruhead = cache->c_eifree->bei_lrunext;
+		bdb_cache_entryinfo_destroy(cache->c_eifree);
+	}
+	cache->c_cursize = 0;
+	cache->c_eiused = 0;
+	cache->c_leaves = 0;
+	cache->c_idtree = NULL;
 	cache->c_lruhead = NULL;
 	cache->c_lrutail = NULL;
+	cache->c_dntree.bei_kids = NULL;
 
 	/* free lru mutex */
-	ldap_pvt_thread_mutex_unlock( &cache->lru_mutex );
+	ldap_pvt_thread_mutex_unlock( &cache->lru_tail_mutex );
 	/* free cache write lock */
 	ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
 }
 
 #ifdef LDAP_DEBUG
+#ifdef SLAPD_UNUSED
 static void
 bdb_lru_print( Cache *cache )
 {
@@ -1101,40 +1279,48 @@
 	}
 }
 #endif
+#endif
 
 #ifdef BDB_REUSE_LOCKERS
 static void
 bdb_locker_id_free( void *key, void *data )
 {
 	DB_ENV *env = key;
-	int lockid = (int) data;
+	u_int32_t lockid = (long)data;
 	int rc;
 
 	rc = XLOCK_ID_FREE( env, lockid );
 	if ( rc == EINVAL ) {
 		DB_LOCKREQ lr;
-#ifdef NEW_LOGGING
-		LDAP_LOG( BACK_BDB, ERR,
-			"bdb_locker_id_free: %d err %s(%d)\n",
-			lockid, db_strerror(rc), rc );
-#else
 		Debug( LDAP_DEBUG_ANY,
-			"bdb_locker_id_free: %d err %s(%d)\n",
-			lockid, db_strerror(rc), rc );
-#endif
-		memset( &lr, 0, sizeof(lr) );
-
+			"bdb_locker_id_free: %lu err %s(%d)\n",
+			(unsigned long) lockid, db_strerror(rc), rc );
 		/* release all locks held by this locker. */
 		lr.op = DB_LOCK_PUT_ALL;
+		lr.obj = NULL;
 		env->lock_vec( env, lockid, 0, &lr, 1, NULL );
 		XLOCK_ID_FREE( env, lockid );
 	}
 }
 
+/* free up any keys used by the main thread */
+void
+bdb_locker_flush( DB_ENV *env )
+{
+	void *data;
+	void *ctx = ldap_pvt_thread_pool_context();
+
+	if ( !ldap_pvt_thread_pool_getkey( ctx, env, &data, NULL ) ) {
+		ldap_pvt_thread_pool_setkey( ctx, env, NULL, NULL );
+		bdb_locker_id_free( env, data );
+	}
+}
+
 int
-bdb_locker_id( Operation *op, DB_ENV *env, int *locker )
+bdb_locker_id( Operation *op, DB_ENV *env, u_int32_t *locker )
 {
-	int i, rc, lockid;
+	int i, rc;
+	u_int32_t lockid;
 	void *data;
 	void *ctx;
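
The BDB_REUSE_LOCKERS code above caches one BDB locker ID per worker thread in a thread-pool key, freeing it (and any locks it still holds) from the key's destructor; bdb_locker_flush releases the main thread's key early. A sketch of the same pattern with POSIX thread-specific data standing in for ldap_pvt_thread_pool_{get,set}key, and xlock_alloc()/xlock_free() as hypothetical stand-ins for XLOCK_ID()/XLOCK_ID_FREE():

#include <pthread.h>

int  xlock_alloc( unsigned *idp );	/* XLOCK_ID() stand-in */
void xlock_free( unsigned id );		/* XLOCK_ID_FREE() stand-in */

static pthread_key_t locker_key;
static pthread_once_t locker_once = PTHREAD_ONCE_INIT;

/* runs at thread exit for any thread that set the key */
static void locker_destroy( void *data )
{
	xlock_free( (unsigned)(unsigned long)data );
}

static void locker_init( void )
{
	pthread_key_create( &locker_key, locker_destroy );
}

/* returns this thread's locker, allocating it on first use; assumes,
 * like the code above, that a valid locker ID is never zero */
int get_locker( unsigned *locker )
{
	void *data;

	pthread_once( &locker_once, locker_init );
	data = pthread_getspecific( locker_key );
	if ( !data ) {
		unsigned id;
		int rc = xlock_alloc( &id );
		if ( rc ) return rc;
		data = (void *)(unsigned long)id;
		pthread_setspecific( locker_key, data );
	}
	*locker = (unsigned)(unsigned long)data;
	return 0;
}
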
@@ -1161,27 +1347,22 @@ bdb_locker_id( Operation *op, DB_ENV *env, int *locker )
 		if ( rc != 0) {
 			return rc;
 		}
-		data = (void *)lockid;
+		data = (void *)((long)lockid);
 		if ( ( rc = ldap_pvt_thread_pool_setkey( ctx, env,
 			data, bdb_locker_id_free ) ) ) {
 			XLOCK_ID_FREE( env, lockid );
-#ifdef NEW_LOGGING
-			LDAP_LOG( BACK_BDB, ERR, "bdb_locker_id: err %s(%d)\n",
-				db_strerror(rc), rc, 0 );
-#else
 			Debug( LDAP_DEBUG_ANY, "bdb_locker_id: err %s(%d)\n",
 				db_strerror(rc), rc, 0 );
-#endif
 
 			return rc;
 		}
 	} else {
-		lockid = (int)data;
+		lockid = (long)data;
 	}
 	*locker = lockid;
 	return 0;
 }
-#endif
+#endif /* BDB_REUSE_LOCKERS */
 
 void
 bdb_cache_delete_entry(
@@ -1196,7 +1377,11 @@
 	if ( ei->bei_e && !(ei->bei_state & CACHE_ENTRY_NOT_LINKED )) {
 		LRU_DELETE( &bdb->bi_cache, ei );
 		ei->bei_e->e_private = NULL;
+#ifdef SLAP_ZONE_ALLOC
+		bdb_entry_return( bdb, ei->bei_e, ei->bei_zseq );
+#else
 		bdb_entry_return( ei->bei_e );
+#endif
 		ei->bei_e = NULL;
 		--bdb->bi_cache.c_cursize;
 	}