/* $OpenLDAP$ */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
*
- * Copyright 2000-2006 The OpenLDAP Foundation.
+ * Copyright 2000-2009 The OpenLDAP Foundation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include "ldap_rq.h"
#ifdef BDB_HIER
-#define bdb_cache_lru_add hdb_cache_lru_add
+#define bdb_cache_lru_purge hdb_cache_lru_purge
#endif
-static void bdb_cache_lru_add( struct bdb_info *bdb, EntryInfo *ei );
+static void bdb_cache_lru_purge( struct bdb_info *bdb );
static int bdb_cache_delete_internal(Cache *cache, EntryInfo *e, int decr);
#ifdef LDAP_DEBUG
+#define SLAPD_UNUSED
#ifdef SLAPD_UNUSED
static void bdb_lru_print(Cache *cache);
#endif
#endif
+/* For concurrency experiments only! */
+#if 0
+#define ldap_pvt_thread_rdwr_wlock(a) 0
+#define ldap_pvt_thread_rdwr_wunlock(a) 0
+#define ldap_pvt_thread_rdwr_rlock(a) 0
+#define ldap_pvt_thread_rdwr_runlock(a) 0
+#endif
+
+#if 0
+#define ldap_pvt_thread_mutex_trylock(a) 0
+#endif
+
static EntryInfo *
bdb_cache_entryinfo_new( Cache *cache )
{
EntryInfo *ei = NULL;
if ( cache->c_eifree ) {
- ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
+ ldap_pvt_thread_mutex_lock( &cache->c_eifree_mutex );
if ( cache->c_eifree ) {
ei = cache->c_eifree;
cache->c_eifree = ei->bei_lrunext;
+ ei->bei_finders = 0;
}
- ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
+ ldap_pvt_thread_mutex_unlock( &cache->c_eifree_mutex );
}
- if ( ei ) {
- ei->bei_lrunext = NULL;
- ei->bei_state = 0;
- } else {
- ei = ch_calloc(1, sizeof(struct bdb_entry_info));
+ if ( !ei ) {
+ ei = ch_calloc(1, sizeof(EntryInfo));
ldap_pvt_thread_mutex_init( &ei->bei_kids_mutex );
}
+ ei->bei_state = CACHE_ENTRY_REFERENCED;
+
return ei;
}
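+/* Return an EntryInfo to the free list. The RDN values and tree
+ * pointers are cleared first; bdb_cache_entryinfo_new will recycle
+ * the struct.
+ */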
+static void
+bdb_cache_entryinfo_free( Cache *cache, EntryInfo *ei )
+{
+ free( ei->bei_nrdn.bv_val );
+ ei->bei_nrdn.bv_val = NULL;
+#ifdef BDB_HIER
+ free( ei->bei_rdn.bv_val );
+ ei->bei_rdn.bv_val = NULL;
+ ei->bei_modrdns = 0;
+ ei->bei_ckids = 0;
+ ei->bei_dkids = 0;
+#endif
+ ei->bei_parent = NULL;
+ ei->bei_kids = NULL;
+ ei->bei_lruprev = NULL;
+
+ ldap_pvt_thread_mutex_lock( &cache->c_eifree_mutex );
+ ei->bei_lrunext = cache->c_eifree;
+ cache->c_eifree = ei;
+ ldap_pvt_thread_mutex_unlock( &cache->c_eifree_mutex );
+}
+
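+/* Unlink an EntryInfo from the circular LRU list. The list head and
+ * tail are backed up to the previous node first, so neither ends up
+ * pointing at the node being removed.
+ */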
+#define LRU_DEL( c, e ) do { \
+ if ( e == (c)->c_lruhead ) (c)->c_lruhead = e->bei_lruprev; \
+ if ( e == (c)->c_lrutail ) (c)->c_lrutail = e->bei_lruprev; \
+ e->bei_lrunext->bei_lruprev = e->bei_lruprev; \
+ e->bei_lruprev->bei_lrunext = e->bei_lrunext; \
+ e->bei_lruprev = NULL; \
+} while ( 0 )
+
+/* Note - we now use a Second-Chance / Clock algorithm instead of
+ * Least-Recently-Used. This tremendously improves concurrency
+ * because we no longer need to manipulate the lists every time an
+ * entry is touched. We only need to lock the lists when adding
+ * or deleting an entry. It's now a circular doubly-linked list.
+ * We always append to the tail, but the head traverses the circle
+ * during a purge operation.
+ */
+static void
+bdb_cache_lru_link( struct bdb_info *bdb, EntryInfo *ei )
+{
+ /* Already linked, ignore */
+ if ( ei->bei_lruprev )
+ return;
+
+ /* Insert into circular LRU list */
+ ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_lru_mutex );
+
+ ei->bei_lruprev = bdb->bi_cache.c_lrutail;
+ if ( bdb->bi_cache.c_lrutail ) {
+ ei->bei_lrunext = bdb->bi_cache.c_lrutail->bei_lrunext;
+ bdb->bi_cache.c_lrutail->bei_lrunext = ei;
+ if ( ei->bei_lrunext )
+ ei->bei_lrunext->bei_lruprev = ei;
+ } else {
+ ei->bei_lrunext = ei->bei_lruprev = ei;
+ bdb->bi_cache.c_lruhead = ei;
+ }
+ bdb->bi_cache.c_lrutail = ei;
+ ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_lru_mutex );
+}
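+/* Illustration only: a minimal sketch of the Second-Chance idea used
+ * above, with a hypothetical node type standing in for EntryInfo.
+ * Referenced nodes lose their flag and are skipped; the first
+ * unreferenced node the hand reaches becomes the eviction victim.
+ */
+#if 0
+typedef struct node {
+	struct node *next;	/* circular, like bei_lrunext */
+	int referenced;		/* like CACHE_ENTRY_REFERENCED */
+} node;
+
+static node *
+clock_victim( node *hand )
+{
+	/* Terminates: after one full sweep every flag is clear */
+	while ( hand->referenced ) {
+		hand->referenced = 0;	/* second chance */
+		hand = hand->next;
+	}
+	return hand;
+}
+#endif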
+
+#ifdef NO_THREADS
+#define NO_DB_LOCK
+#endif
+
+/* #define NO_DB_LOCK 1 */
+/* Note: The BerkeleyDB locks are much slower than regular
+ * mutexes or rdwr locks. But the BDB implementation has the
+ * advantage of using a fixed size lock table, instead of
+ * allocating a lock object per entry in the DB. That's a
+ * key benefit for scaling. It also frees us from worrying
+ * about undetectable deadlocks between BDB activity and our
+ * own cache activity. It's still worth exploring faster
+ * alternatives though.
+ */
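+/* Illustration only: the fixed-size lock table referred to above is
+ * sized once at environment creation. A sketch with hypothetical
+ * limits, using the BDB 4.x configuration calls:
+ */
+#if 0
+	DB_ENV *env;
+	db_env_create( &env, 0 );
+	env->set_lk_max_locks( env, 10000 );	/* lock structures */
+	env->set_lk_max_objects( env, 10000 );	/* lockable objects */
+	env->set_lk_max_lockers( env, 1000 );	/* lockers/txns */
+#endif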
+
/* Atomically release and reacquire a lock */
int
bdb_cache_entry_db_relock(
- DB_ENV *env,
- u_int32_t locker,
+ struct bdb_info *bdb,
+ DB_TXN *txn,
EntryInfo *ei,
int rw,
int tryOnly,
DB_LOCK *lock )
{
-#ifdef NO_THREADS
+#ifdef NO_DB_LOCK
return 0;
#else
int rc;
list[1].lock = *lock;
list[1].mode = rw ? DB_LOCK_WRITE : DB_LOCK_READ;
list[1].obj = &lockobj;
- rc = env->lock_vec(env, locker, tryOnly ? DB_LOCK_NOWAIT : 0,
+ rc = bdb->bi_dbenv->lock_vec(bdb->bi_dbenv, TXN_ID(txn), tryOnly ? DB_LOCK_NOWAIT : 0,
list, 2, NULL );
if (rc && !tryOnly) {
}
static int
-bdb_cache_entry_db_lock( DB_ENV *env, u_int32_t locker, EntryInfo *ei,
+bdb_cache_entry_db_lock( struct bdb_info *bdb, DB_TXN *txn, EntryInfo *ei,
int rw, int tryOnly, DB_LOCK *lock )
{
-#ifdef NO_THREADS
+#ifdef NO_DB_LOCK
return 0;
#else
int rc;
lockobj.data = &ei->bei_id;
lockobj.size = sizeof(ei->bei_id) + 1;
- rc = LOCK_GET(env, locker, tryOnly ? DB_LOCK_NOWAIT : 0,
+ rc = LOCK_GET(bdb->bi_dbenv, TXN_ID(txn), tryOnly ? DB_LOCK_NOWAIT : 0,
&lockobj, db_rw, lock);
if (rc && !tryOnly) {
Debug( LDAP_DEBUG_TRACE,
ei->bei_id, rw, rc );
}
return rc;
-#endif /* NO_THREADS */
+#endif /* NO_DB_LOCK */
}
int
-bdb_cache_entry_db_unlock ( DB_ENV *env, DB_LOCK *lock )
+bdb_cache_entry_db_unlock ( struct bdb_info *bdb, DB_LOCK *lock )
{
-#ifdef NO_THREADS
+#ifdef NO_DB_LOCK
return 0;
#else
int rc;
- if ( !lock ) return 0;
+	if ( !lock || lock->mode == DB_LOCK_NG ) return 0;	/* DB_LOCK_NG: lock was never granted */
- rc = LOCK_PUT ( env, lock );
+ rc = LOCK_PUT ( bdb->bi_dbenv, lock );
return rc;
#endif
}
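+/* Release the DB lock on an entry. If the entry was loaded with
+ * ID_NOCACHE and no other thread found it in the meantime, free
+ * the entry itself as well.
+ */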
+void
+bdb_cache_return_entry_rw( struct bdb_info *bdb, Entry *e,
+ int rw, DB_LOCK *lock )
+{
+ EntryInfo *ei;
+ int free = 0;
+
+ ei = e->e_private;
+ if ( ei &&
+ ( ei->bei_state & CACHE_ENTRY_NOT_CACHED ) &&
+ ( bdb_cache_entryinfo_trylock( ei ) == 0 )) {
+ if ( ei->bei_state & CACHE_ENTRY_NOT_CACHED ) {
+ /* Releasing the entry can only be done when
+		 * we know that nobody else is using it, i.e. we
+		 * should hold an entry_db writelock. But the
+		 * flag is only set by the thread that loads the
+		 * entry, and only if no other thread has found
+		 * it while it was working. All other threads
+		 * clear the flag, which means that we should be
+ * the only thread using the entry if the flag
+ * is set here.
+ */
+ ei->bei_e = NULL;
+ ei->bei_state ^= CACHE_ENTRY_NOT_CACHED;
+ free = 1;
+ }
+ bdb_cache_entryinfo_unlock( ei );
+ }
+ bdb_cache_entry_db_unlock( bdb, lock );
+ if ( free ) {
+ e->e_private = NULL;
+ bdb_entry_return( e );
+ }
+}
+
static int
bdb_cache_entryinfo_destroy( EntryInfo *e )
{
return 0;
}
-#define LRU_DELETE( cache, ei ) do { \
- if ( (ei)->bei_lruprev != NULL ) { \
- (ei)->bei_lruprev->bei_lrunext = (ei)->bei_lrunext; \
- } else { \
- (cache)->c_lruhead = (ei)->bei_lrunext; \
- } \
- if ( (ei)->bei_lrunext != NULL ) { \
- (ei)->bei_lrunext->bei_lruprev = (ei)->bei_lruprev; \
- } else { \
- (cache)->c_lrutail = (ei)->bei_lruprev; \
- } \
- (ei)->bei_lrunext = (ei)->bei_lruprev = NULL; \
-} while(0)
-
-#define LRU_ADD( cache, ei ) do { \
- (ei)->bei_lrunext = (cache)->c_lruhead; \
- if ( (ei)->bei_lrunext != NULL ) { \
- (ei)->bei_lrunext->bei_lruprev = (ei); \
- } \
- (cache)->c_lruhead = (ei); \
- (ei)->bei_lruprev = NULL; \
- if ( !ldap_pvt_thread_mutex_trylock( &(cache)->lru_tail_mutex )) { \
- if ( (cache)->c_lrutail == NULL ) \
- (cache)->c_lrutail = (ei); \
- ldap_pvt_thread_mutex_unlock( &(cache)->lru_tail_mutex ); \
- } \
-} while(0)
-
/* Do a length-ordered sort on normalized RDNs */
static int
bdb_rdn_cmp( const void *v_e1, const void *v_e2 )
return e1->bei_id - e2->bei_id;
}
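+/* avl_insert duplicate callback: stash the already-existing node (v1)
+ * in the new node's bei_lrunext so the caller can retrieve it after
+ * the failed insert.
+ */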
+static int
+bdb_id_dup_err( void *v1, void *v2 )
+{
+ EntryInfo *e2 = v2;
+ e2->bei_lrunext = v1;
+ return -1;
+}
+
/* Create an entryinfo in the cache. Caller must release the locks later.
*/
static int
#endif
/* Add to cache ID tree */
- if (avl_insert( &bdb->bi_cache.c_idtree, ei2, bdb_id_cmp, avl_dup_error )) {
- EntryInfo *eix;
- eix = avl_find( bdb->bi_cache.c_idtree, ei2, bdb_id_cmp );
- bdb_cache_entryinfo_destroy( ei2 );
+ if (avl_insert( &bdb->bi_cache.c_idtree, ei2, bdb_id_cmp,
+ bdb_id_dup_err )) {
+ EntryInfo *eix = ei2->bei_lrunext;
+ bdb_cache_entryinfo_free( &bdb->bi_cache, ei2 );
ei2 = eix;
#ifdef BDB_HIER
/* It got freed above because its value was
ei->bei_rdn.bv_val = NULL;
#endif
} else {
+ int rc;
+
bdb->bi_cache.c_eiused++;
ber_dupbv( &ei2->bei_nrdn, &ei->bei_nrdn );
*/
if ( ei->bei_parent->bei_kids || !ei->bei_parent->bei_id )
bdb->bi_cache.c_leaves++;
- avl_insert( &ei->bei_parent->bei_kids, ei2, bdb_rdn_cmp,
+ rc = avl_insert( &ei->bei_parent->bei_kids, ei2, bdb_rdn_cmp,
avl_dup_error );
+ if ( rc ) {
+ /* This should never happen; entry cache is corrupt */
+ bdb->bi_dbenv->log_flush( bdb->bi_dbenv, NULL );
+ assert( !rc );
+ }
#ifdef BDB_HIER
ei->bei_parent->bei_ckids++;
#endif
}
for ( bdb_cache_entryinfo_lock( eip ); eip; ) {
+ eip->bei_state |= CACHE_ENTRY_REFERENCED;
ei.bei_parent = eip;
ei2 = (EntryInfo *)avl_find( eip->bei_kids, &ei, bdb_rdn_cmp );
if ( !ei2 ) {
+ DB_LOCK lock;
int len = ei.bei_nrdn.bv_len;
if ( BER_BVISEMPTY( ndn )) {
(ei.bei_nrdn.bv_val - ndn->bv_val);
bdb_cache_entryinfo_unlock( eip );
- rc = bdb_dn2id( op, txn, &ei.bei_nrdn, &ei );
+ BDB_LOG_PRINTF( bdb->bi_dbenv, NULL, "slapd Reading %s",
+ ei.bei_nrdn.bv_val );
+
+ lock.mode = DB_LOCK_NG;
+ rc = bdb_dn2id( op, &ei.bei_nrdn, &ei, txn, &lock );
if (rc) {
bdb_cache_entryinfo_lock( eip );
+ bdb_cache_entry_db_unlock( bdb, &lock );
*res = eip;
return rc;
}
+ BDB_LOG_PRINTF( bdb->bi_dbenv, NULL, "slapd Read got %s(%d)",
+ ei.bei_nrdn.bv_val, ei.bei_id );
+
/* DN exists but needs to be added to cache */
ei.bei_nrdn.bv_len = len;
rc = bdb_entryinfo_add_internal( bdb, &ei, &ei2 );
/* add_internal left eip and c_rwlock locked */
ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
+ bdb_cache_entry_db_unlock( bdb, &lock );
if ( rc ) {
*res = eip;
return rc;
int
hdb_cache_find_parent(
Operation *op,
- DB_TXN *txn,
- u_int32_t locker,
+ DB_TXN *txn,
ID id,
EntryInfo **res )
{
struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
EntryInfo ei, eip, *ei2 = NULL, *ein = NULL, *eir = NULL;
int rc;
- int addlru = 0;
ei.bei_id = id;
ei.bei_kids = NULL;
ei.bei_ckids = 0;
for (;;) {
- rc = hdb_dn2id_parent( op, txn, locker, &ei, &eip.bei_id );
+ rc = hdb_dn2id_parent( op, txn, &ei, &eip.bei_id );
if ( rc ) break;
/* Save the previous node, if any */
ei.bei_ckids = 0;
/* This node is not fully connected yet */
- ein->bei_state = CACHE_ENTRY_NOT_LINKED;
+ ein->bei_state |= CACHE_ENTRY_NOT_LINKED;
/* Insert this node into the ID tree */
ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
if ( avl_insert( &bdb->bi_cache.c_idtree, (caddr_t)ein,
- bdb_id_cmp, avl_dup_error ) ) {
+ bdb_id_cmp, bdb_id_dup_err ) ) {
+ EntryInfo *eix = ein->bei_lrunext;
/* Someone else created this node just before us.
* Free our new copy and use the existing one.
*/
- bdb_cache_entryinfo_destroy( ein );
- ein = (EntryInfo *)avl_find( bdb->bi_cache.c_idtree,
- (caddr_t) &ei, bdb_id_cmp );
+ bdb_cache_entryinfo_free( &bdb->bi_cache, ein );
+ ein = eix;
/* Link in any kids we've already processed */
if ( ei2 ) {
ein->bei_ckids++;
bdb_cache_entryinfo_unlock( ein );
}
- addlru = 0;
-
}
/* If this is the first time, save this node
bdb->bi_cache.c_leaves++;
ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
- if ( addlru ) {
- ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_head_mutex );
- bdb_cache_lru_add( bdb, ein );
- }
- addlru = 1;
-
/* Got the parent, link in and we're done. */
if ( ei2 ) {
+ bdb_cache_entryinfo_lock( eir );
bdb_cache_entryinfo_lock( ei2 );
ein->bei_parent = ei2;
+
avl_insert( &ei2->bei_kids, (caddr_t)ein, bdb_rdn_cmp,
avl_dup_error);
ei2->bei_ckids++;
- bdb_cache_entryinfo_unlock( ei2 );
- bdb_cache_entryinfo_lock( eir );
/* Reset all the state info */
for (ein = eir; ein != ei2; ein=ein->bei_parent)
ein->bei_state &= ~CACHE_ENTRY_NOT_LINKED;
+
+ bdb_cache_entryinfo_unlock( ei2 );
+
*res = eir;
break;
}
}
#endif
-/* caller must have lru_head_mutex locked. mutex
- * will be unlocked on return.
+/* This is best-effort only. If all entries in the cache are
+ * busy, they will all be kept. This is unlikely to happen
+ * unless the cache is very much smaller than the working set.
*/
static void
-bdb_cache_lru_add(
- struct bdb_info *bdb,
- EntryInfo *ei )
+bdb_cache_lru_purge( struct bdb_info *bdb )
{
DB_LOCK lock, *lockp;
- EntryInfo *elru, *elprev;
- int count = 0;
+ EntryInfo *elru, *elnext = NULL;
+ int count, islocked, eimax;
+ int efree = 0, eifree = 0, eicount, ecount;
+#ifdef LDAP_DEBUG
+ int iter;
+#endif
- LRU_ADD( &bdb->bi_cache, ei );
- ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );
+ /* Wait for the mutex; we're the only one trying to purge. */
+ ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_lru_mutex );
- /* See if we're above the cache size limit */
- if ( bdb->bi_cache.c_cursize <= bdb->bi_cache.c_maxsize )
+ /* maximum number of EntryInfo leaves to cache. In slapcat
+ * we always free all leaf nodes.
+ */
+ if ( slapMode & SLAP_TOOL_READONLY )
+ eimax = 0;
+ else
+ eimax = bdb->bi_cache.c_eimax;
+
+ if ( bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize )
+ efree = bdb->bi_cache.c_minfree;
+ if ( bdb->bi_cache.c_leaves > eimax ) {
+ eifree = bdb->bi_cache.c_minfree * 10;
+ if ( eifree >= eimax )
+ eifree = eimax / 2;
+ }
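+	/* For example, with hypothetical settings c_maxsize = 1000,
+	 * c_minfree = 7 and c_eimax = 2000: an oversize entry cache
+	 * frees 7 entries per purge, and an oversize EntryInfo cache
+	 * frees 70 leaves (or eimax/2, if c_minfree * 10 >= eimax).
+	 */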
+
+ if ( !efree && !eifree ) {
+ ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_lru_mutex );
+ bdb->bi_cache.c_purging = 0;
return;
+ }
- if ( bdb->bi_cache.c_locker ) {
+ if ( bdb->bi_cache.c_txn ) {
lockp = &lock;
} else {
lockp = NULL;
}
- ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_tail_mutex );
+ count = 0;
+ eicount = 0;
+ ecount = 0;
+#ifdef LDAP_DEBUG
+ iter = 0;
+#endif
/* Look for an unused entry to remove */
- for (elru = bdb->bi_cache.c_lrutail; elru; elru = elprev ) {
- elprev = elru->bei_lruprev;
+ for ( elru = bdb->bi_cache.c_lruhead; elru; elru = elnext ) {
+ elnext = elru->bei_lrunext;
+
+ if ( bdb_cache_entryinfo_trylock( elru ))
+ goto bottom;
+
+ /* This flag implements the clock replacement behavior */
+ if ( elru->bei_state & ( CACHE_ENTRY_REFERENCED )) {
+ elru->bei_state &= ~CACHE_ENTRY_REFERENCED;
+ bdb_cache_entryinfo_unlock( elru );
+ goto bottom;
+ }
+
+ /* If this node is in the process of linking into the cache,
+ * or this node is being deleted, skip it.
+ */
+ if (( elru->bei_state & ( CACHE_ENTRY_NOT_LINKED |
+ CACHE_ENTRY_DELETED | CACHE_ENTRY_LOADING )) ||
+ elru->bei_finders > 0 ) {
+ bdb_cache_entryinfo_unlock( elru );
+ goto bottom;
+ }
+
+ /* entryinfo is locked */
+ islocked = 1;
/* If we can successfully writelock it, then
* the object is idle.
*/
- if ( bdb_cache_entry_db_lock( bdb->bi_dbenv,
- bdb->bi_cache.c_locker, elru, 1, 1, lockp ) == 0 ) {
-
- int stop = 0;
+ if ( bdb_cache_entry_db_lock( bdb,
+ bdb->bi_cache.c_txn, elru, 1, 1, lockp ) == 0 ) {
- /* If this node is in the process of linking into the cache,
- * or this node is being deleted, skip it.
- */
- if ( elru->bei_state &
- ( CACHE_ENTRY_NOT_LINKED | CACHE_ENTRY_DELETED )) {
- bdb_cache_entry_db_unlock( bdb->bi_dbenv, lockp );
- continue;
- }
/* Free entry for this node if it's present */
if ( elru->bei_e ) {
- elru->bei_e->e_private = NULL;
+ ecount++;
+ if ( count < efree ) {
+ elru->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
- bdb_entry_return( bdb, elru->bei_e, elru->bei_zseq );
+ bdb_entry_return( bdb, elru->bei_e, elru->bei_zseq );
#else
- bdb_entry_return( elru->bei_e );
+ bdb_entry_return( elru->bei_e );
#endif
- elru->bei_e = NULL;
- count++;
- }
- /* ITS#4010 if we're in slapcat, and this node is a leaf
- * node, free it.
- *
- * FIXME: we need to do this for slapd as well, (which is
- * why we compute bi_cache.c_leaves now) but at the moment
- * we can't because it causes unresolvable deadlocks.
- */
- if ( slapMode & SLAP_TOOL_READONLY ) {
- if ( !elru->bei_kids ) {
- /* This does LRU_DELETE for us */
- bdb_cache_delete_internal( &bdb->bi_cache, elru, 0 );
- bdb_cache_delete_cleanup( &bdb->bi_cache, elru );
+ elru->bei_e = NULL;
+ count++;
+ } else {
+ /* Keep this node cached, skip to next */
+ bdb_cache_entry_db_unlock( bdb, lockp );
+ goto next;
}
- /* Leave node on LRU list for a future pass */
- } else {
- LRU_DELETE( &bdb->bi_cache, elru );
}
- bdb_cache_entry_db_unlock( bdb->bi_dbenv, lockp );
-
- if ( count == bdb->bi_cache.c_minfree ) {
- ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
- bdb->bi_cache.c_cursize -= bdb->bi_cache.c_minfree;
- if ( bdb->bi_cache.c_maxsize - bdb->bi_cache.c_cursize >=
- bdb->bi_cache.c_minfree )
- stop = 1;
- count = 0;
- ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
+ bdb_cache_entry_db_unlock( bdb, lockp );
+
+ /*
+			 * Non-leaf nodes are just dropped from the list;
+			 * leaf nodes are freed if we're over the leaf limit.
+ */
+ if ( elru->bei_kids ) {
+ /* Drop from list, we ignore it... */
+ LRU_DEL( &bdb->bi_cache, elru );
+ } else if ( eicount < eifree ) {
+ /* Too many leaf nodes, free this one */
+ bdb_cache_delete_internal( &bdb->bi_cache, elru, 0 );
+ bdb_cache_delete_cleanup( &bdb->bi_cache, elru );
+ islocked = 0;
+ eicount++;
+ } /* Leave on list until we need to free it */
+ }
+
+next:
+ if ( islocked )
+ bdb_cache_entryinfo_unlock( elru );
+
+ if ( count >= efree && eicount >= eifree ) {
+ if ( count || ecount > bdb->bi_cache.c_cursize ) {
+ ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_count_mutex );
+ /* HACK: we seem to be losing track, fix up now */
+ if ( ecount > bdb->bi_cache.c_cursize )
+ bdb->bi_cache.c_cursize = ecount;
+ bdb->bi_cache.c_cursize -= count;
+ ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_count_mutex );
}
- if (stop) break;
+ break;
}
+bottom:
+ if ( elnext == bdb->bi_cache.c_lruhead )
+ break;
+#ifdef LDAP_DEBUG
+ iter++;
+#endif
}
- ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_tail_mutex );
+ bdb->bi_cache.c_lruhead = elnext;
+ ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_lru_mutex );
+ bdb->bi_cache.c_purging = 0;
}
EntryInfo *
/*
* cache_find_id - find an entry in the cache, given id.
- * The entry is locked for Read upon return. Call with islocked TRUE if
+ * The entry is locked for Read upon return. Call with flag ID_LOCKED if
* the supplied *eip was already locked.
*/
DB_TXN *tid,
ID id,
EntryInfo **eip,
- int islocked,
- u_int32_t locker,
+ int flag,
DB_LOCK *lock )
{
struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
(caddr_t) &ei, bdb_id_cmp );
if ( *eip ) {
/* If the lock attempt fails, the info is in use */
- if ( ldap_pvt_thread_mutex_trylock(
- &(*eip)->bei_kids_mutex )) {
+ if ( bdb_cache_entryinfo_trylock( *eip )) {
ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
/* If this node is being deleted, treat
* as if the delete has already finished
ldap_pvt_thread_yield();
goto again;
}
- islocked = 1;
+ flag |= ID_LOCKED;
}
ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
}
/* See if the ID exists in the database; add it to the cache if so */
if ( !*eip ) {
#ifndef BDB_HIER
- rc = bdb_id2entry( op->o_bd, tid, locker, id, &ep );
+ rc = bdb_id2entry( op->o_bd, tid, id, &ep );
if ( rc == 0 ) {
rc = bdb_cache_find_ndn( op, tid,
&ep->e_nname, eip );
- if ( *eip ) islocked = 1;
+ if ( *eip ) flag |= ID_LOCKED;
if ( rc ) {
+ ep->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
bdb_entry_return( bdb, ep, (*eip)->bei_zseq );
#else
}
}
#else
- rc = hdb_cache_find_parent(op, tid, locker, id, eip );
- if ( rc == 0 ) islocked = 1;
+ rc = hdb_cache_find_parent(op, tid, id, eip );
+ if ( rc == 0 ) flag |= ID_LOCKED;
#endif
}
/* Ok, we found the info, do we have the entry? */
if ( rc == 0 ) {
+ if ( !( flag & ID_LOCKED )) {
+ bdb_cache_entryinfo_lock( *eip );
+ flag |= ID_LOCKED;
+ }
+
if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
rc = DB_NOTFOUND;
} else {
+ (*eip)->bei_finders++;
+ (*eip)->bei_state |= CACHE_ENTRY_REFERENCED;
/* Make sure only one thread tries to load the entry */
load1:
#ifdef SLAP_ZONE_ALLOC
load = 1;
(*eip)->bei_state |= CACHE_ENTRY_LOADING;
}
- if ( islocked ) {
+
+ if ( !load ) {
+ /* Clear the uncached state if we are not
+			 * loading it, i.e. it is already cached or
+ * another thread is currently loading it.
+ */
+ (*eip)->bei_state &= ~CACHE_ENTRY_NOT_CACHED;
+ flag &= ~ID_NOCACHE;
+ }
+
+ if ( flag & ID_LOCKED ) {
bdb_cache_entryinfo_unlock( *eip );
- islocked = 0;
+ flag ^= ID_LOCKED;
}
- rc = bdb_cache_entry_db_lock( bdb->bi_dbenv, locker, *eip, 0, 0, lock );
+ rc = bdb_cache_entry_db_lock( bdb, tid, *eip, load, 0, lock );
if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
rc = DB_NOTFOUND;
- bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
+ bdb_cache_entry_db_unlock( bdb, lock );
} else if ( rc == 0 ) {
if ( load ) {
- /* Give up original read lock, obtain write lock
- */
- if ( rc == 0 ) {
- rc = bdb_cache_entry_db_relock( bdb->bi_dbenv, locker,
- *eip, 1, 0, lock );
- }
- if ( rc == 0 && !ep) {
- rc = bdb_id2entry( op->o_bd, tid, locker, id, &ep );
+					if ( !ep ) {
+ rc = bdb_id2entry( op->o_bd, tid, id, &ep );
}
if ( rc == 0 ) {
ep->e_private = *eip;
(*eip)->bei_zseq = *((ber_len_t *)ep - 2);
#endif
ep = NULL;
+ bdb_cache_lru_link( bdb, *eip );
+ if (( flag & ID_NOCACHE ) &&
+ ( bdb_cache_entryinfo_trylock( *eip ) == 0 )) {
+					/* Set the uncached state only if no other thread
+ * found the info while we were loading the entry.
+ */
+ if ( (*eip)->bei_finders == 1 )
+ (*eip)->bei_state |= CACHE_ENTRY_NOT_CACHED;
+ bdb_cache_entryinfo_unlock( *eip );
+ }
}
- (*eip)->bei_state ^= CACHE_ENTRY_LOADING;
if ( rc == 0 ) {
/* If we succeeded, downgrade back to a readlock. */
- rc = bdb_cache_entry_db_relock( bdb->bi_dbenv, locker,
+ rc = bdb_cache_entry_db_relock( bdb, tid,
*eip, 0, 0, lock );
} else {
/* Otherwise, release the lock. */
- bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
+ bdb_cache_entry_db_unlock( bdb, lock );
}
} else if ( !(*eip)->bei_e ) {
/* Some other thread is trying to load the entry,
- * give it a chance to finish.
+ * wait for it to finish.
*/
- bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
- ldap_pvt_thread_yield();
+ bdb_cache_entry_db_unlock( bdb, lock );
bdb_cache_entryinfo_lock( *eip );
- islocked = 1;
+ flag |= ID_LOCKED;
goto load1;
#ifdef BDB_HIER
} else {
*/
rc = bdb_fix_dn( (*eip)->bei_e, 1 );
if ( rc ) {
- bdb_cache_entry_db_relock( bdb->bi_dbenv,
- locker, *eip, 1, 0, lock );
+ bdb_cache_entry_db_relock( bdb,
+ tid, *eip, 1, 0, lock );
/* check again in case other modifier did it already */
if ( bdb_fix_dn( (*eip)->bei_e, 1 ) )
rc = bdb_fix_dn( (*eip)->bei_e, 2 );
- bdb_cache_entry_db_relock( bdb->bi_dbenv,
- locker, *eip, 0, 0, lock );
+ bdb_cache_entry_db_relock( bdb,
+ tid, *eip, 0, 0, lock );
}
#endif
}
-
}
+ bdb_cache_entryinfo_lock( *eip );
+ (*eip)->bei_finders--;
+ if ( load )
+ (*eip)->bei_state ^= CACHE_ENTRY_LOADING;
+ bdb_cache_entryinfo_unlock( *eip );
}
}
- if ( islocked ) {
+ if ( flag & ID_LOCKED ) {
bdb_cache_entryinfo_unlock( *eip );
}
if ( ep ) {
+ ep->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
bdb_entry_return( bdb, ep, (*eip)->bei_zseq );
#else
#endif
}
if ( rc == 0 ) {
+ int purge = 0;
if ( load ) {
- ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
- bdb->bi_cache.c_cursize++;
- ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
- }
-
- ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_head_mutex );
-
- /* If the LRU list has only one entry and this is it, it
- * doesn't need to be added again.
- */
- if ( bdb->bi_cache.c_lruhead == bdb->bi_cache.c_lrutail &&
- bdb->bi_cache.c_lruhead == *eip ) {
- ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );
- } else {
- /* if entry is on LRU list, remove from old spot */
- if ( (*eip)->bei_lrunext || (*eip)->bei_lruprev ) {
- ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_tail_mutex );
- LRU_DELETE( &bdb->bi_cache, *eip );
- ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_tail_mutex );
+ ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_count_mutex );
+ if ( !( flag & ID_NOCACHE )) {
+ bdb->bi_cache.c_cursize++;
+ if ( bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize &&
+ !bdb->bi_cache.c_purging ) {
+ purge = 1;
+ bdb->bi_cache.c_purging = 1;
+ }
+ } else if ( bdb->bi_cache.c_leaves > bdb->bi_cache.c_eimax && !bdb->bi_cache.c_purging ) {
+ purge = 1;
+ bdb->bi_cache.c_purging = 1;
}
- /* lru_head_mutex is unlocked for us */
- bdb_cache_lru_add( bdb, *eip );
+ ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_count_mutex );
}
+ if ( purge )
+ bdb_cache_lru_purge( bdb );
}
#ifdef SLAP_ZONE_ALLOC
EntryInfo *eip,
Entry *e,
struct berval *nrdn,
- u_int32_t locker )
+ DB_TXN *txn,
+ DB_LOCK *lock )
{
EntryInfo *new, ei;
- DB_LOCK lock;
- int rc;
+ int rc, purge = 0;
#ifdef BDB_HIER
struct berval rdn = e->e_name;
#endif
/* Lock this entry so that bdb_add can run to completion.
* It can only fail if BDB has run out of lock resources.
*/
- rc = bdb_cache_entry_db_lock( bdb->bi_dbenv, locker, &ei, 1, 0, &lock );
+ rc = bdb_cache_entry_db_lock( bdb, txn, &ei, 0, 0, lock );
if ( rc ) {
bdb_cache_entryinfo_unlock( eip );
return rc;
}
new->bei_e = e;
e->e_private = new;
- new->bei_state = CACHE_ENTRY_NO_KIDS | CACHE_ENTRY_NO_GRANDKIDS;
+ new->bei_state |= CACHE_ENTRY_NO_KIDS | CACHE_ENTRY_NO_GRANDKIDS;
eip->bei_state &= ~CACHE_ENTRY_NO_KIDS;
if (eip->bei_parent) {
eip->bei_parent->bei_state &= ~CACHE_ENTRY_NO_GRANDKIDS;
}
bdb_cache_entryinfo_unlock( eip );
- ++bdb->bi_cache.c_cursize;
ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
+ ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_count_mutex );
+ ++bdb->bi_cache.c_cursize;
+ if ( bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize &&
+ !bdb->bi_cache.c_purging ) {
+ purge = 1;
+ bdb->bi_cache.c_purging = 1;
+ }
+ ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_count_mutex );
- /* set lru mutex */
- ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_head_mutex );
+ bdb_cache_lru_link( bdb, new );
- /* lru_head_mutex is unlocked for us */
- bdb_cache_lru_add( bdb, new );
+ if ( purge )
+ bdb_cache_lru_purge( bdb );
return rc;
}
int
bdb_cache_modify(
+ struct bdb_info *bdb,
Entry *e,
Attribute *newAttrs,
- DB_ENV *env,
- u_int32_t locker,
+ DB_TXN *txn,
DB_LOCK *lock )
{
EntryInfo *ei = BEI(e);
int rc;
/* Get write lock on data */
- rc = bdb_cache_entry_db_relock( env, locker, ei, 1, 0, lock );
+ rc = bdb_cache_entry_db_relock( bdb, txn, ei, 1, 0, lock );
/* If we've done repeated mods on a cached entry, then e_attrs
* is no longer contiguous with the entry, and must be freed.
struct berval *nrdn,
Entry *new,
EntryInfo *ein,
- u_int32_t locker,
+ DB_TXN *txn,
DB_LOCK *lock )
{
EntryInfo *ei = BEI(e), *pei;
#endif
/* Get write lock on data */
- rc = bdb_cache_entry_db_relock( bdb->bi_dbenv, locker, ei, 1, 0, lock );
+ rc = bdb_cache_entry_db_relock( bdb, txn, ei, 1, 0, lock );
if ( rc ) return rc;
/* If we've done repeated mods on a cached entry, then e_attrs
avl_delete( &pei->bei_kids, (caddr_t) ei, bdb_rdn_cmp );
free( ei->bei_nrdn.bv_val );
ber_dupbv( &ei->bei_nrdn, nrdn );
+
#ifdef BDB_HIER
free( ei->bei_rdn.bv_val );
rdn.bv_len = ptr - rdn.bv_val;
}
ber_dupbv( &ei->bei_rdn, &rdn );
+
+	/* Moving to a new parent: decrement the old parent's kid counts */
+ if ( ein ) {
+ pei->bei_ckids--;
+ if ( pei->bei_dkids ) {
+ pei->bei_dkids--;
+ if ( pei->bei_dkids < 2 )
+ pei->bei_state |= CACHE_ENTRY_NO_KIDS | CACHE_ENTRY_NO_GRANDKIDS;
+ }
+ }
#endif
if (!ein) {
ei->bei_parent = ein;
bdb_cache_entryinfo_unlock( pei );
bdb_cache_entryinfo_lock( ein );
- }
+
+ /* new parent now has kids */
+ if ( ein->bei_state & CACHE_ENTRY_NO_KIDS )
+ ein->bei_state ^= CACHE_ENTRY_NO_KIDS;
+ /* grandparent has grandkids */
+ if ( ein->bei_parent )
+ ein->bei_parent->bei_state &= ~CACHE_ENTRY_NO_GRANDKIDS;
#ifdef BDB_HIER
- {
- /* Record the generation number of this change */
- ldap_pvt_thread_mutex_lock( &bdb->bi_modrdns_mutex );
- bdb->bi_modrdns++;
- ei->bei_modrdns = bdb->bi_modrdns;
- ldap_pvt_thread_mutex_unlock( &bdb->bi_modrdns_mutex );
+ /* parent might now have grandkids */
+ if ( ein->bei_state & CACHE_ENTRY_NO_GRANDKIDS &&
+ !(ei->bei_state & CACHE_ENTRY_NO_KIDS))
+ ein->bei_state ^= CACHE_ENTRY_NO_GRANDKIDS;
+
+ ein->bei_ckids++;
+ if ( ein->bei_dkids ) ein->bei_dkids++;
+#endif
}
+
+#ifdef BDB_HIER
+ /* Record the generation number of this change */
+ ldap_pvt_thread_mutex_lock( &bdb->bi_modrdns_mutex );
+ bdb->bi_modrdns++;
+ ei->bei_modrdns = bdb->bi_modrdns;
+ ldap_pvt_thread_mutex_unlock( &bdb->bi_modrdns_mutex );
#endif
+
avl_insert( &ein->bei_kids, ei, bdb_rdn_cmp, avl_dup_error );
bdb_cache_entryinfo_unlock( ein );
return rc;
*/
int
bdb_cache_delete(
- Cache *cache,
+ struct bdb_info *bdb,
Entry *e,
- DB_ENV *env,
- u_int32_t locker,
+ DB_TXN *txn,
DB_LOCK *lock )
{
EntryInfo *ei = BEI(e);
assert( e->e_private != NULL );
+ /* Lock the entry's info */
+ bdb_cache_entryinfo_lock( ei );
+
/* Set this early, warn off any queriers */
ei->bei_state |= CACHE_ENTRY_DELETED;
- /* Lock the entry's info */
- bdb_cache_entryinfo_lock( ei );
+ bdb_cache_entryinfo_unlock( ei );
/* Get write lock on the data */
- rc = bdb_cache_entry_db_relock( env, locker, ei, 1, 0, lock );
+ rc = bdb_cache_entry_db_relock( bdb, txn, ei, 1, 0, lock );
if ( rc ) {
/* couldn't lock, undo and give up */
ei->bei_state ^= CACHE_ENTRY_DELETED;
- bdb_cache_entryinfo_unlock( ei );
return rc;
}
e->e_id, 0, 0 );
/* set lru mutex */
- ldap_pvt_thread_mutex_lock( &cache->lru_tail_mutex );
+ ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_lru_mutex );
- /* set cache write lock */
- ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
-
- rc = bdb_cache_delete_internal( cache, e->e_private, 1 );
-
- /* free cache write lock */
- ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
+ rc = bdb_cache_delete_internal( &bdb->bi_cache, e->e_private, 1 );
/* free lru mutex */
- ldap_pvt_thread_mutex_unlock( &cache->lru_tail_mutex );
-
- /* Leave entry info locked */
+ ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_lru_mutex );
return( rc );
}
Cache *cache,
EntryInfo *ei )
{
+ /* Enter with ei locked */
+
if ( ei->bei_e ) {
ei->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
ei->bei_e = NULL;
}
- free( ei->bei_nrdn.bv_val );
- ei->bei_nrdn.bv_val = NULL;
-#ifdef BDB_HIER
- free( ei->bei_rdn.bv_val );
- ei->bei_rdn.bv_val = NULL;
- ei->bei_modrdns = 0;
- ei->bei_ckids = 0;
- ei->bei_dkids = 0;
-#endif
- ei->bei_parent = NULL;
- ei->bei_kids = NULL;
- ei->bei_lruprev = NULL;
-
- ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
- ei->bei_lrunext = cache->c_eifree;
- cache->c_eifree = ei;
- ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
+ bdb_cache_entryinfo_free( cache, ei );
bdb_cache_entryinfo_unlock( ei );
}
int decr )
{
int rc = 0; /* return code */
+ int decr_leaf = 0;
/* Lock the parent's kids tree */
bdb_cache_entryinfo_lock( e->bei_parent );
rc = -1;
}
if ( e->bei_parent->bei_kids )
- cache->c_leaves--;
+ decr_leaf = 1;
+ bdb_cache_entryinfo_unlock( e->bei_parent );
+
+ ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
/* id tree */
- if ( avl_delete( &cache->c_idtree, (caddr_t) e, bdb_id_cmp ) == NULL ) {
+ if ( avl_delete( &cache->c_idtree, (caddr_t) e, bdb_id_cmp )) {
+ cache->c_eiused--;
+ if ( decr_leaf )
+ cache->c_leaves--;
+ } else {
rc = -1;
}
+ ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
if ( rc == 0 ){
- cache->c_eiused--;
-
/* lru */
- LRU_DELETE( cache, e );
- if ( e->bei_e ) cache->c_cursize--;
- }
+ LRU_DEL( cache, e );
- bdb_cache_entryinfo_unlock( e->bei_parent );
+ if ( e->bei_e ) {
+ ldap_pvt_thread_mutex_lock( &cache->c_count_mutex );
+ cache->c_cursize--;
+ ldap_pvt_thread_mutex_unlock( &cache->c_count_mutex );
+ }
+ }
return( rc );
}
/* set cache write lock */
ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
/* set lru mutex */
- ldap_pvt_thread_mutex_lock( &cache->lru_tail_mutex );
+ ldap_pvt_thread_mutex_lock( &cache->c_lru_mutex );
Debug( LDAP_DEBUG_TRACE, "====> bdb_cache_release_all\n", 0, 0, 0 );
cache->c_dntree.bei_kids = NULL;
/* free lru mutex */
- ldap_pvt_thread_mutex_unlock( &cache->lru_tail_mutex );
+ ldap_pvt_thread_mutex_unlock( &cache->c_lru_mutex );
/* free cache write lock */
ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
}
{
EntryInfo *e;
- fprintf( stderr, "LRU queue (head to tail):\n" );
- for ( e = cache->c_lruhead; e != NULL; e = e->bei_lrunext ) {
- fprintf( stderr, "\trdn \"%20s\" id %ld\n",
- e->bei_nrdn.bv_val, e->bei_id );
+ fprintf( stderr, "LRU circle head: %p\n", (void *) cache->c_lruhead );
+ fprintf( stderr, "LRU circle (tail forward):\n" );
+ for ( e = cache->c_lrutail; ; ) {
+ fprintf( stderr, "\t%p, %p id %ld rdn \"%s\"\n",
+ (void *) e, (void *) e->bei_e, e->bei_id, e->bei_nrdn.bv_val );
+ e = e->bei_lrunext;
+ if ( e == cache->c_lrutail )
+ break;
}
- fprintf( stderr, "LRU queue (tail to head):\n" );
- for ( e = cache->c_lrutail; e != NULL; e = e->bei_lruprev ) {
- fprintf( stderr, "\trdn \"%20s\" id %ld\n",
- e->bei_nrdn.bv_val, e->bei_id );
+ fprintf( stderr, "LRU circle (tail backward):\n" );
+ for ( e = cache->c_lrutail; ; ) {
+ fprintf( stderr, "\t%p, %p id %ld rdn \"%s\"\n",
+ (void *) e, (void *) e->bei_e, e->bei_id, e->bei_nrdn.bv_val );
+ e = e->bei_lruprev;
+ if ( e == cache->c_lrutail )
+ break;
}
}
#endif
#endif
-#ifdef BDB_REUSE_LOCKERS
static void
-bdb_locker_id_free( void *key, void *data )
+bdb_reader_free( void *key, void *data )
{
- DB_ENV *env = key;
- u_int32_t lockid = (long)data;
- int rc;
+ /* DB_ENV *env = key; */
+ DB_TXN *txn = data;
+
+ TXN_ABORT( txn );
+}
- rc = XLOCK_ID_FREE( env, lockid );
- if ( rc == EINVAL ) {
- DB_LOCKREQ lr;
- Debug( LDAP_DEBUG_ANY,
- "bdb_locker_id_free: %lu err %s(%d)\n",
- (unsigned long) lockid, db_strerror(rc), rc );
- /* release all locks held by this locker. */
- lr.op = DB_LOCK_PUT_ALL;
- lr.obj = NULL;
- env->lock_vec( env, lockid, 0, &lr, 1, NULL );
- XLOCK_ID_FREE( env, lockid );
+/* free up any keys used by the main thread */
+void
+bdb_reader_flush( DB_ENV *env )
+{
+ void *data;
+ void *ctx = ldap_pvt_thread_pool_context();
+
+ if ( !ldap_pvt_thread_pool_getkey( ctx, env, &data, NULL ) ) {
+ ldap_pvt_thread_pool_setkey( ctx, env, NULL, 0, NULL, NULL );
+ bdb_reader_free( env, data );
}
}
int
-bdb_locker_id( Operation *op, DB_ENV *env, u_int32_t *locker )
+bdb_reader_get( Operation *op, DB_ENV *env, DB_TXN **txn )
{
int i, rc;
- u_int32_t lockid;
void *data;
void *ctx;
- if ( !env || !locker ) return -1;
+ if ( !env || !txn ) return -1;
/* If no op was provided, try to find the ctx anyway... */
if ( op ) {
/* Shouldn't happen unless we're single-threaded */
if ( !ctx ) {
- *locker = 0;
+ *txn = NULL;
return 0;
}
if ( ldap_pvt_thread_pool_getkey( ctx, env, &data, NULL ) ) {
for ( i=0, rc=1; rc != 0 && i<4; i++ ) {
- rc = XLOCK_ID( env, &lockid );
+ rc = TXN_BEGIN( env, NULL, txn, DB_READ_COMMITTED );
if (rc) ldap_pvt_thread_yield();
}
if ( rc != 0) {
return rc;
}
- data = (void *)((long)lockid);
+ data = *txn;
if ( ( rc = ldap_pvt_thread_pool_setkey( ctx, env,
- data, bdb_locker_id_free ) ) ) {
- XLOCK_ID_FREE( env, lockid );
- Debug( LDAP_DEBUG_ANY, "bdb_locker_id: err %s(%d)\n",
+ data, bdb_reader_free, NULL, NULL ) ) ) {
+ TXN_ABORT( *txn );
+ Debug( LDAP_DEBUG_ANY, "bdb_reader_get: err %s(%d)\n",
db_strerror(rc), rc, 0 );
return rc;
}
} else {
- lockid = (long)data;
+ *txn = data;
}
- *locker = lockid;
return 0;
}
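+/* Illustration only: callers fetch the per-thread reader txn and pass
+ * it wherever a locker ID was passed before. A sketch, with the
+ * surrounding variables assumed from a hypothetical caller:
+ */
+#if 0
+	DB_TXN *rtxn;
+	rc = bdb_reader_get( op, bdb->bi_dbenv, &rtxn );
+	if ( rc == 0 )
+		rc = bdb_id2entry( op->o_bd, rtxn, id, &e );
+#endif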
-#endif /* BDB_REUSE_LOCKERS */
-
-void
-bdb_cache_delete_entry(
- struct bdb_info *bdb,
- EntryInfo *ei,
- u_int32_t locker,
- DB_LOCK *lock )
-{
- ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
- if ( bdb_cache_entry_db_lock( bdb->bi_dbenv, bdb->bi_cache.c_locker, ei, 1, 1, lock ) == 0 )
- {
- if ( ei->bei_e && !(ei->bei_state & CACHE_ENTRY_NOT_LINKED )) {
- LRU_DELETE( &bdb->bi_cache, ei );
- ei->bei_e->e_private = NULL;
-#ifdef SLAP_ZONE_ALLOC
- bdb_entry_return( bdb, ei->bei_e, ei->bei_zseq );
-#else
- bdb_entry_return( ei->bei_e );
-#endif
- ei->bei_e = NULL;
- --bdb->bi_cache.c_cursize;
- }
- bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
- }
- ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
-}