/* $OpenLDAP$ */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
*
- * Copyright 2000-2006 The OpenLDAP Foundation.
+ * Copyright 2000-2007 The OpenLDAP Foundation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
static int bdb_cache_delete_internal(Cache *cache, EntryInfo *e, int decr);
#ifdef LDAP_DEBUG
+#define SLAPD_UNUSED
#ifdef SLAPD_UNUSED
static void bdb_lru_print(Cache *cache);
#endif
cache->c_eifree = ei->bei_lrunext;
}
ldap_pvt_thread_mutex_unlock( &cache->c_eifree_mutex );
+ ei->bei_finders = 0;
}
if ( !ei ) {
ei = ch_calloc(1, sizeof(EntryInfo));
return ei;
}
+static void
+bdb_cache_entryinfo_free( Cache *cache, EntryInfo *ei )
+{
+ free( ei->bei_nrdn.bv_val );
+ ei->bei_nrdn.bv_val = NULL;
+#ifdef BDB_HIER
+ free( ei->bei_rdn.bv_val );
+ ei->bei_rdn.bv_val = NULL;
+ ei->bei_modrdns = 0;
+ ei->bei_ckids = 0;
+ ei->bei_dkids = 0;
+#endif
+ ei->bei_parent = NULL;
+ ei->bei_kids = NULL;
+ ei->bei_lruprev = NULL;
+
+ ldap_pvt_thread_mutex_lock( &cache->c_eifree_mutex );
+ ei->bei_lrunext = cache->c_eifree;
+ cache->c_eifree = ei;
+ ldap_pvt_thread_mutex_unlock( &cache->c_eifree_mutex );
+}
+
+#define LRU_DEL( c, e ) do { \
+ if ( e == (c)->c_lruhead ) (c)->c_lruhead = e->bei_lruprev; \
+ if ( e == (c)->c_lrutail ) (c)->c_lrutail = e->bei_lruprev; \
+ e->bei_lrunext->bei_lruprev = e->bei_lruprev; \
+ e->bei_lruprev->bei_lrunext = e->bei_lrunext; \
+ e->bei_lruprev = NULL; \
+} while ( 0 )
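
For reference, here is a minimal, self-contained sketch of the unlink pattern `LRU_DEL` encodes, using toy `node`/`ring` types (illustrative only, not OpenLDAP's). The convention is the same: a NULL `prev` pointer marks a node as "off the list", and the head/tail anchors are pulled back to the victim's predecessor before the neighbors are re-stitched. Like the macro above, the sketch does not special-case removing the last remaining element.

```c
#include <assert.h>
#include <stddef.h>

typedef struct node {
	struct node *next, *prev;
} node;

typedef struct ring {
	node *head, *tail;
} ring;

/* Same shape as LRU_DEL above: fix the anchors, splice the node
 * out of the circle, then clear prev to mark it "off the list". */
#define RING_DEL( r, n ) do { \
	if ( (n) == (r)->head ) (r)->head = (n)->prev; \
	if ( (n) == (r)->tail ) (r)->tail = (n)->prev; \
	(n)->next->prev = (n)->prev; \
	(n)->prev->next = (n)->next; \
	(n)->prev = NULL; \
} while ( 0 )

int
main( void )
{
	node a, b;
	ring r;

	/* Two-element circle: b is the head, a the tail. */
	a.next = a.prev = &b;
	b.next = b.prev = &a;
	r.head = &b;
	r.tail = &a;

	RING_DEL( &r, &b );
	assert( r.head == &a && r.tail == &a );
	assert( a.next == &a && a.prev == &a );	/* one-element circle */
	assert( b.prev == NULL );		/* b is off the list */
	return 0;
}
```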
+
/* Note - we now use a Second-Chance / Clock algorithm instead of
 * Least-Recently-Used. This tremendously improves concurrency
 * because we no longer need to manipulate the lists every time an
 * entry is touched; the lists are only changed when an entry is
 * added or during a purge operation.
 */
static void
-bdb_cache_lru_link( Cache *cache, EntryInfo *ei )
+bdb_cache_lru_link( struct bdb_info *bdb, EntryInfo *ei )
{
+
/* Insert into circular LRU list */
- ldap_pvt_thread_mutex_lock( &cache->lru_tail_mutex );
- ei->bei_lruprev = cache->c_lrutail;
- if ( cache->c_lrutail ) {
- ei->bei_lrunext = cache->c_lrutail->bei_lrunext;
- cache->c_lrutail->bei_lrunext = ei;
+ ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_lru_mutex );
+
+ /* Still linked, remove */
+ if ( ei->bei_lruprev ) {
+ LRU_DEL( &bdb->bi_cache, ei );
+ }
+ ei->bei_lruprev = bdb->bi_cache.c_lrutail;
+ if ( bdb->bi_cache.c_lrutail ) {
+ ei->bei_lrunext = bdb->bi_cache.c_lrutail->bei_lrunext;
+ bdb->bi_cache.c_lrutail->bei_lrunext = ei;
if ( ei->bei_lrunext )
ei->bei_lrunext->bei_lruprev = ei;
} else {
ei->bei_lrunext = ei->bei_lruprev = ei;
- cache->c_lruhead = ei;
+ bdb->bi_cache.c_lruhead = ei;
}
- cache->c_lrutail = ei;
- ldap_pvt_thread_mutex_unlock( &cache->lru_tail_mutex );
+ bdb->bi_cache.c_lrutail = ei;
+ ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_lru_mutex );
}
#ifdef NO_THREADS
return e1->bei_id - e2->bei_id;
}
+static int
+bdb_id_dup_err( void *v1, void *v2 )
+{
+ EntryInfo *e2 = v2;
+ e2->bei_lrunext = v1;
+ return -1;
+}
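
The callback above exploits `avl_insert()`'s duplicate hook: on a key collision the hook is invoked with both items (here `v1` is the already-inserted EntryInfo and `v2` the incoming one, which is what the consuming code below relies on), so stashing the winner in the loser's spare `bei_lrunext` field lets the caller recover the existing node without a second `avl_find`. A toy demonstration of that contract, with illustrative types (only the callback shape mirrors the code above):

```c
#include <assert.h>
#include <stdio.h>

/* Toy stand-ins; the real EntryInfo and avl_insert live in OpenLDAP. */
typedef struct toy {
	struct toy *scratch;	/* plays the role of bei_lrunext */
	int key;
} toy;

static int
toy_dup_err( void *existing, void *incoming )
{
	/* Smuggle the already-inserted item back to the caller,
	 * then return nonzero to signal "duplicate". */
	((toy *) incoming)->scratch = existing;
	return -1;
}

int
main( void )
{
	toy winner = { NULL, 42 };
	toy loser  = { NULL, 42 };

	/* Simulate what the tree code does on a key collision: it
	 * invokes the dup callback with (existing, incoming). */
	int rc = toy_dup_err( &winner, &loser );

	assert( rc != 0 );
	assert( loser.scratch == &winner );	/* no second lookup needed */
	printf( "duplicate resolved to key %d\n", loser.scratch->key );
	return 0;
}
```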
+
/* Create an entryinfo in the cache. Caller must release the locks later.
*/
static int
#endif
/* Add to cache ID tree */
- if (avl_insert( &bdb->bi_cache.c_idtree, ei2, bdb_id_cmp, avl_dup_error )) {
- EntryInfo *eix;
- eix = avl_find( bdb->bi_cache.c_idtree, ei2, bdb_id_cmp );
- bdb_cache_entryinfo_destroy( ei2 );
+ if (avl_insert( &bdb->bi_cache.c_idtree, ei2, bdb_id_cmp,
+ bdb_id_dup_err )) {
+ EntryInfo *eix = ei2->bei_lrunext;
+ bdb_cache_entryinfo_free( &bdb->bi_cache, ei2 );
ei2 = eix;
#ifdef BDB_HIER
/* It got freed above because its value was
ei->bei_parent->bei_ckids++;
#endif
}
- bdb_cache_lru_link( &bdb->bi_cache, ei2 );
*res = ei2;
return 0;
/* Insert this node into the ID tree */
ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
if ( avl_insert( &bdb->bi_cache.c_idtree, (caddr_t)ein,
- bdb_id_cmp, avl_dup_error ) ) {
+ bdb_id_cmp, bdb_id_dup_err ) ) {
+ EntryInfo *eix = ein->bei_lrunext;
/* Someone else created this node just before us.
* Free our new copy and use the existing one.
*/
- bdb_cache_entryinfo_destroy( ein );
- ein = (EntryInfo *)avl_find( bdb->bi_cache.c_idtree,
- (caddr_t) &ei, bdb_id_cmp );
+ bdb_cache_entryinfo_free( &bdb->bi_cache, ein );
+ ein = eix;
/* Link in any kids we've already processed */
if ( ei2 ) {
ein->bei_ckids++;
bdb_cache_entryinfo_unlock( ein );
}
- } else {
- bdb_cache_lru_link( &bdb->bi_cache, ein );
}
/* If this is the first time, save this node
bdb_cache_entryinfo_lock( ei2 );
ein->bei_parent = ei2;
+ avl_insert( &ei2->bei_kids, (caddr_t)ein, bdb_rdn_cmp,
+ avl_dup_error);
+ ei2->bei_ckids++;
+
/* Reset all the state info */
for (ein = eir; ein != ei2; ein=ein->bei_parent)
ein->bei_state &= ~CACHE_ENTRY_NOT_LINKED;
- avl_insert( &ei2->bei_kids, (caddr_t)ein, bdb_rdn_cmp,
- avl_dup_error);
- ei2->bei_ckids++;
bdb_cache_entryinfo_unlock( ei2 );
bdb_cache_entryinfo_lock( eir );
}
#endif
+/* This is best-effort only. If all entries in the cache are
+ * busy, they will all be kept. This is unlikely to happen
+ * unless the cache is very much smaller than the working set.
+ */
static void
bdb_cache_lru_purge( struct bdb_info *bdb )
{
DB_LOCK lock, *lockp;
- EntryInfo *elru, *elnext;
- int count, islocked;
+ EntryInfo *elru, *elnext = NULL;
+ int count, islocked, eimax;
- /* Don't bother if we can't get the lock */
- if ( ldap_pvt_thread_mutex_trylock( &bdb->bi_cache.lru_head_mutex ) )
- return;
+ /* Wait for the mutex; we're the only one trying to purge. */
+ ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_lru_mutex );
if ( bdb->bi_cache.c_cursize <= bdb->bi_cache.c_maxsize ) {
- ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );
+ ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_lru_mutex );
+ bdb->bi_cache.c_purging = 0;
return;
}
}
count = 0;
+
+ /* maximum number of EntryInfo leaves to cache. In slapcat
+ * we always free all leaf nodes.
+ */
+ if ( slapMode & SLAP_TOOL_READONLY )
+ eimax = 0;
+ else
+ eimax = bdb->bi_cache.c_maxsize * 4;
+
/* Look for an unused entry to remove */
- for (elru = bdb->bi_cache.c_lruhead; elru; elru = elnext ) {
+ for ( elru = bdb->bi_cache.c_lruhead; elru; elru = elnext ) {
elnext = elru->bei_lrunext;
- if ( ldap_pvt_thread_mutex_trylock( &elru->bei_kids_mutex ))
- continue;
+ if ( bdb_cache_entryinfo_trylock( elru ))
+ goto bottom;
/* This flag implements the clock replacement behavior */
if ( elru->bei_state & ( CACHE_ENTRY_REFERENCED )) {
elru->bei_state &= ~CACHE_ENTRY_REFERENCED;
bdb_cache_entryinfo_unlock( elru );
- continue;
+ goto bottom;
}
/* If this node is in the process of linking into the cache,
* or this node is being deleted, skip it.
*/
- if ( elru->bei_state & ( CACHE_ENTRY_NOT_LINKED |
- CACHE_ENTRY_DELETED | CACHE_ENTRY_LOADING )) {
+ if (( elru->bei_state & ( CACHE_ENTRY_NOT_LINKED |
+ CACHE_ENTRY_DELETED | CACHE_ENTRY_LOADING )) ||
+ elru->bei_finders > 0 ) {
bdb_cache_entryinfo_unlock( elru );
- continue;
+ goto bottom;
}
/* entryinfo is locked */
elru->bei_e = NULL;
count++;
}
- bdb_cache_entry_dbunlock( bdb, lockp );
-
- /* ITS#4010 if we're in slapcat, and this node is a leaf
- * node, free it.
- *
- * FIXME: we need to do this for slapd as well, (which is
- * why we compute bi_cache.c_leaves now) but at the moment
- * we can't because it causes unresolvable deadlocks.
+ bdb_cache_entry_db_unlock( bdb, lockp );
+
+ /*
+ * If it is a leaf node, and we're over the limit, free it.
*/
- if ( slapMode & SLAP_TOOL_READONLY ) {
- if ( !elru->bei_kids ) {
- bdb_cache_delete_internal( &bdb->bi_cache, elru, 0 );
- bdb_cache_delete_cleanup( &bdb->bi_cache, elru );
- islocked = 0;
- }
- /* Leave node on LRU list for a future pass */
- }
+ if ( elru->bei_kids ) {
+ /* Drop from list, we ignore it... */
+ LRU_DEL( &bdb->bi_cache, elru );
+ } else if ( bdb->bi_cache.c_leaves > eimax ) {
+ /* Too many leaf nodes, free this one */
+ bdb_cache_delete_internal( &bdb->bi_cache, elru, 0 );
+ bdb_cache_delete_cleanup( &bdb->bi_cache, elru );
+ islocked = 0;
+ } /* Leave on list until we need to free it */
}
if ( islocked )
ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_count_mutex );
break;
}
+bottom:
+ if ( elnext == bdb->bi_cache.c_lruhead )
+ break;
}
- bdb->bi_cache.c_lruhead = elru;
- ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );
+ bdb->bi_cache.c_lruhead = elnext;
+ ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_lru_mutex );
+ bdb->bi_cache.c_purging = 0;
}
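
A condensed illustration of the second-chance policy the purge loop applies, using a toy array instead of the EntryInfo circle and omitting all locking: each swept slot with its reference bit set is spared once and cleared, and the first unreferenced slot becomes the victim, just as `CACHE_ENTRY_REFERENCED` gates eviction above.

```c
#include <stdio.h>

#define NSLOTS 4

struct slot { int id, referenced; };

/* Sweep the "clock hand" until an unreferenced slot is found. */
static int
clock_evict( struct slot *s, int *hand )
{
	for ( ;; ) {
		struct slot *cur = &s[*hand];
		*hand = (*hand + 1) % NSLOTS;
		if ( cur->referenced ) {
			cur->referenced = 0;	/* second chance */
		} else {
			return cur->id;		/* victim */
		}
	}
}

int
main( void )
{
	struct slot s[NSLOTS] = {
		{ 1, 1 }, { 2, 0 }, { 3, 1 }, { 4, 1 }
	};
	int hand = 0;

	/* Slot 1 is referenced, so it is spared; slot 2 is evicted. */
	printf( "evicted id %d\n", clock_evict( s, &hand ) );
	return 0;
}
```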
EntryInfo *
(caddr_t) &ei, bdb_id_cmp );
if ( *eip ) {
/* If the lock attempt fails, the info is in use */
- if ( ldap_pvt_thread_mutex_trylock(
- &(*eip)->bei_kids_mutex )) {
+ if ( bdb_cache_entryinfo_trylock( *eip )) {
ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
/* If this node is being deleted, treat
* as if the delete has already finished
if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
rc = DB_NOTFOUND;
} else {
+ (*eip)->bei_finders++;
+ (*eip)->bei_state |= CACHE_ENTRY_REFERENCED;
/* Make sure only one thread tries to load the entry */
load1:
#ifdef SLAP_ZONE_ALLOC
load = 1;
(*eip)->bei_state |= CACHE_ENTRY_LOADING;
}
+
if ( islocked ) {
bdb_cache_entryinfo_unlock( *eip );
islocked = 0;
}
- rc = bdb_cache_entry_db_lock( bdb, locker, *eip, 0, 0, lock );
+ rc = bdb_cache_entry_db_lock( bdb, locker, *eip, load, 0, lock );
if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
rc = DB_NOTFOUND;
bdb_cache_entry_db_unlock( bdb, lock );
} else if ( rc == 0 ) {
if ( load ) {
- /* Give up original read lock, obtain write lock
- */
- if ( rc == 0 ) {
- rc = bdb_cache_entry_db_relock( bdb, locker,
- *eip, 1, 0, lock );
- }
- if ( rc == 0 && !ep) {
+ if ( !ep) {
rc = bdb_id2entry( op->o_bd, tid, locker, id, &ep );
}
if ( rc == 0 ) {
(*eip)->bei_zseq = *((ber_len_t *)ep - 2);
#endif
ep = NULL;
+ bdb_cache_lru_link( bdb, *eip );
}
- bdb_cache_entryinfo_lock( *eip );
- (*eip)->bei_state ^= CACHE_ENTRY_LOADING;
- bdb_cache_entryinfo_unlock( *eip );
if ( rc == 0 ) {
/* If we succeeded, downgrade back to a readlock. */
rc = bdb_cache_entry_db_relock( bdb, locker,
}
#endif
}
-
+ bdb_cache_entryinfo_lock( *eip );
+ (*eip)->bei_finders--;
+ if ( load )
+ (*eip)->bei_state ^= CACHE_ENTRY_LOADING;
+ bdb_cache_entryinfo_unlock( *eip );
}
}
}
if ( load ) {
ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_count_mutex );
bdb->bi_cache.c_cursize++;
- if ( bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize )
+ if ( bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize &&
+ !bdb->bi_cache.c_purging ) {
purge = 1;
+ bdb->bi_cache.c_purging = 1;
+ }
ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_count_mutex );
}
if ( purge )
ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_count_mutex );
++bdb->bi_cache.c_cursize;
- if ( bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize )
+ if ( bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize &&
+ !bdb->bi_cache.c_purging ) {
purge = 1;
+ bdb->bi_cache.c_purging = 1;
+ }
ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_count_mutex );
+ bdb_cache_lru_link( bdb, new );
+
if ( purge )
bdb_cache_lru_purge( bdb );
e->e_id, 0, 0 );
/* set lru mutex */
- ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_head_mutex );
+ ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_lru_mutex );
rc = bdb_cache_delete_internal( &bdb->bi_cache, e->e_private, 1 );
/* free lru mutex */
- ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );
+ ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_lru_mutex );
/* Leave entry info locked */
ei->bei_e = NULL;
}
- free( ei->bei_nrdn.bv_val );
- ei->bei_nrdn.bv_val = NULL;
-#ifdef BDB_HIER
- free( ei->bei_rdn.bv_val );
- ei->bei_rdn.bv_val = NULL;
- ei->bei_modrdns = 0;
- ei->bei_ckids = 0;
- ei->bei_dkids = 0;
-#endif
- ei->bei_parent = NULL;
- ei->bei_kids = NULL;
- ei->bei_lruprev = NULL;
-
- ldap_pvt_thread_mutex_lock( &cache->c_eifree_mutex );
- ei->bei_lrunext = cache->c_eifree;
- cache->c_eifree = ei;
- ldap_pvt_thread_mutex_unlock( &cache->c_eifree_mutex );
+ bdb_cache_entryinfo_free( cache, ei );
bdb_cache_entryinfo_unlock( ei );
}
if ( rc == 0 ){
/* lru */
- if ( e == cache->c_lruhead ) cache->c_lruhead = e->bei_lrunext;
- if ( e == cache->c_lrutail ) {
- ldap_pvt_thread_mutex_lock( &cache->lru_tail_mutex );
- if ( e == cache->c_lrutail ) cache->c_lrutail = e->bei_lruprev;
- ldap_pvt_thread_mutex_unlock( &cache->lru_tail_mutex );
- }
-
- if ( e->bei_lrunext ) e->bei_lrunext->bei_lruprev = e->bei_lruprev;
- if ( e->bei_lruprev ) e->bei_lruprev->bei_lrunext = e->bei_lrunext;
+ LRU_DEL( cache, e );
if ( e->bei_e ) {
ldap_pvt_thread_mutex_lock( &cache->c_count_mutex );
/* set cache write lock */
ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
/* set lru mutex */
- ldap_pvt_thread_mutex_lock( &cache->lru_tail_mutex );
+ ldap_pvt_thread_mutex_lock( &cache->c_lru_mutex );
Debug( LDAP_DEBUG_TRACE, "====> bdb_cache_release_all\n", 0, 0, 0 );
cache->c_dntree.bei_kids = NULL;
/* free lru mutex */
- ldap_pvt_thread_mutex_unlock( &cache->lru_tail_mutex );
+ ldap_pvt_thread_mutex_unlock( &cache->c_lru_mutex );
/* free cache write lock */
ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
}
{
EntryInfo *e;
- fprintf( stderr, "LRU queue (head to tail):\n" );
- for ( e = cache->c_lruhead; e != NULL; e = e->bei_lrunext ) {
- fprintf( stderr, "\trdn \"%20s\" id %ld\n",
- e->bei_nrdn.bv_val, e->bei_id );
+ fprintf( stderr, "LRU circle head: %p\n", cache->c_lruhead );
+ fprintf( stderr, "LRU circle (tail forward):\n" );
+ for ( e = cache->c_lrutail; ; ) {
+ fprintf( stderr, "\t%p, %p id %ld rdn \"%s\"\n",
+ e, e->bei_e, e->bei_id, e->bei_nrdn.bv_val );
+ e = e->bei_lrunext;
+ if ( e == cache->c_lrutail )
+ break;
}
- fprintf( stderr, "LRU queue (tail to head):\n" );
- for ( e = cache->c_lrutail; e != NULL; e = e->bei_lruprev ) {
- fprintf( stderr, "\trdn \"%20s\" id %ld\n",
- e->bei_nrdn.bv_val, e->bei_id );
+ fprintf( stderr, "LRU circle (tail backward):\n" );
+ for ( e = cache->c_lrutail; ; ) {
+ fprintf( stderr, "\t%p, %p id %ld rdn \"%s\"\n",
+ e, e->bei_e, e->bei_id, e->bei_nrdn.bv_val );
+ e = e->bei_lruprev;
+ if ( e == cache->c_lrutail )
+ break;
}
}
#endif
}
}
+/* free up any keys used by the main thread */
+void
+bdb_locker_flush( DB_ENV *env )
+{
+ void *data;
+ void *ctx = ldap_pvt_thread_pool_context();
+
+ if ( !ldap_pvt_thread_pool_getkey( ctx, env, &data, NULL ) ) {
+ ldap_pvt_thread_pool_setkey( ctx, env, NULL, NULL );
+ bdb_locker_id_free( env, data );
+ }
+}
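
`bdb_locker_flush()` fetches the per-thread locker stored under the `DB_ENV` pointer, detaches it from the pool's key store, and only then frees it. A single-slot mock of that fetch/detach/free ordering (`toy_getkey`/`toy_setkey` are illustrative stand-ins, not the `ldap_pvt_thread_pool` API):

```c
#include <stdio.h>
#include <stdlib.h>

/* One-slot toy "thread key" store (illustrative only). */
static void *slot_key, *slot_data;

static int
toy_getkey( void *key, void **data )
{
	if ( key != slot_key ) return -1;
	*data = slot_data;
	return 0;
}

static void
toy_setkey( void *key, void *data )
{
	slot_key = data ? key : NULL;
	slot_data = data;
}

static void
toy_flush( void *key )
{
	void *data;
	if ( !toy_getkey( key, &data ) ) {
		toy_setkey( key, NULL );	/* detach before freeing */
		free( data );
	}
}

int
main( void )
{
	int env;	/* stands in for the DB_ENV pointer used as key */
	toy_setkey( &env, malloc( 16 ) );
	toy_flush( &env );	/* slot is now empty, memory released */
	printf( "flushed\n" );
	return 0;
}
```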
+
int
bdb_locker_id( Operation *op, DB_ENV *env, u_int32_t *locker )
{