#ifdef BDB_HIER
#define bdb_cache_lru_purge hdb_cache_lru_purge
#endif
-static void bdb_cache_lru_purge( struct bdb_info *bdb, uint32_t locker );
+static void bdb_cache_lru_purge( struct bdb_info *bdb );
static int bdb_cache_delete_internal(Cache *cache, EntryInfo *e, int decr);
#ifdef LDAP_DEBUG
}
ei->bei_state = CACHE_ENTRY_REFERENCED;
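+	/* bei_finders counts threads that have found this EntryInfo and
+	 * still intend to use its entry; the LRU purge skips any node
+	 * whose count is nonzero.
+	 */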
+ ei->bei_finders = 0;
return ei;
}
bdb_cache_lru_link( Cache *cache, EntryInfo *ei )
{
/* Insert into circular LRU list */
- ldap_pvt_thread_mutex_lock( &cache->lru_tail_mutex );
+ ldap_pvt_thread_mutex_lock( &cache->c_lru_mutex );
	ei->bei_lruprev = cache->c_lrutail;
	if ( cache->c_lrutail ) {
		ei->bei_lrunext = cache->c_lrutail->bei_lrunext;
		cache->c_lrutail->bei_lrunext = ei;
		if ( ei->bei_lrunext )
			ei->bei_lrunext->bei_lruprev = ei;
	} else {
		ei->bei_lrunext = NULL;
		cache->c_lruhead = ei;
	}
	cache->c_lrutail = ei;
- ldap_pvt_thread_mutex_unlock( &cache->lru_tail_mutex );
+ ldap_pvt_thread_mutex_unlock( &cache->c_lru_mutex );
}
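+/* Note: the former lru_head_mutex/lru_tail_mutex pair is merged into a
+ * single c_lru_mutex. On a circular list an insert at the tail may also
+ * have to update the head, so one lock covering both ends is simpler
+ * and removes the trylock-and-recheck dance the delete path needed.
+ */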
#ifdef NO_THREADS
#endif
static void
-bdb_cache_lru_purge( struct bdb_info *bdb, uint32_t locker )
+bdb_cache_lru_purge( struct bdb_info *bdb )
{
- DB_LOCK lock;
+ DB_LOCK lock, *lockp;
EntryInfo *elru, *elnext;
- int count, islocked;
+ int i, count, islocked, tests;
/* Don't bother if we can't get the lock */
- if ( ldap_pvt_thread_mutex_trylock( &bdb->bi_cache.lru_head_mutex ) )
+ if ( ldap_pvt_thread_mutex_trylock( &bdb->bi_cache.c_lru_mutex ) )
return;
if ( bdb->bi_cache.c_cursize <= bdb->bi_cache.c_maxsize ) {
- ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );
+ ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_lru_mutex );
return;
}
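+	/* In tool/quick mode no cache locker is allocated (c_locker == 0),
+	 * so pass a NULL lockp and let bdb_cache_entry_db_lock skip DB
+	 * locking; this assumes it treats a NULL lock as a no-op.
+	 */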
+ if ( bdb->bi_cache.c_locker ) {
+ lockp = &lock;
+ } else {
+ lockp = NULL;
+ }
+
count = 0;
+
+ /* Give up after two loops around the circle */
+ tests = bdb->bi_cache.c_cursize * 2;
+
/* Look for an unused entry to remove */
- for (elru = bdb->bi_cache.c_lruhead; elru; elru = elnext ) {
+ for ( i = 0, elru = bdb->bi_cache.c_lruhead; i < tests;
+ i++, elru = elnext ) {
elnext = elru->bei_lrunext;
		if ( ldap_pvt_thread_mutex_trylock( &elru->bei_kids_mutex ))
			continue;

		/* Skip entries that are being created, deleted, or loaded,
		 * that still have active finders, or that have no entry
		 * left to free.
		 */
if (( elru->bei_state & ( CACHE_ENTRY_NOT_LINKED |
CACHE_ENTRY_DELETED | CACHE_ENTRY_LOADING )) ||
- !elru->bei_e ) {
+ elru->bei_finders > 0 || !elru->bei_e ) {
bdb_cache_entryinfo_unlock( elru );
continue;
}
/* If we can successfully writelock it, then
* the object is idle.
*/
- if ( bdb_cache_entry_db_lock( bdb, locker, elru, 1, 1, &lock ) == 0 ) {
+ if ( bdb_cache_entry_db_lock( bdb,
+ bdb->bi_cache.c_locker, elru, 1, 1, lockp ) == 0 ) {
/* Free entry for this node if it's present */
if ( elru->bei_e ) {
elru->bei_e = NULL;
count++;
}
- bdb_cache_entry_db_unlock( bdb, &lock );
+ bdb_cache_entry_db_unlock( bdb, lockp );
			/* ITS#4010 if we're in slapcat, and this node is a leaf
			 * node, free it.
			 */
		}
bdb->bi_cache.c_lruhead = elnext;
- ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );
+ ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_lru_mutex );
}
EntryInfo *
if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
rc = DB_NOTFOUND;
} else {
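+			/* Announce our interest before any locks are dropped:
+			 * a nonzero bei_finders keeps the LRU purge from
+			 * discarding this entry while we are loading it.
+			 */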
+ (*eip)->bei_finders++;
/* Make sure only one thread tries to load the entry */
load1:
#ifdef SLAP_ZONE_ALLOC
load = 1;
(*eip)->bei_state |= CACHE_ENTRY_LOADING;
}
+
if ( islocked ) {
bdb_cache_entryinfo_unlock( *eip );
islocked = 0;
/* Otherwise, release the lock. */
bdb_cache_entry_db_unlock( bdb, lock );
}
- bdb_cache_entryinfo_lock( *eip );
- (*eip)->bei_state ^= CACHE_ENTRY_LOADING;
- bdb_cache_entryinfo_unlock( *eip );
} else if ( !(*eip)->bei_e ) {
			/* Some other thread is trying to load the entry,
			 * wait for it to finish.
			 */
	}
#endif
}
-
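+	/* Done with the entry: drop our finder reference, and if we were
+	 * the loading thread, clear the LOADING flag so waiters can
+	 * proceed. Both updates happen under the entryinfo lock.
+	 */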
+ bdb_cache_entryinfo_lock( *eip );
+ (*eip)->bei_finders--;
+ if ( load )
+ (*eip)->bei_state ^= CACHE_ENTRY_LOADING;
+ bdb_cache_entryinfo_unlock( *eip );
}
}
}
ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_count_mutex );
}
if ( purge )
- bdb_cache_lru_purge( bdb, locker );
+ bdb_cache_lru_purge( bdb );
}
#ifdef SLAP_ZONE_ALLOC
ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_count_mutex );
if ( purge )
- bdb_cache_lru_purge( bdb, locker );
+ bdb_cache_lru_purge( bdb );
return rc;
}
e->e_id, 0, 0 );
/* set lru mutex */
- ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_head_mutex );
+ ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_lru_mutex );
rc = bdb_cache_delete_internal( &bdb->bi_cache, e->e_private, 1 );
/* free lru mutex */
- ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );
+ ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_lru_mutex );
/* Leave entry info locked */
if ( rc == 0 ){
/* lru */
if ( e == cache->c_lruhead ) cache->c_lruhead = e->bei_lrunext;
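+	/* The caller now holds the single c_lru_mutex for the whole
+	 * operation, so the tail update no longer needs the old
+	 * lock-and-recheck under lru_tail_mutex.
+	 */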
- if ( e == cache->c_lrutail ) {
- ldap_pvt_thread_mutex_lock( &cache->lru_tail_mutex );
- if ( e == cache->c_lrutail ) cache->c_lrutail = e->bei_lruprev;
- ldap_pvt_thread_mutex_unlock( &cache->lru_tail_mutex );
- }
+ if ( e == cache->c_lrutail ) cache->c_lrutail = e->bei_lruprev;
if ( e->bei_lrunext ) e->bei_lrunext->bei_lruprev = e->bei_lruprev;
if ( e->bei_lruprev ) e->bei_lruprev->bei_lrunext = e->bei_lrunext;
/* set cache write lock */
ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
/* set lru mutex */
- ldap_pvt_thread_mutex_lock( &cache->lru_tail_mutex );
+ ldap_pvt_thread_mutex_lock( &cache->c_lru_mutex );
Debug( LDAP_DEBUG_TRACE, "====> bdb_cache_release_all\n", 0, 0, 0 );
cache->c_dntree.bei_kids = NULL;
/* free lru mutex */
- ldap_pvt_thread_mutex_unlock( &cache->lru_tail_mutex );
+ ldap_pvt_thread_mutex_unlock( &cache->c_lru_mutex );
/* free cache write lock */
ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
}
#ifdef BDB_HIER
ldap_pvt_thread_mutex_init( &bdb->bi_modrdns_mutex );
#endif
- ldap_pvt_thread_mutex_init( &bdb->bi_cache.lru_head_mutex );
- ldap_pvt_thread_mutex_init( &bdb->bi_cache.lru_tail_mutex );
+ ldap_pvt_thread_mutex_init( &bdb->bi_cache.c_lru_mutex );
ldap_pvt_thread_mutex_init( &bdb->bi_cache.c_count_mutex );
ldap_pvt_thread_mutex_init( &bdb->bi_cache.c_eifree_mutex );
ldap_pvt_thread_mutex_init( &bdb->bi_cache.c_dntree.bei_kids_mutex );
goto fail;
}
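+	/* Allocate one long-lived BDB locker ID for the cache's own entry
+	 * locks (used by the LRU purge). Skipped in quick mode, which runs
+	 * without DB locking; XLOCK_ID is assumed to wrap lock_id() with
+	 * error handling, as elsewhere in back-bdb.
+	 */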
+ if ( !quick ) {
+ XLOCK_ID(bdb->bi_dbenv, &bdb->bi_cache.c_locker);
+ }
+
/* monitor setup */
rc = bdb_monitor_db_open( be );
if ( rc != 0 ) {
/* close db environment */
if( bdb->bi_dbenv ) {
+ /* Free cache locker if we enabled locking */
+ if ( !( slapMode & SLAP_TOOL_QUICK )) {
+ XLOCK_ID_FREE(bdb->bi_dbenv, bdb->bi_cache.c_locker);
+ bdb->bi_cache.c_locker = 0;
+ }
+
/* force a checkpoint, but not if we were ReadOnly,
* and not in Quick mode since there are no transactions there.
*/
bdb_attr_index_destroy( bdb );
ldap_pvt_thread_rdwr_destroy ( &bdb->bi_cache.c_rwlock );
- ldap_pvt_thread_mutex_destroy( &bdb->bi_cache.lru_head_mutex );
- ldap_pvt_thread_mutex_destroy( &bdb->bi_cache.lru_tail_mutex );
+ ldap_pvt_thread_mutex_destroy( &bdb->bi_cache.c_lru_mutex );
ldap_pvt_thread_mutex_destroy( &bdb->bi_cache.c_count_mutex );
ldap_pvt_thread_mutex_destroy( &bdb->bi_cache.c_eifree_mutex );
ldap_pvt_thread_mutex_destroy( &bdb->bi_cache.c_dntree.bei_kids_mutex );