#endif
}
+/* Temporary debug instrumentation; these should move behind SLAPD_UNUSED */
+static ldap_pvt_thread_mutex_t bdb_ncmutex;
+static int bdb_notcached;
+static int bdb_ncfreed;
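+/* bdb_notcached counts entries seen in the NOT_CACHED state on
+ * release; bdb_ncfreed counts those actually freed. NOTE (assumption):
+ * bdb_ncmutex is not statically initialized here, so the cache init
+ * code (not shown in this hunk) presumably must call
+ *   ldap_pvt_thread_mutex_init( &bdb_ncmutex );
+ * before these paths run.
+ */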
+
void
bdb_cache_return_entry_rw( struct bdb_info *bdb, Entry *e,
int rw, DB_LOCK *lock )
int free = 0;
ei = e->e_private;
- if ( ei &&
- ( ei->bei_state & CACHE_ENTRY_NOT_CACHED ) &&
- ( bdb_cache_entryinfo_trylock( ei ) == 0 )) {
+ if ( ei && ( ei->bei_state & CACHE_ENTRY_NOT_CACHED )) {
+ ldap_pvt_thread_mutex_lock(&bdb_ncmutex);
+ bdb_notcached++;
+ bdb_cache_entryinfo_lock( ei );
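+		/* Re-check the flag under the entryinfo lock: another
+		 * thread may have found the entry and cleared
+		 * CACHE_ENTRY_NOT_CACHED between the unlocked test
+		 * above and acquiring the lock here.
+		 */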
if ( ei->bei_state & CACHE_ENTRY_NOT_CACHED ) {
/* Releasing the entry can only be done when
			 * we know that nobody else is using it, i.e. we
ei->bei_e = NULL;
ei->bei_state ^= CACHE_ENTRY_NOT_CACHED;
free = 1;
+ bdb_ncfreed++;
}
bdb_cache_entryinfo_unlock( ei );
+ ldap_pvt_thread_mutex_unlock(&bdb_ncmutex);
}
bdb_cache_entry_db_unlock( bdb, lock );
if ( free ) {
if ( !(*eip)->bei_e && !((*eip)->bei_state & CACHE_ENTRY_LOADING)) {
load = 1;
(*eip)->bei_state |= CACHE_ENTRY_LOADING;
- }
-
- if ( !load ) {
- /* Clear the uncached state if we are not
- * loading it, i.e it is already cached or
- * another thread is currently loading it.
- */
- if ( (*eip)->bei_state & CACHE_ENTRY_NOT_CACHED ) {
- (*eip)->bei_state &= ~CACHE_ENTRY_NOT_CACHED;
- ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_count_mutex );
- ++bdb->bi_cache.c_cursize;
- ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_count_mutex );
- }
- flag &= ~ID_NOCACHE;
+ flag |= ID_CHKPURGE;
}
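+		/* ID_CHKPURGE notes that this thread is loading the entry
+		 * into the cache and must therefore do the c_cursize
+		 * accounting and purge check once the load completes.
+		 */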
if ( flag & ID_LOCKED ) {
ldap_pvt_thread_yield();
bdb_fix_dn( ep, 0 );
#endif
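+		/* Take the entryinfo lock unconditionally before
+		 * publishing the entry; the trylock used previously
+		 * could fail and skip the NOT_CACHED bookkeeping.
+		 */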
+ bdb_cache_entryinfo_lock( *eip );
+
(*eip)->bei_e = ep;
#ifdef SLAP_ZONE_ALLOC
(*eip)->bei_zseq = *((ber_len_t *)ep - 2);
#endif
ep = NULL;
- bdb_cache_lru_link( bdb, *eip );
- if (( flag & ID_NOCACHE ) &&
- ( bdb_cache_entryinfo_trylock( *eip ) == 0 )) {
+ if ( flag & ID_NOCACHE ) {
			/* Set the not-cached state only if no other thread
* found the info while we were loading the entry.
*/
- if ( (*eip)->bei_finders == 1 )
+ if ( (*eip)->bei_finders == 1 ) {
(*eip)->bei_state |= CACHE_ENTRY_NOT_CACHED;
- bdb_cache_entryinfo_unlock( *eip );
+ flag ^= ID_CHKPURGE;
+ }
}
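+			/* The XOR clears ID_CHKPURGE (set when the load
+			 * began): an entry left NOT_CACHED must not be
+			 * counted toward c_cursize below.
+			 */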
+ bdb_cache_entryinfo_unlock( *eip );
+ bdb_cache_lru_link( bdb, *eip );
}
if ( rc == 0 ) {
/* If we succeeded, downgrade back to a readlock. */
}
bdb_cache_entryinfo_lock( *eip );
(*eip)->bei_finders--;
- if ( load )
+ if ( load ) {
(*eip)->bei_state ^= CACHE_ENTRY_LOADING;
+ } else {
+ /* Clear the uncached state if we didn't
+		 * load it, i.e. it was already cached.
+ */
+ if ( (*eip)->bei_state & CACHE_ENTRY_NOT_CACHED ) {
+ (*eip)->bei_state ^= CACHE_ENTRY_NOT_CACHED;
+ flag |= ID_CHKPURGE;
+ }
+ }
bdb_cache_entryinfo_unlock( *eip );
}
}
if ( rc == 0 ) {
int purge = 0;
- if (( load && !( flag & ID_NOCACHE )) || bdb->bi_cache.c_eimax ) {
+ if (( flag & ID_CHKPURGE ) || bdb->bi_cache.c_eimax ) {
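+			/* The accounting decision was made earlier, under the
+			 * entryinfo lock, and is carried here in ID_CHKPURGE
+			 * instead of being recomputed from load and ID_NOCACHE.
+			 */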
ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_count_mutex );
- if ( load && !( flag & ID_NOCACHE )) {
+ if ( flag & ID_CHKPURGE ) {
bdb->bi_cache.c_cursize++;
if ( !bdb->bi_cache.c_purging && bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize ) {
purge = 1;
}
#ifdef LDAP_DEBUG
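+/* Debug helper: walk the circular LRU list in both directions,
+ * counting entryInfos, resident entries, and NOT_CACHED entries,
+ * to verify that the lrunext/lruprev links are consistent.
+ */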
+static void
+bdb_lru_count( Cache *cache )
+{
+ EntryInfo *e;
+ int ei = 0, ent = 0, nc = 0;
+
+	/* Bail out if the LRU list is empty */
+	if ( cache->c_lrutail == NULL )
+		return;
+
+	for ( e = cache->c_lrutail; ; ) {
+ ei++;
+ if ( e->bei_e ) {
+ ent++;
+ if ( e->bei_state & CACHE_ENTRY_NOT_CACHED )
+ nc++;
+			fprintf( stderr, "ei %d entry %p dn %s\n", ei, (void *) e->bei_e, e->bei_e->e_name.bv_val );
+ }
+ e = e->bei_lrunext;
+ if ( e == cache->c_lrutail )
+ break;
+ }
+ fprintf( stderr, "counted %d entryInfos and %d entries, %d notcached\n",
+ ei, ent, nc );
+ ei = 0;
+ for ( e = cache->c_lrutail; ; ) {
+ ei++;
+ e = e->bei_lruprev;
+ if ( e == cache->c_lrutail )
+ break;
+ }
+ fprintf( stderr, "counted %d entryInfos (on lruprev)\n", ei );
+}
+
#ifdef SLAPD_UNUSED
static void
bdb_lru_print( Cache *cache )