static void
 bdb_cache_lru_purge( struct bdb_info *bdb )
 {
+       DB_LOCK         lock, *lockp;
        EntryInfo *elru, *elnext;
        int count, islocked;
 
                return;
        }
 
+       /* DB-level entry locks are only used when a cache locker ID has
+        * been set up (done below when the env is opened, skipped in
+        * quick mode).
+        */
+       if ( bdb->bi_cache.c_locker ) {
+               lockp = &lock;
+       } else {
+               lockp = NULL;
+       }
+
        count = 0;
        /* Look for an unused entry to remove */
        for (elru = bdb->bi_cache.c_lruhead; elru; elru = elnext ) {
                        continue;
                }
 
+               /* entryinfo is locked */
                islocked = 1;
 
-               /* Free entry for this node if it's present */
-               if ( elru->bei_e ) {
-                       elru->bei_e->e_private = NULL;
+               /* If we can successfully writelock it, then
+                * the object is idle.
+                */
+               if ( bdb_cache_entry_db_lock( bdb->bi_dbenv,
+                       bdb->bi_cache.c_locker, elru, 1, 1, lockp ) == 0 ) {
+
+                       /* Free entry for this node if it's present */
+                       if ( elru->bei_e ) {
+                               elru->bei_e->e_private = NULL;
 #ifdef SLAP_ZONE_ALLOC
-                       bdb_entry_return( bdb, elru->bei_e, elru->bei_zseq );
+                               bdb_entry_return( bdb, elru->bei_e, elru->bei_zseq );
 #else
-                       bdb_entry_return( elru->bei_e );
+                               bdb_entry_return( elru->bei_e );
 #endif
-                       elru->bei_e = NULL;
-                       count++;
-               }
-               /* ITS#4010 if we're in slapcat, and this node is a leaf
-                * node, free it.
-                *
-                * FIXME: we need to do this for slapd as well, (which is
-                * why we compute bi_cache.c_leaves now) but at the moment
-                * we can't because it causes unresolvable deadlocks. 
-                */
-               if ( slapMode & SLAP_TOOL_READONLY ) {
-                       if ( !elru->bei_kids ) {
-                               bdb_cache_delete_internal( &bdb->bi_cache, elru, 0 );
-                               bdb_cache_delete_cleanup( &bdb->bi_cache, elru );
-                               islocked = 0;
+                               elru->bei_e = NULL;
+                               count++;
+                       }
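+                       /* Done with the entry itself; drop its DB lock. The
+                        * entryinfo lock is still held and is released via
+                        * the islocked check below.
+                        */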
+                       bdb_cache_entry_db_unlock( bdb->bi_dbenv, lockp );
+
+                       /* ITS#4010 if we're in slapcat, and this node is a leaf
+                        * node, free it.
+                        *
+                        * FIXME: we need to do this for slapd as well, (which is
+                        * why we compute bi_cache.c_leaves now) but at the moment
+                        * we can't because it causes unresolvable deadlocks. 
+                        */
+                       if ( slapMode & SLAP_TOOL_READONLY ) {
+                               if ( !elru->bei_kids ) {
+                                       bdb_cache_delete_internal( &bdb->bi_cache, elru, 0 );
+                                       bdb_cache_delete_cleanup( &bdb->bi_cache, elru );
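+                                       /* The entryinfo has been deleted and
+                                        * its lock is no longer ours to
+                                        * release, so don't unlock it below.
+                                        */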
+                                       islocked = 0;
+                               }
+                               /* Leave node on LRU list for a future pass */
                        }
-                       /* Leave node on LRU list for a future pass */
                }
 
                if ( islocked )
 
                goto fail;
        }
 
+       if ( !quick ) {
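+               /* Obtain a locker ID for the cache's own entry locks;
+                * quick mode runs without locking, so it needs none.
+                * The matching XLOCK_ID_FREE is done at close time below.
+                */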
+               XLOCK_ID(bdb->bi_dbenv, &bdb->bi_cache.c_locker);
+       }
+
        /* monitor setup */
        rc = bdb_monitor_db_open( be );
        if ( rc != 0 ) {
 
        /* close db environment */
        if( bdb->bi_dbenv ) {
+               /* Free cache locker if we enabled locking */
+               if ( !( slapMode & SLAP_TOOL_QUICK )) {
+                       XLOCK_ID_FREE(bdb->bi_dbenv, bdb->bi_cache.c_locker);
+                       bdb->bi_cache.c_locker = 0;
+               }
+
                /* force a checkpoint, but not if we were ReadOnly,
                 * and not in Quick mode since there are no transactions there.
                 */