partial revert - keep acquiring BDB lock in lru_purge.
author Howard Chu <hyc@openldap.org>
Mon, 1 Jan 2007 01:00:19 +0000 (01:00 +0000)
committer Howard Chu <hyc@openldap.org>
Mon, 1 Jan 2007 01:00:19 +0000 (01:00 +0000)
servers/slapd/back-bdb/back-bdb.h
servers/slapd/back-bdb/cache.c
servers/slapd/back-bdb/init.c

diff --git a/servers/slapd/back-bdb/back-bdb.h b/servers/slapd/back-bdb/back-bdb.h
index ae25890a90f1d096740b0af0a367ac70237cb852..f25f247da107b8be5fd097837956a3fcd3c5ba8e 100644
--- a/servers/slapd/back-bdb/back-bdb.h
+++ b/servers/slapd/back-bdb/back-bdb.h
@@ -127,9 +127,10 @@ typedef struct bdb_cache {
        int             c_minfree;
        int             c_eiused;       /* EntryInfo's in use */
        int             c_leaves;       /* EntryInfo leaf nodes */
+       u_int32_t       c_locker;       /* used by lru cleaner */
        EntryInfo       c_dntree;
        EntryInfo       *c_eifree;      /* free list */
-       Avlnode         *c_idtree;
+       Avlnode         *c_idtree;
        EntryInfo       *c_lruhead;     /* lru - add accessed entries here */
        EntryInfo       *c_lrutail;     /* lru - rem lru entries from here */
        ldap_pvt_thread_rdwr_t c_rwlock;
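
Note: the new c_locker field holds a Berkeley DB locker ID reserved for the cache's LRU cleaner. It is allocated once when the database environment is opened (see the init.c hunk below) and passed to the per-entry lock calls added to bdb_cache_lru_purge (see the cache.c hunk and the sketch that follows it).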
diff --git a/servers/slapd/back-bdb/cache.c b/servers/slapd/back-bdb/cache.c
index 160de89ae19e97ce7b86889bf43011c540c1c3ef..77b816b1e85a229f497f4206fac92ed5a3779c41 100644
--- a/servers/slapd/back-bdb/cache.c
+++ b/servers/slapd/back-bdb/cache.c
@@ -556,6 +556,7 @@ int hdb_cache_load(
 static void
 bdb_cache_lru_purge( struct bdb_info *bdb )
 {
+       DB_LOCK         lock, *lockp;
        EntryInfo *elru, *elnext;
        int count, islocked;
 
@@ -568,6 +569,12 @@ bdb_cache_lru_purge( struct bdb_info *bdb )
                return;
        }
 
+       if ( bdb->bi_cache.c_locker ) {
+               lockp = &lock;
+       } else {
+               lockp = NULL;
+       }
+
        count = 0;
        /* Look for an unused entry to remove */
        for (elru = bdb->bi_cache.c_lruhead; elru; elru = elnext ) {
@@ -592,33 +599,43 @@ bdb_cache_lru_purge( struct bdb_info *bdb )
                        continue;
                }
 
+               /* entryinfo is locked */
                islocked = 1;
 
-               /* Free entry for this node if it's present */
-               if ( elru->bei_e ) {
-                       elru->bei_e->e_private = NULL;
+               /* If we can successfully writelock it, then
+                * the object is idle.
+                */
+               if ( bdb_cache_entry_db_lock( bdb->bi_dbenv,
+                       bdb->bi_cache.c_locker, elru, 1, 1, lockp ) == 0 ) {
+
+                       /* Free entry for this node if it's present */
+                       if ( elru->bei_e ) {
+                               elru->bei_e->e_private = NULL;
 #ifdef SLAP_ZONE_ALLOC
-                       bdb_entry_return( bdb, elru->bei_e, elru->bei_zseq );
+                               bdb_entry_return( bdb, elru->bei_e, elru->bei_zseq );
 #else
-                       bdb_entry_return( elru->bei_e );
+                               bdb_entry_return( elru->bei_e );
 #endif
-                       elru->bei_e = NULL;
-                       count++;
-               }
-               /* ITS#4010 if we're in slapcat, and this node is a leaf
-                * node, free it.
-                *
-                * FIXME: we need to do this for slapd as well, (which is
-                * why we compute bi_cache.c_leaves now) but at the moment
-                * we can't because it causes unresolvable deadlocks. 
-                */
-               if ( slapMode & SLAP_TOOL_READONLY ) {
-                       if ( !elru->bei_kids ) {
-                               bdb_cache_delete_internal( &bdb->bi_cache, elru, 0 );
-                               bdb_cache_delete_cleanup( &bdb->bi_cache, elru );
-                               islocked = 0;
+                               elru->bei_e = NULL;
+                               count++;
+                       }
+                       bdb_cache_entry_dbunlock( bdb, lockp );
+
+                       /* ITS#4010 if we're in slapcat, and this node is a leaf
+                        * node, free it.
+                        *
+                        * FIXME: we need to do this for slapd as well, (which is
+                        * why we compute bi_cache.c_leaves now) but at the moment
+                        * we can't because it causes unresolvable deadlocks. 
+                        */
+                       if ( slapMode & SLAP_TOOL_READONLY ) {
+                               if ( !elru->bei_kids ) {
+                                       bdb_cache_delete_internal( &bdb->bi_cache, elru, 0 );
+                                       bdb_cache_delete_cleanup( &bdb->bi_cache, elru );
+                                       islocked = 0;
+                               }
+                               /* Leave node on LRU list for a future pass */
                        }
-                       /* Leave node on LRU list for a future pass */
                }
 
                if ( islocked )
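
With this change the purge loop frees an entry only when it can immediately obtain a write lock on it, i.e. only when no other thread is currently using that entry; if the lock is not granted, the node is simply left on the LRU list for a later pass. Below is a minimal sketch of the underlying Berkeley DB call pattern, assuming a BDB 4.x environment; the lockobj DBT and the probe_entry_idle name are illustrative (the real bdb_cache_entry_db_lock helper builds the lock object from the entry's ID), while lock_get/lock_put, DB_LOCK_NOWAIT and DB_LOCK_WRITE are standard Berkeley DB lock API.

	#include <db.h>

	/* Try to prove an entry is idle by taking a non-blocking write lock on it.
	 * Returns 0 if the entry was idle (lock granted and released again),
	 * DB_LOCK_NOTGRANTED if some other locker still holds it.
	 */
	static int
	probe_entry_idle( DB_ENV *env, u_int32_t locker, DBT *lockobj )
	{
		DB_LOCK lock;
		int rc;

		rc = env->lock_get( env, locker, DB_LOCK_NOWAIT,
			lockobj, DB_LOCK_WRITE, &lock );
		if ( rc == 0 ) {
			/* nobody else holds the entry: the caller may free its Entry */
			env->lock_put( env, &lock );
		}
		return rc;
	}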
diff --git a/servers/slapd/back-bdb/init.c b/servers/slapd/back-bdb/init.c
index 194c833cffc004fce064f3879f51a82dd0326de0..624f157e77d0a45081c9c5911ec60d33517c1d23 100644
--- a/servers/slapd/back-bdb/init.c
+++ b/servers/slapd/back-bdb/init.c
@@ -423,6 +423,10 @@ bdb_db_open( BackendDB *be )
                goto fail;
        }
 
+       if ( !quick ) {
+               XLOCK_ID(bdb->bi_dbenv, &bdb->bi_cache.c_locker);
+       }
+
        /* monitor setup */
        rc = bdb_monitor_db_open( be );
        if ( rc != 0 ) {
@@ -486,6 +490,12 @@ bdb_db_close( BackendDB *be )
 
        /* close db environment */
        if( bdb->bi_dbenv ) {
+               /* Free cache locker if we enabled locking */
+               if ( !( slapMode & SLAP_TOOL_QUICK )) {
+                       XLOCK_ID_FREE(bdb->bi_dbenv, bdb->bi_cache.c_locker);
+                       bdb->bi_cache.c_locker = 0;
+               }
+
                /* force a checkpoint, but not if we were ReadOnly,
                 * and not in Quick mode since there are no transactions there.
                 */
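
For completeness, a sketch of the locker-ID lifecycle that the XLOCK_ID / XLOCK_ID_FREE wrappers manage, assuming a Berkeley DB 4.x environment handle dbenv; the comments mark where the calls land in this commit.

	u_int32_t locker = 0;

	dbenv->lock_id( dbenv, &locker );      /* bdb_db_open, skipped in quick mode */
	/* ... the ID is stored in bdb->bi_cache.c_locker and reused for every
	 * non-blocking entry lock taken by bdb_cache_lru_purge ... */
	dbenv->lock_id_free( dbenv, locker );  /* bdb_db_close, before the env closes */

When slapd runs in quick/tool mode no locker is allocated, c_locker stays 0, and lru_purge passes a NULL DB_LOCK pointer instead.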