+#ifdef BDB_HIER
+/* Walk up the tree from a child node, looking for an ID that's already
+ * been linked into the cache.
+ *
+ * op, txn: operation and transaction passed through to hdb_dn2id_parent().
+ * id: ID of the entry whose ancestor chain is to be loaded.
+ * res: on success, set to the EntryInfo created for "id" itself. It is
+ * returned with its entryinfo mutex held (locked below and never
+ * unlocked in this function) - the caller is expected to release it.
+ *
+ * Returns 0 on success, else the hdb_dn2id_parent() error code.
+ *
+ * Each ancestor not yet present in the cache gets a fresh EntryInfo
+ * flagged CACHE_ENTRY_NOT_LINKED; once an already-cached ancestor (or
+ * the database root) is reached, the whole chain is linked in and the
+ * flags are cleared in a single pass.
+ */
+static int
+hdb_cache_find_parent(
+ Operation *op,
+ DB_TXN *txn,
+ ID id,
+ EntryInfo **res )
+{
+ struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
+ EntryInfo ei, eip, *ei2 = NULL, *ein = NULL, *eir = NULL;
+ char ndn[SLAP_LDAPDN_MAXLEN];
+ ID parent;
+ int rc;
+ int addlru = 1;
+
+ /* NOTE(review): ndn, parent, and addlru are never read anywhere in
+ * this function as shown - presumably leftovers from an earlier
+ * version; confirm against the full file and remove if truly dead.
+ */
+
+ ei.bei_id = id;
+ ei.bei_kids = NULL;
+
+ /* One generation per iteration, moving rootward until linked. */
+ for (;;) {
+ rc = hdb_dn2id_parent( op, txn, &ei, &eip.bei_id );
+ if ( rc ) break;
+
+ /* Save the previous node, if any */
+ ei2 = ein;
+
+ /* Create a new node for the current ID */
+ ein = bdb_cache_entryinfo_new( &bdb->bi_cache );
+ ein->bei_id = ei.bei_id;
+ ein->bei_kids = ei.bei_kids;
+ ein->bei_nrdn = ei.bei_nrdn;
+ ein->bei_rdn = ei.bei_rdn;
+
+ /* This node is not fully connected yet */
+ ein->bei_state = CACHE_ENTRY_NOT_LINKED;
+
+ /* Insert this node into the ID tree */
+ ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
+ if ( avl_insert( &bdb->bi_cache.c_idtree, (caddr_t)ein,
+ bdb_id_cmp, avl_dup_error ) ) {
+
+ /* Someone else created this node just before us.
+ * Free our new copy and use the existing one.
+ */
+ bdb_cache_entryinfo_destroy( ein );
+ ein = (EntryInfo *)avl_find( bdb->bi_cache.c_idtree,
+ (caddr_t) &ei, bdb_id_cmp );
+
+ /* Link in any kids we've already processed */
+ if ( ei2 ) {
+ bdb_cache_entryinfo_lock( ein );
+ avl_insert( &ein->bei_kids, (caddr_t)ei2,
+ bdb_rdn_cmp, avl_dup_error );
+ bdb_cache_entryinfo_unlock( ein );
+ }
+
+ /* First iteration and the node already existed in the cache */
+ if ( !eir ) {
+ addlru = 0;
+ }
+ }
+
+ /* If this is the first time, save this node
+ * to be returned later.
+ */
+ if ( eir == NULL ) eir = ein;
+
+ /* If there was a previous node, link it to this one */
+ if ( ei2 ) ei2->bei_parent = ein;
+
+ /* Look for this node's parent; ID 0 means we hit the root,
+ * whose EntryInfo is the cache's own c_dntree.
+ */
+ if ( eip.bei_id ) {
+ ei2 = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
+ (caddr_t) &eip, bdb_id_cmp );
+ } else {
+ ei2 = &bdb->bi_cache.c_dntree;
+ }
+ ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
+
+ /* Got the parent, link in and we're done. */
+ if ( ei2 ) {
+ bdb_cache_entryinfo_lock( ei2 );
+ ein->bei_parent = ei2;
+ avl_insert( &ei2->bei_kids, (caddr_t)ein, bdb_rdn_cmp,
+ avl_dup_error);
+ bdb_cache_entryinfo_unlock( ei2 );
+ /* Lock the node being returned; it stays locked for the caller */
+ bdb_cache_entryinfo_lock( eir );
+
+ /* Reset all the state info */
+ for (ein = eir; ein != ei2; ein=ein->bei_parent)
+ ein->bei_state &= ~CACHE_ENTRY_NOT_LINKED;
+ *res = eir;
+ break;
+ }
+ /* Parent not cached yet: it becomes the lookup target for the
+ * next iteration, with the current node as its only known kid.
+ */
+ ei.bei_kids = NULL;
+ ei.bei_id = eip.bei_id;
+ avl_insert( &ei.bei_kids, (caddr_t)ein, bdb_rdn_cmp,
+ avl_dup_error );
+ }
+ return rc;
+}
+
+/* Used by hdb_dn2idl when loading the EntryInfo for all the children
+ * of a given node
+ *
+ * ei: template EntryInfo; its bei_parent and bei_rdn identify the
+ * child to look up or add.
+ * res: receives the cached EntryInfo - either the one already present
+ * under the parent, or the one newly created from the template.
+ *
+ * Returns 0 when the child was already cached, otherwise the result
+ * of bdb_entryinfo_add_internal().
+ */
+int hdb_cache_load(
+ struct bdb_info *bdb,
+ EntryInfo *ei,
+ EntryInfo **res )
+{
+ EntryInfo *ei2;
+ int rc;
+
+ /* See if we already have this one */
+ bdb_cache_entryinfo_lock( ei->bei_parent );
+ ei2 = (EntryInfo *)avl_find( ei->bei_parent->bei_kids, ei, bdb_rdn_cmp );
+ bdb_cache_entryinfo_unlock( ei->bei_parent );
+
+ if ( !ei2 ) {
+ /* Not found, add it */
+ struct berval bv;
+
+ /* bei_rdn was not malloc'd before, do it now */
+ ber_dupbv( &bv, &ei->bei_rdn );
+ ei->bei_rdn = bv;
+
+ rc = bdb_entryinfo_add_internal( bdb, ei, res );
+ /* NOTE(review): the unlocks below imply bdb_entryinfo_add_internal
+ * returns with bei_parent locked and c_rwlock write-held -
+ * confirm against its definition, which is outside this view.
+ */
+ bdb_cache_entryinfo_unlock( ei->bei_parent );
+ ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
+ } else {
+ /* Found, return it */
+ *res = ei2;
+ return 0;
+ }
+ return rc;
+}
+#endif
+
+/* caller must have lru_mutex locked. mutex
+ * will be unlocked on return.
+ *
+ * Appends "ei" to the LRU list. If the cache is over c_maxsize, first
+ * tries to evict the cached Entry data from idle nodes near the LRU
+ * tail; only the Entry is released - the EntryInfo skeleton remains.
+ *
+ * locker: BDB lock owner id; when zero, lockp stays NULL and no
+ * DB_LOCK storage is passed to the entry-lock calls.
+ */
+static void
+bdb_cache_lru_add(
+ struct bdb_info *bdb,
+ u_int32_t locker,
+ EntryInfo *ei )
+{
+ DB_LOCK lock, *lockp;
+
+ if ( locker ) {
+ lockp = &lock;
+ } else {
+ lockp = NULL;
+ }
+
+ /* See if we're above the cache size limit */
+ if ( bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize ) {
+ EntryInfo *elru, *elprev;
+ int i = 0;
+
+ /* Look for an unused entry to remove, scanning from the
+ * least-recently-used end of the list.
+ */
+ for (elru = bdb->bi_cache.c_lrutail; elru; elru = elprev, i++ ) {
+ /* Save the predecessor now; elru may be unlinked below */
+ elprev = elru->bei_lruprev;
+
+ /* Too many probes, not enough idle, give up */
+ if (i > 10) break;
+
+ /* If we can successfully writelock it, then
+ * the object is idle.
+ */
+ if ( bdb_cache_entry_db_lock( bdb->bi_dbenv, locker, elru, 1, 1,
+ lockp ) == 0 ) {
+ /* If there's no entry, or this node is in
+ * the process of linking into the cache,
+ * skip it.
+ */
+ if ( !elru->bei_e || (elru->bei_state & CACHE_ENTRY_NOT_LINKED) ) {
+ bdb_cache_entry_db_unlock( bdb->bi_dbenv, lockp );
+ continue;
+ }
+ /* Drop the Entry but keep the EntryInfo in the trees */
+ LRU_DELETE( &bdb->bi_cache, elru );
+ elru->bei_e->e_private = NULL;
+ bdb_entry_return( elru->bei_e );
+ elru->bei_e = NULL;
+ bdb_cache_entry_db_unlock( bdb->bi_dbenv, lockp );
+ --bdb->bi_cache.c_cursize;
+ /* Stop as soon as we're back under the limit */
+ if (bdb->bi_cache.c_cursize < bdb->bi_cache.c_maxsize)
+ break;
+ }
+ }
+ }
+ LRU_ADD( &bdb->bi_cache, ei );
+ ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_mutex );
+}
+
+/* Look up the EntryInfo for "id" in the ID tree.
+ * Returns the matching node, or NULL if the ID is not cached.
+ * The result is returned as-is: no reference or lock is taken on it.
+ */
+EntryInfo *
+bdb_cache_find_info(
+ struct bdb_info *bdb,
+ ID id )
+{
+ EntryInfo needle, *found;
+
+ needle.bei_id = id;
+
+ /* Probe the ID tree under the shared cache lock */
+ ldap_pvt_thread_rdwr_rlock( &bdb->bi_cache.c_rwlock );
+ found = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
+ (caddr_t) &needle, bdb_id_cmp );
+ ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
+
+ return found;
+}
+