/* cache.c - routines to maintain an in-core cache of entries */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
 *
 * Copyright 2000-2009 The OpenLDAP Foundation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted only as authorized by the OpenLDAP
 * Public License.
 *
 * A copy of this license is available in the file LICENSE in the
 * top-level directory of the distribution or, alternatively, at
 * <http://www.OpenLDAP.org/license.html>.
 */
#include "portable.h"

#include <stdio.h>

#include <ac/string.h>
#include <ac/socket.h>

#include "back-bdb.h"

#ifdef BDB_HIER
#define	bdb_cache_lru_purge	hdb_cache_lru_purge
#endif
static void bdb_cache_lru_purge( struct bdb_info *bdb );

static int	bdb_cache_delete_internal(Cache *cache, EntryInfo *e, int decr);
#ifdef LDAP_DEBUG
static void	bdb_lru_print(Cache *cache);
#endif
/* For concurrency experiments only! */
#if 0
#define	ldap_pvt_thread_rdwr_wlock(a)	0
#define	ldap_pvt_thread_rdwr_wunlock(a)	0
#define	ldap_pvt_thread_rdwr_rlock(a)	0
#define	ldap_pvt_thread_rdwr_runlock(a)	0
#endif

#if 0
#define	ldap_pvt_thread_mutex_trylock(a)	0
#endif
static EntryInfo *
bdb_cache_entryinfo_new( Cache *cache )
{
	EntryInfo *ei = NULL;

	if ( cache->c_eifree ) {
		ldap_pvt_thread_mutex_lock( &cache->c_eifree_mutex );
		if ( cache->c_eifree ) {
			ei = cache->c_eifree;
			cache->c_eifree = ei->bei_lrunext;
			ei->bei_lrunext = NULL;
		}
		ldap_pvt_thread_mutex_unlock( &cache->c_eifree_mutex );
	}
	if ( !ei ) {
		ei = ch_calloc(1, sizeof(EntryInfo));
		ldap_pvt_thread_mutex_init( &ei->bei_kids_mutex );
	}
	ei->bei_state = CACHE_ENTRY_REFERENCED;
	return ei;
}
static void
bdb_cache_entryinfo_free( Cache *cache, EntryInfo *ei )
{
	free( ei->bei_nrdn.bv_val );
	BER_BVZERO( &ei->bei_nrdn );
#ifdef BDB_HIER
	free( ei->bei_rdn.bv_val );
	BER_BVZERO( &ei->bei_rdn );
#endif
	ei->bei_parent = NULL;
	ei->bei_kids = NULL;
	ei->bei_lruprev = NULL;

	/* Push onto the free list for later reuse */
	ldap_pvt_thread_mutex_lock( &cache->c_eifree_mutex );
	ei->bei_lrunext = cache->c_eifree;
	cache->c_eifree = ei;
	ldap_pvt_thread_mutex_unlock( &cache->c_eifree_mutex );
}
#define LRU_DEL( c, e ) do { \
	if ( e == (c)->c_lruhead ) (c)->c_lruhead = e->bei_lruprev; \
	if ( e == (c)->c_lrutail ) (c)->c_lrutail = e->bei_lruprev; \
	e->bei_lrunext->bei_lruprev = e->bei_lruprev; \
	e->bei_lruprev->bei_lrunext = e->bei_lrunext; \
	e->bei_lruprev = NULL; \
} while ( 0 )
/* Note - we now use a Second-Chance / Clock algorithm instead of
 * Least-Recently-Used. This tremendously improves concurrency
 * because we no longer need to manipulate the lists every time an
 * entry is touched. We only need to lock the lists when adding
 * or deleting an entry. It's now a circular doubly-linked list.
 * We always append to the tail, but the head traverses the circle
 * during a purge operation.
 */
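
/* Illustrative sketch only (not part of the cache): the essence of the
 * Second-Chance / Clock sweep described above. Touching an entry merely
 * sets a reference flag; the sweep hand clears the flag on the first
 * encounter and only evicts entries found unreferenced on a later pass.
 * The helper name here is hypothetical.
 */
#if 0
static EntryInfo *
clock_pick_victim( EntryInfo *hand )
{
	for (;;) {
		if ( hand->bei_state & CACHE_ENTRY_REFERENCED ) {
			/* Second chance: clear the flag and advance */
			hand->bei_state &= ~CACHE_ENTRY_REFERENCED;
			hand = hand->bei_lrunext;
		} else {
			return hand;	/* unreferenced since last sweep: evict */
		}
	}
}
#endif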
static void
bdb_cache_lru_link( struct bdb_info *bdb, EntryInfo *ei )
{
	/* Already linked, ignore */
	if ( ei->bei_lruprev )
		return;

	/* Insert into circular LRU list */
	ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_lru_mutex );

	ei->bei_lruprev = bdb->bi_cache.c_lrutail;
	if ( bdb->bi_cache.c_lrutail ) {
		ei->bei_lrunext = bdb->bi_cache.c_lrutail->bei_lrunext;
		bdb->bi_cache.c_lrutail->bei_lrunext = ei;
		if ( ei->bei_lrunext )
			ei->bei_lrunext->bei_lruprev = ei;
	} else {
		ei->bei_lrunext = ei->bei_lruprev = ei;
		bdb->bi_cache.c_lruhead = ei;
	}
	bdb->bi_cache.c_lrutail = ei;
	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_lru_mutex );
}
/* #define NO_DB_LOCK 1 */
/* Note: The BerkeleyDB locks are much slower than regular
 * mutexes or rdwr locks. But the BDB implementation has the
 * advantage of using a fixed size lock table, instead of
 * allocating a lock object per entry in the DB. That's a
 * key benefit for scaling. It also frees us from worrying
 * about undetectable deadlocks between BDB activity and our
 * own cache activity. It's still worth exploring faster
 * alternatives though.
 */
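
/* Illustrative sketch only: how a per-entry lock is expressed with the
 * BerkeleyDB lock API. The lock "object" is just a byte string naming
 * the entry (its ID), which BDB hashes into its fixed-size lock table;
 * we never allocate a lock structure per entry ourselves. The variable
 * names below are hypothetical.
 */
#if 0
	DBT lockobj;
	DB_LOCK lock;
	ID id = 42;
	int rc;

	memset( &lockobj, 0, sizeof(lockobj) );
	lockobj.data = &id;
	lockobj.size = sizeof(id);
	rc = env->lock_get( env, locker_id, 0, &lockobj, DB_LOCK_READ, &lock );
	if ( rc == 0 )
		rc = env->lock_put( env, &lock );
#endif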
/* Atomically release and reacquire a lock */
int
bdb_cache_entry_db_relock(
	struct bdb_info *bdb,
	DB_TXN *txn,
	EntryInfo *ei,
	int rw,
	int tryOnly,
	DB_LOCK *lock )
{
	int	rc;
	DBT	lockobj;
	DB_LOCKREQ list[2];

	if ( !lock ) return 0;

	lockobj.data = &ei->bei_id;
	lockobj.size = sizeof(ei->bei_id) + 1;

	list[0].op = DB_LOCK_PUT;
	list[0].lock = *lock;
	list[1].op = DB_LOCK_GET;
	list[1].lock = *lock;
	list[1].mode = rw ? DB_LOCK_WRITE : DB_LOCK_READ;
	list[1].obj = &lockobj;
	rc = bdb->bi_dbenv->lock_vec(bdb->bi_dbenv, TXN_ID(txn), tryOnly ? DB_LOCK_NOWAIT : 0,
		list, 2, NULL );

	if (rc && !tryOnly) {
		Debug( LDAP_DEBUG_TRACE,
			"bdb_cache_entry_db_relock: entry %ld, rw %d, rc %d\n",
			ei->bei_id, rw, rc );
	} else {
		*lock = list[1].lock;
	}
	return rc;
}
static int
bdb_cache_entry_db_lock( struct bdb_info *bdb, DB_TXN *txn, EntryInfo *ei,
	int rw, int tryOnly, DB_LOCK *lock )
{
#ifdef NO_DB_LOCK
	return 0;
#else
	int	rc;
	DBT	lockobj;
	int	db_rw;

	if ( !lock ) return 0;

	if (rw)
		db_rw = DB_LOCK_WRITE;
	else
		db_rw = DB_LOCK_READ;

	lockobj.data = &ei->bei_id;
	lockobj.size = sizeof(ei->bei_id) + 1;

	rc = LOCK_GET(bdb->bi_dbenv, TXN_ID(txn), tryOnly ? DB_LOCK_NOWAIT : 0,
		&lockobj, db_rw, lock);
	if (rc && !tryOnly) {
		Debug( LDAP_DEBUG_TRACE,
			"bdb_cache_entry_db_lock: entry %ld, rw %d, rc %d\n",
			ei->bei_id, rw, rc );
	}
	return rc;
#endif /* NO_DB_LOCK */
}
int
bdb_cache_entry_db_unlock ( struct bdb_info *bdb, DB_LOCK *lock )
{
	int rc;

	if ( !lock || lock->mode == DB_LOCK_NG ) return 0;

	rc = LOCK_PUT ( bdb->bi_dbenv, lock );
	return rc;
}
void
bdb_cache_return_entry_rw( struct bdb_info *bdb, Entry *e,
	int rw, DB_LOCK *lock )
{
	EntryInfo *ei;
	int free = 0;

	ei = e->e_private;
	if ( ei &&
		( ei->bei_state & CACHE_ENTRY_NOT_CACHED ) &&
		( bdb_cache_entryinfo_trylock( ei ) == 0 )) {
		if ( ei->bei_state & CACHE_ENTRY_NOT_CACHED ) {
			/* Releasing the entry can only be done when
			 * we know that nobody else is using it, i.e. we
			 * should have an entry_db writelock. But the
			 * flag is only set by the thread that loads the
			 * entry, and only if no other thread has found
			 * it while it was working. All other threads
			 * clear the flag, which means that we should be
			 * the only thread using the entry if the flag
			 * is set here.
			 */
			ei->bei_e = NULL;
			ei->bei_state ^= CACHE_ENTRY_NOT_CACHED;
			free = 1;
		}
		bdb_cache_entryinfo_unlock( ei );
	}
	bdb_cache_entry_db_unlock( bdb, lock );
	if ( free ) {
		e->e_private = NULL;
		bdb_entry_return( e );
	}
}
static int
bdb_cache_entryinfo_destroy( EntryInfo *e )
{
	ldap_pvt_thread_mutex_destroy( &e->bei_kids_mutex );
	free( e->bei_nrdn.bv_val );
#ifdef BDB_HIER
	free( e->bei_rdn.bv_val );
#endif
	free( e );
	return 0;
}
/* Do a length-ordered sort on normalized RDNs */
static int
bdb_rdn_cmp( const void *v_e1, const void *v_e2 )
{
	const EntryInfo *e1 = v_e1, *e2 = v_e2;
	int rc = e1->bei_nrdn.bv_len - e2->bei_nrdn.bv_len;
	if (rc == 0) {
		rc = strncmp( e1->bei_nrdn.bv_val, e2->bei_nrdn.bv_val,
			e1->bei_nrdn.bv_len );
	}
	return rc;
}
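
/* Illustrative only: with a length-ordered comparison, a shorter RDN
 * always sorts before a longer one regardless of content, e.g.
 * "cn=zz" < "cn=aaa"; strncmp only breaks ties between equal lengths.
 * This hypothetical helper shows the expected ordering.
 */
#if 0
static void
rdn_cmp_example( void )
{
	EntryInfo a, b;
	ber_str2bv( "cn=zz", 0, 0, &a.bei_nrdn );
	ber_str2bv( "cn=aaa", 0, 0, &b.bei_nrdn );
	assert( bdb_rdn_cmp( &a, &b ) < 0 );	/* 5 < 6: shorter sorts first */
}
#endif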
static int
bdb_id_cmp( const void *v_e1, const void *v_e2 )
{
	const EntryInfo *e1 = v_e1, *e2 = v_e2;
	return e1->bei_id - e2->bei_id;
}

static int
bdb_id_dup_err( void *v1, void *v2 )
{
	EntryInfo *e2 = v2;
	e2->bei_lrunext = v1;
	return -1;
}
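
/* Illustrative only: how callers use bdb_id_dup_err. When avl_insert
 * finds a duplicate it calls the dup function, which stashes a pointer
 * to the existing node in the new node's bei_lrunext; the caller can
 * then discard its copy and adopt the original, as the insertions
 * below do. Names here are hypothetical.
 */
#if 0
	if ( avl_insert( &tree, (caddr_t)new_ei, bdb_id_cmp, bdb_id_dup_err )) {
		EntryInfo *existing = new_ei->bei_lrunext;	/* set by bdb_id_dup_err */
		bdb_cache_entryinfo_free( cache, new_ei );
		new_ei = existing;
	}
#endif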
/* Create an entryinfo in the cache. Caller must release the locks later.
 */
static int
bdb_entryinfo_add_internal(
	struct bdb_info *bdb,
	EntryInfo *ei,
	EntryInfo **res )
{
	EntryInfo *ei2 = NULL;

	*res = NULL;

	ei2 = bdb_cache_entryinfo_new( &bdb->bi_cache );

	ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
	bdb_cache_entryinfo_lock( ei->bei_parent );

	ei2->bei_id = ei->bei_id;
	ei2->bei_parent = ei->bei_parent;
#ifdef BDB_HIER
	ei2->bei_rdn = ei->bei_rdn;
#endif
#ifdef SLAP_ZONE_ALLOC
	ei2->bei_bdb = bdb;
#endif

	/* Add to cache ID tree */
	if (avl_insert( &bdb->bi_cache.c_idtree, ei2, bdb_id_cmp,
		bdb_id_dup_err )) {
		EntryInfo *eix = ei2->bei_lrunext;
		bdb_cache_entryinfo_free( &bdb->bi_cache, ei2 );
		ei2 = eix;
#ifdef BDB_HIER
		/* It got freed above because its value was
		 * stored in ei2.
		 */
		ei->bei_rdn.bv_val = NULL;
#endif
	} else {
		int rc;

		bdb->bi_cache.c_eiused++;
		ber_dupbv( &ei2->bei_nrdn, &ei->bei_nrdn );

		/* This is a new leaf node. But if parent had no kids, then it was
		 * a leaf and we would be decrementing that. So, only increment if
		 * the parent already has kids.
		 */
		if ( ei->bei_parent->bei_kids || !ei->bei_parent->bei_id )
			bdb->bi_cache.c_leaves++;
		rc = avl_insert( &ei->bei_parent->bei_kids, ei2, bdb_rdn_cmp,
			avl_dup_error );
#ifdef BDB_HIER
		/* it's possible for hdb_cache_find_parent to beat us to it */
		if ( !rc )
			ei->bei_parent->bei_ckids++;
#endif
	}

	*res = ei2;
	return 0;
}
/* Find the EntryInfo for the requested DN. If the DN cannot be found, return
 * the info for its closest ancestor. *res should be NULL to process a
 * complete DN starting from the tree root. Otherwise *res must be the
 * immediate parent of the requested DN, and only the RDN will be searched.
 * The EntryInfo is locked upon return and must be unlocked by the caller.
 */
int
bdb_cache_find_ndn(
	Operation	*op,
	DB_TXN		*txn,
	struct berval	*ndn,
	EntryInfo	**res )
{
	struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
	EntryInfo	ei, *eip, *ei2;
	int rc = 0;
	char *ptr;
	DB_LOCK lock;

	/* this function is always called with normalized DN */
	if ( *res ) {
		/* we're doing a onelevel search for an RDN */
		ei.bei_nrdn.bv_val = ndn->bv_val;
		ei.bei_nrdn.bv_len = dn_rdnlen( op->o_bd, ndn );
		eip = *res;
	} else {
		/* we're searching a full DN from the root */
		ptr = ndn->bv_val + ndn->bv_len - op->o_bd->be_nsuffix[0].bv_len;
		ei.bei_nrdn.bv_val = ptr;
		ei.bei_nrdn.bv_len = op->o_bd->be_nsuffix[0].bv_len;
		/* Skip to next rdn if suffix is empty */
		if ( ei.bei_nrdn.bv_len == 0 ) {
			for (ptr = ei.bei_nrdn.bv_val - 2; ptr > ndn->bv_val
				&& !DN_SEPARATOR(*ptr); ptr--) /* empty */;
			if ( ptr >= ndn->bv_val ) {
				if (DN_SEPARATOR(*ptr)) ptr++;
				ei.bei_nrdn.bv_len = ei.bei_nrdn.bv_val - ptr;
				ei.bei_nrdn.bv_val = ptr;
			}
		}
		eip = &bdb->bi_cache.c_dntree;
	}

	for ( bdb_cache_entryinfo_lock( eip ); eip; ) {
		eip->bei_state |= CACHE_ENTRY_REFERENCED;
		ei.bei_parent = eip;
		ei2 = (EntryInfo *)avl_find( eip->bei_kids, &ei, bdb_rdn_cmp );
		if ( ei2 && ( ei2->bei_state & CACHE_ENTRY_PURGED ))
			ei2 = NULL;
		if ( !ei2 ) {
			int len = ei.bei_nrdn.bv_len;

			if ( BER_BVISEMPTY( ndn )) {
				*res = eip;
				return LDAP_SUCCESS;
			}

			ei.bei_nrdn.bv_len = ndn->bv_len -
				(ei.bei_nrdn.bv_val - ndn->bv_val);
			bdb_cache_entryinfo_unlock( eip );

			BDB_LOG_PRINTF( bdb->bi_dbenv, NULL, "slapd Reading %s",
				ei.bei_nrdn.bv_val );

			lock.mode = DB_LOCK_NG;
			rc = bdb_dn2id( op, &ei.bei_nrdn, &ei, txn, &lock );
			if (rc) {
				bdb_cache_entryinfo_lock( eip );
				bdb_cache_entry_db_unlock( bdb, &lock );
				*res = eip;
				return rc;
			}

			BDB_LOG_PRINTF( bdb->bi_dbenv, NULL, "slapd Read got %s(%d)",
				ei.bei_nrdn.bv_val, ei.bei_id );

			/* DN exists but needs to be added to cache */
			ei.bei_nrdn.bv_len = len;
			rc = bdb_entryinfo_add_internal( bdb, &ei, &ei2 );
			/* add_internal left eip and c_rwlock locked */
			ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
			bdb_cache_entry_db_unlock( bdb, &lock );
			if ( rc ) {
				*res = eip;
				return rc;
			}
		} else if ( ei2->bei_state & CACHE_ENTRY_DELETED ) {
			/* In the midst of deleting? Give it a chance to
			 * complete.
			 */
			bdb_cache_entryinfo_unlock( eip );
			ldap_pvt_thread_yield();
			bdb_cache_entryinfo_lock( eip );
			*res = eip;
			return DB_NOTFOUND;
		}
		bdb_cache_entryinfo_lock( ei2 );
		bdb_cache_entryinfo_unlock( eip );

		eip = ei2;

		/* Advance to next lower RDN */
		for (ptr = ei.bei_nrdn.bv_val - 2; ptr > ndn->bv_val
			&& !DN_SEPARATOR(*ptr); ptr--) /* empty */;
		if ( ptr >= ndn->bv_val ) {
			if (DN_SEPARATOR(*ptr)) ptr++;
			ei.bei_nrdn.bv_len = ei.bei_nrdn.bv_val - ptr - 1;
			ei.bei_nrdn.bv_val = ptr;
		}
		if ( ptr < ndn->bv_val ) {
			*res = eip;
			break;
		}
	}
	return rc;
}
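
/* Illustrative only: a typical lookup of a DN's EntryInfo. The caller
 * owns the lock on the returned node, even when the DN itself was not
 * found and only the closest ancestor is returned. Hypothetical call site.
 */
#if 0
	EntryInfo *ei = NULL;
	int rc = bdb_cache_find_ndn( op, txn, &op->o_req_ndn, &ei );
	if ( ei ) {
		/* ... use ei (locked); on failure it is the nearest ancestor ... */
		bdb_cache_entryinfo_unlock( ei );
	}
#endif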
#ifdef BDB_HIER
/* Walk up the tree from a child node, looking for an ID that's already
 * been linked into the cache.
 */
int
hdb_cache_find_parent(
	Operation *op,
	DB_TXN *txn,
	ID id,
	EntryInfo **res )
{
	struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
	EntryInfo ei, eip, *ei2 = NULL, *ein = NULL, *eir = NULL;
	int rc;

	ei.bei_id = id;
	ei.bei_kids = NULL;
	ei.bei_ckids = 0;

	for (;;) {
		rc = hdb_dn2id_parent( op, txn, &ei, &eip.bei_id );
		if ( rc ) break;

		/* Save the previous node, if any */
		ei2 = ein;

		/* Create a new node for the current ID */
		ein = bdb_cache_entryinfo_new( &bdb->bi_cache );
		ein->bei_id = ei.bei_id;
		ein->bei_kids = ei.bei_kids;
		ein->bei_nrdn = ei.bei_nrdn;
		ein->bei_rdn = ei.bei_rdn;
		ein->bei_ckids = ei.bei_ckids;
#ifdef SLAP_ZONE_ALLOC
		ein->bei_bdb = bdb;
#endif
		ei.bei_ckids = 0;

		/* This node is not fully connected yet */
		ein->bei_state |= CACHE_ENTRY_NOT_LINKED;

		/* If this is the first time, save this node
		 * to be returned later.
		 */
		if ( eir == NULL ) eir = ein;

again:
		/* Insert this node into the ID tree */
		ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
		if ( avl_insert( &bdb->bi_cache.c_idtree, (caddr_t)ein,
			bdb_id_cmp, bdb_id_dup_err ) ) {
			EntryInfo *eix = ein->bei_lrunext;

			if ( eix->bei_state & CACHE_ENTRY_PURGED ) {
				/* The old node is being purged; wait and retry */
				ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
				ldap_pvt_thread_yield();
				goto again;
			}

			/* Someone else created this node just before us.
			 * Free our new copy and use the existing one.
			 */
			bdb_cache_entryinfo_free( &bdb->bi_cache, ein );

			/* if it was the node we were looking for, just return it */
			if ( eir == ein ) {
				eir = eix;
				bdb_cache_entryinfo_lock( eix );
				ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
				*res = eir;
				rc = 0;
				break;
			}
			ein = eix;

			/* otherwise, link up what we have and return */
		}

		/* If there was a previous node, link it to this one */
		if ( ei2 ) ei2->bei_parent = ein;

		/* Look for this node's parent */
		if ( eip.bei_id ) {
			ei2 = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
				(caddr_t) &eip, bdb_id_cmp );
		} else {
			ei2 = &bdb->bi_cache.c_dntree;
		}
		bdb->bi_cache.c_eiused++;
		if ( ei2 && ( ei2->bei_kids || !ei2->bei_id ))
			bdb->bi_cache.c_leaves++;
		ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );

		/* Got the parent, link in and we're done. */
		if ( ei2 ) {
			bdb_cache_entryinfo_lock( ei2 );
			bdb_cache_entryinfo_lock( eir );
			ein->bei_parent = ei2;

			if ( avl_insert( &ei2->bei_kids, (caddr_t)ein, bdb_rdn_cmp,
				avl_dup_error) == 0 )
				ei2->bei_ckids++;

			/* Reset all the state info */
			for (ein = eir; ein != ei2; ein=ein->bei_parent)
				ein->bei_state &= ~CACHE_ENTRY_NOT_LINKED;

			bdb_cache_entryinfo_unlock( ei2 );

			*res = eir;
			break;
		}
		ei.bei_kids = NULL;
		ei.bei_id = eip.bei_id;
		ei.bei_ckids = 1;
		avl_insert( &ei.bei_kids, (caddr_t)ein, bdb_rdn_cmp,
			avl_dup_error );
	}
	return rc;
}
/* Used by hdb_dn2idl when loading the EntryInfo for all the children
 * of a node.
 */
int hdb_cache_load(
	struct bdb_info *bdb,
	EntryInfo *ei,
	EntryInfo **res )
{
	EntryInfo *ei2;
	int rc;

	/* See if we already have this one */
	bdb_cache_entryinfo_lock( ei->bei_parent );
	ei2 = (EntryInfo *)avl_find( ei->bei_parent->bei_kids, ei, bdb_rdn_cmp );
	bdb_cache_entryinfo_unlock( ei->bei_parent );

	if ( !ei2 ) {
		/* Not found, add it */
		struct berval bv;

		/* bei_rdn was not malloc'd before, do it now */
		ber_dupbv( &bv, &ei->bei_rdn );
		ei->bei_rdn = bv;

		rc = bdb_entryinfo_add_internal( bdb, ei, res );
		bdb_cache_entryinfo_unlock( ei->bei_parent );
		ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
	} else {
		/* Found, return it */
		*res = ei2;
		rc = 0;
	}
	return rc;
}
#endif /* BDB_HIER */
/* This is best-effort only. If all entries in the cache are
 * busy, they will all be kept. This is unlikely to happen
 * unless the cache is very much smaller than the working set.
 */
static void
bdb_cache_lru_purge( struct bdb_info *bdb )
{
	DB_LOCK		lock, *lockp;
	EntryInfo *elru, *elnext = NULL;
	int count, islocked, eimax;
	int efree = 0, eifree = 0, eicount, ecount;

	/* Wait for the mutex; we're the only one trying to purge. */
	ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_lru_mutex );

	/* maximum number of EntryInfo leaves to cache. In slapcat
	 * we always free all leaf nodes.
	 */
	if ( slapMode & SLAP_TOOL_READONLY )
		eimax = 0;
	else
		eimax = bdb->bi_cache.c_eimax;

	efree = bdb->bi_cache.c_cursize - bdb->bi_cache.c_maxsize;
	if ( efree < 1 )
		efree = 0;
	else
		efree += bdb->bi_cache.c_minfree;

	if ( bdb->bi_cache.c_leaves > eimax ) {
		eifree = bdb->bi_cache.c_minfree * 10;
		if ( eifree >= eimax )
			eifree = eimax / 2;
	}

	if ( !efree && !eifree ) {
		ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_lru_mutex );
		bdb->bi_cache.c_purging = 0;
		return;
	}

	if ( bdb->bi_cache.c_txn ) {
		lockp = &lock;
	} else {
		lockp = NULL;
	}

	count = 0;
	eicount = 0;
	ecount = 0;
	islocked = 0;

	/* Look for an unused entry to remove */
	for ( elru = bdb->bi_cache.c_lruhead; elru; elru = elnext ) {
		elnext = elru->bei_lrunext;

		if ( bdb_cache_entryinfo_trylock( elru ))
			goto bottom;

		/* This flag implements the clock replacement behavior */
		if ( elru->bei_state & ( CACHE_ENTRY_REFERENCED )) {
			elru->bei_state &= ~CACHE_ENTRY_REFERENCED;
			bdb_cache_entryinfo_unlock( elru );
			goto bottom;
		}

		/* If this node is in the process of linking into the cache,
		 * or this node is being deleted, skip it.
		 */
		if (( elru->bei_state & ( CACHE_ENTRY_NOT_LINKED |
			CACHE_ENTRY_DELETED | CACHE_ENTRY_LOADING |
			CACHE_ENTRY_ONELEVEL )) ||
			elru->bei_finders > 0 ) {
			bdb_cache_entryinfo_unlock( elru );
			goto bottom;
		}

		if ( bdb_cache_entryinfo_trylock( elru->bei_parent )) {
			bdb_cache_entryinfo_unlock( elru );
			goto bottom;
		}

		/* entryinfo is locked */
		islocked = 1;

		/* If we can successfully writelock it, then
		 * the object is idle.
		 */
		if ( bdb_cache_entry_db_lock( bdb,
			bdb->bi_cache.c_txn, elru, 1, 1, lockp ) == 0 ) {

			/* Free entry for this node if it's present */
			if ( elru->bei_e ) {
				ecount++;

				/* the cache may have gone over the limit while we
				 * weren't looking, so double check.
				 */
				if ( !efree && ecount > bdb->bi_cache.c_maxsize )
					efree = bdb->bi_cache.c_minfree;

				if ( count < efree ) {
					elru->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
					bdb_entry_return( bdb, elru->bei_e, elru->bei_zseq );
#else
					bdb_entry_return( elru->bei_e );
#endif
					elru->bei_e = NULL;
					count++;
				} else {
					/* Keep this node cached, skip to next */
					bdb_cache_entry_db_unlock( bdb, lockp );
					goto next;
				}
			}
			bdb_cache_entry_db_unlock( bdb, lockp );

			/*
			 * If it is a leaf node, and we're over the limit, free it.
			 */
			if ( elru->bei_kids ) {
				/* Drop from list, we ignore it... */
				LRU_DEL( &bdb->bi_cache, elru );
			} else if ( eicount < eifree ) {
				/* Too many leaf nodes, free this one */
				bdb_cache_delete_internal( &bdb->bi_cache, elru, 0 );
				bdb_cache_delete_cleanup( &bdb->bi_cache, elru );
				islocked = 0;
				eicount++;
			}	/* Leave on list until we need to free it */
		}

next:
		if ( islocked ) {
			bdb_cache_entryinfo_unlock( elru );
			bdb_cache_entryinfo_unlock( elru->bei_parent );
		}

		if ( count >= efree && eicount >= eifree ) {
			if ( count || ecount > bdb->bi_cache.c_cursize ) {
				ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_count_mutex );
				/* HACK: we seem to be losing track, fix up now */
				if ( ecount > bdb->bi_cache.c_cursize )
					bdb->bi_cache.c_cursize = ecount;
				bdb->bi_cache.c_cursize -= count;
				ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_count_mutex );
			}
			break;
		}
bottom:
		if ( elnext == bdb->bi_cache.c_lruhead )
			break;
	}

	bdb->bi_cache.c_lruhead = elnext;
	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_lru_mutex );
	bdb->bi_cache.c_purging = 0;
}
/* Get an entry's info given just its ID */
EntryInfo *
bdb_cache_find_info(
	struct bdb_info *bdb,
	ID id )
{
	EntryInfo	ei = { 0 },
			*ei2;

	ei.bei_id = id;

	ldap_pvt_thread_rdwr_rlock( &bdb->bi_cache.c_rwlock );
	ei2 = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
		(caddr_t) &ei, bdb_id_cmp );
	if ( ei2 ) {
		if ( ei2->bei_state & CACHE_ENTRY_PURGED ) {
			ei2 = NULL;
		} else {
			bdb_cache_entryinfo_lock( ei2 );
			ei2->bei_state |= CACHE_ENTRY_REFERENCED;
			bdb_cache_entryinfo_unlock( ei2 );
		}
	}
	ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
	return ei2;
}
/*
 * cache_find_id - find an entry in the cache, given id.
 * The entry is locked for Read upon return. Call with flag ID_LOCKED if
 * the supplied *eip was already locked.
 */
int
bdb_cache_find_id(
	Operation *op,
	DB_TXN	*tid,
	ID	id,
	EntryInfo **eip,
	int	flag,
	DB_LOCK	*lock )
{
	struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
	Entry	*ep = NULL;
	int	rc = 0, load = 0;
	EntryInfo ei = { 0 };

	ei.bei_id = id;

#ifdef SLAP_ZONE_ALLOC
	slap_zh_rlock(bdb->bi_cache.c_zctx);
#endif

	/* If we weren't given any info, see if we have it already cached */
	if ( !*eip ) {
again:	ldap_pvt_thread_rdwr_rlock( &bdb->bi_cache.c_rwlock );
		*eip = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
			(caddr_t) &ei, bdb_id_cmp );
		if ( *eip ) {
			if ( (*eip)->bei_state & CACHE_ENTRY_PURGED ) {
				/* being freed; ignore it and retry */
				*eip = NULL;
				ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
				ldap_pvt_thread_yield();
				goto again;
			}
			/* If the lock attempt fails, the info is in use */
			if ( bdb_cache_entryinfo_trylock( *eip )) {
				int del = (*eip)->bei_state & CACHE_ENTRY_DELETED;
				ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
				/* If this node is being deleted, treat
				 * as if the delete has already finished
				 */
				if ( del ) {
					return DB_NOTFOUND;
				}
				/* otherwise, wait for the info to free up */
				ldap_pvt_thread_yield();
				goto again;
			}
			/* If this info isn't hooked up to its parent yet,
			 * unlock and wait for it to be fully initialized
			 */
			if ( (*eip)->bei_state & CACHE_ENTRY_NOT_LINKED ) {
				bdb_cache_entryinfo_unlock( *eip );
				ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
				ldap_pvt_thread_yield();
				goto again;
			}
			flag |= ID_LOCKED;
		}
		ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
	}

	/* See if the ID exists in the database; add it to the cache if so */
	if ( !*eip ) {
#ifndef BDB_HIER
		rc = bdb_id2entry( op->o_bd, tid, id, &ep );
		if ( rc == 0 ) {
			rc = bdb_cache_find_ndn( op, tid,
				&ep->e_nname, eip );
			if ( *eip ) flag |= ID_LOCKED;
			if ( rc ) {
				ep->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
				bdb_entry_return( bdb, ep, (*eip)->bei_zseq );
#else
				bdb_entry_return( ep );
#endif
				ep = NULL;
			}
		}
#else
		rc = hdb_cache_find_parent(op, tid, id, eip );
		if ( rc == 0 ) flag |= ID_LOCKED;
#endif
	}

	/* Ok, we found the info, do we have the entry? */
	if ( rc == 0 ) {
		if ( !( flag & ID_LOCKED )) {
			bdb_cache_entryinfo_lock( *eip );
			flag |= ID_LOCKED;
		}

		if ( (*eip)->bei_state & CACHE_ENTRY_PURGED ) {
			bdb_cache_entryinfo_unlock( *eip );
			*eip = NULL;
			flag &= ~ID_LOCKED;
			goto again;
		}
		if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
			rc = DB_NOTFOUND;
		} else {
			(*eip)->bei_finders++;
			(*eip)->bei_state |= CACHE_ENTRY_REFERENCED;
			/* Make sure only one thread tries to load the entry */
load1:
#ifdef SLAP_ZONE_ALLOC
			if ((*eip)->bei_e && !slap_zn_validate(
				bdb->bi_cache.c_zctx, (*eip)->bei_e, (*eip)->bei_zseq)) {
				(*eip)->bei_e = NULL;
				(*eip)->bei_zseq = 0;
			}
#endif
			if ( !(*eip)->bei_e && !((*eip)->bei_state & CACHE_ENTRY_LOADING)) {
				load = 1;
				(*eip)->bei_state |= CACHE_ENTRY_LOADING;
			}

			if ( !load ) {
				/* Clear the uncached state if we are not
				 * loading it, i.e it is already cached or
				 * another thread is currently loading it.
				 */
				if ( (*eip)->bei_state & CACHE_ENTRY_NOT_CACHED ) {
					(*eip)->bei_state &= ~CACHE_ENTRY_NOT_CACHED;
				}
			}

			if ( flag & ID_LOCKED ) {
				bdb_cache_entryinfo_unlock( *eip );
				flag ^= ID_LOCKED;
			}
			rc = bdb_cache_entry_db_lock( bdb, tid, *eip, load, 0, lock );
			if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
				rc = DB_NOTFOUND;
				bdb_cache_entry_db_unlock( bdb, lock );
			} else if ( rc == 0 ) {
				if ( load ) {
					if ( !ep ) {
						rc = bdb_id2entry( op->o_bd, tid, id, &ep );
					}
					if ( rc == 0 ) {
						ep->e_private = *eip;
#ifdef BDB_HIER
						bdb_fix_dn( ep, 0 );
#endif
						(*eip)->bei_e = ep;
#ifdef SLAP_ZONE_ALLOC
						(*eip)->bei_zseq = *((ber_len_t *)ep - 2);
#endif
						ep = NULL;
						bdb_cache_lru_link( bdb, *eip );
						if (( flag & ID_NOCACHE ) &&
							( bdb_cache_entryinfo_trylock( *eip ) == 0 )) {
							/* Set the cached state only if no other thread
							 * found the info while we were loading the entry.
							 */
							if ( (*eip)->bei_finders == 1 )
								(*eip)->bei_state |= CACHE_ENTRY_NOT_CACHED;
							bdb_cache_entryinfo_unlock( *eip );
						}
					}
					if ( rc == 0 ) {
						/* If we succeeded, downgrade back to a readlock. */
						rc = bdb_cache_entry_db_relock( bdb, tid,
							*eip, 0, 0, lock );
					} else {
						/* Otherwise, release the lock. */
						bdb_cache_entry_db_unlock( bdb, lock );
					}
				} else if ( !(*eip)->bei_e ) {
					/* Some other thread is trying to load the entry,
					 * wait for it to finish.
					 */
					bdb_cache_entry_db_unlock( bdb, lock );
					bdb_cache_entryinfo_lock( *eip );
					flag |= ID_LOCKED;
					goto load1;
#ifdef BDB_HIER
				} else {
					/* Check for subtree renames
					 */
					rc = bdb_fix_dn( (*eip)->bei_e, 1 );
					if ( rc ) {
						bdb_cache_entry_db_relock( bdb,
							tid, *eip, 1, 0, lock );
						/* check again in case other modifier did it already */
						if ( bdb_fix_dn( (*eip)->bei_e, 1 ) )
							rc = bdb_fix_dn( (*eip)->bei_e, 2 );
						bdb_cache_entry_db_relock( bdb,
							tid, *eip, 0, 0, lock );
					}
#endif
				}
			}
			bdb_cache_entryinfo_lock( *eip );
			(*eip)->bei_finders--;
			if ( load )
				(*eip)->bei_state ^= CACHE_ENTRY_LOADING;
			bdb_cache_entryinfo_unlock( *eip );
		}
	}
	if ( flag & ID_LOCKED ) {
		bdb_cache_entryinfo_unlock( *eip );
	}
	if ( ep ) {
		ep->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
		bdb_entry_return( bdb, ep, (*eip)->bei_zseq );
#else
		bdb_entry_return( ep );
#endif
	}
	if ( rc == 0 && load ) {
		int purge = 0;

		if ( bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize ||
			bdb->bi_cache.c_leaves > bdb->bi_cache.c_eimax ) {
			ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_count_mutex );
			if ( !bdb->bi_cache.c_purging ) {
				if ( !( flag & ID_NOCACHE )) {
					bdb->bi_cache.c_cursize++;
					if ( bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize ) {
						purge = 1;
						bdb->bi_cache.c_purging = 1;
					}
				} else if ( bdb->bi_cache.c_leaves > bdb->bi_cache.c_eimax ) {
					purge = 1;
					bdb->bi_cache.c_purging = 1;
				}
			}
			ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_count_mutex );
		}
		if ( purge )
			bdb_cache_lru_purge( bdb );
	}

#ifdef SLAP_ZONE_ALLOC
	if (rc == 0 && (*eip)->bei_e) {
		slap_zn_rlock(bdb->bi_cache.c_zctx, (*eip)->bei_e);
	}
	slap_zh_runlock(bdb->bi_cache.c_zctx);
#endif
	return rc;
}
int
bdb_cache_children(
	Operation *op,
	DB_TXN *txn,
	Entry *e )
{
	int rc;

	if ( BEI(e)->bei_kids ) {
		return 0;
	}
	if ( BEI(e)->bei_state & CACHE_ENTRY_NO_KIDS ) {
		return DB_NOTFOUND;
	}
	rc = bdb_dn2id_children( op, txn, e );
	if ( rc == DB_NOTFOUND ) {
		BEI(e)->bei_state |= CACHE_ENTRY_NO_KIDS | CACHE_ENTRY_NO_GRANDKIDS;
	}
	return rc;
}
/* Update the cache after a successful database Add. */
int
bdb_cache_add(
	struct bdb_info *bdb,
	EntryInfo *eip,
	Entry *e,
	struct berval *nrdn,
	DB_TXN *txn,
	DB_LOCK *lock )
{
	EntryInfo *new, ei = { 0 };
	int rc, purge = 0;
#ifdef BDB_HIER
	struct berval rdn = e->e_name;
#endif

	ei.bei_id = e->e_id;
	ei.bei_parent = eip;
	ei.bei_nrdn = *nrdn;

	/* Lock this entry so that bdb_add can run to completion.
	 * It can only fail if BDB has run out of lock resources.
	 */
	rc = bdb_cache_entry_db_lock( bdb, txn, &ei, 0, 0, lock );
	if ( rc ) {
		bdb_cache_entryinfo_unlock( eip );
		return rc;
	}

#ifdef BDB_HIER
	if ( nrdn->bv_len != e->e_nname.bv_len ) {
		char *ptr = ber_bvchr( &rdn, ',' );
		assert( ptr != NULL );
		rdn.bv_len = ptr - rdn.bv_val;
	}
	ber_dupbv( &ei.bei_rdn, &rdn );
	if ( eip->bei_dkids ) eip->bei_dkids++;
#endif

	rc = bdb_entryinfo_add_internal( bdb, &ei, &new );
	/* bdb_csn_commit can cause this when adding the database root entry */
	if ( new->bei_e ) {
		new->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
		bdb_entry_return( bdb, new->bei_e, new->bei_zseq );
#else
		bdb_entry_return( new->bei_e );
#endif
	}
	new->bei_e = e;
	e->e_private = new;
	new->bei_state |= CACHE_ENTRY_NO_KIDS | CACHE_ENTRY_NO_GRANDKIDS;
	eip->bei_state &= ~CACHE_ENTRY_NO_KIDS;
	bdb_cache_entryinfo_unlock( eip );
	if (eip->bei_parent) {
		bdb_cache_entryinfo_lock( eip->bei_parent );
		eip->bei_parent->bei_state &= ~CACHE_ENTRY_NO_GRANDKIDS;
		bdb_cache_entryinfo_unlock( eip->bei_parent );
	}

	ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
	ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_count_mutex );
	++bdb->bi_cache.c_cursize;
	if ( bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize &&
		!bdb->bi_cache.c_purging ) {
		purge = 1;
		bdb->bi_cache.c_purging = 1;
	}
	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_count_mutex );

	bdb_cache_lru_link( bdb, new );

	if ( purge )
		bdb_cache_lru_purge( bdb );

	return rc;
}
int
bdb_cache_modify(
	struct bdb_info *bdb,
	Entry *e,
	Attribute *newAttrs,
	DB_TXN *txn,
	DB_LOCK *lock )
{
	EntryInfo *ei = BEI(e);
	int rc;

	/* Get write lock on data */
	rc = bdb_cache_entry_db_relock( bdb, txn, ei, 1, 0, lock );

	/* If we've done repeated mods on a cached entry, then e_attrs
	 * is no longer contiguous with the entry, and must be freed.
	 */
	if ( rc == 0 ) {
		if ( (void *)e->e_attrs != (void *)(e+1) ) {
			attrs_free( e->e_attrs );
		}
		e->e_attrs = newAttrs;
	}
	return rc;
}
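
/* Illustrative only: why the (e+1) comparison above works. Entries
 * decoded from the database are assumed to be built as one contiguous
 * block, Entry struct first and attributes immediately behind it, so
 * (void *)e->e_attrs == (void *)(e+1). Once a modify swaps in a new
 * attribute list, the pointers no longer match and the old array must
 * be freed separately. A hypothetical sketch of that layout:
 */
#if 0
	size_t attrs_size = 0;	/* size of the decoded attribute data */
	Entry *e = ch_malloc( sizeof(Entry) + attrs_size );
	e->e_attrs = (Attribute *)(e+1);	/* contiguous: never freed on its own */
#endif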
/*
 * Change the rdn in the entryinfo. Also move to a new parent if needed.
 */
int
bdb_cache_modrdn(
	struct bdb_info *bdb,
	Entry *e,
	struct berval *nrdn,
	Entry *new,
	EntryInfo *ein,
	DB_TXN *txn,
	DB_LOCK *lock )
{
	EntryInfo *ei = BEI(e), *pei;
	int rc;
#ifdef BDB_HIER
	struct berval rdn;
#endif

	/* Get write lock on data */
	rc = bdb_cache_entry_db_relock( bdb, txn, ei, 1, 0, lock );
	if ( rc ) return rc;

	/* If we've done repeated mods on a cached entry, then e_attrs
	 * is no longer contiguous with the entry, and must be freed.
	 */
	if ( (void *)e->e_attrs != (void *)(e+1) ) {
		attrs_free( e->e_attrs );
	}
	e->e_attrs = new->e_attrs;
	if( e->e_nname.bv_val < e->e_bv.bv_val ||
		e->e_nname.bv_val > e->e_bv.bv_val + e->e_bv.bv_len )
	{
		ch_free(e->e_name.bv_val);
		ch_free(e->e_nname.bv_val);
	}
	e->e_name = new->e_name;
	e->e_nname = new->e_nname;

	/* Lock the parent's kids AVL tree */
	pei = ei->bei_parent;
	bdb_cache_entryinfo_lock( pei );
	avl_delete( &pei->bei_kids, (caddr_t) ei, bdb_rdn_cmp );
	free( ei->bei_nrdn.bv_val );
	ber_dupbv( &ei->bei_nrdn, nrdn );
#ifdef BDB_HIER
	free( ei->bei_rdn.bv_val );

	rdn = e->e_name;
	if ( nrdn->bv_len != e->e_nname.bv_len ) {
		char *ptr = ber_bvchr(&rdn, ',');
		assert( ptr != NULL );
		rdn.bv_len = ptr - rdn.bv_val;
	}
	ber_dupbv( &ei->bei_rdn, &rdn );

	/* If new parent, decrement kid counts */
	if ( ein ) {
		pei->bei_ckids--;
		if ( pei->bei_dkids ) {
			pei->bei_dkids--;
			if ( pei->bei_dkids < 2 )
				pei->bei_state |= CACHE_ENTRY_NO_KIDS | CACHE_ENTRY_NO_GRANDKIDS;
		}
	}
#endif

	if (!ein) {
		ein = ei->bei_parent;
	} else {
		ei->bei_parent = ein;
		bdb_cache_entryinfo_unlock( pei );
		bdb_cache_entryinfo_lock( ein );

		/* new parent now has kids */
		if ( ein->bei_state & CACHE_ENTRY_NO_KIDS )
			ein->bei_state ^= CACHE_ENTRY_NO_KIDS;
		/* grandparent has grandkids */
		if ( ein->bei_parent )
			ein->bei_parent->bei_state &= ~CACHE_ENTRY_NO_GRANDKIDS;
#ifdef BDB_HIER
		/* parent might now have grandkids */
		if ( ein->bei_state & CACHE_ENTRY_NO_GRANDKIDS &&
			!(ei->bei_state & CACHE_ENTRY_NO_KIDS))
			ein->bei_state ^= CACHE_ENTRY_NO_GRANDKIDS;

		ein->bei_ckids++;
		if ( ein->bei_dkids ) ein->bei_dkids++;
#endif
	}

#ifdef BDB_HIER
	/* Record the generation number of this change */
	ldap_pvt_thread_mutex_lock( &bdb->bi_modrdns_mutex );
	bdb->bi_modrdns++;
	ei->bei_modrdns = bdb->bi_modrdns;
	ldap_pvt_thread_mutex_unlock( &bdb->bi_modrdns_mutex );
#endif

	avl_insert( &ein->bei_kids, ei, bdb_rdn_cmp, avl_dup_error );
	bdb_cache_entryinfo_unlock( ein );
	return rc;
}
/*
 * cache_delete - delete the entry e from the cache.
 *
 * returns:	0	e was deleted ok
 *		1	e was not in the cache
 *		-1	something bad happened
 */
int
bdb_cache_delete(
	struct bdb_info *bdb,
	Entry *e,
	DB_TXN *txn,
	DB_LOCK *lock )
{
	EntryInfo *ei = BEI(e);
	int	rc;

	assert( e->e_private != NULL );

	/* Lock the entry's info */
	bdb_cache_entryinfo_lock( ei );

	/* Set this early, warn off any queriers */
	ei->bei_state |= CACHE_ENTRY_DELETED;

	bdb_cache_entryinfo_unlock( ei );

	/* Get write lock on the data */
	rc = bdb_cache_entry_db_relock( bdb, txn, ei, 1, 0, lock );
	if ( rc ) {
		bdb_cache_entryinfo_lock( ei );
		/* couldn't lock, undo and give up */
		ei->bei_state ^= CACHE_ENTRY_DELETED;
		bdb_cache_entryinfo_unlock( ei );
		return rc;
	}

	Debug( LDAP_DEBUG_TRACE, "====> bdb_cache_delete( %ld )\n",
		e->e_id, 0, 0 );

	/* set lru mutex */
	ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_lru_mutex );

	bdb_cache_entryinfo_lock( ei->bei_parent );
	bdb_cache_entryinfo_lock( ei );
	rc = bdb_cache_delete_internal( &bdb->bi_cache, e->e_private, 1 );
	bdb_cache_entryinfo_unlock( ei );

	/* free lru mutex */
	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_lru_mutex );

	return( rc );
}
void
bdb_cache_delete_cleanup(
	Cache *cache,
	EntryInfo *ei )
{
	/* Enter with ei locked */

	/* already freed? */
	if ( !ei->bei_parent ) return;

	if ( ei->bei_e ) {
		ei->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
		bdb_entry_return( ei->bei_bdb, ei->bei_e, ei->bei_zseq );
#else
		bdb_entry_return( ei->bei_e );
#endif
		ei->bei_e = NULL;
	}

	bdb_cache_entryinfo_unlock( ei );
	bdb_cache_entryinfo_free( cache, ei );
}
static int
bdb_cache_delete_internal(
	Cache	*cache,
	EntryInfo	*e,
	int	decr )
{
	int rc = 0;	/* return code */
	int decr_leaf = 0;

	/* already freed? */
	if ( !e->bei_parent ) {
		assert(0);
		return -1;
	}

	e->bei_state |= CACHE_ENTRY_PURGED;

#ifdef BDB_HIER
	e->bei_parent->bei_ckids--;
	if ( decr && e->bei_parent->bei_dkids ) e->bei_parent->bei_dkids--;
#endif
	/* dn tree */
	if ( avl_delete( &e->bei_parent->bei_kids, (caddr_t) e, bdb_rdn_cmp )
		== NULL )
	{
		rc = -1;
		assert(0);
	}
	if ( e->bei_parent->bei_kids )
		decr_leaf = 1;

	bdb_cache_entryinfo_unlock( e->bei_parent );

	/* id tree */
	ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
	if ( avl_delete( &cache->c_idtree, (caddr_t) e, bdb_id_cmp )) {
		cache->c_eiused--;
		if ( decr_leaf )
			cache->c_leaves--;
	} else {
		rc = -1;
		assert(0);
	}
	ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );

	if ( rc == 0 ) {
		/* lru */
		LRU_DEL( cache, e );

		if ( e->bei_e ) {
			ldap_pvt_thread_mutex_lock( &cache->c_count_mutex );
			cache->c_cursize--;
			ldap_pvt_thread_mutex_unlock( &cache->c_count_mutex );
		}
	}

	return( rc );
}
static void
bdb_entryinfo_release( void *data )
{
	EntryInfo *ei = (EntryInfo *)data;
	if ( ei->bei_kids ) {
		avl_free( ei->bei_kids, NULL );
	}
	if ( ei->bei_e ) {
		ei->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
		bdb_entry_return( ei->bei_bdb, ei->bei_e, ei->bei_zseq );
#else
		bdb_entry_return( ei->bei_e );
#endif
	}
	bdb_cache_entryinfo_destroy( ei );
}
void
bdb_cache_release_all( Cache *cache )
{
	/* set cache write lock */
	ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
	/* set lru mutex */
	ldap_pvt_thread_mutex_lock( &cache->c_lru_mutex );

	Debug( LDAP_DEBUG_TRACE, "====> bdb_cache_release_all\n", 0, 0, 0 );

	avl_free( cache->c_dntree.bei_kids, NULL );
	avl_free( cache->c_idtree, bdb_entryinfo_release );
	for (;cache->c_eifree;cache->c_eifree = cache->c_lruhead) {
		cache->c_lruhead = cache->c_eifree->bei_lrunext;
		bdb_cache_entryinfo_destroy(cache->c_eifree);
	}
	cache->c_cursize = 0;
	cache->c_eiused = 0;
	cache->c_leaves = 0;
	cache->c_idtree = NULL;
	cache->c_lruhead = NULL;
	cache->c_lrutail = NULL;
	cache->c_dntree.bei_kids = NULL;

	/* free lru mutex */
	ldap_pvt_thread_mutex_unlock( &cache->c_lru_mutex );
	/* free cache write lock */
	ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
}
#ifdef LDAP_DEBUG
static void
bdb_lru_print( Cache *cache )
{
	EntryInfo	*e;

	fprintf( stderr, "LRU circle head: %p\n", (void *) cache->c_lruhead );
	fprintf( stderr, "LRU circle (tail forward):\n" );
	for ( e = cache->c_lrutail; ; ) {
		fprintf( stderr, "\t%p, %p id %ld rdn \"%s\"\n",
			(void *) e, (void *) e->bei_e, e->bei_id, e->bei_nrdn.bv_val );
		e = e->bei_lrunext;
		if ( e == cache->c_lrutail )
			break;
	}
	fprintf( stderr, "LRU circle (tail backward):\n" );
	for ( e = cache->c_lrutail; ; ) {
		fprintf( stderr, "\t%p, %p id %ld rdn \"%s\"\n",
			(void *) e, (void *) e->bei_e, e->bei_id, e->bei_nrdn.bv_val );
		e = e->bei_lruprev;
		if ( e == cache->c_lrutail )
			break;
	}
}
#endif
static void
bdb_reader_free( void *key, void *data )
{
	/* DB_ENV *env = key; */
	DB_TXN *txn = data;

	if ( txn ) TXN_ABORT( txn );
}

/* free up any keys used by the main thread */
void
bdb_reader_flush( DB_ENV *env )
{
	void *data;
	void *ctx = ldap_pvt_thread_pool_context();

	if ( !ldap_pvt_thread_pool_getkey( ctx, env, &data, NULL ) ) {
		ldap_pvt_thread_pool_setkey( ctx, env, NULL, 0, NULL, NULL );
		bdb_reader_free( env, data );
	}
}
int
bdb_reader_get( Operation *op, DB_ENV *env, DB_TXN **txn )
{
	int i, rc;
	void *data;
	void *ctx;

	if ( !env || !txn ) return -1;

	/* If no op was provided, try to find the ctx anyway... */
	if ( op ) {
		ctx = op->o_threadctx;
	} else {
		ctx = ldap_pvt_thread_pool_context();
	}

	/* Shouldn't happen unless we're single-threaded */
	if ( !ctx ) {
		*txn = NULL;
		return 0;
	}

	if ( ldap_pvt_thread_pool_getkey( ctx, env, &data, NULL ) ) {
		for ( i=0, rc=1; rc != 0 && i<4; i++ ) {
			rc = TXN_BEGIN( env, NULL, txn, DB_READ_COMMITTED );
			if (rc) ldap_pvt_thread_yield();
		}
		if ( rc != 0 ) {
			return rc;
		}
		data = *txn;
		if ( ( rc = ldap_pvt_thread_pool_setkey( ctx, env,
			data, bdb_reader_free, NULL, NULL ) ) ) {
			TXN_ABORT( *txn );
			Debug( LDAP_DEBUG_ANY, "bdb_reader_get: err %s(%d)\n",
				db_strerror(rc), rc, 0 );
			return rc;
		}
	} else {
		*txn = data;
	}
	return 0;
}
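
/* Illustrative only: a reader obtains its per-thread, long-lived read
 * transaction once and reuses it; the thread-pool key machinery above
 * aborts it automatically when the thread exits. Hypothetical call site.
 */
#if 0
	DB_TXN *rtxn;
	rc = bdb_reader_get( op, bdb->bi_dbenv, &rtxn );
	if ( rc == 0 ) {
		/* ... perform reads under rtxn; never commit or abort it here ... */
	}
#endif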