/* cache.c - routines to maintain an in-core cache of entries */

/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
 *
 * Copyright 2000-2010 The OpenLDAP Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted only as authorized by the OpenLDAP
 * Public License.
 *
 * A copy of this license is available in the file LICENSE in the
 * top-level directory of the distribution or, alternatively, at
 * <http://www.OpenLDAP.org/license.html>.
 */

#include "portable.h"

#include <stdio.h>

#include <ac/string.h>
#include <ac/socket.h>

#include "back-bdb.h"

#define bdb_cache_lru_purge	hdb_cache_lru_purge

static void bdb_cache_lru_purge( struct bdb_info *bdb );

static int bdb_cache_delete_internal(Cache *cache, EntryInfo *e, int decr);
static void bdb_lru_print(Cache *cache);
static void bdb_idtree_print(Cache *cache);

/* For concurrency experiments only! */
#if 0
#define ldap_pvt_thread_rdwr_wlock(a)	0
#define ldap_pvt_thread_rdwr_wunlock(a)	0
#define ldap_pvt_thread_rdwr_rlock(a)	0
#define ldap_pvt_thread_rdwr_runlock(a)	0
#define ldap_pvt_thread_mutex_trylock(a) 0
#endif
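/* Note (our reading of the "concurrency experiments" remark): the block
 * above is a debugging aid, normally compiled out. Redefining the locking
 * primitives to the no-op constant 0 removes all cache locking, so a
 * benchmark against such a build (safe only single-threaded) approximates
 * how much of the runtime is spent on lock overhead.
 */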
static EntryInfo *
bdb_cache_entryinfo_new( Cache *cache )
{
    EntryInfo *ei = NULL;

    if ( cache->c_eifree ) {
        ldap_pvt_thread_mutex_lock( &cache->c_eifree_mutex );
        if ( cache->c_eifree ) {
            ei = cache->c_eifree;
            cache->c_eifree = ei->bei_lrunext;
            ei->bei_lrunext = NULL;
        }
        ldap_pvt_thread_mutex_unlock( &cache->c_eifree_mutex );
    }
    if ( !ei ) {
        ei = ch_calloc(1, sizeof(EntryInfo));
        ldap_pvt_thread_mutex_init( &ei->bei_kids_mutex );
    }

    ei->bei_state = CACHE_ENTRY_REFERENCED;

    return ei;
}

static void
bdb_cache_entryinfo_free( Cache *cache, EntryInfo *ei )
{
    free( ei->bei_nrdn.bv_val );
    BER_BVZERO( &ei->bei_nrdn );
    free( ei->bei_rdn.bv_val );
    BER_BVZERO( &ei->bei_rdn );
    ei->bei_parent = NULL;
    ei->bei_lruprev = NULL;

    ldap_pvt_thread_mutex_lock( &cache->c_eifree_mutex );
    ei->bei_lrunext = cache->c_eifree;
    cache->c_eifree = ei;
    ldap_pvt_thread_mutex_unlock( &cache->c_eifree_mutex );
}

#define LRU_DEL( c, e ) do { \
    if ( e == e->bei_lruprev ) { \
        (c)->c_lruhead = (c)->c_lrutail = NULL; \
    } else { \
        if ( e == (c)->c_lruhead ) (c)->c_lruhead = e->bei_lruprev; \
        if ( e == (c)->c_lrutail ) (c)->c_lrutail = e->bei_lruprev; \
        e->bei_lrunext->bei_lruprev = e->bei_lruprev; \
        e->bei_lruprev->bei_lrunext = e->bei_lrunext; \
    } \
    e->bei_lruprev = NULL; \
} while ( 0 )

/* Note - we now use a Second-Chance / Clock algorithm instead of
 * Least-Recently-Used. This tremendously improves concurrency
 * because we no longer need to manipulate the lists every time an
 * entry is touched. We only need to lock the lists when adding
 * or deleting an entry. It's now a circular doubly-linked list.
 * We always append to the tail, but the head traverses the circle
 * during a purge operation.
 */
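/* Illustrative sketch of the clock ("second chance") traversal described
 * above; the real loop lives in bdb_cache_lru_purge below and carries
 * considerably more state:
 *
 *    hand = cache->c_lruhead;
 *    while ( still_need_to_free ) {
 *        if ( hand->bei_state & CACHE_ENTRY_REFERENCED )
 *            hand->bei_state &= ~CACHE_ENTRY_REFERENCED;  // spare it once
 *        else
 *            evict( hand );                               // unreferenced: reclaim
 *        hand = hand->bei_lrunext;                        // advance the clock hand
 *    }
 */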
static void
bdb_cache_lru_link( struct bdb_info *bdb, EntryInfo *ei )
{
    /* Already linked, ignore */
    if ( ei->bei_lruprev )
        return;

    /* Insert into circular LRU list */
    ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_lru_mutex );

    ei->bei_lruprev = bdb->bi_cache.c_lrutail;
    if ( bdb->bi_cache.c_lrutail ) {
        ei->bei_lrunext = bdb->bi_cache.c_lrutail->bei_lrunext;
        bdb->bi_cache.c_lrutail->bei_lrunext = ei;
        if ( ei->bei_lrunext )
            ei->bei_lrunext->bei_lruprev = ei;
    } else {
        ei->bei_lrunext = ei->bei_lruprev = ei;
        bdb->bi_cache.c_lruhead = ei;
    }
    bdb->bi_cache.c_lrutail = ei;
    ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_lru_mutex );
}

/* #define NO_DB_LOCK 1 */
/* Note: The BerkeleyDB locks are much slower than regular
 * mutexes or rdwr locks. But the BDB implementation has the
 * advantage of using a fixed size lock table, instead of
 * allocating a lock object per entry in the DB. That's a
 * key benefit for scaling. It also frees us from worrying
 * about undetectable deadlocks between BDB activity and our
 * own cache activity. It's still worth exploring faster
 * alternatives though.
 */
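/* For reference (a sketch, not code from this file): the fixed-size lock
 * table mentioned above is sized on the DB_ENV before it is opened, via
 * the standard BerkeleyDB API; the values here are purely illustrative.
 *
 *    dbenv->set_lk_max_locks( dbenv, 10000 );
 *    dbenv->set_lk_max_objects( dbenv, 10000 );
 *    dbenv->set_lk_max_lockers( dbenv, 1000 );
 */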
/* Atomically release and reacquire a lock */
int
bdb_cache_entry_db_relock(
    struct bdb_info *bdb,
    DB_TXN *txn,
    EntryInfo *ei,
    int rw,
    int tryOnly,
    DB_LOCK *lock )
{
    int rc;
    DBT lockobj;
    DB_LOCKREQ list[2];

    if ( !lock ) return 0;

    lockobj.data = &ei->bei_id;
    lockobj.size = sizeof(ei->bei_id) + 1;

    list[0].op = DB_LOCK_PUT;
    list[0].lock = *lock;
    list[1].op = DB_LOCK_GET;
    list[1].lock = *lock;
    list[1].mode = rw ? DB_LOCK_WRITE : DB_LOCK_READ;
    list[1].obj = &lockobj;
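    /* Handing both requests to a single lock_vec call lets the BDB lock
     * manager process the PUT and the GET together, so the lock is swapped
     * without a window in which we hold nothing.
     */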
    rc = bdb->bi_dbenv->lock_vec(bdb->bi_dbenv, TXN_ID(txn), tryOnly ? DB_LOCK_NOWAIT : 0,
        list, 2, NULL );

    if (rc && !tryOnly) {
        Debug( LDAP_DEBUG_TRACE,
            "bdb_cache_entry_db_relock: entry %ld, rw %d, rc %d\n",
            ei->bei_id, rw, rc );
    } else {
        *lock = list[1].lock;
    }
    return rc;
}

static int
bdb_cache_entry_db_lock( struct bdb_info *bdb, DB_TXN *txn, EntryInfo *ei,
    int rw, int tryOnly, DB_LOCK *lock )
{
#ifdef NO_DB_LOCK
    return 0;
#else
    int rc;
    DBT lockobj;
    int db_rw;

    if ( !lock ) return 0;

    if (rw)
        db_rw = DB_LOCK_WRITE;
    else
        db_rw = DB_LOCK_READ;

    lockobj.data = &ei->bei_id;
    lockobj.size = sizeof(ei->bei_id) + 1;

    rc = LOCK_GET(bdb->bi_dbenv, TXN_ID(txn), tryOnly ? DB_LOCK_NOWAIT : 0,
        &lockobj, db_rw, lock);
    if (rc && !tryOnly) {
        Debug( LDAP_DEBUG_TRACE,
            "bdb_cache_entry_db_lock: entry %ld, rw %d, rc %d\n",
            ei->bei_id, rw, rc );
    }
    return rc;
#endif /* NO_DB_LOCK */
}

int
bdb_cache_entry_db_unlock ( struct bdb_info *bdb, DB_LOCK *lock )
{
    int rc;

    if ( !lock || lock->mode == DB_LOCK_NG ) return 0;

    rc = LOCK_PUT ( bdb->bi_dbenv, lock );
    return rc;
}

void
bdb_cache_return_entry_rw( struct bdb_info *bdb, Entry *e,
    int rw, DB_LOCK *lock )
{
    EntryInfo *ei;
    int free = 0;

    ei = e->e_private;
    if ( ei &&
        ( ei->bei_state & CACHE_ENTRY_NOT_CACHED ) &&
        ( bdb_cache_entryinfo_trylock( ei ) == 0 )) {
        if ( ei->bei_state & CACHE_ENTRY_NOT_CACHED ) {
            /* Releasing the entry can only be done when
             * we know that nobody else is using it, i.e. we
             * should have an entry_db writelock. But the
             * flag is only set by the thread that loads the
             * entry, and only if no other thread has found
             * it while it was working. All other threads
             * clear the flag, which means that we should be
             * the only thread using the entry if the flag
             * is set here.
             */
            ei->bei_state ^= CACHE_ENTRY_NOT_CACHED;
            free = 1;
        }
        bdb_cache_entryinfo_unlock( ei );
    }
    bdb_cache_entry_db_unlock( bdb, lock );
    if ( free ) {
        e->e_private = NULL;
        bdb_entry_return( e );
    }
}

static int
bdb_cache_entryinfo_destroy( EntryInfo *e )
{
    ldap_pvt_thread_mutex_destroy( &e->bei_kids_mutex );
    free( e->bei_nrdn.bv_val );
    free( e->bei_rdn.bv_val );
    free( e );
    return 0;
}

/* Do a length-ordered sort on normalized RDNs */
static int
bdb_rdn_cmp( const void *v_e1, const void *v_e2 )
{
    const EntryInfo *e1 = v_e1, *e2 = v_e2;
    int rc = e1->bei_nrdn.bv_len - e2->bei_nrdn.bv_len;
    if (rc == 0) {
        rc = strncmp( e1->bei_nrdn.bv_val, e2->bei_nrdn.bv_val,
            e1->bei_nrdn.bv_len );
    }
    return rc;
}
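/* Example of the ordering this gives (illustrative values): "ou=abc"
 * (length 6) sorts after "cn=zz" (length 5) purely because it is longer;
 * strncmp is only consulted to break ties between RDNs of equal length.
 * Comparing lengths first keeps AVL lookups cheap.
 */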
static int
bdb_id_cmp( const void *v_e1, const void *v_e2 )
{
    const EntryInfo *e1 = v_e1, *e2 = v_e2;
    return e1->bei_id - e2->bei_id;
}

static int
bdb_id_dup_err( void *v1, void *v2 )
{
    EntryInfo *e2 = v2;
    e2->bei_lrunext = v1;
    return -1;
}
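/* When avl_insert detects a duplicate ID it invokes this callback with
 * the existing node (v1) and the node we tried to insert (v2); parking
 * v1 in v2's unused bei_lrunext field is how the callers below recover
 * the already-cached EntryInfo, e.g.:
 *
 *    EntryInfo *eix = ei2->bei_lrunext;
 */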
/* Create an entryinfo in the cache. Caller must release the locks later.
 */
static int
bdb_entryinfo_add_internal(
    struct bdb_info *bdb,
    EntryInfo *ei,
    EntryInfo **res )
{
    EntryInfo *ei2 = NULL;

    *res = NULL;

    ei2 = bdb_cache_entryinfo_new( &bdb->bi_cache );

    bdb_cache_entryinfo_lock( ei->bei_parent );
    ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );

    ei2->bei_id = ei->bei_id;
    ei2->bei_parent = ei->bei_parent;
    ei2->bei_rdn = ei->bei_rdn;
#ifdef SLAP_ZONE_ALLOC
    ei2->bei_bdb = bdb;
#endif

    /* Add to cache ID tree */
    if (avl_insert( &bdb->bi_cache.c_idtree, ei2, bdb_id_cmp,
        bdb_id_dup_err )) {
        EntryInfo *eix = ei2->bei_lrunext;
        bdb_cache_entryinfo_free( &bdb->bi_cache, ei2 );
        ei2 = eix;
        /* It got freed above because its value was
         * assigned to ei2.
         */
        ei->bei_rdn.bv_val = NULL;
    } else {
        int rc;

        bdb->bi_cache.c_eiused++;
        ber_dupbv( &ei2->bei_nrdn, &ei->bei_nrdn );

        /* This is a new leaf node. But if parent had no kids, then it was
         * a leaf and we would be decrementing that. So, only increment if
         * the parent already has kids.
         */
        if ( ei->bei_parent->bei_kids || !ei->bei_parent->bei_id )
            bdb->bi_cache.c_leaves++;
        rc = avl_insert( &ei->bei_parent->bei_kids, ei2, bdb_rdn_cmp,
            avl_dup_error );
        if ( rc == 0 ) {
            /* it's possible for hdb_cache_find_parent to beat us to it */
            ei->bei_parent->bei_ckids++;
        }
    }

    *res = ei2;
    return 0;
}

/* Find the EntryInfo for the requested DN. If the DN cannot be found, return
 * the info for its closest ancestor. *res should be NULL to process a
 * complete DN starting from the tree root. Otherwise *res must be the
 * immediate parent of the requested DN, and only the RDN will be searched.
 * The EntryInfo is locked upon return and must be unlocked by the caller.
 */
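/* Worked example of the walk below (hypothetical DN): for
 * ndn = "cn=a,ou=b,dc=example" under suffix "dc=example", the loop starts
 * with the suffix RDN and steps inward one level at a time, looking up
 * "dc=example", then "ou=b", then "cn=a" in each node's bei_kids tree,
 * loading missing levels from the dn2id database via bdb_dn2id.
 */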
int
bdb_cache_find_ndn(
    Operation	*op,
    DB_TXN		*txn,
    struct berval	*ndn,
    EntryInfo	**res )
{
    struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
    EntryInfo ei, *eip, *ei2;
    int rc = 0;
    char *ptr;
    DB_LOCK lock;

    /* this function is always called with normalized DN */
    if ( *res ) {
        /* we're doing a onelevel search for an RDN */
        ei.bei_nrdn.bv_val = ndn->bv_val;
        ei.bei_nrdn.bv_len = dn_rdnlen( op->o_bd, ndn );
        eip = *res;
    } else {
        /* we're searching a full DN from the root */
        ptr = ndn->bv_val + ndn->bv_len - op->o_bd->be_nsuffix[0].bv_len;
        ei.bei_nrdn.bv_val = ptr;
        ei.bei_nrdn.bv_len = op->o_bd->be_nsuffix[0].bv_len;
        /* Skip to next rdn if suffix is empty */
        if ( ei.bei_nrdn.bv_len == 0 ) {
            for (ptr = ei.bei_nrdn.bv_val - 2; ptr > ndn->bv_val
                && !DN_SEPARATOR(*ptr); ptr--) /* empty */;
            if ( ptr >= ndn->bv_val ) {
                if (DN_SEPARATOR(*ptr)) ptr++;
                ei.bei_nrdn.bv_len = ei.bei_nrdn.bv_val - ptr;
                ei.bei_nrdn.bv_val = ptr;
            }
        }
        eip = &bdb->bi_cache.c_dntree;
    }

    for ( bdb_cache_entryinfo_lock( eip ); eip; ) {
        eip->bei_state |= CACHE_ENTRY_REFERENCED;
        ei.bei_parent = eip;
        ei2 = (EntryInfo *)avl_find( eip->bei_kids, &ei, bdb_rdn_cmp );
        if ( !ei2 ) {
            int len = ei.bei_nrdn.bv_len;

            if ( BER_BVISEMPTY( ndn )) {
                *res = eip;
                return 0;
            }

            ei.bei_nrdn.bv_len = ndn->bv_len -
                (ei.bei_nrdn.bv_val - ndn->bv_val);
            bdb_cache_entryinfo_unlock( eip );

            BDB_LOG_PRINTF( bdb->bi_dbenv, NULL, "slapd Reading %s",
                ei.bei_nrdn.bv_val );

            lock.mode = DB_LOCK_NG;
            rc = bdb_dn2id( op, &ei.bei_nrdn, &ei, txn, &lock );
            if ( rc ) {
                bdb_cache_entryinfo_lock( eip );
                bdb_cache_entry_db_unlock( bdb, &lock );
                *res = eip;
                return rc;
            }

            BDB_LOG_PRINTF( bdb->bi_dbenv, NULL, "slapd Read got %s(%d)",
                ei.bei_nrdn.bv_val, ei.bei_id );

            /* DN exists but needs to be added to cache */
            ei.bei_nrdn.bv_len = len;
            rc = bdb_entryinfo_add_internal( bdb, &ei, &ei2 );
            /* add_internal left eip and c_rwlock locked */
            ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
            bdb_cache_entry_db_unlock( bdb, &lock );
            if ( rc ) {
                *res = eip;
                return rc;
            }
        } else if ( ei2->bei_state & CACHE_ENTRY_DELETED ) {
            /* In the midst of deleting? Give it a chance to
             * complete.
             */
            bdb_cache_entryinfo_unlock( eip );
            ldap_pvt_thread_yield();
            bdb_cache_entryinfo_lock( eip );
        }
        bdb_cache_entryinfo_lock( ei2 );
        bdb_cache_entryinfo_unlock( eip );

        eip = ei2;

        /* Advance to next lower RDN */
        for (ptr = ei.bei_nrdn.bv_val - 2; ptr > ndn->bv_val
            && !DN_SEPARATOR(*ptr); ptr--) /* empty */;
        if ( ptr >= ndn->bv_val ) {
            if (DN_SEPARATOR(*ptr)) ptr++;
            ei.bei_nrdn.bv_len = ei.bei_nrdn.bv_val - ptr - 1;
            ei.bei_nrdn.bv_val = ptr;
        }
        if ( ptr < ndn->bv_val ) {
            *res = eip;
            break;
        }
    }

    return rc;
}

/* Walk up the tree from a child node, looking for an ID that's already
 * been linked into the cache.
 */
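/* In outline (our summary of the loop below): starting from the target
 * entry's ID, hdb_dn2id_parent is asked for each successive ancestor ID;
 * a fresh EntryInfo is created per level and marked NOT_LINKED, and the
 * chain is spliced into place as soon as an ancestor already present in
 * the ID tree is found, at which point the NOT_LINKED marks are cleared.
 */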
int
hdb_cache_find_parent(
    Operation *op,
    DB_TXN *txn,
    ID id,
    EntryInfo **res )
{
    struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
    EntryInfo ei, eip, *ei2 = NULL, *ein = NULL, *eir = NULL;
    int rc;

    ei.bei_id = id;
    ei.bei_kids = NULL;

    for (;;) {
        rc = hdb_dn2id_parent( op, txn, &ei, &eip.bei_id );
        if ( rc ) break;

        /* Save the previous node, if any */
        ei2 = ein;

        /* Create a new node for the current ID */
        ein = bdb_cache_entryinfo_new( &bdb->bi_cache );
        ein->bei_id = ei.bei_id;
        ein->bei_kids = ei.bei_kids;
        ein->bei_nrdn = ei.bei_nrdn;
        ein->bei_rdn = ei.bei_rdn;
        ein->bei_ckids = ei.bei_ckids;
#ifdef SLAP_ZONE_ALLOC
        ein->bei_bdb = bdb;
#endif
        /* This node is not fully connected yet */
        ein->bei_state |= CACHE_ENTRY_NOT_LINKED;

        /* If this is the first time, save this node
         * to be returned later.
         */
        if ( eir == NULL )
            eir = ein;

again:
        /* Insert this node into the ID tree */
        ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
        if ( avl_insert( &bdb->bi_cache.c_idtree, (caddr_t)ein,
            bdb_id_cmp, bdb_id_dup_err ) ) {
            EntryInfo *eix = ein->bei_lrunext;

            if ( bdb_cache_entryinfo_trylock( eix )) {
                ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
                ldap_pvt_thread_yield();
                goto again;
            }
            ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );

            /* Someone else created this node just before us.
             * Free our new copy and use the existing one.
             */
            bdb_cache_entryinfo_free( &bdb->bi_cache, ein );

            /* if it was the node we were looking for, just return it */
            if ( eir == ein ) {
                eir = eix;
                *res = eir;
                break;
            }

            /* otherwise, link up what we have and return */
            ein = ei2;
            ei2 = eix;
            goto gotparent;
        }

        /* If there was a previous node, link it to this one */
        if ( ei2 ) ei2->bei_parent = ein;

        /* Look for this node's parent */
par2:
        if ( eip.bei_id ) {
            ei2 = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
                    (caddr_t) &eip, bdb_id_cmp );
        } else {
            ei2 = &bdb->bi_cache.c_dntree;
        }
        if ( ei2 && bdb_cache_entryinfo_trylock( ei2 )) {
            ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
            ldap_pvt_thread_yield();
            ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
            goto par2;
        }
        bdb->bi_cache.c_eiused++;
        if ( ei2 && ( ei2->bei_kids || !ei2->bei_id ))
            bdb->bi_cache.c_leaves++;
        ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );

gotparent:
        /* Got the parent, link in and we're done. */
        if ( ei2 ) {
            bdb_cache_entryinfo_lock( eir );
            ein->bei_parent = ei2;

            if ( avl_insert( &ei2->bei_kids, (caddr_t)ein, bdb_rdn_cmp,
                avl_dup_error) == 0 )
                ei2->bei_ckids++;

            /* Reset all the state info */
            for (ein = eir; ein != ei2; ein=ein->bei_parent)
                ein->bei_state &= ~CACHE_ENTRY_NOT_LINKED;

            bdb_cache_entryinfo_unlock( ei2 );

            *res = eir;
            break;
        }
        ei.bei_kids = NULL;
        ei.bei_id = eip.bei_id;
        avl_insert( &ei.bei_kids, (caddr_t)ein, bdb_rdn_cmp,
            avl_dup_error );
    }
    return rc;
}

/* Used by hdb_dn2idl when loading the EntryInfo for all the children
 * of a given node
 */
int hdb_cache_load(
    struct bdb_info *bdb,
    EntryInfo *ei,
    EntryInfo **res )
{
    EntryInfo *ei2;
    int rc;

    /* See if we already have this one */
    bdb_cache_entryinfo_lock( ei->bei_parent );
    ei2 = (EntryInfo *)avl_find( ei->bei_parent->bei_kids, ei, bdb_rdn_cmp );
    bdb_cache_entryinfo_unlock( ei->bei_parent );

    if ( !ei2 ) {
        /* Not found, add it */
        struct berval bv;

        /* bei_rdn was not malloc'd before, do it now */
        ber_dupbv( &bv, &ei->bei_rdn );
        ei->bei_rdn = bv;

        rc = bdb_entryinfo_add_internal( bdb, ei, res );
        bdb_cache_entryinfo_unlock( ei->bei_parent );
        ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
    } else {
        /* Found, return it */
        ei->bei_rdn.bv_val = NULL;
        *res = ei2;
        rc = 0;
    }
    return rc;
}

/* This is best-effort only. If all entries in the cache are
 * busy, they will all be kept. This is unlikely to happen
 * unless the cache is very much smaller than the working set.
 */
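/* Worked example of the thresholds computed below (numbers illustrative
 * only): with c_maxsize = 1000, c_cursize = 1010 and c_minfree = 7,
 * efree = (1010 - 1000) + 7 = 17, i.e. a purge reclaims a little headroom
 * beyond the limit rather than running again on every subsequent insert.
 */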
static void
bdb_cache_lru_purge( struct bdb_info *bdb )
{
    DB_LOCK lock, *lockp;
    EntryInfo *elru, *elnext = NULL;
    int islocked;
    ID ecount = 0, eicount = 0;
    ID count, efree, eifree = 0;

    /* Wait for the mutex; we're the only one trying to purge. */
    ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_lru_mutex );

    if ( bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize ) {
        efree = bdb->bi_cache.c_cursize - bdb->bi_cache.c_maxsize;
        efree += bdb->bi_cache.c_minfree;
    } else {
        efree = 0;
    }

    /* maximum number of EntryInfo leaves to cache. In slapcat
     * we always free all leaf nodes.
     */
    if ( slapMode & SLAP_TOOL_READONLY ) {
        eifree = bdb->bi_cache.c_leaves;
    } else if ( bdb->bi_cache.c_eimax &&
        bdb->bi_cache.c_leaves > bdb->bi_cache.c_eimax ) {
        eifree = bdb->bi_cache.c_minfree * 10;
        if ( eifree >= bdb->bi_cache.c_leaves )
            eifree = bdb->bi_cache.c_leaves / 2;
    }

    if ( !efree && !eifree ) {
        ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_lru_mutex );
        bdb->bi_cache.c_purging = 0;
        return;
    }

    if ( bdb->bi_cache.c_txn ) {
        lockp = &lock;
    } else {
        lockp = NULL;
    }

    count = 0;

    /* Look for an unused entry to remove */
    for ( elru = bdb->bi_cache.c_lruhead; elru; elru = elnext ) {
        elnext = elru->bei_lrunext;

        if ( bdb_cache_entryinfo_trylock( elru ))
            goto bottom;

        /* This flag implements the clock replacement behavior */
        if ( elru->bei_state & ( CACHE_ENTRY_REFERENCED )) {
            elru->bei_state &= ~CACHE_ENTRY_REFERENCED;
            bdb_cache_entryinfo_unlock( elru );
            goto bottom;
        }

        /* If this node is in the process of linking into the cache,
         * or this node is being deleted, skip it.
         */
        if (( elru->bei_state & ( CACHE_ENTRY_NOT_LINKED |
            CACHE_ENTRY_DELETED | CACHE_ENTRY_LOADING |
            CACHE_ENTRY_ONELEVEL )) ||
            elru->bei_finders > 0 ) {
            bdb_cache_entryinfo_unlock( elru );
            goto bottom;
        }

        if ( bdb_cache_entryinfo_trylock( elru->bei_parent )) {
            bdb_cache_entryinfo_unlock( elru );
            goto bottom;
        }

        /* entryinfo is locked */
        islocked = 1;

        /* If we can successfully writelock it, then
         * the object is idle.
         */
        if ( bdb_cache_entry_db_lock( bdb,
            bdb->bi_cache.c_txn, elru, 1, 1, lockp ) == 0 ) {

            /* Free entry for this node if it's present */
            if ( elru->bei_e ) {
                ecount++;

                /* the cache may have gone over the limit while we
                 * weren't looking, so double check.
                 */
                if ( !efree && ecount > bdb->bi_cache.c_maxsize )
                    efree = bdb->bi_cache.c_minfree;

                if ( count < efree ) {
                    elru->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
                    bdb_entry_return( bdb, elru->bei_e, elru->bei_zseq );
#else
                    bdb_entry_return( elru->bei_e );
#endif
                    elru->bei_e = NULL;
                    count++;
                } else {
                    /* Keep this node cached, skip to next */
                    bdb_cache_entry_db_unlock( bdb, lockp );
                    goto next;
                }
            }
            bdb_cache_entry_db_unlock( bdb, lockp );

            /*
             * If it is a leaf node, and we're over the limit, free it.
             */
            if ( elru->bei_kids ) {
                /* Drop from list, we ignore it... */
                LRU_DEL( &bdb->bi_cache, elru );
            } else if ( eicount < eifree ) {
                /* Too many leaf nodes, free this one */
                bdb_cache_delete_internal( &bdb->bi_cache, elru, 0 );
                bdb_cache_delete_cleanup( &bdb->bi_cache, elru );
                islocked = 0;
                eicount++;
            } /* Leave on list until we need to free it */
        }

next:
        if ( islocked ) {
            bdb_cache_entryinfo_unlock( elru );
            bdb_cache_entryinfo_unlock( elru->bei_parent );
        }

        if ( count >= efree && eicount >= eifree )
            break;
bottom:
        if ( elnext == bdb->bi_cache.c_lruhead )
            break;
    }

    if ( count || ecount > bdb->bi_cache.c_cursize ) {
        ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_count_mutex );
        /* HACK: we seem to be losing track, fix up now */
        if ( ecount > bdb->bi_cache.c_cursize )
            bdb->bi_cache.c_cursize = ecount;
        bdb->bi_cache.c_cursize -= count;
        ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_count_mutex );
    }
    bdb->bi_cache.c_lruhead = elnext;
    ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_lru_mutex );
    bdb->bi_cache.c_purging = 0;
}

/*
 * cache_find_id - find an entry in the cache, given id.
 * The entry is locked for Read upon return. Call with flag ID_LOCKED if
 * the supplied *eip was already locked.
 */
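/* Flag semantics as we read the body below: ID_LOCKED means the caller
 * already holds the lock on *eip; ID_NOENTRY asks only for the EntryInfo,
 * skipping the entry load; ID_NOCACHE marks a freshly loaded entry
 * CACHE_ENTRY_NOT_CACHED so it is released again as soon as its loader is
 * done with it (useful for one-shot traversals).
 */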
int
bdb_cache_find_id(
    Operation *op,
    DB_TXN *tid,
    ID id,
    EntryInfo **eip,
    int flag,
    DB_LOCK *lock )
{
    struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
    Entry *ep = NULL;
    int rc = 0, load = 0;
    EntryInfo ei = { 0 };

    ei.bei_id = id;

#ifdef SLAP_ZONE_ALLOC
    slap_zh_rlock(bdb->bi_cache.c_zctx);
#endif

    /* If we weren't given any info, see if we have it already cached */
    if ( !*eip ) {
again:  ldap_pvt_thread_rdwr_rlock( &bdb->bi_cache.c_rwlock );
        *eip = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
            (caddr_t) &ei, bdb_id_cmp );
        if ( *eip ) {
            /* If the lock attempt fails, the info is in use */
            if ( bdb_cache_entryinfo_trylock( *eip )) {
                int del = (*eip)->bei_state & CACHE_ENTRY_DELETED;
                ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
                /* If this node is being deleted, treat
                 * as if the delete has already finished
                 */
                if ( del ) {
                    return DB_NOTFOUND;
                }
                /* otherwise, wait for the info to free up */
                ldap_pvt_thread_yield();
                goto again;
            }
            /* If this info isn't hooked up to its parent yet,
             * unlock and wait for it to be fully initialized
             */
            if ( (*eip)->bei_state & CACHE_ENTRY_NOT_LINKED ) {
                bdb_cache_entryinfo_unlock( *eip );
                ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
                ldap_pvt_thread_yield();
                goto again;
            }
            flag |= ID_LOCKED;
        }
        ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
    }

    /* See if the ID exists in the database; add it to the cache if so */
    if ( !*eip ) {
#ifndef BDB_HIER
        rc = bdb_id2entry( op->o_bd, tid, id, &ep );
        if ( rc == 0 ) {
            rc = bdb_cache_find_ndn( op, tid,
                &ep->e_nname, eip );
            if ( *eip ) flag |= ID_LOCKED;
            if ( rc ) {
                ep->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
                bdb_entry_return( bdb, ep, (*eip)->bei_zseq );
#else
                bdb_entry_return( ep );
#endif
                ep = NULL;
            }
        }
#else
        rc = hdb_cache_find_parent(op, tid, id, eip );
        if ( rc == 0 ) flag |= ID_LOCKED;
#endif
    }

    /* Ok, we found the info, do we have the entry? */
    if ( rc == 0 ) {
        if ( !( flag & ID_LOCKED )) {
            bdb_cache_entryinfo_lock( *eip );
            flag |= ID_LOCKED;
        }

        if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
            rc = DB_NOTFOUND;
        } else {
            (*eip)->bei_finders++;
            (*eip)->bei_state |= CACHE_ENTRY_REFERENCED;
            if ( flag & ID_NOENTRY ) {
                bdb_cache_entryinfo_unlock( *eip );
                return 0;
            }
            /* Make sure only one thread tries to load the entry */
load1:
#ifdef SLAP_ZONE_ALLOC
            if ((*eip)->bei_e && !slap_zn_validate(
                    bdb->bi_cache.c_zctx, (*eip)->bei_e, (*eip)->bei_zseq)) {
                (*eip)->bei_e = NULL;
                (*eip)->bei_zseq = 0;
            }
#endif
            if ( !(*eip)->bei_e && !((*eip)->bei_state & CACHE_ENTRY_LOADING)) {
                load = 1;
                (*eip)->bei_state |= CACHE_ENTRY_LOADING;
            } else if ( (*eip)->bei_state & CACHE_ENTRY_NOT_CACHED ) {
                /* Clear the uncached state if we are not
                 * loading it, i.e. it is already cached or
                 * another thread is currently loading it.
                 */
                (*eip)->bei_state &= ~CACHE_ENTRY_NOT_CACHED;
                ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_count_mutex );
                ++bdb->bi_cache.c_cursize;
                ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_count_mutex );
            }
            if ( flag & ID_LOCKED ) {
                bdb_cache_entryinfo_unlock( *eip );
                flag ^= ID_LOCKED;
            }
            rc = bdb_cache_entry_db_lock( bdb, tid, *eip, load, 0, lock );
            if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
                rc = DB_NOTFOUND;
                bdb_cache_entry_db_unlock( bdb, lock );
                bdb_cache_entryinfo_lock( *eip );
                (*eip)->bei_finders--;
                bdb_cache_entryinfo_unlock( *eip );
            } else if ( rc == 0 ) {
                if ( load ) {
                    if ( !ep ) {
                        rc = bdb_id2entry( op->o_bd, tid, id, &ep );
                    }
                    if ( rc == 0 ) {
                        ep->e_private = *eip;
#ifdef BDB_HIER
                        while ( (*eip)->bei_state & CACHE_ENTRY_NOT_LINKED )
                            ldap_pvt_thread_yield();
                        bdb_fix_dn( ep, 0 );
#endif
                        (*eip)->bei_e = ep;
#ifdef SLAP_ZONE_ALLOC
                        (*eip)->bei_zseq = *((ber_len_t *)ep - 2);
#endif
                        ep = NULL;
                        bdb_cache_lru_link( bdb, *eip );
                        if (( flag & ID_NOCACHE ) &&
                            ( bdb_cache_entryinfo_trylock( *eip ) == 0 )) {
                            /* Set the cached state only if no other thread
                             * found the info while we were loading the entry.
                             */
                            if ( (*eip)->bei_finders == 1 )
                                (*eip)->bei_state |= CACHE_ENTRY_NOT_CACHED;
                            bdb_cache_entryinfo_unlock( *eip );
                        }
                    }
                    if ( rc == 0 ) {
                        /* If we succeeded, downgrade back to a readlock. */
                        rc = bdb_cache_entry_db_relock( bdb, tid,
                            *eip, 0, 0, lock );
                    } else {
                        /* Otherwise, release the lock. */
                        bdb_cache_entry_db_unlock( bdb, lock );
                    }
                } else if ( !(*eip)->bei_e ) {
                    /* Some other thread is trying to load the entry,
                     * wait for it to finish.
                     */
                    bdb_cache_entry_db_unlock( bdb, lock );
                    bdb_cache_entryinfo_lock( *eip );
                    flag |= ID_LOCKED;
                    goto load1;
#ifdef BDB_HIER
                } else {
                    /* Check for subtree renames
                     */
                    rc = bdb_fix_dn( (*eip)->bei_e, 1 );
                    if ( rc ) {
                        bdb_cache_entry_db_relock( bdb,
                            tid, *eip, 1, 0, lock );
                        /* check again in case other modifier did it already */
                        if ( bdb_fix_dn( (*eip)->bei_e, 1 ) )
                            rc = bdb_fix_dn( (*eip)->bei_e, 2 );
                        bdb_cache_entry_db_relock( bdb,
                            tid, *eip, 0, 0, lock );
                    }
#endif
                }
                bdb_cache_entryinfo_lock( *eip );
                (*eip)->bei_finders--;
                if ( load )
                    (*eip)->bei_state ^= CACHE_ENTRY_LOADING;
                bdb_cache_entryinfo_unlock( *eip );
            }
        }
    }
    if ( rc ) {
        if ( flag & ID_LOCKED ) {
            bdb_cache_entryinfo_unlock( *eip );
        }
        if ( ep ) {
            ep->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
            bdb_entry_return( bdb, ep, (*eip)->bei_zseq );
#else
            bdb_entry_return( ep );
#endif
        }
    } else {
        int purge = 0;

        if (( load && !( flag & ID_NOCACHE )) || bdb->bi_cache.c_eimax ) {
            ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_count_mutex );
            if ( load && !( flag & ID_NOCACHE )) {
                bdb->bi_cache.c_cursize++;
                if ( !bdb->bi_cache.c_purging && bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize ) {
                    purge = 1;
                    bdb->bi_cache.c_purging = 1;
                }
            } else if ( !bdb->bi_cache.c_purging && bdb->bi_cache.c_eimax && bdb->bi_cache.c_leaves > bdb->bi_cache.c_eimax ) {
                purge = 1;
                bdb->bi_cache.c_purging = 1;
            }
            ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_count_mutex );
        }
        if ( purge )
            bdb_cache_lru_purge( bdb );
    }

#ifdef SLAP_ZONE_ALLOC
    if (rc == 0 && (*eip)->bei_e) {
        slap_zn_rlock(bdb->bi_cache.c_zctx, (*eip)->bei_e);
    }
    slap_zh_runlock(bdb->bi_cache.c_zctx);
#endif
    return rc;
}

int
bdb_cache_children(
    Operation *op,
    DB_TXN *txn,
    Entry *e )
{
    int rc;

    if ( BEI(e)->bei_kids ) {
        return 0;
    }
    if ( BEI(e)->bei_state & CACHE_ENTRY_NO_KIDS ) {
        return DB_NOTFOUND;
    }
    rc = bdb_dn2id_children( op, txn, e );
    if ( rc == DB_NOTFOUND ) {
        BEI(e)->bei_state |= CACHE_ENTRY_NO_KIDS | CACHE_ENTRY_NO_GRANDKIDS;
    }
    return rc;
}

/* Update the cache after a successful database Add. */
int
bdb_cache_add(
    struct bdb_info *bdb,
    EntryInfo *eip,
    Entry *e,
    struct berval *nrdn,
    DB_TXN *txn,
    DB_LOCK *lock )
{
    EntryInfo *new, ei = { 0 };
    int rc, purge = 0;
#ifdef BDB_HIER
    struct berval rdn = e->e_name;
#endif

    ei.bei_id = e->e_id;
    ei.bei_parent = eip;
    ei.bei_nrdn = *nrdn;

    /* Lock this entry so that bdb_add can run to completion.
     * It can only fail if BDB has run out of lock resources.
     */
    rc = bdb_cache_entry_db_lock( bdb, txn, &ei, 0, 0, lock );
    if ( rc ) {
        bdb_cache_entryinfo_unlock( eip );
        return rc;
    }

#ifdef BDB_HIER
    if ( nrdn->bv_len != e->e_nname.bv_len ) {
        char *ptr = ber_bvchr( &rdn, ',' );
        assert( ptr != NULL );
        rdn.bv_len = ptr - rdn.bv_val;
    }
    ber_dupbv( &ei.bei_rdn, &rdn );
    if ( eip->bei_dkids ) eip->bei_dkids++;
#endif

    if (eip->bei_parent) {
        bdb_cache_entryinfo_lock( eip->bei_parent );
        eip->bei_parent->bei_state &= ~CACHE_ENTRY_NO_GRANDKIDS;
        bdb_cache_entryinfo_unlock( eip->bei_parent );
    }

    rc = bdb_entryinfo_add_internal( bdb, &ei, &new );
    /* bdb_csn_commit can cause this when adding the database root entry */
    if ( new->bei_e ) {
        new->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
        bdb_entry_return( bdb, new->bei_e, new->bei_zseq );
#else
        bdb_entry_return( new->bei_e );
#endif
    }
    new->bei_e = e;
    e->e_private = new;
    new->bei_state |= CACHE_ENTRY_NO_KIDS | CACHE_ENTRY_NO_GRANDKIDS;
    eip->bei_state &= ~CACHE_ENTRY_NO_KIDS;
    bdb_cache_entryinfo_unlock( eip );

    ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
    ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_count_mutex );
    ++bdb->bi_cache.c_cursize;
    if ( bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize &&
        !bdb->bi_cache.c_purging ) {
        purge = 1;
        bdb->bi_cache.c_purging = 1;
    }
    ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_count_mutex );

    bdb_cache_lru_link( bdb, new );

    if ( purge )
        bdb_cache_lru_purge( bdb );

    return rc;
}

int
bdb_cache_modify(
    struct bdb_info *bdb,
    Entry *e,
    Attribute *newAttrs,
    DB_TXN *txn,
    DB_LOCK *lock )
{
    EntryInfo *ei = BEI(e);
    int rc;

    /* Get write lock on data */
    rc = bdb_cache_entry_db_relock( bdb, txn, ei, 1, 0, lock );

    /* If we've done repeated mods on a cached entry, then e_attrs
     * is no longer contiguous with the entry, and must be freed.
     */
    if ( rc == 0 ) {
        if ( (void *)e->e_attrs != (void *)(e+1) ) {
            attrs_free( e->e_attrs );
        }
        e->e_attrs = newAttrs;
    }

    return rc;
}
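/* Background on the contiguity test in bdb_cache_modify above (our reading
 * of the entry decoder): an entry fetched from the database is decoded into
 * one contiguous allocation with its Attribute array placed directly behind
 * the Entry struct, so e->e_attrs == (void *)(e+1) holds until a
 * modification substitutes a separately allocated attribute list.
 */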
/*
 * Change the rdn in the entryinfo. Also move to a new parent if needed.
 */
int
bdb_cache_modrdn(
    struct bdb_info *bdb,
    Entry *e,
    struct berval *nrdn,
    Entry *new,
    EntryInfo *ein,
    DB_TXN *txn,
    DB_LOCK *lock )
{
    EntryInfo *ei = BEI(e), *pei;
    int rc;
#ifdef BDB_HIER
    struct berval rdn;
#endif

    /* Get write lock on data */
    rc = bdb_cache_entry_db_relock( bdb, txn, ei, 1, 0, lock );
    if ( rc ) return rc;

    /* If we've done repeated mods on a cached entry, then e_attrs
     * is no longer contiguous with the entry, and must be freed.
     */
    if ( (void *)e->e_attrs != (void *)(e+1) ) {
        attrs_free( e->e_attrs );
    }
    e->e_attrs = new->e_attrs;
    if( e->e_nname.bv_val < e->e_bv.bv_val ||
        e->e_nname.bv_val > e->e_bv.bv_val + e->e_bv.bv_len )
    {
        ch_free(e->e_name.bv_val);
        ch_free(e->e_nname.bv_val);
    }
    e->e_name = new->e_name;
    e->e_nname = new->e_nname;

    /* Lock the parent's kids AVL tree */
    pei = ei->bei_parent;
    bdb_cache_entryinfo_lock( pei );
    avl_delete( &pei->bei_kids, (caddr_t) ei, bdb_rdn_cmp );
    free( ei->bei_nrdn.bv_val );
    ber_dupbv( &ei->bei_nrdn, nrdn );

#ifdef BDB_HIER
    free( ei->bei_rdn.bv_val );

    rdn = e->e_name;
    if ( nrdn->bv_len != e->e_nname.bv_len ) {
        char *ptr = ber_bvchr(&rdn, ',');
        assert( ptr != NULL );
        rdn.bv_len = ptr - rdn.bv_val;
    }
    ber_dupbv( &ei->bei_rdn, &rdn );
#endif

    /* If new parent, decrement kid counts */
    if ( ein ) {
        pei->bei_ckids--;
        if ( pei->bei_dkids ) {
            pei->bei_dkids--;
            if ( pei->bei_dkids < 2 )
                pei->bei_state |= CACHE_ENTRY_NO_KIDS | CACHE_ENTRY_NO_GRANDKIDS;
        }
    }

    if ( !ein ) {
        ein = ei->bei_parent;
    } else {
        ei->bei_parent = ein;
        bdb_cache_entryinfo_unlock( pei );
        bdb_cache_entryinfo_lock( ein );

        /* new parent now has kids */
        if ( ein->bei_state & CACHE_ENTRY_NO_KIDS )
            ein->bei_state ^= CACHE_ENTRY_NO_KIDS;
        /* grandparent has grandkids */
        if ( ein->bei_parent )
            ein->bei_parent->bei_state &= ~CACHE_ENTRY_NO_GRANDKIDS;

        /* parent might now have grandkids */
        if ( ein->bei_state & CACHE_ENTRY_NO_GRANDKIDS &&
            !(ei->bei_state & CACHE_ENTRY_NO_KIDS))
            ein->bei_state ^= CACHE_ENTRY_NO_GRANDKIDS;

        if ( ein->bei_dkids ) ein->bei_dkids++;
    }

#ifdef BDB_HIER
    /* Record the generation number of this change */
    ldap_pvt_thread_mutex_lock( &bdb->bi_modrdns_mutex );
    bdb->bi_modrdns++;
    ei->bei_modrdns = bdb->bi_modrdns;
    ldap_pvt_thread_mutex_unlock( &bdb->bi_modrdns_mutex );
#endif

    avl_insert( &ein->bei_kids, ei, bdb_rdn_cmp, avl_dup_error );
    bdb_cache_entryinfo_unlock( ein );
    return rc;
}

/*
 * cache_delete - delete the entry e from the cache.
 *
 * returns:	0	e was deleted ok
 *		1	e was not in the cache
 *		-1	something bad happened
 */
int
bdb_cache_delete(
    struct bdb_info *bdb,
    Entry *e,
    DB_TXN *txn,
    DB_LOCK *lock )
{
    EntryInfo *ei = BEI(e);
    int rc, busy = 0;

    assert( e->e_private != NULL );

    /* Lock the entry's info */
    bdb_cache_entryinfo_lock( ei );

    /* Set this early, warn off any queriers */
    ei->bei_state |= CACHE_ENTRY_DELETED;

    if (( ei->bei_state & ( CACHE_ENTRY_NOT_LINKED |
        CACHE_ENTRY_LOADING | CACHE_ENTRY_ONELEVEL )) ||
        ei->bei_finders > 0 )
        busy = 1;

    bdb_cache_entryinfo_unlock( ei );

    while ( busy ) {
        ldap_pvt_thread_yield();
        busy = 0;
        bdb_cache_entryinfo_lock( ei );
        if (( ei->bei_state & ( CACHE_ENTRY_NOT_LINKED |
            CACHE_ENTRY_LOADING | CACHE_ENTRY_ONELEVEL )) ||
            ei->bei_finders > 0 )
            busy = 1;
        bdb_cache_entryinfo_unlock( ei );
    }

    /* Get write lock on the data */
    rc = bdb_cache_entry_db_relock( bdb, txn, ei, 1, 0, lock );
    if ( rc ) {
        bdb_cache_entryinfo_lock( ei );
        /* couldn't lock, undo and give up */
        ei->bei_state ^= CACHE_ENTRY_DELETED;
        bdb_cache_entryinfo_unlock( ei );
        return rc;
    }

    Debug( LDAP_DEBUG_TRACE, "====> bdb_cache_delete( %ld )\n",
        e->e_id, 0, 0 );

    /* set lru mutex */
    ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_lru_mutex );

    bdb_cache_entryinfo_lock( ei->bei_parent );
    bdb_cache_entryinfo_lock( ei );
    rc = bdb_cache_delete_internal( &bdb->bi_cache, e->e_private, 1 );
    bdb_cache_entryinfo_unlock( ei );

    /* free lru mutex */
    ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_lru_mutex );

    return( rc );
}

void
bdb_cache_delete_cleanup(
    Cache *cache,
    EntryInfo *ei )
{
    /* Enter with ei locked */

    /* already freed? */
    if ( !ei->bei_parent ) return;

    if ( ei->bei_e ) {
        ei->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
        bdb_entry_return( ei->bei_bdb, ei->bei_e, ei->bei_zseq );
#else
        bdb_entry_return( ei->bei_e );
#endif
        ei->bei_e = NULL;
    }

    bdb_cache_entryinfo_unlock( ei );
    bdb_cache_entryinfo_free( cache, ei );
}

static int
bdb_cache_delete_internal(
    Cache *cache,
    EntryInfo *e,
    int decr )
{
    int rc = 0;	/* return code */
    int decr_leaf = 0;

    /* already freed? */
    if ( !e->bei_parent ) {
        assert(0);
        return -1;
    }

    e->bei_parent->bei_ckids--;
    if ( decr && e->bei_parent->bei_dkids ) e->bei_parent->bei_dkids--;

    /* dn tree */
    if ( avl_delete( &e->bei_parent->bei_kids, (caddr_t) e, bdb_rdn_cmp )
        == NULL )
    {
        rc = -1;
    }
    if ( e->bei_parent->bei_kids )
        decr_leaf = 1;

    ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
    /* id tree */
    if ( avl_delete( &cache->c_idtree, (caddr_t) e, bdb_id_cmp )) {
        cache->c_eiused--;
        if ( decr_leaf )
            cache->c_leaves--;
    } else {
        rc = -1;
    }
    ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
    bdb_cache_entryinfo_unlock( e->bei_parent );

    if ( rc == 0 ) {
        /* lru */
        LRU_DEL( cache, e );

        if ( e->bei_e ) {
            ldap_pvt_thread_mutex_lock( &cache->c_count_mutex );
            cache->c_cursize--;
            ldap_pvt_thread_mutex_unlock( &cache->c_count_mutex );
        }
    }

    return( rc );
}

static void
bdb_entryinfo_release( void *data )
{
    EntryInfo *ei = (EntryInfo *)data;
    if ( ei->bei_kids ) {
        avl_free( ei->bei_kids, NULL );
    }
    if ( ei->bei_e ) {
        ei->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
        bdb_entry_return( ei->bei_bdb, ei->bei_e, ei->bei_zseq );
#else
        bdb_entry_return( ei->bei_e );
#endif
    }
    bdb_cache_entryinfo_destroy( ei );
}

void
bdb_cache_release_all( Cache *cache )
{
    /* set cache write lock */
    ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
    /* set lru mutex */
    ldap_pvt_thread_mutex_lock( &cache->c_lru_mutex );

    Debug( LDAP_DEBUG_TRACE, "====> bdb_cache_release_all\n", 0, 0, 0 );

    avl_free( cache->c_dntree.bei_kids, NULL );
    avl_free( cache->c_idtree, bdb_entryinfo_release );
    for (;cache->c_eifree;cache->c_eifree = cache->c_lruhead) {
        cache->c_lruhead = cache->c_eifree->bei_lrunext;
        bdb_cache_entryinfo_destroy(cache->c_eifree);
    }
    cache->c_cursize = 0;
    cache->c_eiused = 0;
    cache->c_leaves = 0;
    cache->c_idtree = NULL;
    cache->c_lruhead = NULL;
    cache->c_lrutail = NULL;
    cache->c_dntree.bei_kids = NULL;

    /* free lru mutex */
    ldap_pvt_thread_mutex_unlock( &cache->c_lru_mutex );
    /* free cache write lock */
    ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
}

static void
bdb_lru_print( Cache *cache )
{
    EntryInfo *e;

    fprintf( stderr, "LRU circle head: %p\n", (void *) cache->c_lruhead );
    fprintf( stderr, "LRU circle (tail forward):\n" );
    for ( e = cache->c_lrutail; ; ) {
        fprintf( stderr, "\t%p, %p id %ld rdn \"%s\"\n",
            (void *) e, (void *) e->bei_e, e->bei_id, e->bei_nrdn.bv_val );
        e = e->bei_lrunext;
        if ( e == cache->c_lrutail )
            break;
    }
    fprintf( stderr, "LRU circle (tail backward):\n" );
    for ( e = cache->c_lrutail; ; ) {
        fprintf( stderr, "\t%p, %p id %ld rdn \"%s\"\n",
            (void *) e, (void *) e->bei_e, e->bei_id, e->bei_nrdn.bv_val );
        e = e->bei_lruprev;
        if ( e == cache->c_lrutail )
            break;
    }
}

static int
bdb_entryinfo_print(void *data, void *arg)
{
    EntryInfo *e = data;
    fprintf( stderr, "\t%p, %p id %ld rdn \"%s\"\n",
        (void *) e, (void *) e->bei_e, e->bei_id, e->bei_nrdn.bv_val );
    return 0;
}

static void
bdb_idtree_print(Cache *cache)
{
    avl_apply( cache->c_idtree, bdb_entryinfo_print, NULL, -1, AVL_INORDER );
}

void
bdb_reader_free( void *key, void *data )
{
    /* DB_ENV *env = key; */
    DB_TXN *txn = data;

    if ( txn ) TXN_ABORT( txn );
}

/* free up any keys used by the main thread */
void
bdb_reader_flush( DB_ENV *env )
{
    void *data;
    void *ctx = ldap_pvt_thread_pool_context();

    if ( !ldap_pvt_thread_pool_getkey( ctx, env, &data, NULL ) ) {
        ldap_pvt_thread_pool_setkey( ctx, env, NULL, 0, NULL, NULL );
        bdb_reader_free( env, data );
    }
}

int
bdb_reader_get( Operation *op, DB_ENV *env, DB_TXN **txn )
{
    int i, rc;
    void *data;
    void *ctx;

    if ( !env || !txn ) return -1;

    /* If no op was provided, try to find the ctx anyway... */
    if ( op ) {
        ctx = op->o_threadctx;
    } else {
        ctx = ldap_pvt_thread_pool_context();
    }

    /* Shouldn't happen unless we're single-threaded */
    if ( !ctx ) {
        *txn = NULL;
        return 0;
    }

    if ( ldap_pvt_thread_pool_getkey( ctx, env, &data, NULL ) ) {
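        /* TXN_BEGIN can fail transiently, e.g. when the environment is
         * briefly out of transaction resources, so the loop below retries
         * a few times, yielding between attempts, before reporting the
         * error (our reading of the retry bound).
         */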
        for ( i=0, rc=1; rc != 0 && i<4; i++ ) {
            rc = TXN_BEGIN( env, NULL, txn, DB_READ_COMMITTED );
            if (rc) ldap_pvt_thread_yield();
        }
        if ( rc != 0 ) {
            return rc;
        }
        data = *txn;
        if ( ( rc = ldap_pvt_thread_pool_setkey( ctx, env,
            data, bdb_reader_free, NULL, NULL ) ) ) {
            TXN_ABORT( *txn );
            Debug( LDAP_DEBUG_ANY, "bdb_reader_get: err %s(%d)\n",
                db_strerror(rc), rc, 0 );

            return rc;
        }
    } else {
        *txn = data;
    }
    return 0;
}