/* cache.c - routines to maintain an in-core cache of entries */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
 *
 * Copyright 2000-2007 The OpenLDAP Foundation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted only as authorized by the OpenLDAP
 * Public License.
 *
 * A copy of this license is available in the file LICENSE in the
 * top-level directory of the distribution or, alternatively, at
 * <http://www.OpenLDAP.org/license.html>.
 */

#include "portable.h"

#include <stdio.h>

#include <ac/string.h>
#include <ac/socket.h>

#include "back-bdb.h"
#ifdef BDB_HIER
#define bdb_cache_lru_purge	hdb_cache_lru_purge
#endif
static void bdb_cache_lru_purge( struct bdb_info *bdb, uint32_t locker );

static int	bdb_cache_delete_internal(Cache *cache, EntryInfo *e, int decr);
#ifdef LDAP_DEBUG
static void	bdb_lru_print(Cache *cache);
#endif
#if 0
/* For concurrency experiments only! */
#define	ldap_pvt_thread_rdwr_wlock(a)	0
#define	ldap_pvt_thread_rdwr_wunlock(a)	0
#define	ldap_pvt_thread_rdwr_rlock(a)	0
#define	ldap_pvt_thread_rdwr_runlock(a)	0
#endif

#if 0
#define ldap_pvt_thread_mutex_trylock(a) 0
#endif
static EntryInfo *
bdb_cache_entryinfo_new( Cache *cache )
{
	EntryInfo *ei = NULL;

	if ( cache->c_eifree ) {
		ldap_pvt_thread_mutex_lock( &cache->c_eifree_mutex );
		if ( cache->c_eifree ) {
			ei = cache->c_eifree;
			cache->c_eifree = ei->bei_lrunext;
			ei->bei_lrunext = NULL;
		}
		ldap_pvt_thread_mutex_unlock( &cache->c_eifree_mutex );
	}
	if ( !ei ) {
		ei = ch_calloc(1, sizeof(EntryInfo));
		ldap_pvt_thread_mutex_init( &ei->bei_kids_mutex );
	}

	ei->bei_state = CACHE_ENTRY_REFERENCED;

	return ei;
}
/* Note - we now use a Second-Chance / Clock algorithm instead of
 * Least-Recently-Used. This tremendously improves concurrency
 * because we no longer need to manipulate the lists every time an
 * entry is touched. We only need to lock the lists when adding
 * or deleting an entry. It's now a circular doubly-linked list.
 * We always append to the tail, but the head traverses the circle
 * during a purge operation.
 */
static void
bdb_cache_lru_link( Cache *cache, EntryInfo *ei )
{
	/* Insert into circular LRU list */
	ldap_pvt_thread_mutex_lock( &cache->lru_tail_mutex );
	ei->bei_lruprev = cache->c_lrutail;
	if ( cache->c_lrutail ) {
		ei->bei_lrunext = cache->c_lrutail->bei_lrunext;
		cache->c_lrutail->bei_lrunext = ei;
		if ( ei->bei_lrunext )
			ei->bei_lrunext->bei_lruprev = ei;
	} else {
		ei->bei_lrunext = ei->bei_lruprev = ei;
		cache->c_lruhead = ei;
	}
	cache->c_lrutail = ei;
	ldap_pvt_thread_mutex_unlock( &cache->lru_tail_mutex );
}
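
/* Illustrative sketch (added commentary, not part of the original source):
 * a minimal, self-contained model of the circular-list insert above plus
 * the second-chance sweep that bdb_cache_lru_purge performs below. All
 * names here (demo_node, demo_link, demo_purge) are invented for the
 * example and do not exist in OpenLDAP.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

typedef struct demo_node {
	struct demo_node *next, *prev;
	int id;
	int referenced;		/* models CACHE_ENTRY_REFERENCED */
} demo_node;

static demo_node *demo_head, *demo_tail;

/* Append at the tail, mirroring bdb_cache_lru_link */
static void demo_link( demo_node *n )
{
	n->prev = demo_tail;
	if ( demo_tail ) {
		n->next = demo_tail->next;
		demo_tail->next = n;
		n->next->prev = n;
	} else {
		n->next = n->prev = n;
		demo_head = n;
	}
	demo_tail = n;
}

/* The clock hand: give each referenced node a second chance,
 * evict the first node found with its reference bit clear.
 */
static demo_node *demo_purge( void )
{
	demo_node *n = demo_head;
	for (;;) {
		if ( n->referenced ) {
			n->referenced = 0;	/* second chance */
			n = n->next;
		} else {
			demo_head = n->next;	/* hand stops past the victim */
			n->prev->next = n->next;
			n->next->prev = n->prev;
			return n;
		}
	}
}

int main( void )
{
	int i;
	for ( i = 0; i < 4; i++ ) {
		demo_node *n = calloc( 1, sizeof(demo_node) );
		n->id = i;
		n->referenced = 1;
		demo_link( n );
	}
	/* All reference bits are set, so the hand makes one full lap
	 * clearing them, then evicts the oldest node: prints "evicted 0".
	 */
	printf( "evicted %d\n", demo_purge()->id );
	return 0;
}
#endif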
/* #define NO_DB_LOCK 1 */
/* Note: The BerkeleyDB locks are much slower than regular
 * mutexes or rdwr locks. But the BDB implementation has the
 * advantage of using a fixed size lock table, instead of
 * allocating a lock object per entry in the DB. That's a
 * key benefit for scaling. It also frees us from worrying
 * about undetectable deadlocks between BDB activity and our
 * own cache activity. It's still worth exploring faster
 * alternatives though.
 */
/* Atomically release and reacquire a lock */
int
bdb_cache_entry_db_relock(
	struct bdb_info *bdb,
	u_int32_t locker,
	EntryInfo *ei,
	int rw,
	int tryOnly,
	DB_LOCK *lock )
{
#ifdef NO_DB_LOCK
	return 0;
#else
	int	rc;
	DBT	lockobj;
	DB_LOCKREQ list[2];

	if ( !lock ) return 0;

	lockobj.data = &ei->bei_id;
	lockobj.size = sizeof(ei->bei_id) + 1;

	list[0].op = DB_LOCK_PUT;
	list[0].lock = *lock;
	list[1].op = DB_LOCK_GET;
	list[1].lock = *lock;
	list[1].mode = rw ? DB_LOCK_WRITE : DB_LOCK_READ;
	list[1].obj = &lockobj;
	rc = bdb->bi_dbenv->lock_vec(bdb->bi_dbenv, locker, tryOnly ? DB_LOCK_NOWAIT : 0,
		list, 2, NULL );

	if (rc && !tryOnly) {
		Debug( LDAP_DEBUG_TRACE,
			"bdb_cache_entry_db_relock: entry %ld, rw %d, rc %d\n",
			ei->bei_id, rw, rc );
	} else {
		*lock = list[1].lock;
	}
	return rc;
#endif
}
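
/* Added note: the PUT and GET above go to BerkeleyDB as a single
 * lock_vec() request, which is what "atomically" means in the comment
 * above. bdb_cache_find_id relies on this to downgrade its write lock
 * to a read lock (rw == 0) once an entry has been loaded, without
 * leaving the entry unlocked in between.
 */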
static int
bdb_cache_entry_db_lock( struct bdb_info *bdb, u_int32_t locker, EntryInfo *ei,
	int rw, int tryOnly, DB_LOCK *lock )
{
#ifdef NO_DB_LOCK
	return 0;
#else
	int	rc;
	DBT	lockobj;
	int	db_rw;

	if ( !lock ) return 0;

	if (rw)
		db_rw = DB_LOCK_WRITE;
	else
		db_rw = DB_LOCK_READ;

	lockobj.data = &ei->bei_id;
	lockobj.size = sizeof(ei->bei_id) + 1;

	rc = LOCK_GET(bdb->bi_dbenv, locker, tryOnly ? DB_LOCK_NOWAIT : 0,
		&lockobj, db_rw, lock);
	if (rc && !tryOnly) {
		Debug( LDAP_DEBUG_TRACE,
			"bdb_cache_entry_db_lock: entry %ld, rw %d, rc %d\n",
			ei->bei_id, rw, rc );
	}
	return rc;
#endif /* NO_DB_LOCK */
}
int
bdb_cache_entry_db_unlock ( struct bdb_info *bdb, DB_LOCK *lock )
{
#ifdef NO_DB_LOCK
	return 0;
#else
	int rc;

	if ( !lock || lock->mode == DB_LOCK_NG ) return 0;

	rc = LOCK_PUT ( bdb->bi_dbenv, lock );
	return rc;
#endif
}
static int
bdb_cache_entryinfo_destroy( EntryInfo *e )
{
	ldap_pvt_thread_mutex_destroy( &e->bei_kids_mutex );
	free( e->bei_nrdn.bv_val );
#ifdef BDB_HIER
	free( e->bei_rdn.bv_val );
#endif
	free( e );
	return 0;
}
/* Do a length-ordered sort on normalized RDNs */
static int
bdb_rdn_cmp( const void *v_e1, const void *v_e2 )
{
	const EntryInfo *e1 = v_e1, *e2 = v_e2;
	int rc = e1->bei_nrdn.bv_len - e2->bei_nrdn.bv_len;
	if (rc == 0) {
		rc = strncmp( e1->bei_nrdn.bv_val, e2->bei_nrdn.bv_val,
			e1->bei_nrdn.bv_len );
	}
	return rc;
}
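
/* Added example: under this ordering "ou=it" (length 5) sorts before both
 * "cn=admin" and "cn=benny" (length 8), and strncmp only runs to break the
 * tie between the two equal-length RDNs. Many comparisons are therefore
 * settled by the integer length test alone.
 */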
static int
bdb_id_cmp( const void *v_e1, const void *v_e2 )
{
	const EntryInfo *e1 = v_e1, *e2 = v_e2;
	return e1->bei_id - e2->bei_id;
}
/* Create an entryinfo in the cache. Caller must release the locks later.
 */
static int
bdb_entryinfo_add_internal(
	struct bdb_info *bdb,
	EntryInfo *ei,
	EntryInfo **res )
{
	EntryInfo *ei2 = NULL;

	*res = NULL;

	ei2 = bdb_cache_entryinfo_new( &bdb->bi_cache );

	ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
	bdb_cache_entryinfo_lock( ei->bei_parent );

	ei2->bei_id = ei->bei_id;
	ei2->bei_parent = ei->bei_parent;
#ifdef BDB_HIER
	ei2->bei_rdn = ei->bei_rdn;
#endif
#ifdef SLAP_ZONE_ALLOC
	ei2->bei_bdb = bdb;
#endif

	/* Add to cache ID tree */
	if (avl_insert( &bdb->bi_cache.c_idtree, ei2, bdb_id_cmp, avl_dup_error )) {
		EntryInfo *eix;
		eix = avl_find( bdb->bi_cache.c_idtree, ei2, bdb_id_cmp );
		bdb_cache_entryinfo_destroy( ei2 );
		ei2 = eix;
#ifdef BDB_HIER
		/* It got freed above because its value was
		 * assigned to ei2.
		 */
		ei->bei_rdn.bv_val = NULL;
#endif
	} else {
		bdb->bi_cache.c_eiused++;
		ber_dupbv( &ei2->bei_nrdn, &ei->bei_nrdn );

		/* This is a new leaf node. But if parent had no kids, then it was
		 * a leaf and we would be decrementing that. So, only increment if
		 * the parent already has kids.
		 */
		if ( ei->bei_parent->bei_kids || !ei->bei_parent->bei_id )
			bdb->bi_cache.c_leaves++;
		avl_insert( &ei->bei_parent->bei_kids, ei2, bdb_rdn_cmp,
			avl_dup_error );
#ifdef BDB_HIER
		ei->bei_parent->bei_ckids++;
#endif
		bdb_cache_lru_link( &bdb->bi_cache, ei2 );
	}

	*res = ei2;
	return 0;
}
/* Find the EntryInfo for the requested DN. If the DN cannot be found, return
 * the info for its closest ancestor. *res should be NULL to process a
 * complete DN starting from the tree root. Otherwise *res must be the
 * immediate parent of the requested DN, and only the RDN will be searched.
 * The EntryInfo is locked upon return and must be unlocked by the caller.
 */
int
bdb_cache_find_ndn(
	Operation	*op,
	DB_TXN		*txn,
	struct berval	*ndn,
	EntryInfo	**res )
{
	struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
	EntryInfo	ei, *eip, *ei2;
	int rc = 0;
	char *ptr;

	/* this function is always called with normalized DN */
	if ( *res ) {
		/* we're doing a onelevel search for an RDN */
		ei.bei_nrdn.bv_val = ndn->bv_val;
		ei.bei_nrdn.bv_len = dn_rdnlen( op->o_bd, ndn );
		eip = *res;
	} else {
		/* we're searching a full DN from the root */
		ptr = ndn->bv_val + ndn->bv_len - op->o_bd->be_nsuffix[0].bv_len;
		ei.bei_nrdn.bv_val = ptr;
		ei.bei_nrdn.bv_len = op->o_bd->be_nsuffix[0].bv_len;
		/* Skip to next rdn if suffix is empty */
		if ( ei.bei_nrdn.bv_len == 0 ) {
			for (ptr = ei.bei_nrdn.bv_val - 2; ptr > ndn->bv_val
				&& !DN_SEPARATOR(*ptr); ptr--) /* empty */;
			if ( ptr >= ndn->bv_val ) {
				if (DN_SEPARATOR(*ptr)) ptr++;
				ei.bei_nrdn.bv_len = ei.bei_nrdn.bv_val - ptr;
				ei.bei_nrdn.bv_val = ptr;
			}
		}
		eip = &bdb->bi_cache.c_dntree;
	}

	for ( bdb_cache_entryinfo_lock( eip ); eip; ) {
		eip->bei_state |= CACHE_ENTRY_REFERENCED;
		ei.bei_parent = eip;
		ei2 = (EntryInfo *)avl_find( eip->bei_kids, &ei, bdb_rdn_cmp );
		if ( !ei2 ) {
			int len = ei.bei_nrdn.bv_len;

			if ( BER_BVISEMPTY( ndn )) {
				*res = eip;
				return LDAP_SUCCESS;
			}

			ei.bei_nrdn.bv_len = ndn->bv_len -
				(ei.bei_nrdn.bv_val - ndn->bv_val);
			bdb_cache_entryinfo_unlock( eip );

			rc = bdb_dn2id( op, txn, &ei.bei_nrdn, &ei );
			if (rc) {
				bdb_cache_entryinfo_lock( eip );
				*res = eip;
				return rc;
			}

			/* DN exists but needs to be added to cache */
			ei.bei_nrdn.bv_len = len;
			rc = bdb_entryinfo_add_internal( bdb, &ei, &ei2 );
			/* add_internal left eip and c_rwlock locked */
			ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
			if ( rc ) {
				*res = eip;
				return rc;
			}
		} else if ( ei2->bei_state & CACHE_ENTRY_DELETED ) {
			/* In the midst of deleting? Give it a chance to
			 * complete.
			 */
			bdb_cache_entryinfo_unlock( eip );
			ldap_pvt_thread_yield();
			bdb_cache_entryinfo_lock( eip );
			*res = eip;
			return DB_NOTFOUND;
		}
		bdb_cache_entryinfo_unlock( eip );
		bdb_cache_entryinfo_lock( ei2 );

		eip = ei2;

		/* Advance to next lower RDN */
		for (ptr = ei.bei_nrdn.bv_val - 2; ptr > ndn->bv_val
			&& !DN_SEPARATOR(*ptr); ptr--) /* empty */;
		if ( ptr >= ndn->bv_val ) {
			if (DN_SEPARATOR(*ptr)) ptr++;
			ei.bei_nrdn.bv_len = ei.bei_nrdn.bv_val - ptr - 1;
			ei.bei_nrdn.bv_val = ptr;
		}
		if ( ptr < ndn->bv_val ) {
			*res = eip;
			break;
		}
	}

	return rc;
}
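
/* Added illustration: for ndn = "cn=foo,ou=staff,dc=example,dc=com" with
 * suffix "dc=example,dc=com", the loop starts at the suffix and peels one
 * RDN per iteration, right to left:
 *
 *	"dc=example,dc=com" -> "ou=staff" -> "cn=foo"
 *
 * Each iteration unlocks the current ancestor and locks the matched child,
 * so on return the caller holds only the lock on the final EntryInfo.
 */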
#ifdef BDB_HIER
/* Walk up the tree from a child node, looking for an ID that's already
 * been linked into the cache.
 */
int
hdb_cache_find_parent(
	Operation *op,
	DB_TXN *txn,
	u_int32_t locker,
	ID id,
	EntryInfo **res )
{
	struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
	EntryInfo ei, eip, *ei2 = NULL, *ein = NULL, *eir = NULL;
	int rc;

	ei.bei_id = id;
	ei.bei_kids = NULL;
	ei.bei_ckids = 0;

	for (;;) {
		rc = hdb_dn2id_parent( op, txn, locker, &ei, &eip.bei_id );
		if ( rc ) break;

		/* Save the previous node, if any */
		ei2 = ein;

		/* Create a new node for the current ID */
		ein = bdb_cache_entryinfo_new( &bdb->bi_cache );
		ein->bei_id = ei.bei_id;
		ein->bei_kids = ei.bei_kids;
		ein->bei_nrdn = ei.bei_nrdn;
		ein->bei_rdn = ei.bei_rdn;
		ein->bei_ckids = ei.bei_ckids;
#ifdef SLAP_ZONE_ALLOC
		ein->bei_bdb = bdb;
#endif
		ei.bei_ckids = 0;

		/* This node is not fully connected yet */
		ein->bei_state |= CACHE_ENTRY_NOT_LINKED;

		/* Insert this node into the ID tree */
		ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
		if ( avl_insert( &bdb->bi_cache.c_idtree, (caddr_t)ein,
			bdb_id_cmp, avl_dup_error ) ) {

			/* Someone else created this node just before us.
			 * Free our new copy and use the existing one.
			 */
			bdb_cache_entryinfo_destroy( ein );
			ein = (EntryInfo *)avl_find( bdb->bi_cache.c_idtree,
				(caddr_t) &ei, bdb_id_cmp );

			/* Link in any kids we've already processed */
			if ( ei2 ) {
				bdb_cache_entryinfo_lock( ein );
				avl_insert( &ein->bei_kids, (caddr_t)ei2,
					bdb_rdn_cmp, avl_dup_error );
				ein->bei_ckids++;
				bdb_cache_entryinfo_unlock( ein );
			}
		} else {
			bdb_cache_lru_link( &bdb->bi_cache, ein );
		}

		/* If this is the first time, save this node
		 * to be returned later.
		 */
		if ( eir == NULL ) eir = ein;

		/* If there was a previous node, link it to this one */
		if ( ei2 ) ei2->bei_parent = ein;

		/* Look for this node's parent */
		if ( eip.bei_id ) {
			ei2 = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
				(caddr_t) &eip, bdb_id_cmp );
		} else {
			ei2 = &bdb->bi_cache.c_dntree;
		}
		bdb->bi_cache.c_eiused++;
		if ( ei2 && ( ei2->bei_kids || !ei2->bei_id ))
			bdb->bi_cache.c_leaves++;
		ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );

		/* Got the parent, link in and we're done. */
		if ( ei2 ) {
			bdb_cache_entryinfo_lock( ei2 );
			ein->bei_parent = ei2;

			avl_insert( &ei2->bei_kids, (caddr_t)ein, bdb_rdn_cmp,
				avl_dup_error );
			ei2->bei_ckids++;

			/* Reset all the state info */
			for (ein = eir; ein != ei2; ein=ein->bei_parent)
				ein->bei_state &= ~CACHE_ENTRY_NOT_LINKED;

			bdb_cache_entryinfo_unlock( ei2 );
			bdb_cache_entryinfo_lock( eir );

			*res = eir;
			break;
		}
		ei.bei_kids = NULL;
		ei.bei_id = eip.bei_id;
		ei.bei_ckids = 1;
		avl_insert( &ei.bei_kids, (caddr_t)ein, bdb_rdn_cmp,
			avl_dup_error );
	}
	return rc;
}
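
/* Added illustration: if entry 9 has ancestors 5 and 2, and only 2 is
 * already cached, the loop above creates infos for 9 and then 5 (both
 * flagged CACHE_ENTRY_NOT_LINKED), finds 2 in the ID tree, links 5 under
 * 2, clears the NOT_LINKED flags along the chain, and returns 9's info
 * locked.
 */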
/* Used by hdb_dn2idl when loading the EntryInfo for all the children
 * of a given node
 */
int hdb_cache_load(
	struct bdb_info *bdb,
	EntryInfo *ei,
	EntryInfo **res )
{
	EntryInfo *ei2;
	int rc;

	/* See if we already have this one */
	bdb_cache_entryinfo_lock( ei->bei_parent );
	ei2 = (EntryInfo *)avl_find( ei->bei_parent->bei_kids, ei, bdb_rdn_cmp );
	bdb_cache_entryinfo_unlock( ei->bei_parent );

	if ( !ei2 ) {
		/* Not found, add it */
		struct berval bv;

		/* bei_rdn was not malloc'd before, do it now */
		ber_dupbv( &bv, &ei->bei_rdn );
		ei->bei_rdn = bv;

		rc = bdb_entryinfo_add_internal( bdb, ei, res );
		bdb_cache_entryinfo_unlock( ei->bei_parent );
		ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
	} else {
		/* Found, return it */
		*res = ei2;
		rc = 0;
	}
	return rc;
}
#endif /* BDB_HIER */
static void
bdb_cache_lru_purge( struct bdb_info *bdb, uint32_t locker )
{
	DB_LOCK		lock;
	EntryInfo *elru, *elnext;
	int count = 0, islocked;

	/* Don't bother if we can't get the lock */
	if ( ldap_pvt_thread_mutex_trylock( &bdb->bi_cache.lru_head_mutex ) )
		return;

	if ( bdb->bi_cache.c_cursize <= bdb->bi_cache.c_maxsize ) {
		ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );
		return;
	}

	/* Look for an unused entry to remove */
	for (elru = bdb->bi_cache.c_lruhead; elru; elru = elnext ) {
		elnext = elru->bei_lrunext;

		if ( ldap_pvt_thread_mutex_trylock( &elru->bei_kids_mutex ))
			continue;

		/* This flag implements the clock replacement behavior */
		if ( elru->bei_state & ( CACHE_ENTRY_REFERENCED )) {
			elru->bei_state &= ~CACHE_ENTRY_REFERENCED;
			bdb_cache_entryinfo_unlock( elru );
			continue;
		}

		/* If this node is in the process of linking into the cache,
		 * or this node is being deleted, skip it.
		 *
		 * Also, if this node has no entry attached, skip it, there's
		 * nothing to purge anyway.
		 */
		if (( elru->bei_state & ( CACHE_ENTRY_NOT_LINKED |
			CACHE_ENTRY_DELETED | CACHE_ENTRY_LOADING )) ||
			!elru->bei_e ) {
			bdb_cache_entryinfo_unlock( elru );
			continue;
		}

		/* entryinfo is locked */
		islocked = 1;

		/* If we can successfully writelock it, then
		 * the object is idle.
		 */
		if ( bdb_cache_entry_db_lock( bdb, locker, elru, 1, 1, &lock ) == 0 ) {

			/* Free entry for this node if it's present */
			elru->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
			bdb_entry_return( bdb, elru->bei_e, elru->bei_zseq );
#else
			bdb_entry_return( elru->bei_e );
#endif
			elru->bei_e = NULL;
			count++;
			bdb_cache_entry_db_unlock( bdb, &lock );

			/* ITS#4010 if we're in slapcat, and this node is a leaf
			 * node, free it.
			 *
			 * FIXME: we need to do this for slapd as well, (which is
			 * why we compute bi_cache.c_leaves now) but at the moment
			 * we can't because it causes unresolvable deadlocks.
			 */
			if ( slapMode & SLAP_TOOL_READONLY ) {
				if ( !elru->bei_kids ) {
					bdb_cache_delete_internal( &bdb->bi_cache, elru, 0 );
					bdb_cache_delete_cleanup( &bdb->bi_cache, elru );
					islocked = 0;
				}
				/* Leave node on LRU list for a future pass */
			}
		}

		if ( islocked )
			bdb_cache_entryinfo_unlock( elru );

		if ( count >= bdb->bi_cache.c_minfree ) {
			ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_count_mutex );
			bdb->bi_cache.c_cursize -= count;
			ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_count_mutex );
			break;
		}
	}

	bdb->bi_cache.c_lruhead = elnext;
	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );
}
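
/* Added note: c_minfree batches the bookkeeping -- the sweep keeps freeing
 * entries until at least c_minfree have been reclaimed, then subtracts the
 * whole batch from c_cursize under c_count_mutex once, instead of touching
 * the counter per eviction. The hand position is saved in c_lruhead so the
 * next purge resumes where this one stopped.
 */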
/* Lookup an EntryInfo in the cache by ID, without loading the entry */
EntryInfo *
bdb_cache_find_info(
	struct bdb_info *bdb,
	ID id )
{
	EntryInfo	ei = { 0 },
			*ei2;

	ei.bei_id = id;

	ldap_pvt_thread_rdwr_rlock( &bdb->bi_cache.c_rwlock );
	ei2 = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
		(caddr_t) &ei, bdb_id_cmp );
	ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
	return ei2;
}
/*
 * cache_find_id - find an entry in the cache, given id.
 * The entry is locked for Read upon return. Call with islocked TRUE if
 * the supplied *eip was already locked.
 */
int
bdb_cache_find_id(
	Operation *op,
	DB_TXN	*tid,
	ID	id,
	EntryInfo **eip,
	int	islocked,
	u_int32_t locker,
	DB_LOCK	*lock )
{
	struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
	Entry	*ep = NULL;
	int	rc = 0, load = 0;
	EntryInfo ei = { 0 };

	ei.bei_id = id;

#ifdef SLAP_ZONE_ALLOC
	slap_zh_rlock(bdb->bi_cache.c_zctx);
#endif
	/* If we weren't given any info, see if we have it already cached */
	if ( !*eip ) {
again:	ldap_pvt_thread_rdwr_rlock( &bdb->bi_cache.c_rwlock );
		*eip = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
			(caddr_t) &ei, bdb_id_cmp );
		if ( *eip ) {
			/* If the lock attempt fails, the info is in use */
			if ( ldap_pvt_thread_mutex_trylock(
					&(*eip)->bei_kids_mutex )) {
				ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
				/* If this node is being deleted, treat
				 * as if the delete has already finished
				 */
				if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
#ifdef SLAP_ZONE_ALLOC
					slap_zh_runlock(bdb->bi_cache.c_zctx);
#endif
					return DB_NOTFOUND;
				}
				/* otherwise, wait for the info to free up */
				ldap_pvt_thread_yield();
				goto again;
			}
			/* If this info isn't hooked up to its parent yet,
			 * unlock and wait for it to be fully initialized
			 */
			if ( (*eip)->bei_state & CACHE_ENTRY_NOT_LINKED ) {
				bdb_cache_entryinfo_unlock( *eip );
				ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
				ldap_pvt_thread_yield();
				goto again;
			}
			islocked = 1;
		}
		ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
	}

	/* See if the ID exists in the database; add it to the cache if so */
	if ( !*eip ) {
#ifndef BDB_HIER
		rc = bdb_id2entry( op->o_bd, tid, locker, id, &ep );
		if ( rc == 0 ) {
			rc = bdb_cache_find_ndn( op, tid,
				&ep->e_nname, eip );
			if ( *eip ) islocked = 1;
			if ( rc ) {
				ep->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
				bdb_entry_return( bdb, ep, (*eip)->bei_zseq );
#else
				bdb_entry_return( ep );
#endif
				ep = NULL;
			}
		}
#else
		rc = hdb_cache_find_parent(op, tid, locker, id, eip );
		if ( rc == 0 ) islocked = 1;
#endif
	}

	/* Ok, we found the info, do we have the entry? */
	if ( rc == 0 ) {
		if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
			rc = DB_NOTFOUND;
		} else {
			/* Make sure only one thread tries to load the entry */
load1:
#ifdef SLAP_ZONE_ALLOC
			if ((*eip)->bei_e && !slap_zn_validate(
					bdb->bi_cache.c_zctx, (*eip)->bei_e, (*eip)->bei_zseq)) {
				(*eip)->bei_e = NULL;
				(*eip)->bei_zseq = 0;
			}
#endif
			if ( !(*eip)->bei_e && !((*eip)->bei_state & CACHE_ENTRY_LOADING)) {
				load = 1;
				(*eip)->bei_state |= CACHE_ENTRY_LOADING;
			}
			if ( islocked ) {
				bdb_cache_entryinfo_unlock( *eip );
				islocked = 0;
			}
			rc = bdb_cache_entry_db_lock( bdb, locker, *eip, load, 0, lock );
			if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
				rc = DB_NOTFOUND;
				bdb_cache_entry_db_unlock( bdb, lock );
			} else if ( rc == 0 ) {
				if ( load ) {
					if ( !ep ) {
						rc = bdb_id2entry( op->o_bd, tid, locker, id, &ep );
					}
					if ( rc == 0 ) {
						ep->e_private = *eip;
#ifdef BDB_HIER
						bdb_fix_dn( ep, 0 );
#endif
						(*eip)->bei_e = ep;
#ifdef SLAP_ZONE_ALLOC
						(*eip)->bei_zseq = *((ber_len_t *)ep - 2);
#endif
						ep = NULL;
					}
					if ( rc == 0 ) {
						/* If we succeeded, downgrade back to a readlock. */
						rc = bdb_cache_entry_db_relock( bdb, locker,
							*eip, 0, 0, lock );
					} else {
						/* Otherwise, release the lock. */
						bdb_cache_entry_db_unlock( bdb, lock );
					}
					bdb_cache_entryinfo_lock( *eip );
					(*eip)->bei_state ^= CACHE_ENTRY_LOADING;
					bdb_cache_entryinfo_unlock( *eip );
				} else if ( !(*eip)->bei_e ) {
					/* Some other thread is trying to load the entry,
					 * wait for it to finish.
					 */
					bdb_cache_entry_db_unlock( bdb, lock );
					bdb_cache_entryinfo_lock( *eip );
					islocked = 1;
					goto load1;
#ifdef BDB_HIER
				} else {
					/* Check for subtree renames
					 */
					rc = bdb_fix_dn( (*eip)->bei_e, 1 );
					if ( rc ) {
						bdb_cache_entry_db_relock( bdb,
							locker, *eip, 1, 0, lock );
						/* check again in case other modifier did it already */
						if ( bdb_fix_dn( (*eip)->bei_e, 1 ) )
							rc = bdb_fix_dn( (*eip)->bei_e, 2 );
						else
							rc = 0;
						bdb_cache_entry_db_relock( bdb,
							locker, *eip, 0, 0, lock );
					}
#endif
				}
			}
		}
	}
	if ( islocked ) {
		bdb_cache_entryinfo_unlock( *eip );
	}
	if ( ep ) {
		ep->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
		bdb_entry_return( bdb, ep, (*eip)->bei_zseq );
#else
		bdb_entry_return( ep );
#endif
	}
	if ( rc == 0 && load ) {
		int purge = 0;
		ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_count_mutex );
		bdb->bi_cache.c_cursize++;
		if ( bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize )
			purge = 1;
		ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_count_mutex );
		if ( purge )
			bdb_cache_lru_purge( bdb, locker );
	}

#ifdef SLAP_ZONE_ALLOC
	if (rc == 0 && (*eip)->bei_e) {
		slap_zn_rlock(bdb->bi_cache.c_zctx, (*eip)->bei_e);
	}
	slap_zh_runlock(bdb->bi_cache.c_zctx);
#endif
	return rc;
}
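
/* Illustrative caller sketch (added; not in the original source, and the
 * surrounding operation setup and error handling are elided). It mirrors
 * the typical back-bdb usage of the cache: look up by ID, use the entry
 * under the returned read lock, then release the lock.
 */
#if 0
{
	EntryInfo *ei = NULL;
	DB_LOCK lock;
	int rc;

	rc = bdb_cache_find_id( op, txn, id, &ei, 0, locker, &lock );
	if ( rc == 0 ) {
		Entry *e = ei->bei_e;	/* valid while we hold the read lock */
		/* ... examine e ... */
		bdb_cache_entry_db_unlock( bdb, &lock );
	}
}
#endif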
/* Test whether an entry has children, caching the result in bei_state */
int
bdb_cache_children(
	Operation *op,
	DB_TXN *txn,
	Entry *e )
{
	int rc;

	if ( BEI(e)->bei_kids ) {
		return 0;
	}
	if ( BEI(e)->bei_state & CACHE_ENTRY_NO_KIDS ) {
		return DB_NOTFOUND;
	}
	rc = bdb_dn2id_children( op, txn, e );
	if ( rc == DB_NOTFOUND ) {
		BEI(e)->bei_state |= CACHE_ENTRY_NO_KIDS | CACHE_ENTRY_NO_GRANDKIDS;
	}
	return rc;
}
/* Update the cache after a successful database Add. */
int
bdb_cache_add(
	struct bdb_info *bdb,
	EntryInfo *eip,
	Entry *e,
	struct berval *nrdn,
	u_int32_t locker,
	DB_LOCK *lock )
{
	EntryInfo *new, ei;
	int rc, purge = 0;
#ifdef BDB_HIER
	struct berval rdn = e->e_name;
#endif

	ei.bei_id = e->e_id;
	ei.bei_parent = eip;
	ei.bei_nrdn = *nrdn;
	ei.bei_lockpad = 0;

	/* Lock this entry so that bdb_add can run to completion.
	 * It can only fail if BDB has run out of lock resources.
	 */
	rc = bdb_cache_entry_db_lock( bdb, locker, &ei, 0, 0, lock );
	if ( rc ) {
		bdb_cache_entryinfo_unlock( eip );
		return rc;
	}

#ifdef BDB_HIER
	if ( nrdn->bv_len != e->e_nname.bv_len ) {
		char *ptr = ber_bvchr( &rdn, ',' );
		assert( ptr != NULL );
		rdn.bv_len = ptr - rdn.bv_val;
	}
	ber_dupbv( &ei.bei_rdn, &rdn );
	if ( eip->bei_dkids ) eip->bei_dkids++;
#endif

	rc = bdb_entryinfo_add_internal( bdb, &ei, &new );
	/* bdb_csn_commit can cause this when adding the database root entry */
	if ( new->bei_e ) {
		new->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
		bdb_entry_return( bdb, new->bei_e, new->bei_zseq );
#else
		bdb_entry_return( new->bei_e );
#endif
	}
	new->bei_e = e;
	e->e_private = new;
	new->bei_state |= CACHE_ENTRY_NO_KIDS | CACHE_ENTRY_NO_GRANDKIDS;
	eip->bei_state &= ~CACHE_ENTRY_NO_KIDS;
	if (eip->bei_parent) {
		eip->bei_parent->bei_state &= ~CACHE_ENTRY_NO_GRANDKIDS;
	}
	bdb_cache_entryinfo_unlock( eip );

	ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
	ldap_pvt_thread_mutex_lock( &bdb->bi_cache.c_count_mutex );
	++bdb->bi_cache.c_cursize;
	if ( bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize )
		purge = 1;
	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.c_count_mutex );

	if ( purge )
		bdb_cache_lru_purge( bdb, locker );

	return rc;
}
int
bdb_cache_modify(
	struct bdb_info *bdb,
	Entry *e,
	Attribute *newAttrs,
	u_int32_t locker,
	DB_LOCK *lock )
{
	EntryInfo *ei = BEI(e);
	int rc;

	/* Get write lock on data */
	rc = bdb_cache_entry_db_relock( bdb, locker, ei, 1, 0, lock );

	/* If we've done repeated mods on a cached entry, then e_attrs
	 * is no longer contiguous with the entry, and must be freed.
	 */
	if ( (void *)e->e_attrs != (void *)(e+1) ) {
		attrs_free( e->e_attrs );
	}
	e->e_attrs = newAttrs;
	return rc;
}
/*
 * Change the rdn in the entryinfo. Also move to a new parent if needed.
 */
int
bdb_cache_modrdn(
	struct bdb_info *bdb,
	Entry *e,
	struct berval *nrdn,
	Entry *new,
	EntryInfo *ein,
	u_int32_t locker,
	DB_LOCK *lock )
{
	EntryInfo *ei = BEI(e), *pei;
	int rc;
#ifdef BDB_HIER
	struct berval rdn;
#endif

	/* Get write lock on data */
	rc = bdb_cache_entry_db_relock( bdb, locker, ei, 1, 0, lock );
	if ( rc ) return rc;

	/* If we've done repeated mods on a cached entry, then e_attrs
	 * is no longer contiguous with the entry, and must be freed.
	 */
	if ( (void *)e->e_attrs != (void *)(e+1) ) {
		attrs_free( e->e_attrs );
	}
	e->e_attrs = new->e_attrs;
	if( e->e_nname.bv_val < e->e_bv.bv_val ||
		e->e_nname.bv_val > e->e_bv.bv_val + e->e_bv.bv_len )
	{
		ch_free(e->e_name.bv_val);
		ch_free(e->e_nname.bv_val);
	}
	e->e_name = new->e_name;
	e->e_nname = new->e_nname;

	/* Lock the parent's kids AVL tree */
	pei = ei->bei_parent;
	bdb_cache_entryinfo_lock( pei );
	avl_delete( &pei->bei_kids, (caddr_t) ei, bdb_rdn_cmp );
	free( ei->bei_nrdn.bv_val );
	ber_dupbv( &ei->bei_nrdn, nrdn );

	if ( !pei->bei_kids )
		pei->bei_state |= CACHE_ENTRY_NO_KIDS | CACHE_ENTRY_NO_GRANDKIDS;

#ifdef BDB_HIER
	free( ei->bei_rdn.bv_val );

	rdn = e->e_name;
	if ( nrdn->bv_len != e->e_nname.bv_len ) {
		char *ptr = ber_bvchr(&rdn, ',');
		assert( ptr != NULL );
		rdn.bv_len = ptr - rdn.bv_val;
	}
	ber_dupbv( &ei->bei_rdn, &rdn );
	pei->bei_ckids--;
	if ( pei->bei_dkids ) pei->bei_dkids--;
#endif

	if (!ein) {
		ein = ei->bei_parent;
	} else {
		ei->bei_parent = ein;
		bdb_cache_entryinfo_unlock( pei );
		bdb_cache_entryinfo_lock( ein );
	}
#ifdef BDB_HIER
	/* parent now has kids */
	if ( ein->bei_state & CACHE_ENTRY_NO_KIDS )
		ein->bei_state ^= CACHE_ENTRY_NO_KIDS;

	/* parent might now have grandkids */
	if ( ein->bei_state & CACHE_ENTRY_NO_GRANDKIDS &&
		!(ei->bei_state & (CACHE_ENTRY_NO_KIDS)))
		ein->bei_state ^= CACHE_ENTRY_NO_GRANDKIDS;

	/* Record the generation number of this change */
	ldap_pvt_thread_mutex_lock( &bdb->bi_modrdns_mutex );
	bdb->bi_modrdns++;
	ei->bei_modrdns = bdb->bi_modrdns;
	ldap_pvt_thread_mutex_unlock( &bdb->bi_modrdns_mutex );

	ein->bei_ckids++;
	if ( ein->bei_dkids ) ein->bei_dkids++;
#endif
	avl_insert( &ein->bei_kids, ei, bdb_rdn_cmp, avl_dup_error );
	bdb_cache_entryinfo_unlock( ein );
	return rc;
}
/*
 * cache_delete - delete the entry e from the cache.
 *
 * returns:	0	e was deleted ok
 *		1	e was not in the cache
 *		-1	something bad happened
 */
int
bdb_cache_delete(
	struct bdb_info *bdb,
	Entry *e,
	u_int32_t locker,
	DB_LOCK *lock )
{
	EntryInfo *ei = BEI(e);
	int	rc;

	assert( e->e_private != NULL );

	/* Set this early, warn off any queriers */
	ei->bei_state |= CACHE_ENTRY_DELETED;

	/* Lock the entry's info */
	bdb_cache_entryinfo_lock( ei );

	/* Get write lock on the data */
	rc = bdb_cache_entry_db_relock( bdb, locker, ei, 1, 0, lock );
	if ( rc ) {
		/* couldn't lock, undo and give up */
		ei->bei_state ^= CACHE_ENTRY_DELETED;
		bdb_cache_entryinfo_unlock( ei );
		return rc;
	}

	Debug( LDAP_DEBUG_TRACE, "====> bdb_cache_delete( %ld )\n",
		e->e_id, 0, 0 );

	/* set lru mutex */
	ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_head_mutex );

	rc = bdb_cache_delete_internal( &bdb->bi_cache, e->e_private, 1 );

	/* free lru mutex */
	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );

	/* Leave entry info locked */

	return( rc );
}
void
bdb_cache_delete_cleanup(
	Cache *cache,
	EntryInfo *ei )
{
	if ( ei->bei_e ) {
		ei->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
		bdb_entry_return( ei->bei_bdb, ei->bei_e, ei->bei_zseq );
#else
		bdb_entry_return( ei->bei_e );
#endif
		ei->bei_e = NULL;
	}

	free( ei->bei_nrdn.bv_val );
	ei->bei_nrdn.bv_val = NULL;
#ifdef BDB_HIER
	free( ei->bei_rdn.bv_val );
	ei->bei_rdn.bv_val = NULL;
	ei->bei_modrdns = 0;
	ei->bei_ckids = 0;
	ei->bei_dkids = 0;
#endif
	ei->bei_parent = NULL;
	ei->bei_kids = NULL;
	ei->bei_lruprev = NULL;

	ldap_pvt_thread_mutex_lock( &cache->c_eifree_mutex );
	ei->bei_lrunext = cache->c_eifree;
	cache->c_eifree = ei;
	ldap_pvt_thread_mutex_unlock( &cache->c_eifree_mutex );
	bdb_cache_entryinfo_unlock( ei );
}
static int
bdb_cache_delete_internal(
	Cache	*cache,
	EntryInfo *e,
	int	decr )
{
	int rc = 0;	/* return code */

	/* Lock the parent's kids tree */
	bdb_cache_entryinfo_lock( e->bei_parent );

#ifdef BDB_HIER
	e->bei_parent->bei_ckids--;
	if ( decr && e->bei_parent->bei_dkids ) e->bei_parent->bei_dkids--;
#endif
	/* dn tree */
	if ( avl_delete( &e->bei_parent->bei_kids, (caddr_t) e, bdb_rdn_cmp )
		== NULL )
	{
		rc = -1;
	}
	if ( e->bei_parent->bei_kids )
		cache->c_leaves--;

	bdb_cache_entryinfo_unlock( e->bei_parent );

	ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
	/* id tree */
	if ( avl_delete( &cache->c_idtree, (caddr_t) e, bdb_id_cmp )) {
		cache->c_eiused--;
	} else {
		rc = -1;
	}
	ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );

	if ( rc == 0 ) {
		/* lru */
		if ( e == cache->c_lruhead ) cache->c_lruhead = e->bei_lrunext;
		if ( e == cache->c_lrutail ) {
			ldap_pvt_thread_mutex_lock( &cache->lru_tail_mutex );
			if ( e == cache->c_lrutail ) cache->c_lrutail = e->bei_lruprev;
			ldap_pvt_thread_mutex_unlock( &cache->lru_tail_mutex );
		}

		if ( e->bei_lrunext ) e->bei_lrunext->bei_lruprev = e->bei_lruprev;
		if ( e->bei_lruprev ) e->bei_lruprev->bei_lrunext = e->bei_lrunext;

		/* entry cache */
		ldap_pvt_thread_mutex_lock( &cache->c_count_mutex );
		cache->c_cursize--;
		ldap_pvt_thread_mutex_unlock( &cache->c_count_mutex );
	}

	return( rc );
}
static void
bdb_entryinfo_release( void *data )
{
	EntryInfo *ei = (EntryInfo *)data;
	if ( ei->bei_kids ) {
		avl_free( ei->bei_kids, NULL );
	}
	if ( ei->bei_e ) {
		ei->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
		bdb_entry_return( ei->bei_bdb, ei->bei_e, ei->bei_zseq );
#else
		bdb_entry_return( ei->bei_e );
#endif
	}
	bdb_cache_entryinfo_destroy( ei );
}
void
bdb_cache_release_all( Cache *cache )
{
	/* set cache write lock */
	ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
	/* set lru mutex */
	ldap_pvt_thread_mutex_lock( &cache->lru_tail_mutex );

	Debug( LDAP_DEBUG_TRACE, "====> bdb_cache_release_all\n", 0, 0, 0 );

	avl_free( cache->c_dntree.bei_kids, NULL );
	avl_free( cache->c_idtree, bdb_entryinfo_release );
	for (;cache->c_eifree;cache->c_eifree = cache->c_lruhead) {
		cache->c_lruhead = cache->c_eifree->bei_lrunext;
		bdb_cache_entryinfo_destroy(cache->c_eifree);
	}
	cache->c_cursize = 0;
	cache->c_eiused = 0;
	cache->c_leaves = 0;
	cache->c_idtree = NULL;
	cache->c_lruhead = NULL;
	cache->c_lrutail = NULL;
	cache->c_dntree.bei_kids = NULL;

	/* free lru mutex */
	ldap_pvt_thread_mutex_unlock( &cache->lru_tail_mutex );
	/* free cache write lock */
	ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
}
#ifdef LDAP_DEBUG
static void
bdb_lru_print( Cache *cache )
{
	EntryInfo	*e;

	fprintf( stderr, "LRU circle head: %p\n", (void *) cache->c_lruhead );
	fprintf( stderr, "LRU circle (tail forward):\n" );
	for ( e = cache->c_lrutail; ; ) {
		fprintf( stderr, "\t%p, %p id %ld rdn \"%s\"\n",
			(void *) e, (void *) e->bei_e, e->bei_id, e->bei_nrdn.bv_val );
		e = e->bei_lrunext;
		if ( e == cache->c_lrutail )
			break;
	}
	fprintf( stderr, "LRU circle (tail backward):\n" );
	for ( e = cache->c_lrutail; ; ) {
		fprintf( stderr, "\t%p, %p id %ld rdn \"%s\"\n",
			(void *) e, (void *) e->bei_e, e->bei_id, e->bei_nrdn.bv_val );
		e = e->bei_lruprev;
		if ( e == cache->c_lrutail )
			break;
	}
}
#endif /* LDAP_DEBUG */
#ifdef BDB_REUSE_LOCKERS
static void
bdb_locker_id_free( void *key, void *data )
{
	DB_ENV *env = key;
	u_int32_t lockid = (long)data;
	int rc;

	rc = XLOCK_ID_FREE( env, lockid );
	if ( rc == EINVAL ) {
		DB_LOCKREQ lr;
		Debug( LDAP_DEBUG_ANY,
			"bdb_locker_id_free: %lu err %s(%d)\n",
			(unsigned long) lockid, db_strerror(rc), rc );
		/* release all locks held by this locker. */
		lr.op = DB_LOCK_PUT_ALL;
		lr.obj = NULL;
		env->lock_vec( env, lockid, 0, &lr, 1, NULL );
		XLOCK_ID_FREE( env, lockid );
	}
}
int
bdb_locker_id( Operation *op, DB_ENV *env, u_int32_t *locker )
{
	int i, rc;
	u_int32_t lockid;
	void *data, *ctx;

	if ( !env || !locker ) return -1;

	/* If no op was provided, try to find the ctx anyway... */
	if ( op ) {
		ctx = op->o_threadctx;
	} else {
		ctx = ldap_pvt_thread_pool_context();
	}

	/* Shouldn't happen unless we're single-threaded */
	if ( !ctx ) {
		*locker = 0;
		return 0;
	}

	if ( ldap_pvt_thread_pool_getkey( ctx, env, &data, NULL ) ) {
		for ( i=0, rc=1; rc != 0 && i<4; i++ ) {
			rc = XLOCK_ID( env, &lockid );
			if (rc) ldap_pvt_thread_yield();
		}
		if ( rc != 0 ) {
			return rc;
		}
		data = (void *)((long)lockid);
		if ( ( rc = ldap_pvt_thread_pool_setkey( ctx, env,
			data, bdb_locker_id_free ) ) ) {
			XLOCK_ID_FREE( env, lockid );
			Debug( LDAP_DEBUG_ANY, "bdb_locker_id: err %s(%d)\n",
				db_strerror(rc), rc, 0 );
			return rc;
		}
	} else {
		lockid = (long)data;
	}
	*locker = lockid;
	return 0;
}
#endif /* BDB_REUSE_LOCKERS */
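
/* Added note: with BDB_REUSE_LOCKERS, each worker thread allocates one
 * BerkeleyDB locker ID on first use and caches it in its thread-pool
 * context, keyed by the DB_ENV, so later operations on the same thread
 * reuse it. bdb_locker_id_free reclaims the ID when the thread exits.
 */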