/* cache.c - routines to maintain an in-core cache of entries */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
 *
 * Copyright 2000-2006 The OpenLDAP Foundation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted only as authorized by the OpenLDAP
 * Public License.
 *
 * A copy of this license is available in the file LICENSE in the
 * top-level directory of the distribution or, alternatively, at
 * <http://www.OpenLDAP.org/license.html>.
 */

#include "portable.h"

#include <stdio.h>

#include <ac/string.h>
#include <ac/socket.h>

#include "back-bdb.h"

#ifdef BDB_HIER
#define bdb_cache_lru_add	hdb_cache_lru_add
#endif

static void bdb_cache_lru_add( struct bdb_info *bdb, EntryInfo *ei );

static int	bdb_cache_delete_internal(Cache *cache, EntryInfo *e, int decr);
#ifdef LDAP_DEBUG
static void	bdb_lru_print(Cache *cache);
#endif
static EntryInfo *
bdb_cache_entryinfo_new( Cache *cache )
{
	EntryInfo *ei = NULL;

	if ( cache->c_eifree ) {
		ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
		if ( cache->c_eifree ) {
			ei = cache->c_eifree;
			cache->c_eifree = ei->bei_lrunext;
		}
		ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
	}
	if ( ei ) {
		ei->bei_lrunext = NULL;
	} else {
		ei = ch_calloc(1, sizeof(struct bdb_entry_info));
		ldap_pvt_thread_mutex_init( &ei->bei_kids_mutex );
	}

	return ei;
}
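
/* EntryInfo structs are never freed individually; bdb_cache_delete_cleanup
 * pushes them onto cache->c_eifree and they are recycled here. The free
 * list head is re-checked after taking c_rwlock (double-checked locking),
 * so the common path costs neither the write lock nor a calloc.
 */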
/* Atomically release and reacquire a lock */
int
bdb_cache_entry_db_relock(
	DB_ENV *env,
	u_int32_t locker,
	EntryInfo *ei,
	int rw,
	int tryOnly,
	DB_LOCK *lock )
{
#ifdef NO_THREADS
	return 0;
#else
	int	rc;
	DBT	lockobj;
	DB_LOCKREQ list[2];

	if ( !lock ) return 0;

	lockobj.data = &ei->bei_id;
	lockobj.size = sizeof(ei->bei_id) + 1;

	list[0].op = DB_LOCK_PUT;
	list[0].lock = *lock;
	list[1].op = DB_LOCK_GET;
	list[1].mode = rw ? DB_LOCK_WRITE : DB_LOCK_READ;
	list[1].obj = &lockobj;
	rc = env->lock_vec(env, locker, tryOnly ? DB_LOCK_NOWAIT : 0,
		list, 2, NULL );

	if (rc && !tryOnly) {
		Debug( LDAP_DEBUG_TRACE,
			"bdb_cache_entry_db_relock: entry %ld, rw %d, rc %d\n",
			ei->bei_id, rw, rc );
	} else {
		*lock = list[1].lock;
	}
	return rc;
#endif
}
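
/* The DB_LOCK_PUT and DB_LOCK_GET are submitted as a single lock_vec()
 * request list rather than as separate lock_put()/lock_get() calls.
 * With tryOnly set, DB_LOCK_NOWAIT makes the GET return
 * DB_LOCK_NOTGRANTED instead of blocking on a contended entry.
 */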
static int
bdb_cache_entry_db_lock( DB_ENV *env, u_int32_t locker, EntryInfo *ei,
	int rw, int tryOnly, DB_LOCK *lock )
{
#ifdef NO_THREADS
	return 0;
#else
	int	rc;
	DBT	lockobj;
	int	db_rw;

	if ( !lock ) return 0;

	if ( rw )
		db_rw = DB_LOCK_WRITE;
	else
		db_rw = DB_LOCK_READ;

	lockobj.data = &ei->bei_id;
	lockobj.size = sizeof(ei->bei_id) + 1;

	rc = LOCK_GET(env, locker, tryOnly ? DB_LOCK_NOWAIT : 0,
		&lockobj, db_rw, lock);
	if (rc && !tryOnly) {
		Debug( LDAP_DEBUG_TRACE,
			"bdb_cache_entry_db_lock: entry %ld, rw %d, rc %d\n",
			ei->bei_id, rw, rc );
	}
	return rc;
#endif /* NO_THREADS */
}
int
bdb_cache_entry_db_unlock ( DB_ENV *env, DB_LOCK *lock )
{
#ifdef NO_THREADS
	return 0;
#else
	int rc;

	if ( !lock ) return 0;

	rc = LOCK_PUT ( env, lock );
	return rc;
#endif
}
static int
bdb_cache_entryinfo_destroy( EntryInfo *e )
{
	ldap_pvt_thread_mutex_destroy( &e->bei_kids_mutex );
	free( e->bei_nrdn.bv_val );
#ifdef BDB_HIER
	free( e->bei_rdn.bv_val );
#endif
	free( e );
	return 0;
}
#define LRU_DELETE( cache, ei ) do { \
	if ( (ei)->bei_lruprev != NULL ) { \
		(ei)->bei_lruprev->bei_lrunext = (ei)->bei_lrunext; \
	} else { \
		(cache)->c_lruhead = (ei)->bei_lrunext; \
	} \
	if ( (ei)->bei_lrunext != NULL ) { \
		(ei)->bei_lrunext->bei_lruprev = (ei)->bei_lruprev; \
	} else { \
		(cache)->c_lrutail = (ei)->bei_lruprev; \
	} \
	(ei)->bei_lrunext = (ei)->bei_lruprev = NULL; \
} while(0)
#define LRU_ADD( cache, ei ) do { \
	(ei)->bei_lrunext = (cache)->c_lruhead; \
	if ( (ei)->bei_lrunext != NULL ) { \
		(ei)->bei_lrunext->bei_lruprev = (ei); \
	} \
	(cache)->c_lruhead = (ei); \
	(ei)->bei_lruprev = NULL; \
	if ( (cache)->c_lrutail == NULL ) { \
		(cache)->c_lrutail = (ei); \
	} \
} while(0)
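
/* LRU bookkeeping: c_lruhead is the most recently used entry, c_lrutail
 * the least; bdb_cache_lru_purge evicts from the tail. Refreshing a hot
 * entry is therefore LRU_DELETE from its current spot followed by
 * bdb_cache_lru_add at the head, as bdb_cache_find_id does below.
 */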
/* Do a length-ordered sort on normalized RDNs */
static int
bdb_rdn_cmp( const void *v_e1, const void *v_e2 )
{
	const EntryInfo *e1 = v_e1, *e2 = v_e2;
	int rc = e1->bei_nrdn.bv_len - e2->bei_nrdn.bv_len;
	if (rc == 0) {
		rc = strncmp( e1->bei_nrdn.bv_val, e2->bei_nrdn.bv_val,
			e1->bei_nrdn.bv_len );
	}
	return rc;
}
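
/* Example of the resulting order: "o=x" (length 3) sorts before
 * "cn=abc" (length 6) regardless of alphabetic order; strncmp only
 * breaks ties between RDNs of equal length, which keeps the AVL
 * comparisons cheap.
 */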
static int
bdb_id_cmp( const void *v_e1, const void *v_e2 )
{
	const EntryInfo *e1 = v_e1, *e2 = v_e2;
	return e1->bei_id - e2->bei_id;
}
/* Create an entryinfo in the cache. Caller must release the locks later.
 */
static int
bdb_entryinfo_add_internal(
	struct bdb_info *bdb,
	EntryInfo *ei,
	EntryInfo **res )
{
	EntryInfo *ei2 = NULL;

	ei2 = bdb_cache_entryinfo_new( &bdb->bi_cache );

	ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
	bdb_cache_entryinfo_lock( ei->bei_parent );

	ei2->bei_id = ei->bei_id;
	ei2->bei_parent = ei->bei_parent;
#ifdef BDB_HIER
	ei2->bei_rdn = ei->bei_rdn;
#endif
#ifdef SLAP_ZONE_ALLOC
	ei2->bei_bdb = bdb;
#endif

	/* Add to cache ID tree */
	if (avl_insert( &bdb->bi_cache.c_idtree, ei2, bdb_id_cmp, avl_dup_error )) {
		EntryInfo *eix;
		eix = avl_find( bdb->bi_cache.c_idtree, ei2, bdb_id_cmp );
		bdb_cache_entryinfo_destroy( ei2 );
		ei2 = eix;
#ifdef BDB_HIER
		/* It got freed above because its value was
		 * assigned to ei2.
		 */
		ei->bei_rdn.bv_val = NULL;
#endif
	} else {
		bdb->bi_cache.c_eiused++;
		ber_dupbv( &ei2->bei_nrdn, &ei->bei_nrdn );

		/* This is a new leaf node. But if the parent had no kids, the
		 * parent was itself the leaf and has just stopped being one, so
		 * the leaf count only grows when the parent already has kids.
		 * The root (id 0) is never counted as a leaf, hence !bei_id.
		 */
		if ( ei->bei_parent->bei_kids || !ei->bei_parent->bei_id )
			bdb->bi_cache.c_leaves++;
		avl_insert( &ei->bei_parent->bei_kids, ei2, bdb_rdn_cmp,
			avl_dup_error );
#ifdef BDB_HIER
		ei->bei_parent->bei_ckids++;
#endif
	}

	*res = ei2;
	return 0;
}
/* Find the EntryInfo for the requested DN. If the DN cannot be found, return
 * the info for its closest ancestor. *res should be NULL to process a
 * complete DN starting from the tree root. Otherwise *res must be the
 * immediate parent of the requested DN, and only the RDN will be searched.
 * The EntryInfo is locked upon return and must be unlocked by the caller.
 */
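
/* Illustration (names hypothetical): resolving
 * "cn=foo,ou=bar,dc=example,dc=com" under suffix "dc=example,dc=com"
 * starts with bei_nrdn covering the suffix at the c_dntree root, then
 * descends one bei_kids level per RDN: "ou=bar", then "cn=foo".
 */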
int
bdb_cache_find_ndn(
	Operation	*op,
	DB_TXN		*txn,
	struct berval	*ndn,
	EntryInfo	**res )
{
	struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
	EntryInfo	ei, *eip, *ei2;
	int rc = 0;
	char *ptr;

	/* this function is always called with normalized DN */
	if ( *res ) {
		/* we're doing a onelevel search for an RDN */
		ei.bei_nrdn.bv_val = ndn->bv_val;
		ei.bei_nrdn.bv_len = dn_rdnlen( op->o_bd, ndn );
		eip = *res;
	} else {
		/* we're searching a full DN from the root */
		ptr = ndn->bv_val + ndn->bv_len - op->o_bd->be_nsuffix[0].bv_len;
		ei.bei_nrdn.bv_val = ptr;
		ei.bei_nrdn.bv_len = op->o_bd->be_nsuffix[0].bv_len;
		/* Skip to next rdn if suffix is empty */
		if ( ei.bei_nrdn.bv_len == 0 ) {
			for (ptr = ei.bei_nrdn.bv_val - 2; ptr > ndn->bv_val
				&& !DN_SEPARATOR(*ptr); ptr--) /* empty */;
			if ( ptr >= ndn->bv_val ) {
				if (DN_SEPARATOR(*ptr)) ptr++;
				ei.bei_nrdn.bv_len = ei.bei_nrdn.bv_val - ptr;
				ei.bei_nrdn.bv_val = ptr;
			}
		}
		eip = &bdb->bi_cache.c_dntree;
	}
	for ( bdb_cache_entryinfo_lock( eip ); eip; ) {
		ei.bei_parent = eip;
		ei2 = (EntryInfo *)avl_find( eip->bei_kids, &ei, bdb_rdn_cmp );
		if ( !ei2 ) {
			int len = ei.bei_nrdn.bv_len;

			if ( BER_BVISEMPTY( ndn )) {
				*res = eip;
				return LDAP_SUCCESS;
			}

			ei.bei_nrdn.bv_len = ndn->bv_len -
				(ei.bei_nrdn.bv_val - ndn->bv_val);
			bdb_cache_entryinfo_unlock( eip );

			rc = bdb_dn2id( op, txn, &ei.bei_nrdn, &ei );
			if ( rc ) {
				bdb_cache_entryinfo_lock( eip );
				*res = eip;
				return rc;
			}

			/* DN exists but needs to be added to cache */
			ei.bei_nrdn.bv_len = len;
			rc = bdb_entryinfo_add_internal( bdb, &ei, &ei2 );
			/* add_internal left eip and c_rwlock locked */
			ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
			if ( rc ) {
				*res = eip;
				return rc;
			}
		} else if ( ei2->bei_state & CACHE_ENTRY_DELETED ) {
			/* In the midst of deleting? Give it a chance to
			 * complete, then retry this level.
			 */
			bdb_cache_entryinfo_unlock( eip );
			ldap_pvt_thread_yield();
			bdb_cache_entryinfo_lock( eip );
			continue;
		}
		bdb_cache_entryinfo_unlock( eip );
		bdb_cache_entryinfo_lock( ei2 );

		eip = ei2;

		/* Advance to next lower RDN */
		for (ptr = ei.bei_nrdn.bv_val - 2; ptr > ndn->bv_val
			&& !DN_SEPARATOR(*ptr); ptr--) /* empty */;
		if ( ptr >= ndn->bv_val ) {
			if (DN_SEPARATOR(*ptr)) ptr++;
			ei.bei_nrdn.bv_len = ei.bei_nrdn.bv_val - ptr - 1;
			ei.bei_nrdn.bv_val = ptr;
		}
		if ( ptr < ndn->bv_val ) {
			*res = eip;
			break;
		}
	}

	return rc;
}
#ifdef BDB_HIER
/* Walk up the tree from a child node, looking for an ID that's already
 * been linked into the cache.
 */
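
/* hdb's dn2id index stores only parent pointers, so there is no suffix
 * to walk down from. Instead we climb from the target ID toward the
 * root, creating placeholder nodes flagged CACHE_ENTRY_NOT_LINKED,
 * until we hit an ancestor already in c_idtree; the flags are cleared
 * once the whole chain has been spliced in.
 */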
static int
hdb_cache_find_parent(
	Operation *op,
	DB_TXN	*txn,
	u_int32_t	locker,
	ID id,
	EntryInfo **res )
{
	struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
	EntryInfo ei, eip, *ei2 = NULL, *ein = NULL, *eir = NULL;
	int rc;

	ei.bei_id = id;
	ei.bei_kids = NULL;

	for (;;) {
		rc = hdb_dn2id_parent( op, txn, locker, &ei, &eip.bei_id );
		if ( rc ) break;
		/* Save the previous node, if any */
		ei2 = ein;

		/* Create a new node for the current ID */
		ein = bdb_cache_entryinfo_new( &bdb->bi_cache );
		ein->bei_id = ei.bei_id;
		ein->bei_kids = ei.bei_kids;
		ein->bei_nrdn = ei.bei_nrdn;
		ein->bei_rdn = ei.bei_rdn;
		ein->bei_ckids = ei.bei_ckids;
#ifdef SLAP_ZONE_ALLOC
		ein->bei_bdb = bdb;
#endif

		/* This node is not fully connected yet */
		ein->bei_state = CACHE_ENTRY_NOT_LINKED;
		/* Insert this node into the ID tree */
		ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
		if ( avl_insert( &bdb->bi_cache.c_idtree, (caddr_t)ein,
			bdb_id_cmp, avl_dup_error ) ) {

			/* Someone else created this node just before us.
			 * Free our new copy and use the existing one.
			 */
			bdb_cache_entryinfo_destroy( ein );
			ein = (EntryInfo *)avl_find( bdb->bi_cache.c_idtree,
				(caddr_t) &ei, bdb_id_cmp );

			/* Link in any kids we've already processed */
			if ( ei2 ) {
				bdb_cache_entryinfo_lock( ein );
				avl_insert( &ein->bei_kids, (caddr_t)ei2,
					bdb_rdn_cmp, avl_dup_error );
				bdb_cache_entryinfo_unlock( ein );
			}
		}
		/* If this is the first time, save this node
		 * to be returned later.
		 */
		if ( eir == NULL ) eir = ein;

		/* If there was a previous node, link it to this one */
		if ( ei2 ) ei2->bei_parent = ein;

		/* Look for this node's parent */
		if ( eip.bei_id ) {
			ei2 = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
				(caddr_t) &eip, bdb_id_cmp );
		} else {
			ei2 = &bdb->bi_cache.c_dntree;
		}
		bdb->bi_cache.c_eiused++;
		if ( ei2 && ( ei2->bei_kids || !ei2->bei_id ))
			bdb->bi_cache.c_leaves++;
		ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
		/* Add the new node to the LRU */
		ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_head_mutex );
		bdb_cache_lru_add( bdb, ein );

		/* Got the parent, link in and we're done. */
		if ( ei2 ) {
			bdb_cache_entryinfo_lock( ei2 );
			ein->bei_parent = ei2;
			avl_insert( &ei2->bei_kids, (caddr_t)ein, bdb_rdn_cmp,
				avl_dup_error );
			bdb_cache_entryinfo_unlock( ei2 );
			bdb_cache_entryinfo_lock( eir );

			/* Reset all the state info */
			for (ein = eir; ein != ei2; ein=ein->bei_parent)
				ein->bei_state &= ~CACHE_ENTRY_NOT_LINKED;

			*res = eir;
			break;
		}

		/* Parent not yet cached; repeat the search with its ID */
		ei.bei_id = eip.bei_id;
		ei.bei_kids = NULL;
		avl_insert( &ei.bei_kids, (caddr_t)ein, bdb_rdn_cmp,
			avl_dup_error );
	}
	return rc;
}
/* Used by hdb_dn2idl when loading the EntryInfo for all the children
 * of a given node
 */
int hdb_cache_load(
	struct bdb_info *bdb,
	EntryInfo *ei,
	EntryInfo **res )
{
	EntryInfo *ei2;
	int rc;

	/* See if we already have this one */
	bdb_cache_entryinfo_lock( ei->bei_parent );
	ei2 = (EntryInfo *)avl_find( ei->bei_parent->bei_kids, ei, bdb_rdn_cmp );
	bdb_cache_entryinfo_unlock( ei->bei_parent );

	if ( !ei2 ) {
		/* Not found, add it */
		struct berval bv;

		/* bei_rdn was not malloc'd before, do it now */
		ber_dupbv( &bv, &ei->bei_rdn );
		ei->bei_rdn = bv;

		rc = bdb_entryinfo_add_internal( bdb, ei, res );
		bdb_cache_entryinfo_unlock( ei->bei_parent );
		ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
	} else {
		/* Found, return it */
		*res = ei2;
		rc = 0;
	}
	return rc;
}
#endif /* BDB_HIER */
static void *
bdb_cache_lru_purge( void *ctx, void *arg )
{
	struct re_s *rtask = arg;
	struct bdb_info *bdb = rtask->arg;
	DB_LOCK		lock, *lockp;
	EntryInfo *elru, *elprev;
	int count = 0;

	if ( bdb->bi_cache.c_locker ) {
		lockp = &lock;
	} else {
		lockp = NULL;
	}

	/* set lru mutex */
	ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_tail_mutex );

	/* Look for an unused entry to remove */
	for (elru = bdb->bi_cache.c_lrutail; elru; elru = elprev ) {
		elprev = elru->bei_lruprev;

		/* If we can successfully writelock it, then
		 * the object is idle.
		 */
		if ( bdb_cache_entry_db_lock( bdb->bi_dbenv,
				bdb->bi_cache.c_locker, elru, 1, 1, lockp ) == 0 ) {
			/* If this node is in the process of linking into the cache,
			 * or this node is being deleted, skip it.
			 */
			if ( elru->bei_state &
				( CACHE_ENTRY_NOT_LINKED | CACHE_ENTRY_DELETED )) {
				bdb_cache_entry_db_unlock( bdb->bi_dbenv, lockp );
				continue;
			}

			/* Free entry for this node if it's present */
			if ( elru->bei_e ) {
				elru->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
				bdb_entry_return( bdb, elru->bei_e, elru->bei_zseq );
#else
				bdb_entry_return( elru->bei_e );
#endif
				elru->bei_e = NULL;
				count++;
			}
			/* ITS#4010 if we're in slapcat, and this node is a leaf
			 * node, free it.
			 *
			 * FIXME: we need to do this for slapd as well (which is
			 * why we compute bi_cache.c_leaves now) but at the moment
			 * we can't because it causes unresolvable deadlocks.
			 */
			if ( slapMode & SLAP_TOOL_READONLY ) {
				if ( !elru->bei_kids ) {
					/* This does LRU_DELETE for us */
					bdb_cache_delete_internal( &bdb->bi_cache, elru, 0 );
					bdb_cache_delete_cleanup( &bdb->bi_cache, elru );
				}
				/* Leave node on LRU list for a future pass */
			} else {
				LRU_DELETE( &bdb->bi_cache, elru );
			}
			bdb_cache_entry_db_unlock( bdb->bi_dbenv, lockp );
			if ( count == bdb->bi_cache.c_minfree ) {
				ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
				bdb->bi_cache.c_cursize -= bdb->bi_cache.c_minfree;
				if ( bdb->bi_cache.c_maxsize - bdb->bi_cache.c_cursize >=
					bdb->bi_cache.c_minfree )
					elprev = NULL;	/* freed enough, stop the scan */
				ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
				count = 0;
			}
		}
	}

	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_tail_mutex );
	/* If we're running as a task, drop the task */
	if ( ctx ) {
		ldap_pvt_thread_mutex_lock( &slapd_rq.rq_mutex );
		ldap_pvt_runqueue_stoptask( &slapd_rq, rtask );
		/* Defer processing till we're needed again */
		ldap_pvt_runqueue_resched( &slapd_rq, rtask, 1 );
		ldap_pvt_thread_mutex_unlock( &slapd_rq.rq_mutex );
	}

	return NULL;
}
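
/* Purge policy: the scan above frees idle entries (those whose trylock
 * succeeds) starting from the LRU tail, and stops once c_minfree entries
 * have been freed and the cache sits at least c_minfree below c_maxsize.
 * Busy, still-linking, or deleted entries are simply skipped.
 */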
/* caller must have lru_head_mutex locked. mutex
 * will be unlocked on return.
 */
static void
bdb_cache_lru_add(
	struct bdb_info *bdb,
	EntryInfo *ei )
{
	LRU_ADD( &bdb->bi_cache, ei );
	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );

	/* See if we're above the cache size limit */
	if ( bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize ) {
		if ( slapMode & SLAP_TOOL_MODE ) {
			struct re_s rtask;

			rtask.arg = bdb;
			bdb_cache_lru_purge( NULL, &rtask );
		} else {
			ldap_pvt_thread_mutex_lock( &slapd_rq.rq_mutex );
			if ( bdb->bi_cache_task ) {
				if ( !ldap_pvt_runqueue_isrunning( &slapd_rq,
					bdb->bi_cache_task )) {
					/* We want it to start right now */
					bdb->bi_cache_task->interval.tv_sec = 0;
					ldap_pvt_runqueue_resched( &slapd_rq, bdb->bi_cache_task,
						0 );
					/* But don't try to reschedule it while it's running */
					bdb->bi_cache_task->interval.tv_sec = 3600;
				}
			} else {
				bdb->bi_cache_task = ldap_pvt_runqueue_insert( &slapd_rq, 3600,
					bdb_cache_lru_purge, bdb, "bdb_cache_lru_purge",
					bdb->bi_dbenv_home );
			}
			ldap_pvt_thread_mutex_unlock( &slapd_rq.rq_mutex );

			/* Don't bother waking if the purge task is already running */
			if ( !ldap_pvt_runqueue_isrunning( &slapd_rq, bdb->bi_cache_task ))
				slap_wake_listener();
		}
	}
}
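
/* Design note: in tool mode the purge runs inline on a fake re_s, since
 * there is no runqueue; in server mode it runs as a runqueue task so the
 * thread that overflowed the cache isn't stalled doing evictions. The
 * 3600-second interval is just a parking value; the task is kicked on
 * demand by zeroing tv_sec and rescheduling.
 */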
/* Find the EntryInfo for a given ID, if it is cached */
EntryInfo *
bdb_cache_find_info(
	struct bdb_info *bdb,
	ID id )
{
	EntryInfo	ei = { 0 },
			*ei2;

	ei.bei_id = id;

	ldap_pvt_thread_rdwr_rlock( &bdb->bi_cache.c_rwlock );
	ei2 = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
		(caddr_t) &ei, bdb_id_cmp );
	ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
	return ei2;
}
/*
 * cache_find_id - find an entry in the cache, given id.
 * The entry is locked for Read upon return. Call with islocked TRUE if
 * the supplied *eip was already locked.
 */
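
/* Load protocol: once the EntryInfo is found (or faulted in via
 * bdb_cache_find_ndn / hdb_cache_find_parent), exactly one thread sets
 * CACHE_ENTRY_LOADING and reads the entry from id2entry; any other
 * thread that finds bei_e still NULL yields and retries until the
 * loader finishes.
 */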
int
bdb_cache_find_id(
	Operation *op,
	DB_TXN	*tid,
	ID	id,
	EntryInfo **eip,
	int	islocked,
	u_int32_t	locker,
	DB_LOCK	*lock )
{
	struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
	Entry	*ep = NULL;
	int rc = 0, load = 0;
	EntryInfo ei = { 0 };

	ei.bei_id = id;

#ifdef SLAP_ZONE_ALLOC
	slap_zh_rlock(bdb->bi_cache.c_zctx);
#endif
	/* If we weren't given any info, see if we have it already cached */
	if ( *eip == NULL ) {
again:	ldap_pvt_thread_rdwr_rlock( &bdb->bi_cache.c_rwlock );
		*eip = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
			(caddr_t) &ei, bdb_id_cmp );
		if ( *eip ) {
			/* If the lock attempt fails, the info is in use */
			if ( ldap_pvt_thread_mutex_trylock(
					&(*eip)->bei_kids_mutex )) {
				ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
				/* If this node is being deleted, treat
				 * as if the delete has already finished
				 */
				if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
					return DB_NOTFOUND;
				}
				/* otherwise, wait for the info to free up */
				ldap_pvt_thread_yield();
				goto again;
			}
			/* If this info isn't hooked up to its parent yet,
			 * unlock and wait for it to be fully initialized
			 */
			if ( (*eip)->bei_state & CACHE_ENTRY_NOT_LINKED ) {
				bdb_cache_entryinfo_unlock( *eip );
				ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
				ldap_pvt_thread_yield();
				goto again;
			}
			islocked = 1;
		}
		ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
	}
	/* See if the ID exists in the database; add it to the cache if so */
	if ( *eip == NULL ) {
#ifndef BDB_HIER
		rc = bdb_id2entry( op->o_bd, tid, locker, id, &ep );
		if ( rc == 0 ) {
			rc = bdb_cache_find_ndn( op, tid,
				&ep->e_nname, eip );
			if ( *eip ) islocked = 1;
			if ( rc ) {
				ep->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
				bdb_entry_return( bdb, ep, (*eip)->bei_zseq );
#else
				bdb_entry_return( ep );
#endif
				ep = NULL;
			}
		}
#else
		rc = hdb_cache_find_parent(op, tid, locker, id, eip );
		if ( rc == 0 && *eip ) islocked = 1;
#endif
	}
	/* Ok, we found the info, do we have the entry? */
	if ( *eip && rc == 0 ) {
		if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
			rc = DB_NOTFOUND;
		} else {
			/* Make sure only one thread tries to load the entry */
load1:
#ifdef SLAP_ZONE_ALLOC
			if ((*eip)->bei_e && !slap_zn_validate(
					bdb->bi_cache.c_zctx, (*eip)->bei_e, (*eip)->bei_zseq)) {
				(*eip)->bei_e = NULL;
				(*eip)->bei_zseq = 0;
			}
#endif
			if ( !(*eip)->bei_e && !((*eip)->bei_state & CACHE_ENTRY_LOADING)) {
				load = 1;
				(*eip)->bei_state |= CACHE_ENTRY_LOADING;
			}

			if ( islocked ) {
				bdb_cache_entryinfo_unlock( *eip );
				islocked = 0;
			}
			rc = bdb_cache_entry_db_lock( bdb->bi_dbenv, locker, *eip, 0, 0, lock );
			if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
				rc = DB_NOTFOUND;
				bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
			} else if ( rc == 0 ) {
				if ( load ) {
					/* Give up original read lock, obtain write lock
					 */
					rc = bdb_cache_entry_db_relock( bdb->bi_dbenv, locker,
						*eip, 1, 0, lock );
					if ( rc == 0 && !ep) {
						rc = bdb_id2entry( op->o_bd, tid, locker, id, &ep );
					}
					if ( rc == 0 ) {
						ep->e_private = *eip;
						(*eip)->bei_e = ep;
#ifdef SLAP_ZONE_ALLOC
						(*eip)->bei_zseq = *((ber_len_t *)ep - 2);
#endif
						ep = NULL;
					}
					(*eip)->bei_state ^= CACHE_ENTRY_LOADING;
					if ( rc == 0 ) {
						/* If we succeeded, downgrade back to a readlock. */
						rc = bdb_cache_entry_db_relock( bdb->bi_dbenv, locker,
							*eip, 0, 0, lock );
					} else {
						/* Otherwise, release the lock. */
						bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
					}
				} else if ( !(*eip)->bei_e ) {
					/* Some other thread is trying to load the entry,
					 * give it a chance to finish.
					 */
					bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
					ldap_pvt_thread_yield();
					bdb_cache_entryinfo_lock( *eip );
					islocked = 1;
					goto load1;
				}
#ifdef BDB_HIER
				else {
					/* Check for subtree renames
					 */
					rc = bdb_fix_dn( (*eip)->bei_e, 1 );
					if ( rc ) {
						bdb_cache_entry_db_relock( bdb->bi_dbenv,
							locker, *eip, 1, 0, lock );
						/* check again in case other modifier did it already */
						if ( bdb_fix_dn( (*eip)->bei_e, 1 ) )
							rc = bdb_fix_dn( (*eip)->bei_e, 2 );
						bdb_cache_entry_db_relock( bdb->bi_dbenv,
							locker, *eip, 0, 0, lock );
					}
				}
#endif /* BDB_HIER */
			}
		}
	}
	if ( islocked ) {
		bdb_cache_entryinfo_unlock( *eip );
	}
	if ( ep ) {
		ep->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
		bdb_entry_return( bdb, ep, (*eip)->bei_zseq );
#else
		bdb_entry_return( ep );
#endif
	}
	if ( rc == 0 ) {
		if ( load ) {
			ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
			bdb->bi_cache.c_cursize++;
			ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
		}

		/* set lru mutex */
		ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_head_mutex );

		/* If the LRU list has only one entry and this is it, it
		 * doesn't need to be added again.
		 */
		if ( bdb->bi_cache.c_lruhead == bdb->bi_cache.c_lrutail &&
			bdb->bi_cache.c_lruhead == *eip ) {
			ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );
		} else {
			/* if entry is on LRU list, remove from old spot */
			if ( (*eip)->bei_lrunext || (*eip)->bei_lruprev ) {
				ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_tail_mutex );
				LRU_DELETE( &bdb->bi_cache, *eip );
				ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_tail_mutex );
			}
			/* lru_head_mutex is unlocked for us */
			bdb_cache_lru_add( bdb, *eip );
		}
	}
#ifdef SLAP_ZONE_ALLOC
	if (rc == 0 && (*eip)->bei_e) {
		slap_zn_rlock(bdb->bi_cache.c_zctx, (*eip)->bei_e);
	}
	slap_zh_runlock(bdb->bi_cache.c_zctx);
#endif
	return rc;
}
int
bdb_cache_children(
	Operation *op,
	DB_TXN *txn,
	Entry *e )
{
	int rc;

	if ( BEI(e)->bei_kids ) {
		return 0;
	}
	if ( BEI(e)->bei_state & CACHE_ENTRY_NO_KIDS ) {
		return DB_NOTFOUND;
	}
	rc = bdb_dn2id_children( op, txn, e );
	if ( rc == DB_NOTFOUND ) {
		BEI(e)->bei_state |= CACHE_ENTRY_NO_KIDS | CACHE_ENTRY_NO_GRANDKIDS;
	}
	return rc;
}
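
/* Negative caching: CACHE_ENTRY_NO_KIDS / NO_GRANDKIDS remember a
 * childless result so repeated onelevel/subtree checks can skip the
 * dn2id lookup entirely; bdb_cache_add clears the flags again when a
 * child appears.
 */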
/* Update the cache after a successful database Add. */
int
bdb_cache_add(
	struct bdb_info *bdb,
	EntryInfo *eip,
	Entry *e,
	struct berval *nrdn,
	u_int32_t locker )
{
	EntryInfo *new, ei = { 0 };
	DB_LOCK lock;
	int rc;
#ifdef BDB_HIER
	struct berval rdn = e->e_name;
#endif

	ei.bei_id = e->e_id;
	ei.bei_parent = eip;
	ei.bei_nrdn = *nrdn;

	/* Lock this entry so that bdb_add can run to completion.
	 * It can only fail if BDB has run out of lock resources.
	 */
	rc = bdb_cache_entry_db_lock( bdb->bi_dbenv, locker, &ei, 1, 0, &lock );
	if ( rc ) {
		bdb_cache_entryinfo_unlock( eip );
		return rc;
	}

#ifdef BDB_HIER
	if ( nrdn->bv_len != e->e_nname.bv_len ) {
		char *ptr = ber_bvchr( &rdn, ',' );
		assert( ptr != NULL );
		rdn.bv_len = ptr - rdn.bv_val;
	}
	ber_dupbv( &ei.bei_rdn, &rdn );
	if ( eip->bei_dkids ) eip->bei_dkids++;
#endif

	rc = bdb_entryinfo_add_internal( bdb, &ei, &new );
	/* bdb_csn_commit can cause this when adding the database root entry */
	if ( new->bei_e ) {
		new->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
		bdb_entry_return( bdb, new->bei_e, new->bei_zseq );
#else
		bdb_entry_return( new->bei_e );
#endif
	}
	new->bei_e = e;
	e->e_private = new;
	new->bei_state = CACHE_ENTRY_NO_KIDS | CACHE_ENTRY_NO_GRANDKIDS;
	eip->bei_state &= ~CACHE_ENTRY_NO_KIDS;
	if ( eip->bei_parent ) {
		eip->bei_parent->bei_state &= ~CACHE_ENTRY_NO_GRANDKIDS;
	}
	bdb_cache_entryinfo_unlock( eip );

	++bdb->bi_cache.c_cursize;
	ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );

	/* set lru mutex */
	ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_head_mutex );

	/* lru_head_mutex is unlocked for us */
	bdb_cache_lru_add( bdb, new );

	return rc;
}
int
bdb_cache_modify(
	Entry *e,
	Attribute *newAttrs,
	DB_ENV *env,
	u_int32_t locker,
	DB_LOCK *lock )
{
	EntryInfo *ei = BEI(e);
	int rc;

	/* Get write lock on data */
	rc = bdb_cache_entry_db_relock( env, locker, ei, 1, 0, lock );

	/* If we've done repeated mods on a cached entry, then e_attrs
	 * is no longer contiguous with the entry, and must be freed.
	 * (An entry decoded from the database is one contiguous chunk,
	 * with the attributes laid out right after the Entry struct,
	 * i.e. at (e+1).)
	 */
	if ( (void *)e->e_attrs != (void *)(e+1) ) {
		attrs_free( e->e_attrs );
	}
	e->e_attrs = newAttrs;

	return rc;
}
/*
 * Change the rdn in the entryinfo. Also move to a new parent if needed.
 */
int
bdb_cache_modrdn(
	struct bdb_info *bdb,
	Entry *e,
	struct berval *nrdn,
	Entry *new,
	EntryInfo *ein,
	u_int32_t locker,
	DB_LOCK *lock )
{
	EntryInfo *ei = BEI(e), *pei;
	int rc;
#ifdef BDB_HIER
	struct berval rdn;
#endif

	/* Get write lock on data */
	rc = bdb_cache_entry_db_relock( bdb->bi_dbenv, locker, ei, 1, 0, lock );
	if ( rc ) return rc;

	/* If we've done repeated mods on a cached entry, then e_attrs
	 * is no longer contiguous with the entry, and must be freed.
	 */
	if ( (void *)e->e_attrs != (void *)(e+1) ) {
		attrs_free( e->e_attrs );
	}
	e->e_attrs = new->e_attrs;
	if( e->e_nname.bv_val < e->e_bv.bv_val ||
		e->e_nname.bv_val > e->e_bv.bv_val + e->e_bv.bv_len )
	{
		ch_free(e->e_name.bv_val);
		ch_free(e->e_nname.bv_val);
	}
	e->e_name = new->e_name;
	e->e_nname = new->e_nname;

	/* Lock the parent's kids AVL tree */
	pei = ei->bei_parent;
	bdb_cache_entryinfo_lock( pei );
	avl_delete( &pei->bei_kids, (caddr_t) ei, bdb_rdn_cmp );
	free( ei->bei_nrdn.bv_val );
	ber_dupbv( &ei->bei_nrdn, nrdn );
#ifdef BDB_HIER
	free( ei->bei_rdn.bv_val );

	rdn = e->e_name;
	if ( nrdn->bv_len != e->e_nname.bv_len ) {
		char *ptr = ber_bvchr(&rdn, ',');
		assert( ptr != NULL );
		rdn.bv_len = ptr - rdn.bv_val;
	}
	ber_dupbv( &ei->bei_rdn, &rdn );
#endif
	if ( !ein ) {
		ein = ei->bei_parent;
	} else {
		ei->bei_parent = ein;
		bdb_cache_entryinfo_unlock( pei );
		bdb_cache_entryinfo_lock( ein );
#ifdef BDB_HIER
		/* Record the generation number of this change */
		ldap_pvt_thread_mutex_lock( &bdb->bi_modrdns_mutex );
		bdb->bi_modrdns++;
		ei->bei_modrdns = bdb->bi_modrdns;
		ldap_pvt_thread_mutex_unlock( &bdb->bi_modrdns_mutex );
#endif
	}

	avl_insert( &ein->bei_kids, ei, bdb_rdn_cmp, avl_dup_error );
	bdb_cache_entryinfo_unlock( ein );
	return rc;
}
/*
 * cache_delete - delete the entry e from the cache.
 *
 * returns:	0	e was deleted ok
 *		1	e was not in the cache
 *		-1	something bad happened
 */
int
bdb_cache_delete(
	Cache	*cache,
	Entry	*e,
	DB_ENV	*env,
	u_int32_t locker,
	DB_LOCK	*lock )
{
	EntryInfo *ei = BEI(e);
	int	rc;

	assert( e->e_private != NULL );

	/* Set this early, warn off any queriers */
	ei->bei_state |= CACHE_ENTRY_DELETED;

	/* Lock the entry's info */
	bdb_cache_entryinfo_lock( ei );

	/* Get write lock on the data */
	rc = bdb_cache_entry_db_relock( env, locker, ei, 1, 0, lock );
	if ( rc ) {
		/* couldn't lock, undo and give up */
		ei->bei_state ^= CACHE_ENTRY_DELETED;
		bdb_cache_entryinfo_unlock( ei );
		return rc;
	}

	Debug( LDAP_DEBUG_TRACE, "====> bdb_cache_delete( %ld )\n",
		e->e_id, 0, 0 );

	/* set lru mutex */
	ldap_pvt_thread_mutex_lock( &cache->lru_tail_mutex );

	/* set cache write lock */
	ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );

	rc = bdb_cache_delete_internal( cache, e->e_private, 1 );

	/* free cache write lock */
	ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );

	/* free lru mutex */
	ldap_pvt_thread_mutex_unlock( &cache->lru_tail_mutex );

	/* Leave entry info locked */

	return rc;
}
void
bdb_cache_delete_cleanup(
	Cache *cache,
	EntryInfo *ei )
{
	if ( ei->bei_e ) {
		ei->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
		bdb_entry_return( ei->bei_bdb, ei->bei_e, ei->bei_zseq );
#else
		bdb_entry_return( ei->bei_e );
#endif
		ei->bei_e = NULL;
	}

	free( ei->bei_nrdn.bv_val );
	ei->bei_nrdn.bv_val = NULL;
#ifdef BDB_HIER
	free( ei->bei_rdn.bv_val );
	ei->bei_rdn.bv_val = NULL;
	ei->bei_modrdns = 0;
#endif
	ei->bei_parent = NULL;
	ei->bei_kids = NULL;
	ei->bei_lruprev = NULL;

	ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
	ei->bei_lrunext = cache->c_eifree;
	cache->c_eifree = ei;
	ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
	bdb_cache_entryinfo_unlock( ei );
}
static int
bdb_cache_delete_internal(
	Cache	*cache,
	EntryInfo *e,
	int decr )
{
	int rc = 0;	/* return code */

	/* Lock the parent's kids tree */
	bdb_cache_entryinfo_lock( e->bei_parent );

#ifdef BDB_HIER
	e->bei_parent->bei_ckids--;
	if ( decr && e->bei_parent->bei_dkids ) e->bei_parent->bei_dkids--;
#endif
	/* dn tree */
	if ( avl_delete( &e->bei_parent->bei_kids, (caddr_t) e, bdb_rdn_cmp )
		== NULL )
	{
		rc = -1;
	}
	if ( e->bei_parent->bei_kids )
		cache->c_leaves--;

	/* id tree */
	if ( avl_delete( &cache->c_idtree, (caddr_t) e, bdb_id_cmp ) == NULL ) {
		rc = -1;
	}

	if ( rc == 0 ) {
		cache->c_eiused--;

		/* lru */
		LRU_DELETE( cache, e );
		if ( e->bei_e ) cache->c_cursize--;
	}

	bdb_cache_entryinfo_unlock( e->bei_parent );
	return rc;
}
static void
bdb_entryinfo_release( void *data )
{
	EntryInfo *ei = (EntryInfo *)data;
	if ( ei->bei_kids ) {
		avl_free( ei->bei_kids, NULL );
	}
	if ( ei->bei_e ) {
		ei->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
		bdb_entry_return( ei->bei_bdb, ei->bei_e, ei->bei_zseq );
#else
		bdb_entry_return( ei->bei_e );
#endif
	}
	bdb_cache_entryinfo_destroy( ei );
}
void
bdb_cache_release_all( Cache *cache )
{
	/* set cache write lock */
	ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
	/* set lru mutex */
	ldap_pvt_thread_mutex_lock( &cache->lru_tail_mutex );

	Debug( LDAP_DEBUG_TRACE, "====> bdb_cache_release_all\n", 0, 0, 0 );

	avl_free( cache->c_dntree.bei_kids, NULL );
	avl_free( cache->c_idtree, bdb_entryinfo_release );
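	/* Drain the entryinfo free list; c_lruhead doubles as a scratch
	 * pointer here since the LRU list itself is being discarded.
	 */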
	for (;cache->c_eifree;cache->c_eifree = cache->c_lruhead) {
		cache->c_lruhead = cache->c_eifree->bei_lrunext;
		bdb_cache_entryinfo_destroy(cache->c_eifree);
	}
	cache->c_cursize = 0;
	cache->c_eiused = 0;
	cache->c_leaves = 0;
	cache->c_idtree = NULL;
	cache->c_lruhead = NULL;
	cache->c_lrutail = NULL;
	cache->c_dntree.bei_kids = NULL;

	/* free lru mutex */
	ldap_pvt_thread_mutex_unlock( &cache->lru_tail_mutex );
	/* free cache write lock */
	ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
}
#ifdef LDAP_DEBUG
static void
bdb_lru_print( Cache *cache )
{
	EntryInfo	*e;

	fprintf( stderr, "LRU queue (head to tail):\n" );
	for ( e = cache->c_lruhead; e != NULL; e = e->bei_lrunext ) {
		fprintf( stderr, "\trdn \"%20s\" id %ld\n",
			e->bei_nrdn.bv_val, e->bei_id );
	}
	fprintf( stderr, "LRU queue (tail to head):\n" );
	for ( e = cache->c_lrutail; e != NULL; e = e->bei_lruprev ) {
		fprintf( stderr, "\trdn \"%20s\" id %ld\n",
			e->bei_nrdn.bv_val, e->bei_id );
	}
}
#endif /* LDAP_DEBUG */
#ifdef BDB_REUSE_LOCKERS
static void
bdb_locker_id_free( void *key, void *data )
{
	DB_ENV *env = key;
	u_int32_t lockid = (long)data;
	int rc;

	rc = XLOCK_ID_FREE( env, lockid );
	if ( rc == EINVAL ) {
		DB_LOCKREQ lr;
		Debug( LDAP_DEBUG_ANY,
			"bdb_locker_id_free: %lu err %s(%d)\n",
			(unsigned long) lockid, db_strerror(rc), rc );
		/* release all locks held by this locker. */
		lr.op = DB_LOCK_PUT_ALL;
		lr.obj = NULL;
		env->lock_vec( env, lockid, 0, &lr, 1, NULL );
		XLOCK_ID_FREE( env, lockid );
	}
}
int
bdb_locker_id( Operation *op, DB_ENV *env, u_int32_t *locker )
{
	int i, rc;
	u_int32_t lockid;
	void *data;
	void *ctx;

	if ( !env || !locker ) return -1;

	/* If no op was provided, try to find the ctx anyway... */
	if ( op ) {
		ctx = op->o_threadctx;
	} else {
		ctx = ldap_pvt_thread_pool_context();
	}

	/* Shouldn't happen unless we're single-threaded */
	if ( !ctx ) {
		*locker = 0;
		return 0;
	}

	if ( ldap_pvt_thread_pool_getkey( ctx, env, &data, NULL ) ) {
		for ( i=0, rc=1; rc != 0 && i<4; i++ ) {
			rc = XLOCK_ID( env, &lockid );
			if (rc) ldap_pvt_thread_yield();
		}
		if ( rc != 0 ) {
			return rc;
		}
		data = (void *)((long)lockid);
		if ( ( rc = ldap_pvt_thread_pool_setkey( ctx, env,
			data, bdb_locker_id_free ) ) ) {
			XLOCK_ID_FREE( env, lockid );
			Debug( LDAP_DEBUG_ANY, "bdb_locker_id: err %s(%d)\n",
				db_strerror(rc), rc, 0 );
			return rc;
		}
	} else {
		lockid = (long)data;
	}
	*locker = lockid;
	return 0;
}
#endif /* BDB_REUSE_LOCKERS */
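
/* BDB_REUSE_LOCKERS amortizes XLOCK_ID() allocation: each worker thread
 * gets one locker ID, stashed in its thread-pool key slot (keyed by the
 * DB_ENV pointer) and reclaimed by bdb_locker_id_free at thread exit.
 * Without this, every operation would allocate and free a fresh locker.
 */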
void
bdb_cache_delete_entry(
	struct bdb_info *bdb,
	EntryInfo *ei,
	u_int32_t locker,
	DB_LOCK *lock )
{
	ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
	if ( bdb_cache_entry_db_lock( bdb->bi_dbenv, bdb->bi_cache.c_locker, ei, 1, 1, lock ) == 0 )
	{
		if ( ei->bei_e && !(ei->bei_state & CACHE_ENTRY_NOT_LINKED )) {
			LRU_DELETE( &bdb->bi_cache, ei );
			ei->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
			bdb_entry_return( bdb, ei->bei_e, ei->bei_zseq );
#else
			bdb_entry_return( ei->bei_e );
#endif
			ei->bei_e = NULL;
			--bdb->bi_cache.c_cursize;
		}
		bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
	}
	ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
}