/* cache.c - routines to maintain an in-core cache of entries */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
 * Copyright 2000-2004 The OpenLDAP Foundation.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted only as authorized by the OpenLDAP
 * Public License.
 * A copy of this license is available in the file LICENSE in the
 * top-level directory of the distribution or, alternatively, at
 * <http://www.OpenLDAP.org/license.html>.
 */

#include <ac/string.h>
#include <ac/socket.h>

static int	bdb_cache_delete_internal(Cache *cache, EntryInfo *e);
static void	bdb_lru_print(Cache *cache);
static int	bdb_txn_get( Operation *op, DB_ENV *env, DB_TXN **txn, int reset );
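
/* Allocate an EntryInfo, preferring a recycled one from the cache's
 * c_eifree free list over a fresh ch_calloc allocation.
 */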
bdb_cache_entryinfo_new( Cache *cache )
{
	if ( cache->c_eifree ) {
		ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
		if ( cache->c_eifree ) {
			cache->c_eifree = ei->bei_lrunext;
		ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
		ei->bei_lrunext = NULL;
		ei = ch_calloc(1, sizeof(struct bdb_entry_info));
		ldap_pvt_thread_mutex_init( &ei->bei_kids_mutex );

/* Atomically release and reacquire a lock */
bdb_cache_entry_db_relock(
	if ( !lock ) return 0;

	lockobj.data = &ei->bei_id;
	lockobj.size = sizeof(ei->bei_id) + 1;

	list[0].op = DB_LOCK_PUT;
	list[1].op = DB_LOCK_GET;
	list[1].mode = rw ? DB_LOCK_WRITE : DB_LOCK_READ;
	list[1].obj = &lockobj;
	rc = env->lock_vec(env, locker, tryOnly ? DB_LOCK_NOWAIT : 0,
		Debug( LDAP_DEBUG_TRACE,
			"bdb_cache_entry_db_relock: entry %ld, rw %d, rc %d\n",

bdb_cache_entry_db_lock( DB_ENV *env, u_int32_t locker, EntryInfo *ei,
	int rw, int tryOnly, DB_LOCK *lock )
	if ( !lock ) return 0;

		db_rw = DB_LOCK_WRITE;
		db_rw = DB_LOCK_READ;

	lockobj.data = &ei->bei_id;
	lockobj.size = sizeof(ei->bei_id) + 1;

	rc = LOCK_GET(env, locker, tryOnly ? DB_LOCK_NOWAIT : 0,
		&lockobj, db_rw, lock);
	if (rc && !tryOnly) {
		Debug( LDAP_DEBUG_TRACE,
			"bdb_cache_entry_db_lock: entry %ld, rw %d, rc %d\n",
			ei->bei_id, rw, rc );
#endif /* NO_THREADS */
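
/* Release a previously acquired entry data lock. */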
bdb_cache_entry_db_unlock ( DB_ENV *env, DB_LOCK *lock )
	if ( !lock ) return 0;

	rc = LOCK_PUT ( env, lock );
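
/* Free an EntryInfo, its mutex, and the RDN strings it owns. */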
bdb_cache_entryinfo_destroy( EntryInfo *e )
	ldap_pvt_thread_mutex_destroy( &e->bei_kids_mutex );
	free( e->bei_nrdn.bv_val );
	free( e->bei_rdn.bv_val );
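
/* Doubly-linked LRU list maintenance: LRU_DELETE unlinks a node from
 * anywhere in the list, LRU_ADD pushes a node at the head.
 */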
#define LRU_DELETE( cache, ei ) do { \
	if ( (ei)->bei_lruprev != NULL ) { \
		(ei)->bei_lruprev->bei_lrunext = (ei)->bei_lrunext; \
		(cache)->c_lruhead = (ei)->bei_lrunext; \
	if ( (ei)->bei_lrunext != NULL ) { \
		(ei)->bei_lrunext->bei_lruprev = (ei)->bei_lruprev; \
		(cache)->c_lrutail = (ei)->bei_lruprev; \
	(ei)->bei_lrunext = (ei)->bei_lruprev = NULL; \

#define LRU_ADD( cache, ei ) do { \
	(ei)->bei_lrunext = (cache)->c_lruhead; \
	if ( (ei)->bei_lrunext != NULL ) { \
		(ei)->bei_lrunext->bei_lruprev = (ei); \
	(cache)->c_lruhead = (ei); \
	(ei)->bei_lruprev = NULL; \
	if ( (cache)->c_lrutail == NULL ) { \
		(cache)->c_lrutail = (ei); \
/* Do a length-ordered sort on normalized RDNs */
bdb_rdn_cmp( const void *v_e1, const void *v_e2 )
	const EntryInfo *e1 = v_e1, *e2 = v_e2;
	int rc = e1->bei_nrdn.bv_len - e2->bei_nrdn.bv_len;
		rc = strncmp( e1->bei_nrdn.bv_val, e2->bei_nrdn.bv_val,
			e1->bei_nrdn.bv_len );
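
/* Compare two EntryInfo nodes by entry ID, for the cache's ID AVL tree. */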
bdb_id_cmp( const void *v_e1, const void *v_e2 )
	const EntryInfo *e1 = v_e1, *e2 = v_e2;
	return e1->bei_id - e2->bei_id;

/* Create an entryinfo in the cache. Caller must release the locks later.
bdb_entryinfo_add_internal(
	struct bdb_info *bdb,
	EntryInfo *ei2 = NULL;

	ei2 = bdb_cache_entryinfo_new( &bdb->bi_cache );
	ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
	bdb_cache_entryinfo_lock( ei->bei_parent );

	ei2->bei_id = ei->bei_id;
	ei2->bei_parent = ei->bei_parent;
	ei2->bei_rdn = ei->bei_rdn;

	/* Add to cache ID tree */
	if (avl_insert( &bdb->bi_cache.c_idtree, ei2, bdb_id_cmp, avl_dup_error )) {
		eix = avl_find( bdb->bi_cache.c_idtree, ei2, bdb_id_cmp );
		bdb_cache_entryinfo_destroy( ei2 );
		/* It got freed above because its value was
		ei->bei_rdn.bv_val = NULL;
		ber_dupbv( &ei2->bei_nrdn, &ei->bei_nrdn );
		avl_insert( &ei->bei_parent->bei_kids, ei2, bdb_rdn_cmp,
		ei->bei_parent->bei_ckids++;

/* Find the EntryInfo for the requested DN. If the DN cannot be found, return
 * the info for its closest ancestor. *res should be NULL to process a
 * complete DN starting from the tree root. Otherwise *res must be the
 * immediate parent of the requested DN, and only the RDN will be searched.
 * The EntryInfo is locked upon return and must be unlocked by the caller.
	struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
	EntryInfo ei, *eip, *ei2;

	/* this function is always called with normalized DN */
		/* we're doing a onelevel search for an RDN */
		ei.bei_nrdn.bv_val = ndn->bv_val;
		ei.bei_nrdn.bv_len = dn_rdnlen( op->o_bd, ndn );
		/* we're searching a full DN from the root */
		ptr = ndn->bv_val + ndn->bv_len - op->o_bd->be_nsuffix[0].bv_len;
		ei.bei_nrdn.bv_val = ptr;
		ei.bei_nrdn.bv_len = op->o_bd->be_nsuffix[0].bv_len;
		/* Skip to next rdn if suffix is empty */
		if ( ei.bei_nrdn.bv_len == 0 ) {
			for (ptr = ei.bei_nrdn.bv_val - 2; ptr > ndn->bv_val
				&& !DN_SEPARATOR(*ptr); ptr--) /* empty */;
			if ( ptr >= ndn->bv_val ) {
				if (DN_SEPARATOR(*ptr)) ptr++;
				ei.bei_nrdn.bv_len = ei.bei_nrdn.bv_val - ptr;
				ei.bei_nrdn.bv_val = ptr;
		eip = &bdb->bi_cache.c_dntree;

	for ( bdb_cache_entryinfo_lock( eip ); eip; ) {
		ei2 = (EntryInfo *)avl_find( eip->bei_kids, &ei, bdb_rdn_cmp );
			int len = ei.bei_nrdn.bv_len;

			ei.bei_nrdn.bv_len = ndn->bv_len -
				(ei.bei_nrdn.bv_val - ndn->bv_val);
			bdb_cache_entryinfo_unlock( eip );

			rc = bdb_dn2id( op, txn, &ei.bei_nrdn, &ei );
				bdb_cache_entryinfo_lock( eip );

			/* DN exists but needs to be added to cache */
			ei.bei_nrdn.bv_len = len;
			rc = bdb_entryinfo_add_internal( bdb, &ei, &ei2 );
			/* add_internal left eip and c_rwlock locked */
			ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
		} else if ( ei2->bei_state & CACHE_ENTRY_DELETED ) {
			/* In the midst of deleting? Give it a chance to
			bdb_cache_entryinfo_unlock( eip );
			ldap_pvt_thread_yield();
			bdb_cache_entryinfo_lock( eip );
		bdb_cache_entryinfo_unlock( eip );
		bdb_cache_entryinfo_lock( ei2 );

		/* Advance to next lower RDN */
		for (ptr = ei.bei_nrdn.bv_val - 2; ptr > ndn->bv_val
			&& !DN_SEPARATOR(*ptr); ptr--) /* empty */;
		if ( ptr >= ndn->bv_val ) {
			if (DN_SEPARATOR(*ptr)) ptr++;
			ei.bei_nrdn.bv_len = ei.bei_nrdn.bv_val - ptr - 1;
			ei.bei_nrdn.bv_val = ptr;
		if ( ptr < ndn->bv_val ) {
/* Walk up the tree from a child node, looking for an ID that's already
 * been linked into the cache.
hdb_cache_find_parent(
	struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
	EntryInfo ei, eip, *ei2 = NULL, *ein = NULL, *eir = NULL;
	char ndn[SLAP_LDAPDN_MAXLEN];

		rc = hdb_dn2id_parent( op, txn, &ei, &eip.bei_id );

		/* Save the previous node, if any */

		/* Create a new node for the current ID */
		ein = bdb_cache_entryinfo_new( &bdb->bi_cache );
		ein->bei_id = ei.bei_id;
		ein->bei_kids = ei.bei_kids;
		ein->bei_nrdn = ei.bei_nrdn;
		ein->bei_rdn = ei.bei_rdn;

		/* This node is not fully connected yet */
		ein->bei_state = CACHE_ENTRY_NOT_LINKED;

		/* Insert this node into the ID tree */
		ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
		if ( avl_insert( &bdb->bi_cache.c_idtree, (caddr_t)ein,
			bdb_id_cmp, avl_dup_error ) ) {

			/* Someone else created this node just before us.
			 * Free our new copy and use the existing one.
			bdb_cache_entryinfo_destroy( ein );
			ein = (EntryInfo *)avl_find( bdb->bi_cache.c_idtree,
				(caddr_t) &ei, bdb_id_cmp );

			/* Link in any kids we've already processed */
				bdb_cache_entryinfo_lock( ein );
				avl_insert( &ein->bei_kids, (caddr_t)ei2,
					bdb_rdn_cmp, avl_dup_error );
				bdb_cache_entryinfo_unlock( ein );

		/* If this is the first time, save this node
		 * to be returned later.
		if ( eir == NULL ) eir = ein;

		/* If there was a previous node, link it to this one */
		if ( ei2 ) ei2->bei_parent = ein;

		/* Look for this node's parent */
			ei2 = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
				(caddr_t) &eip, bdb_id_cmp );
			ei2 = &bdb->bi_cache.c_dntree;
		ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );

		/* Got the parent, link in and we're done. */
			bdb_cache_entryinfo_lock( ei2 );
			ein->bei_parent = ei2;
			avl_insert( &ei2->bei_kids, (caddr_t)ein, bdb_rdn_cmp,
			bdb_cache_entryinfo_unlock( ei2 );
			bdb_cache_entryinfo_lock( eir );

			/* Reset all the state info */
			for (ein = eir; ein != ei2; ein=ein->bei_parent)
				ein->bei_state &= ~CACHE_ENTRY_NOT_LINKED;

		ei.bei_id = eip.bei_id;
		avl_insert( &ei.bei_kids, (caddr_t)ein, bdb_rdn_cmp,
/* Used by hdb_dn2idl when loading the EntryInfo for all the children
	struct bdb_info *bdb,

	/* See if we already have this one */
	bdb_cache_entryinfo_lock( ei->bei_parent );
	ei2 = (EntryInfo *)avl_find( ei->bei_parent->bei_kids, ei, bdb_rdn_cmp );
	bdb_cache_entryinfo_unlock( ei->bei_parent );

		/* Not found, add it */
		/* bei_rdn was not malloc'd before, do it now */
		ber_dupbv( &bv, &ei->bei_rdn );

		rc = bdb_entryinfo_add_internal( bdb, ei, res );
		bdb_cache_entryinfo_unlock( ei->bei_parent );
		ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
		/* Found, return it */
/* caller must have lru_mutex locked. mutex
 * will be unlocked on return.
	struct bdb_info *bdb,
	DB_LOCK lock, *lockp;

	/* See if we're above the cache size limit */
	if ( bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize ) {
		EntryInfo *elru, *elprev;

		/* Look for an unused entry to remove */
		for (elru = bdb->bi_cache.c_lrutail; elru; elru = elprev, i++ ) {
			elprev = elru->bei_lruprev;

			/* Too many probes, not enough idle, give up */

			/* Leave DB root alone, BDB_HIER needs this */
			if ( elru->bei_parent && !elru->bei_parent->bei_parent )

			/* If we can successfully writelock it, then
			 * the object is idle.
			if ( bdb_cache_entry_db_lock( bdb->bi_dbenv, bdb->bi_cache.c_locker, elru, 1, 1,

				/* If there's no entry, or this node is in
				 * the process of linking into the cache,
				 * or this node is being deleted, skip it.
				if ( !elru->bei_e || (elru->bei_state &
					( CACHE_ENTRY_NOT_LINKED | CACHE_ENTRY_DELETED ))) {
					bdb_cache_entry_db_unlock( bdb->bi_dbenv, lockp );

				/* If this node is in use or has children, just free the
				 * entry and unlink from the LRU list.
				lstat = ldap_pvt_thread_mutex_trylock( &elru->bei_kids_mutex );
				if ( lstat || elru->bei_kids ) {
					LRU_DELETE( &bdb->bi_cache, elru );
					elru->bei_e->e_private = NULL;
					bdb_entry_return( elru->bei_e );
						bdb_cache_entryinfo_unlock( elru );

				/* Else free the entry and its entryinfo.
					bdb_cache_delete_internal( &bdb->bi_cache, elru );
					bdb_cache_delete_cleanup( &bdb->bi_cache, elru->bei_e );

				/* break the loop, unsafe to muck with more than one */

				bdb_cache_entry_db_unlock( bdb->bi_dbenv, lockp );
				--bdb->bi_cache.c_cursize;
				if (bdb->bi_cache.c_cursize < bdb->bi_cache.c_maxsize)

	LRU_ADD( &bdb->bi_cache, ei );
	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_mutex );
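
/* Look up an EntryInfo by ID in the cache's ID tree; a read-locked
 * lookup only, with no database access.
 */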
	struct bdb_info *bdb,
	EntryInfo ei = { 0 },

	ldap_pvt_thread_rdwr_rlock( &bdb->bi_cache.c_rwlock );
	ei2 = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
		(caddr_t) &ei, bdb_id_cmp );
	ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );

 * cache_find_id - find an entry in the cache, given id.
 * The entry is locked for Read upon return. Call with islocked TRUE if
 * the supplied *eip was already locked.
	struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
	EntryInfo ei = { 0 };

	/* If we weren't given any info, see if we have it already cached */
again:	ldap_pvt_thread_rdwr_rlock( &bdb->bi_cache.c_rwlock );
		*eip = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
			(caddr_t) &ei, bdb_id_cmp );
			/* If the lock attempt fails, the info is in use */
			if ( ldap_pvt_thread_mutex_trylock(
					&(*eip)->bei_kids_mutex )) {
				ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
				/* If this node is being deleted, treat
				 * as if the delete has already finished
				if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
				/* otherwise, wait for the info to free up */
				ldap_pvt_thread_yield();
			/* If this info isn't hooked up to its parent yet,
			 * unlock and wait for it to be fully initialized
			if ( (*eip)->bei_state & CACHE_ENTRY_NOT_LINKED ) {
				bdb_cache_entryinfo_unlock( *eip );
				ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
				ldap_pvt_thread_yield();
		ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );

	/* See if the ID exists in the database; add it to the cache if so */
		rc = bdb_id2entry( op->o_bd, tid, id, &ep );
			rc = bdb_cache_find_ndn( op, tid,
			if ( *eip ) islocked = 1;
				bdb_entry_return( ep );
		rc = hdb_cache_find_parent(op, tid, id, eip );
		if ( rc == 0 && *eip ) islocked = 1;

	/* Ok, we found the info, do we have the entry? */
	if ( *eip && rc == 0 ) {
		if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
		/* Make sure only one thread tries to load the entry */
load1:		if ( !(*eip)->bei_e && !((*eip)->bei_state & CACHE_ENTRY_LOADING)) {
			(*eip)->bei_state |= CACHE_ENTRY_LOADING;
			bdb_cache_entryinfo_unlock( *eip );
		rc = bdb_cache_entry_db_lock( bdb->bi_dbenv, locker, *eip, 0, 0, lock );
		if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
			bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
		} else if ( rc == 0 ) {
				u_int32_t locker2 = locker;

				/* We don't wrap entire read operations in txn's, but
				 * we need our cache entry lock and any DB page locks
				 * to be associated, in order for deadlock detection
				 * to work properly. So if we need to read from the DB,
				 * we use a long-lived per-thread txn for this step.
				rc = bdb_txn_get( op, bdb->bi_dbenv, &ltid, 0 );
					locker2 = TXN_ID( ltid );

				/* Give up original read lock, obtain write lock with
				 * (possibly) new locker ID.
					rc = bdb_cache_entry_db_relock( bdb->bi_dbenv, locker2,
				if ( rc == 0 && !ep) {
					rc = bdb_id2entry( op->o_bd, ltid, id, &ep );
					ep->e_private = *eip;
				(*eip)->bei_state ^= CACHE_ENTRY_LOADING;
					/* If we succeeded, downgrade back to a readlock. */
					rc = bdb_cache_entry_db_relock( bdb->bi_dbenv, locker,
					/* Otherwise, release the lock. */
					bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
				if ( locker2 != locker ) {
					/* If we're using the per-thread txn, release all
					 * of its page locks now.
					list.op = DB_LOCK_PUT_ALL;
					bdb->bi_dbenv->lock_vec( bdb->bi_dbenv, locker2,
					/* If this txn was deadlocked, we must abort it
					 * and invalidate this per-thread txn.
					if ( rc == DB_LOCK_DEADLOCK ) {
						bdb_txn_get( op, bdb->bi_dbenv, &ltid, 1 );
			} else if ( !(*eip)->bei_e ) {
				/* Some other thread is trying to load the entry,
				 * give it a chance to finish.
				bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
				ldap_pvt_thread_yield();
				bdb_cache_entryinfo_lock( *eip );

			/* Check for subtree renames
			rc = bdb_fix_dn( (*eip)->bei_e, 1 );
				bdb_cache_entry_db_relock( bdb->bi_dbenv,
					locker, *eip, 1, 0, lock );
				/* check again in case other modifier did it already */
				if ( bdb_fix_dn( (*eip)->bei_e, 1 ) )
					rc = bdb_fix_dn( (*eip)->bei_e, 2 );
				bdb_cache_entry_db_relock( bdb->bi_dbenv,
					locker, *eip, 0, 0, lock );

		bdb_cache_entryinfo_unlock( *eip );
			bdb_entry_return( ep );

		ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_mutex );
		/* if entry is on LRU list, remove from old spot */
		if ( (*eip)->bei_lrunext || (*eip)->bei_lruprev ) {
			LRU_DELETE( &bdb->bi_cache, *eip );
		/* if entry is new, bump cache size */
			bdb->bi_cache.c_cursize++;
		/* lru_mutex is unlocked for us */
		bdb_cache_lru_add( bdb, locker, *eip );
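
/* Check whether an entry has children, trusting the cached kids info
 * when present and falling back to a dn2id lookup otherwise.
 */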
	if ( BEI(e)->bei_kids ) {
	if ( BEI(e)->bei_state & CACHE_ENTRY_NO_KIDS ) {

	rc = bdb_dn2id_children( op, txn, e );
	if ( rc == DB_NOTFOUND ) {
		BEI(e)->bei_state |= CACHE_ENTRY_NO_KIDS | CACHE_ENTRY_NO_GRANDKIDS;

/* Update the cache after a successful database Add. */
	struct bdb_info *bdb,
	struct berval rdn = e->e_name;

	/* Lock this entry so that bdb_add can run to completion.
	 * It can only fail if BDB has run out of lock resources.
	rc = bdb_cache_entry_db_lock( bdb->bi_dbenv, locker, &ei, 1, 0, &lock );
		bdb_cache_entryinfo_unlock( eip );

	if ( nrdn->bv_len != e->e_nname.bv_len ) {
		char *ptr = strchr( rdn.bv_val, ',' );
		rdn.bv_len = ptr - rdn.bv_val;
	ber_dupbv( &ei.bei_rdn, &rdn );
	if ( eip->bei_dkids ) eip->bei_dkids++;

	rc = bdb_entryinfo_add_internal( bdb, &ei, &new );
	/* bdb_csn_commit can cause this when adding the database root entry */
		new->bei_e->e_private = NULL;
		bdb_entry_return( new->bei_e );

	new->bei_state = CACHE_ENTRY_NO_KIDS | CACHE_ENTRY_NO_GRANDKIDS;
	eip->bei_state &= ~CACHE_ENTRY_NO_KIDS;
	if (eip->bei_parent) {
		eip->bei_parent->bei_state &= ~CACHE_ENTRY_NO_GRANDKIDS;
	bdb_cache_entryinfo_unlock( eip );

	ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_mutex );
	++bdb->bi_cache.c_cursize;
	ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );

	/* lru_mutex is unlocked for us */
	bdb_cache_lru_add( bdb, locker, new );
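
/* Update a cached entry in place after a Modify: write-lock the data,
 * free any previously detached attribute list, and install the new one.
 */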
	EntryInfo *ei = BEI(e);

	/* Get write lock on data */
	rc = bdb_cache_entry_db_relock( env, locker, ei, 1, 0, lock );

	/* If we've done repeated mods on a cached entry, then e_attrs
	 * is no longer contiguous with the entry, and must be freed.
	if ( (void *)e->e_attrs != (void *)(e+1) ) {
		attrs_free( e->e_attrs );
	e->e_attrs = newAttrs;

 * Change the rdn in the entryinfo. Also move to a new parent if needed.
	EntryInfo *ei = BEI(e), *pei;

	/* Get write lock on data */
	rc = bdb_cache_entry_db_relock( env, locker, ei, 1, 0, lock );

	/* If we've done repeated mods on a cached entry, then e_attrs
	 * is no longer contiguous with the entry, and must be freed.
	if ( (void *)e->e_attrs != (void *)(e+1) ) {
		attrs_free( e->e_attrs );
	e->e_attrs = new->e_attrs;
	if( e->e_nname.bv_val < e->e_bv.bv_val ||
		e->e_nname.bv_val > e->e_bv.bv_val + e->e_bv.bv_len )
		ch_free(e->e_name.bv_val);
		ch_free(e->e_nname.bv_val);
	e->e_name = new->e_name;
	e->e_nname = new->e_nname;

	/* Lock the parent's kids AVL tree */
	pei = ei->bei_parent;
	bdb_cache_entryinfo_lock( pei );
	avl_delete( &pei->bei_kids, (caddr_t) ei, bdb_rdn_cmp );
	free( ei->bei_nrdn.bv_val );
	ber_dupbv( &ei->bei_nrdn, nrdn );
	free( ei->bei_rdn.bv_val );
	if ( nrdn->bv_len != e->e_nname.bv_len ) {
		char *ptr = strchr(rdn.bv_val, ',');
		rdn.bv_len = ptr - rdn.bv_val;
	ber_dupbv( &ei->bei_rdn, &rdn );

		ein = ei->bei_parent;
		ei->bei_parent = ein;
		bdb_cache_entryinfo_unlock( pei );
		bdb_cache_entryinfo_lock( ein );

		int max = ei->bei_modrdns;
		/* Record the generation number of this change */
		for ( pei = ein; pei->bei_parent; pei = pei->bei_parent ) {
			if ( pei->bei_modrdns > max ) max = pei->bei_modrdns;
		ei->bei_modrdns = max + 1;

	avl_insert( &ein->bei_kids, ei, bdb_rdn_cmp, avl_dup_error );
	bdb_cache_entryinfo_unlock( ein );

 * cache_delete - delete the entry e from the cache.
 * returns:	0	e was deleted ok
 *		1	e was not in the cache
 *		-1	something bad happened
	EntryInfo *ei = BEI(e);

	assert( e->e_private );

	/* Set this early, warn off any queriers */
	ei->bei_state |= CACHE_ENTRY_DELETED;

	/* Lock the entry's info */
	bdb_cache_entryinfo_lock( ei );

	/* Get write lock on the data */
	rc = bdb_cache_entry_db_relock( env, locker, ei, 1, 0, lock );
		/* couldn't lock, undo and give up */
		ei->bei_state ^= CACHE_ENTRY_DELETED;
		bdb_cache_entryinfo_unlock( ei );

	Debug( LDAP_DEBUG_TRACE, "====> bdb_cache_delete( %ld )\n",

	ldap_pvt_thread_mutex_lock( &cache->lru_mutex );
	rc = bdb_cache_delete_internal( cache, e->e_private );
	/* free lru mutex */
	ldap_pvt_thread_mutex_unlock( &cache->lru_mutex );

	/* Leave entry info locked */
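
/* Release the deleted entry and recycle its EntryInfo onto the cache's
 * free list; called with the EntryInfo locked, and unlocks it here.
 */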
bdb_cache_delete_cleanup(
	EntryInfo *ei = BEI(e);

		e->e_private = NULL;
		bdb_entry_return( e );

	free( ei->bei_nrdn.bv_val );
	ei->bei_nrdn.bv_val = NULL;
	free( ei->bei_rdn.bv_val );
	ei->bei_rdn.bv_val = NULL;
	ei->bei_modrdns = 0;
	ei->bei_parent = NULL;
	ei->bei_kids = NULL;
	ei->bei_lruprev = NULL;

	ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
	ei->bei_lrunext = cache->c_eifree;
	cache->c_eifree = ei;
	ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
	bdb_cache_entryinfo_unlock( ei );
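
/* Unlink an EntryInfo from its parent's kids tree, the ID tree, and the
 * LRU list, under the cache write lock and the parent's kids lock.
 */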
bdb_cache_delete_internal(
	int rc = 0;	/* return code */

	/* set cache write lock */
	ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );

	/* Lock the parent's kids tree */
	bdb_cache_entryinfo_lock( e->bei_parent );

	e->bei_parent->bei_ckids--;
	if ( e->bei_parent->bei_dkids ) e->bei_parent->bei_dkids--;

	if ( avl_delete( &e->bei_parent->bei_kids, (caddr_t) e, bdb_rdn_cmp )
	if ( avl_delete( &cache->c_idtree, (caddr_t) e, bdb_id_cmp ) == NULL ) {
		LRU_DELETE( cache, e );

	/* free cache write lock */
	ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
	bdb_cache_entryinfo_unlock( e->bei_parent );
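
/* avl_free callback: dispose of an EntryInfo node and any entry still
 * attached to it.
 */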
bdb_entryinfo_release( void *data )
	EntryInfo *ei = (EntryInfo *)data;
	if ( ei->bei_kids ) {
		avl_free( ei->bei_kids, NULL );
		ei->bei_e->e_private = NULL;
		bdb_entry_return( ei->bei_e );
	bdb_cache_entryinfo_destroy( ei );
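
/* Tear down the entire cache: free the DN and ID trees, drain the
 * EntryInfo free list, and reset the LRU pointers.
 */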
bdb_cache_release_all( Cache *cache )
	/* set cache write lock */
	ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
	ldap_pvt_thread_mutex_lock( &cache->lru_mutex );

	Debug( LDAP_DEBUG_TRACE, "====> bdb_cache_release_all\n", 0, 0, 0 );

	avl_free( cache->c_dntree.bei_kids, NULL );
	avl_free( cache->c_idtree, bdb_entryinfo_release );
	for (;cache->c_eifree;cache->c_eifree = cache->c_lruhead) {
		cache->c_lruhead = cache->c_eifree->bei_lrunext;
		bdb_cache_entryinfo_destroy(cache->c_eifree);
	cache->c_lruhead = NULL;
	cache->c_lrutail = NULL;

	/* free lru mutex */
	ldap_pvt_thread_mutex_unlock( &cache->lru_mutex );
	/* free cache write lock */
	ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
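
/* Debugging aid: dump the LRU list to stderr in both directions. */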
bdb_lru_print( Cache *cache )
	fprintf( stderr, "LRU queue (head to tail):\n" );
	for ( e = cache->c_lruhead; e != NULL; e = e->bei_lrunext ) {
		fprintf( stderr, "\trdn \"%20s\" id %ld\n",
			e->bei_nrdn.bv_val, e->bei_id );
	fprintf( stderr, "LRU queue (tail to head):\n" );
	for ( e = cache->c_lrutail; e != NULL; e = e->bei_lruprev ) {
		fprintf( stderr, "\trdn \"%20s\" id %ld\n",
			e->bei_nrdn.bv_val, e->bei_id );
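
/* Thread-pool key destructor for the per-thread transaction created by
 * bdb_txn_get() below.
 */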
bdb_txn_free( void *key, void *data )

/* Obtain a long-lived transaction for the current thread.
 * If reset == 1, remove the current transaction. */
bdb_txn_get( Operation *op, DB_ENV *env, DB_TXN **txn, int reset )
	void *ctx, *data = NULL;

	/* If no op was provided, try to find the ctx anyway... */
		ctx = op->o_threadctx;
		ctx = ldap_pvt_thread_pool_context();
		/* Shouldn't happen unless we're single-threaded */

		return ldap_pvt_thread_pool_setkey( ctx, ((char *)env)+1, NULL, NULL );

	if ( ldap_pvt_thread_pool_getkey( ctx, ((char *)env)+1, &data, NULL ) ||
		for ( i=0, rc=1; rc != 0 && i<4; i++ ) {
			rc = TXN_BEGIN( env, NULL, txn, 0 );
			if (rc) ldap_pvt_thread_yield();
		if ( ( rc = ldap_pvt_thread_pool_setkey( ctx, ((char *)env)+1,
			*txn, bdb_txn_free ) ) ) {
			Debug( LDAP_DEBUG_ANY, "bdb_txn_get: err %s(%d)\n",
				db_strerror(rc), rc, 0 );

#ifdef BDB_REUSE_LOCKERS
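
/* Thread-pool key destructor: release the thread's locker ID, first
 * dropping any locks it still holds if BDB reports it as busy.
 */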
bdb_locker_id_free( void *key, void *data )
	int lockid = (int) data;

	rc = XLOCK_ID_FREE( env, lockid );
	if ( rc == EINVAL ) {
		Debug( LDAP_DEBUG_ANY,
			"bdb_locker_id_free: %d err %s(%d)\n",
			lockid, db_strerror(rc), rc );
		/* release all locks held by this locker. */
		lr.op = DB_LOCK_PUT_ALL;
		env->lock_vec( env, lockid, 0, &lr, 1, NULL );
		XLOCK_ID_FREE( env, lockid );
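
/* Return a long-lived locker ID for the current thread, allocating one
 * on first use and caching it in the thread's pool context.
 */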
bdb_locker_id( Operation *op, DB_ENV *env, int *locker )
	if ( !env || !locker ) return -1;

	/* If no op was provided, try to find the ctx anyway... */
		ctx = op->o_threadctx;
		ctx = ldap_pvt_thread_pool_context();
		/* Shouldn't happen unless we're single-threaded */

	if ( ldap_pvt_thread_pool_getkey( ctx, env, &data, NULL ) ) {
		for ( i=0, rc=1; rc != 0 && i<4; i++ ) {
			rc = XLOCK_ID( env, &lockid );
			if (rc) ldap_pvt_thread_yield();
		data = (void *)lockid;
		if ( ( rc = ldap_pvt_thread_pool_setkey( ctx, env,
			data, bdb_locker_id_free ) ) ) {
			XLOCK_ID_FREE( env, lockid );
			Debug( LDAP_DEBUG_ANY, "bdb_locker_id: err %s(%d)\n",
				db_strerror(rc), rc, 0 );
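
/* Evict a single cached entry if its data can be write-locked without
 * blocking, returning the Entry to the pool and shrinking the cache count.
 */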
bdb_cache_delete_entry(
	struct bdb_info *bdb,

	ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
	if ( bdb_cache_entry_db_lock( bdb->bi_dbenv, bdb->bi_cache.c_locker, ei, 1, 1, lock ) == 0 )
		if ( ei->bei_e && !(ei->bei_state & CACHE_ENTRY_NOT_LINKED )) {
			LRU_DELETE( &bdb->bi_cache, ei );
			ei->bei_e->e_private = NULL;
			bdb_entry_return( ei->bei_e );
			--bdb->bi_cache.c_cursize;
		bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
	ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );