]> git.sur5r.net Git - openldap/blob - servers/slapd/back-bdb/cache.c
db71ca70cea7e19f6ae097993412cb76ecc99242
[openldap] / servers / slapd / back-bdb / cache.c
1 /* cache.c - routines to maintain an in-core cache of entries */
2 /* $OpenLDAP$ */
3 /* This work is part of OpenLDAP Software <http://www.openldap.org/>.
4  *
5  * Copyright 2000-2006 The OpenLDAP Foundation.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted only as authorized by the OpenLDAP
10  * Public License.
11  *
12  * A copy of this license is available in the file LICENSE in the
13  * top-level directory of the distribution or, alternatively, at
14  * <http://www.OpenLDAP.org/license.html>.
15  */
16
17 #include "portable.h"
18
19 #include <stdio.h>
20
21 #include <ac/errno.h>
22 #include <ac/string.h>
23 #include <ac/socket.h>
24
25 #include "slap.h"
26
27 #include "back-bdb.h"
28
29 #include "ldap_rq.h"
30
31 #ifdef BDB_HIER
32 #define bdb_cache_lru_add       hdb_cache_lru_add
33 #endif
34 static void bdb_cache_lru_add( struct bdb_info *bdb, EntryInfo *ei );
35
36 static int      bdb_cache_delete_internal(Cache *cache, EntryInfo *e, int decr);
37 #ifdef LDAP_DEBUG
38 #ifdef SLAPD_UNUSED
39 static void     bdb_lru_print(Cache *cache);
40 #endif
41 #endif
42
/* Allocate an EntryInfo, preferring a node from the cache's free list.
 * Recycled nodes keep their already-initialized kids mutex; freshly
 * allocated nodes are zeroed and get a new mutex.
 */
static EntryInfo *
bdb_cache_entryinfo_new( Cache *cache )
{
	EntryInfo *ei = NULL;

	/* Unlocked peek at the free list to avoid taking the write lock
	 * when the list is empty; the list head is re-checked under the
	 * lock before a node is actually removed.
	 */
	if ( cache->c_eifree ) {
		ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
		if ( cache->c_eifree ) {
			ei = cache->c_eifree;
			/* Free list is chained through bei_lrunext */
			cache->c_eifree = ei->bei_lrunext;
		}
		ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
	}
	if ( ei ) {
		/* Recycled node: clear the fields reused by the free list */
		ei->bei_lrunext = NULL;
		ei->bei_state = 0;
	} else {
		/* Free list empty: allocate zeroed and set up the mutex */
		ei = ch_calloc(1, sizeof(struct bdb_entry_info));
		ldap_pvt_thread_mutex_init( &ei->bei_kids_mutex );
	}

	return ei;
}
66
/* Atomically release and reacquire a lock */
/* Uses a single DB_ENV->lock_vec() call with a PUT followed by a GET so
 * the release and reacquire happen in one request to the lock manager.
 * rw selects write vs. read mode; tryOnly requests DB_LOCK_NOWAIT.
 * Returns 0 on success or a Berkeley DB error code.
 */
int
bdb_cache_entry_db_relock(
	DB_ENV *env,
	u_int32_t locker,
	EntryInfo *ei,
	int rw,
	int tryOnly,
	DB_LOCK *lock )
{
#ifdef NO_THREADS
	/* Single-threaded build: locking is a no-op */
	return 0;
#else
	int	rc;
	DBT	lockobj;
	DB_LOCKREQ list[2];

	if ( !lock ) return 0;

	/* Lock object is the entry ID; +1 byte to distinguish it from
	 * other lock namespaces using the same ID bytes.
	 */
	lockobj.data = &ei->bei_id;
	lockobj.size = sizeof(ei->bei_id) + 1;

	list[0].op = DB_LOCK_PUT;
	list[0].lock = *lock;
	list[1].op = DB_LOCK_GET;
	list[1].lock = *lock;
	list[1].mode = rw ? DB_LOCK_WRITE : DB_LOCK_READ;
	list[1].obj = &lockobj;
	rc = env->lock_vec(env, locker, tryOnly ? DB_LOCK_NOWAIT : 0,
		list, 2, NULL );

	/* NOTE(review): when tryOnly is set and the GET fails, this still
	 * stores list[1].lock into *lock — confirm callers treat *lock as
	 * invalid on a nonzero return in the tryOnly case.
	 */
	if (rc && !tryOnly) {
		Debug( LDAP_DEBUG_TRACE,
			"bdb_cache_entry_db_relock: entry %ld, rw %d, rc %d\n",
			ei->bei_id, rw, rc );
	} else {
		*lock = list[1].lock;
	}
	return rc;
#endif
}
108
109 static int
110 bdb_cache_entry_db_lock( DB_ENV *env, u_int32_t locker, EntryInfo *ei,
111         int rw, int tryOnly, DB_LOCK *lock )
112 {
113 #ifdef NO_THREADS
114         return 0;
115 #else
116         int       rc;
117         DBT       lockobj;
118         int       db_rw;
119
120         if ( !lock ) return 0;
121
122         if (rw)
123                 db_rw = DB_LOCK_WRITE;
124         else
125                 db_rw = DB_LOCK_READ;
126
127         lockobj.data = &ei->bei_id;
128         lockobj.size = sizeof(ei->bei_id) + 1;
129
130         rc = LOCK_GET(env, locker, tryOnly ? DB_LOCK_NOWAIT : 0,
131                                         &lockobj, db_rw, lock);
132         if (rc && !tryOnly) {
133                 Debug( LDAP_DEBUG_TRACE,
134                         "bdb_cache_entry_db_lock: entry %ld, rw %d, rc %d\n",
135                         ei->bei_id, rw, rc );
136         }
137         return rc;
138 #endif /* NO_THREADS */
139 }
140
141 int
142 bdb_cache_entry_db_unlock ( DB_ENV *env, DB_LOCK *lock )
143 {
144 #ifdef NO_THREADS
145         return 0;
146 #else
147         int rc;
148
149         if ( !lock ) return 0;
150
151         rc = LOCK_PUT ( env, lock );
152         return rc;
153 #endif
154 }
155
/* Release all storage owned by an EntryInfo: its kids mutex, its
 * normalized RDN string, (hdb only) its un-normalized RDN string, and
 * the structure itself. Always returns 0.
 */
static int
bdb_cache_entryinfo_destroy( EntryInfo *e )
{
	ldap_pvt_thread_mutex_destroy( &e->bei_kids_mutex );
	/* free(NULL) is a no-op, so no guards are needed */
	free( e->bei_nrdn.bv_val );
#ifdef BDB_HIER
	free( e->bei_rdn.bv_val );
#endif
	free( e );
	return 0;
}
167
/* Unlink (ei) from the cache's doubly-linked LRU list, fixing up
 * c_lruhead/c_lrutail when (ei) is at either end. (ei)'s own link
 * pointers are cleared. Caller must hold the appropriate LRU mutex.
 */
#define LRU_DELETE( cache, ei ) do { \
	if ( (ei)->bei_lruprev != NULL ) { \
		(ei)->bei_lruprev->bei_lrunext = (ei)->bei_lrunext; \
	} else { \
		(cache)->c_lruhead = (ei)->bei_lrunext; \
	} \
	if ( (ei)->bei_lrunext != NULL ) { \
		(ei)->bei_lrunext->bei_lruprev = (ei)->bei_lruprev; \
	} else { \
		(cache)->c_lrutail = (ei)->bei_lruprev; \
	} \
	(ei)->bei_lrunext = (ei)->bei_lruprev = NULL; \
} while(0)

/* Push (ei) onto the head of the cache's LRU list; when the list was
 * empty, (ei) also becomes the tail. Caller must hold lru_head_mutex.
 */
#define LRU_ADD( cache, ei ) do { \
	(ei)->bei_lrunext = (cache)->c_lruhead; \
	if ( (ei)->bei_lrunext != NULL ) { \
		(ei)->bei_lrunext->bei_lruprev = (ei); \
	} \
	(cache)->c_lruhead = (ei); \
	(ei)->bei_lruprev = NULL; \
	if ( (cache)->c_lrutail == NULL ) { \
		(cache)->c_lrutail = (ei); \
	} \
} while(0)
193
194 /* Do a length-ordered sort on normalized RDNs */
195 static int
196 bdb_rdn_cmp( const void *v_e1, const void *v_e2 )
197 {
198         const EntryInfo *e1 = v_e1, *e2 = v_e2;
199         int rc = e1->bei_nrdn.bv_len - e2->bei_nrdn.bv_len;
200         if (rc == 0) {
201                 rc = strncmp( e1->bei_nrdn.bv_val, e2->bei_nrdn.bv_val,
202                         e1->bei_nrdn.bv_len );
203         }
204         return rc;
205 }
206
207 static int
208 bdb_id_cmp( const void *v_e1, const void *v_e2 )
209 {
210         const EntryInfo *e1 = v_e1, *e2 = v_e2;
211         return e1->bei_id - e2->bei_id;
212 }
213
/* Create an entryinfo in the cache. Caller must release the locks later.
 *
 * On return, *res points at the cached node for ei->bei_id (either the
 * one created here, or a pre-existing one if another thread raced us),
 * and BOTH the cache c_rwlock (write) and ei->bei_parent's kids mutex
 * are still held. Always returns 0.
 */
static int
bdb_entryinfo_add_internal(
	struct bdb_info *bdb,
	EntryInfo *ei,
	EntryInfo **res )
{
	EntryInfo *ei2 = NULL;

	*res = NULL;

	/* Get a recycled or fresh node before taking the locks */
	ei2 = bdb_cache_entryinfo_new( &bdb->bi_cache );

	/* These two locks are intentionally left held on return */
	ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
	bdb_cache_entryinfo_lock( ei->bei_parent );

	ei2->bei_id = ei->bei_id;
	ei2->bei_parent = ei->bei_parent;
#ifdef BDB_HIER
	/* Takes over ownership of the RDN buffer held in ei */
	ei2->bei_rdn = ei->bei_rdn;
#endif
#ifdef SLAP_ZONE_ALLOC
	ei2->bei_bdb = bdb;
#endif

	/* Add to cache ID tree */
	if (avl_insert( &bdb->bi_cache.c_idtree, ei2, bdb_id_cmp, avl_dup_error )) {
		/* Duplicate: another thread inserted this ID first. Discard
		 * our new node and hand back the existing one.
		 */
		EntryInfo *eix;
		eix = avl_find( bdb->bi_cache.c_idtree, ei2, bdb_id_cmp );
		bdb_cache_entryinfo_destroy( ei2 );
		ei2 = eix;
#ifdef BDB_HIER
		/* It got freed above because its value was
		 * assigned to ei2.
		 */
		ei->bei_rdn.bv_val = NULL;
#endif
	} else {
		bdb->bi_cache.c_eiused++;
		ber_dupbv( &ei2->bei_nrdn, &ei->bei_nrdn );

		/* This is a new leaf node. But if parent had no kids, then it was
		 * a leaf and we would be decrementing that. So, only increment if
		 * the parent already has kids.
		 */
		if ( ei->bei_parent->bei_kids || !ei->bei_parent->bei_id )
			bdb->bi_cache.c_leaves++;
		avl_insert( &ei->bei_parent->bei_kids, ei2, bdb_rdn_cmp,
			avl_dup_error );
#ifdef BDB_HIER
		ei->bei_parent->bei_ckids++;
#endif
	}

	*res = ei2;
	return 0;
}
272
/* Find the EntryInfo for the requested DN. If the DN cannot be found, return
 * the info for its closest ancestor. *res should be NULL to process a
 * complete DN starting from the tree root. Otherwise *res must be the
 * immediate parent of the requested DN, and only the RDN will be searched.
 * The EntryInfo is locked upon return and must be unlocked by the caller.
 *
 * Returns 0 / LDAP_SUCCESS on success, DB_NOTFOUND when the target is
 * mid-deletion, or the error from bdb_dn2id(); on error *res holds the
 * deepest ancestor reached, locked.
 */
int
bdb_cache_find_ndn(
	Operation	*op,
	DB_TXN		*txn,
	struct berval	*ndn,
	EntryInfo	**res )
{
	struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
	EntryInfo	ei, *eip, *ei2;
	int rc = 0;
	char *ptr;

	/* this function is always called with normalized DN */
	if ( *res ) {
		/* we're doing a onelevel search for an RDN */
		ei.bei_nrdn.bv_val = ndn->bv_val;
		ei.bei_nrdn.bv_len = dn_rdnlen( op->o_bd, ndn );
		eip = *res;
	} else {
		/* we're searching a full DN from the root */
		/* Start at the suffix, which sits at the end of the DN */
		ptr = ndn->bv_val + ndn->bv_len - op->o_bd->be_nsuffix[0].bv_len;
		ei.bei_nrdn.bv_val = ptr;
		ei.bei_nrdn.bv_len = op->o_bd->be_nsuffix[0].bv_len;
		/* Skip to next rdn if suffix is empty */
		if ( ei.bei_nrdn.bv_len == 0 ) {
			/* Scan backwards for the previous DN separator */
			for (ptr = ei.bei_nrdn.bv_val - 2; ptr > ndn->bv_val
				&& !DN_SEPARATOR(*ptr); ptr--) /* empty */;
			if ( ptr >= ndn->bv_val ) {
				if (DN_SEPARATOR(*ptr)) ptr++;
				ei.bei_nrdn.bv_len = ei.bei_nrdn.bv_val - ptr;
				ei.bei_nrdn.bv_val = ptr;
			}
		}
		eip = &bdb->bi_cache.c_dntree;
	}

	/* Walk down the tree one RDN per iteration; eip is locked at the
	 * top of each pass (hand-over-hand locking).
	 */
	for ( bdb_cache_entryinfo_lock( eip ); eip; ) {
		ei.bei_parent = eip;
		ei2 = (EntryInfo *)avl_find( eip->bei_kids, &ei, bdb_rdn_cmp );
		if ( !ei2 ) {
			/* RDN not cached under this parent */
			int len = ei.bei_nrdn.bv_len;
				
			if ( BER_BVISEMPTY( ndn )) {
				/* Empty DN resolves to the current node */
				*res = eip;
				return LDAP_SUCCESS;
			}

			/* Widen the RDN to the full remaining DN for the
			 * dn2id lookup, dropping the parent lock meanwhile.
			 */
			ei.bei_nrdn.bv_len = ndn->bv_len -
				(ei.bei_nrdn.bv_val - ndn->bv_val);
			bdb_cache_entryinfo_unlock( eip );

			rc = bdb_dn2id( op, txn, &ei.bei_nrdn, &ei );
			if (rc) {
				/* Not in the DB either; return locked ancestor */
				bdb_cache_entryinfo_lock( eip );
				*res = eip;
				return rc;
			}

			/* DN exists but needs to be added to cache */
			ei.bei_nrdn.bv_len = len;
			rc = bdb_entryinfo_add_internal( bdb, &ei, &ei2 );
			/* add_internal left eip and c_rwlock locked */
			ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
			if ( rc ) {
				*res = eip;
				return rc;
			}
		} else if ( ei2->bei_state & CACHE_ENTRY_DELETED ) {
			/* In the midst of deleting? Give it a chance to
			 * complete.
			 */
			bdb_cache_entryinfo_unlock( eip );
			ldap_pvt_thread_yield();
			bdb_cache_entryinfo_lock( eip );
			*res = eip;
			return DB_NOTFOUND;
		}
		/* Release the parent, lock the child, and descend */
		bdb_cache_entryinfo_unlock( eip );
		bdb_cache_entryinfo_lock( ei2 );

		eip = ei2;

		/* Advance to next lower RDN */
		for (ptr = ei.bei_nrdn.bv_val - 2; ptr > ndn->bv_val
			&& !DN_SEPARATOR(*ptr); ptr--) /* empty */;
		if ( ptr >= ndn->bv_val ) {
			if (DN_SEPARATOR(*ptr)) ptr++;
			ei.bei_nrdn.bv_len = ei.bei_nrdn.bv_val - ptr - 1;
			ei.bei_nrdn.bv_val = ptr;
		}
		if ( ptr < ndn->bv_val ) {
			/* Whole DN consumed: eip is the target, still locked */
			*res = eip;
			break;
		}
	}

	return rc;
}
377
#ifdef BDB_HIER
/* Walk up the tree from a child node, looking for an ID that's already
 * been linked into the cache.
 *
 * Creates EntryInfo nodes (marked CACHE_ENTRY_NOT_LINKED) for each
 * ancestor not yet cached, links them child-to-parent once an already
 * cached ancestor (or the root) is reached, clears the NOT_LINKED
 * marks, and returns the node for the requested id in *res, locked.
 */
int
hdb_cache_find_parent(
	Operation *op,
	DB_TXN *txn,
	u_int32_t	locker,
	ID id,
	EntryInfo **res )
{
	struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
	EntryInfo ei, eip, *ei2 = NULL, *ein = NULL, *eir = NULL;
	int rc;
	int addlru = 0;

	ei.bei_id = id;
	ei.bei_kids = NULL;
	ei.bei_ckids = 0;

	for (;;) {
		/* Fetch the parent ID (and this node's RDN info) from dn2id */
		rc = hdb_dn2id_parent( op, txn, locker, &ei, &eip.bei_id );
		if ( rc ) break;

		/* Save the previous node, if any */
		ei2 = ein;

		/* Create a new node for the current ID */
		ein = bdb_cache_entryinfo_new( &bdb->bi_cache );
		ein->bei_id = ei.bei_id;
		ein->bei_kids = ei.bei_kids;
		ein->bei_nrdn = ei.bei_nrdn;
		ein->bei_rdn = ei.bei_rdn;
		ein->bei_ckids = ei.bei_ckids;
#ifdef SLAP_ZONE_ALLOC
		ein->bei_bdb = bdb;
#endif
		ei.bei_ckids = 0;
		
		/* This node is not fully connected yet */
		ein->bei_state = CACHE_ENTRY_NOT_LINKED;

		/* Insert this node into the ID tree */
		ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
		if ( avl_insert( &bdb->bi_cache.c_idtree, (caddr_t)ein,
			bdb_id_cmp, avl_dup_error ) ) {

			/* Someone else created this node just before us.
			 * Free our new copy and use the existing one.
			 */
			bdb_cache_entryinfo_destroy( ein );
			ein = (EntryInfo *)avl_find( bdb->bi_cache.c_idtree,
				(caddr_t) &ei, bdb_id_cmp );
			
			/* Link in any kids we've already processed */
			if ( ei2 ) {
				bdb_cache_entryinfo_lock( ein );
				avl_insert( &ein->bei_kids, (caddr_t)ei2,
					bdb_rdn_cmp, avl_dup_error );
				ein->bei_ckids++;
				bdb_cache_entryinfo_unlock( ein );
			}
			/* Don't re-add to the LRU; presumably the existing
			 * node is already on it — TODO confirm.
			 */
			addlru = 0;

		}

		/* If this is the first time, save this node
		 * to be returned later.
		 */
		if ( eir == NULL ) eir = ein;

		/* If there was a previous node, link it to this one */
		if ( ei2 ) ei2->bei_parent = ein;

		/* Look for this node's parent */
		if ( eip.bei_id ) {
			ei2 = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
					(caddr_t) &eip, bdb_id_cmp );
		} else {
			/* Parent ID 0: parent is the database root node */
			ei2 = &bdb->bi_cache.c_dntree;
		}
		bdb->bi_cache.c_eiused++;
		if ( ei2 && ( ei2->bei_kids || !ei2->bei_id ))
				bdb->bi_cache.c_leaves++;
		ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );

		if ( addlru ) {
			/* bdb_cache_lru_add unlocks lru_head_mutex for us */
			ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_head_mutex );
			bdb_cache_lru_add( bdb, ein );
		}
		addlru = 1;

		/* Got the parent, link in and we're done. */
		if ( ei2 ) {
			bdb_cache_entryinfo_lock( ei2 );
			ein->bei_parent = ei2;
			avl_insert( &ei2->bei_kids, (caddr_t)ein, bdb_rdn_cmp,
				avl_dup_error);
			ei2->bei_ckids++;
			bdb_cache_entryinfo_unlock( ei2 );
			/* Return the original target locked, per contract */
			bdb_cache_entryinfo_lock( eir );

			/* Reset all the state info */
			for (ein = eir; ein != ei2; ein=ein->bei_parent)
				ein->bei_state &= ~CACHE_ENTRY_NOT_LINKED;
			*res = eir;
			break;
		}
		/* Parent not cached either; move up one level and repeat,
		 * seeding the new node's kids tree with the one just built.
		 */
		ei.bei_kids = NULL;
		ei.bei_id = eip.bei_id;
		ei.bei_ckids = 1;
		avl_insert( &ei.bei_kids, (caddr_t)ein, bdb_rdn_cmp,
			avl_dup_error );
	}
	return rc;
}
495
/* Used by hdb_dn2idl when loading the EntryInfo for all the children
 * of a given node
 *
 * ei describes the child by parent + RDN; *res receives the cached
 * node (existing or newly created). Returns 0 on success.
 */
int hdb_cache_load(
	struct bdb_info *bdb,
	EntryInfo *ei,
	EntryInfo **res )
{
	EntryInfo *ei2;
	int rc;

	/* See if we already have this one */
	bdb_cache_entryinfo_lock( ei->bei_parent );
	ei2 = (EntryInfo *)avl_find( ei->bei_parent->bei_kids, ei, bdb_rdn_cmp );
	bdb_cache_entryinfo_unlock( ei->bei_parent );

	if ( !ei2 ) {
		/* Not found, add it */
		struct berval bv;

		/* bei_rdn was not malloc'd before, do it now */
		ber_dupbv( &bv, &ei->bei_rdn );
		ei->bei_rdn = bv;

		rc = bdb_entryinfo_add_internal( bdb, ei, res );
		/* add_internal leaves the parent and c_rwlock locked;
		 * release both here.
		 */
		bdb_cache_entryinfo_unlock( ei->bei_parent );
		ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
	} else {
		/* Found, return it */
		*res = ei2;
		return 0;
	}
	return rc;
}
530 #endif
531
/* Evict idle entries from the tail of the LRU list, freeing cached
 * Entry data until the cache has at least c_minfree headroom below
 * c_maxsize. Runs as a runqueue task (ctx != NULL) or synchronously
 * from bdb_cache_lru_add in tool mode (ctx == NULL).
 */
static void *
bdb_cache_lru_purge(void *ctx, void *arg)
{
	struct re_s *rtask = arg;
	struct bdb_info *bdb = rtask->arg;
	DB_LOCK		lock, *lockp;
	EntryInfo *elru, *elprev;
	int count = 0;

	/* Only use real DB locks when a cache-wide locker ID exists */
	if ( bdb->bi_cache.c_locker ) {
		lockp = &lock;
	} else {
		lockp = NULL;
	}

	ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_tail_mutex );

	/* Look for an unused entry to remove */
	for (elru = bdb->bi_cache.c_lrutail; elru; elru = elprev ) {
		/* Save the predecessor now, since elru may be unlinked */
		elprev = elru->bei_lruprev;

		/* If we can successfully writelock it, then
		 * the object is idle.
		 */
		if ( bdb_cache_entry_db_lock( bdb->bi_dbenv,
				bdb->bi_cache.c_locker, elru, 1, 1, lockp ) == 0 ) {

			int stop = 0;

			/* If this node is in the process of linking into the cache,
			 * or this node is being deleted, skip it.
			 */
			if ( elru->bei_state &
				( CACHE_ENTRY_NOT_LINKED | CACHE_ENTRY_DELETED )) {
				bdb_cache_entry_db_unlock( bdb->bi_dbenv, lockp );
				continue;
			}
			/* Free entry for this node if it's present */
			if ( elru->bei_e ) {
				elru->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
				bdb_entry_return( bdb, elru->bei_e, elru->bei_zseq );
#else
				bdb_entry_return( elru->bei_e );
#endif
				elru->bei_e = NULL;
				count++;
			}
			/* ITS#4010 if we're in slapcat, and this node is a leaf
			 * node, free it.
			 *
			 * FIXME: we need to do this for slapd as well, (which is
			 * why we compute bi_cache.c_leaves now) but at the moment
			 * we can't because it causes unresolvable deadlocks. 
			 */
			if ( slapMode & SLAP_TOOL_READONLY ) {
				if ( !elru->bei_kids ) {
					/* This does LRU_DELETE for us */
					bdb_cache_delete_internal( &bdb->bi_cache, elru, 0 );
					bdb_cache_delete_cleanup( &bdb->bi_cache, elru );
				}
				/* Leave node on LRU list for a future pass */
			} else {
				LRU_DELETE( &bdb->bi_cache, elru );
			}
			bdb_cache_entry_db_unlock( bdb->bi_dbenv, lockp );

			/* After every c_minfree frees, update c_cursize and
			 * stop once headroom reaches c_minfree.
			 */
			if ( count == bdb->bi_cache.c_minfree ) {
				ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
				bdb->bi_cache.c_cursize -= bdb->bi_cache.c_minfree;
				if ( bdb->bi_cache.c_maxsize - bdb->bi_cache.c_cursize >=
					bdb->bi_cache.c_minfree )
					stop = 1;
				count = 0;
				ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
			}
			if (stop) break;
		}
	}

	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_tail_mutex );

	/* If we're running as a task, drop the task */
	if ( ctx ) {
		ldap_pvt_thread_mutex_lock( &slapd_rq.rq_mutex );
		ldap_pvt_runqueue_stoptask( &slapd_rq, rtask );
		/* Defer processing till we're needed again */
		ldap_pvt_runqueue_resched( &slapd_rq, rtask, 1 );
		ldap_pvt_thread_mutex_unlock( &slapd_rq.rq_mutex );
	}

	return NULL;
}
625
/* caller must have lru_head_mutex locked. mutex
 * will be unlocked on return.
 *
 * Adds ei to the head of the LRU list, then triggers a purge when the
 * cache has grown past c_maxsize: synchronously in tool mode, or by
 * scheduling/waking the bdb_cache_lru_purge runqueue task otherwise.
 */
static void
bdb_cache_lru_add(
	struct bdb_info *bdb,
	EntryInfo *ei )
{
	LRU_ADD( &bdb->bi_cache, ei );
	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );

	/* See if we're above the cache size limit */
	if ( bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize ) {
		if ( slapMode & SLAP_TOOL_MODE ) {
			/* No runqueue in tool mode; purge inline using a
			 * stack-local task record just to carry the arg.
			 */
			struct re_s rtask;

			rtask.arg = bdb;
			bdb_cache_lru_purge( NULL, &rtask );
		} else {
			int wake = 0;
			ldap_pvt_thread_mutex_lock( &slapd_rq.rq_mutex );
			if ( bdb->bi_cache_task ) {
				if ( !ldap_pvt_runqueue_isrunning( &slapd_rq,
					bdb->bi_cache_task )) {
					/* We want it to start right now */
					bdb->bi_cache_task->interval.tv_sec = 0;
					ldap_pvt_runqueue_resched( &slapd_rq, bdb->bi_cache_task,
						0 );
					/* But don't try to reschedule it while it's running */
					bdb->bi_cache_task->interval.tv_sec = 3600;
					wake = 1;
				}
			} else {
				/* First overflow: register the purge task */
				bdb->bi_cache_task = ldap_pvt_runqueue_insert( &slapd_rq, 3600,
					bdb_cache_lru_purge, bdb, "bdb_cache_lru_purge",
					bdb->bi_dbenv_home );
				wake = 1;
			}
			ldap_pvt_thread_mutex_unlock( &slapd_rq.rq_mutex );
			/* Don't bother waking if the purge task is already running */
			if ( wake )
				slap_wake_listener();
		}
	}
}
671
672 EntryInfo *
673 bdb_cache_find_info(
674         struct bdb_info *bdb,
675         ID id )
676 {
677         EntryInfo       ei = { 0 },
678                         *ei2;
679
680         ei.bei_id = id;
681
682         ldap_pvt_thread_rdwr_rlock( &bdb->bi_cache.c_rwlock );
683         ei2 = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
684                                         (caddr_t) &ei, bdb_id_cmp );
685         ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
686         return ei2;
687 }
688
689 /*
690  * cache_find_id - find an entry in the cache, given id.
691  * The entry is locked for Read upon return. Call with islocked TRUE if
692  * the supplied *eip was already locked.
693  */
694
695 int
696 bdb_cache_find_id(
697         Operation *op,
698         DB_TXN  *tid,
699         ID                              id,
700         EntryInfo       **eip,
701         int             islocked,
702         u_int32_t       locker,
703         DB_LOCK         *lock )
704 {
705         struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
706         Entry   *ep = NULL;
707         int     rc = 0, load = 0;
708         EntryInfo ei = { 0 };
709
710         ei.bei_id = id;
711
712 #ifdef SLAP_ZONE_ALLOC
713         slap_zh_rlock(bdb->bi_cache.c_zctx);
714 #endif
715         /* If we weren't given any info, see if we have it already cached */
716         if ( !*eip ) {
717 again:  ldap_pvt_thread_rdwr_rlock( &bdb->bi_cache.c_rwlock );
718                 *eip = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
719                         (caddr_t) &ei, bdb_id_cmp );
720                 if ( *eip ) {
721                         /* If the lock attempt fails, the info is in use */
722                         if ( ldap_pvt_thread_mutex_trylock(
723                                         &(*eip)->bei_kids_mutex )) {
724                                 ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
725                                 /* If this node is being deleted, treat
726                                  * as if the delete has already finished
727                                  */
728                                 if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
729                                         return DB_NOTFOUND;
730                                 }
731                                 /* otherwise, wait for the info to free up */
732                                 ldap_pvt_thread_yield();
733                                 goto again;
734                         }
735                         /* If this info isn't hooked up to its parent yet,
736                          * unlock and wait for it to be fully initialized
737                          */
738                         if ( (*eip)->bei_state & CACHE_ENTRY_NOT_LINKED ) {
739                                 bdb_cache_entryinfo_unlock( *eip );
740                                 ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
741                                 ldap_pvt_thread_yield();
742                                 goto again;
743                         }
744                         islocked = 1;
745                 }
746                 ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
747         }
748
749         /* See if the ID exists in the database; add it to the cache if so */
750         if ( !*eip ) {
751 #ifndef BDB_HIER
752                 rc = bdb_id2entry( op->o_bd, tid, locker, id, &ep );
753                 if ( rc == 0 ) {
754                         rc = bdb_cache_find_ndn( op, tid,
755                                 &ep->e_nname, eip );
756                         if ( *eip ) islocked = 1;
757                         if ( rc ) {
758 #ifdef SLAP_ZONE_ALLOC
759                                 bdb_entry_return( bdb, ep, (*eip)->bei_zseq );
760 #else
761                                 bdb_entry_return( ep );
762 #endif
763                                 ep = NULL;
764                         }
765                 }
766 #else
767                 rc = hdb_cache_find_parent(op, tid, locker, id, eip );
768                 if ( rc == 0 ) islocked = 1;
769 #endif
770         }
771
772         /* Ok, we found the info, do we have the entry? */
773         if ( rc == 0 ) {
774                 if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
775                         rc = DB_NOTFOUND;
776                 } else {
777                         /* Make sure only one thread tries to load the entry */
778 load1:
779 #ifdef SLAP_ZONE_ALLOC
780                         if ((*eip)->bei_e && !slap_zn_validate(
781                                         bdb->bi_cache.c_zctx, (*eip)->bei_e, (*eip)->bei_zseq)) {
782                                 (*eip)->bei_e = NULL;
783                                 (*eip)->bei_zseq = 0;
784                         }
785 #endif
786                         if ( !(*eip)->bei_e && !((*eip)->bei_state & CACHE_ENTRY_LOADING)) {
787                                 load = 1;
788                                 (*eip)->bei_state |= CACHE_ENTRY_LOADING;
789                         }
790                         if ( islocked ) {
791                                 bdb_cache_entryinfo_unlock( *eip );
792                                 islocked = 0;
793                         }
794                         rc = bdb_cache_entry_db_lock( bdb->bi_dbenv, locker, *eip, 0, 0, lock );
795                         if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
796                                 rc = DB_NOTFOUND;
797                                 bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
798                         } else if ( rc == 0 ) {
799                                 if ( load ) {
800                                         /* Give up original read lock, obtain write lock
801                                          */
802                                     if ( rc == 0 ) {
803                                                 rc = bdb_cache_entry_db_relock( bdb->bi_dbenv, locker,
804                                                         *eip, 1, 0, lock );
805                                         }
806                                         if ( rc == 0 && !ep) {
807                                                 rc = bdb_id2entry( op->o_bd, tid, locker, id, &ep );
808                                         }
809                                         if ( rc == 0 ) {
810                                                 ep->e_private = *eip;
811 #ifdef BDB_HIER
812                                                 bdb_fix_dn( ep, 0 );
813 #endif
814                                                 (*eip)->bei_e = ep;
815 #ifdef SLAP_ZONE_ALLOC
816                                                 (*eip)->bei_zseq = *((ber_len_t *)ep - 2);
817 #endif
818                                                 ep = NULL;
819                                         }
820                                         (*eip)->bei_state ^= CACHE_ENTRY_LOADING;
821                                         if ( rc == 0 ) {
822                                                 /* If we succeeded, downgrade back to a readlock. */
823                                                 rc = bdb_cache_entry_db_relock( bdb->bi_dbenv, locker,
824                                                         *eip, 0, 0, lock );
825                                         } else {
826                                                 /* Otherwise, release the lock. */
827                                                 bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
828                                         }
829                                 } else if ( !(*eip)->bei_e ) {
830                                         /* Some other thread is trying to load the entry,
831                                          * give it a chance to finish.
832                                          */
833                                         bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
834                                         ldap_pvt_thread_yield();
835                                         bdb_cache_entryinfo_lock( *eip );
836                                         islocked = 1;
837                                         goto load1;
838 #ifdef BDB_HIER
839                                 } else {
840                                         /* Check for subtree renames
841                                          */
842                                         rc = bdb_fix_dn( (*eip)->bei_e, 1 );
843                                         if ( rc ) {
844                                                 bdb_cache_entry_db_relock( bdb->bi_dbenv,
845                                                         locker, *eip, 1, 0, lock );
846                                                 /* check again in case other modifier did it already */
847                                                 if ( bdb_fix_dn( (*eip)->bei_e, 1 ) )
848                                                         rc = bdb_fix_dn( (*eip)->bei_e, 2 );
849                                                 bdb_cache_entry_db_relock( bdb->bi_dbenv,
850                                                         locker, *eip, 0, 0, lock );
851                                         }
852 #endif
853                                 }
854
855                         }
856                 }
857         }
858         if ( islocked ) {
859                 bdb_cache_entryinfo_unlock( *eip );
860         }
861         if ( ep ) {
862 #ifdef SLAP_ZONE_ALLOC
863                 bdb_entry_return( bdb, ep, (*eip)->bei_zseq );
864 #else
865                 bdb_entry_return( ep );
866 #endif
867         }
868         if ( rc == 0 ) {
869
870                 if ( load ) {
871                         ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
872                         bdb->bi_cache.c_cursize++;
873                         ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
874                 }
875
876                 ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_head_mutex );
877
878                 /* If the LRU list has only one entry and this is it, it
879                  * doesn't need to be added again.
880                  */
881                 if ( bdb->bi_cache.c_lruhead == bdb->bi_cache.c_lrutail &&
882                         bdb->bi_cache.c_lruhead == *eip ) {
883                         ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );
884                 } else {
885                         /* if entry is on LRU list, remove from old spot */
886                         if ( (*eip)->bei_lrunext || (*eip)->bei_lruprev ) {
887                                 ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_tail_mutex );
888                                 LRU_DELETE( &bdb->bi_cache, *eip );
889                                 ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_tail_mutex );
890                         }
891                         /* lru_head_mutex is unlocked for us */
892                         bdb_cache_lru_add( bdb, *eip );
893                 }
894         }
895
896 #ifdef SLAP_ZONE_ALLOC
897         if (rc == 0 && (*eip)->bei_e) {
898                 slap_zn_rlock(bdb->bi_cache.c_zctx, (*eip)->bei_e);
899         }
900         slap_zh_runlock(bdb->bi_cache.c_zctx);
901 #endif
902         return rc;
903 }
904
905 int
906 bdb_cache_children(
907         Operation *op,
908         DB_TXN *txn,
909         Entry *e )
910 {
911         int rc;
912
913         if ( BEI(e)->bei_kids ) {
914                 return 0;
915         }
916         if ( BEI(e)->bei_state & CACHE_ENTRY_NO_KIDS ) {
917                 return DB_NOTFOUND;
918         }
919         rc = bdb_dn2id_children( op, txn, e );
920         if ( rc == DB_NOTFOUND ) {
921                 BEI(e)->bei_state |= CACHE_ENTRY_NO_KIDS | CACHE_ENTRY_NO_GRANDKIDS;
922         }
923         return rc;
924 }
925
/* Update the cache after a successful database Add.
 *
 * Called with the parent's EntryInfo (eip) mutex held; it is
 * released before returning. On success the new entry e is owned
 * by the cache (linked via e->e_private).
 */
int
bdb_cache_add(
	struct bdb_info *bdb,
	EntryInfo *eip,
	Entry *e,
	struct berval *nrdn,
	u_int32_t locker )
{
	EntryInfo *new, ei;
	DB_LOCK lock;
	int rc;
#ifdef BDB_HIER
	struct berval rdn = e->e_name;
#endif

	/* Build a stack-local EntryInfo describing the new entry;
	 * bdb_entryinfo_add_internal copies it into the cache proper.
	 */
	ei.bei_id = e->e_id;
	ei.bei_parent = eip;
	ei.bei_nrdn = *nrdn;
	ei.bei_lockpad = 0;

	/* Lock this entry so that bdb_add can run to completion.
	 * It can only fail if BDB has run out of lock resources.
	 */
	rc = bdb_cache_entry_db_lock( bdb->bi_dbenv, locker, &ei, 1, 0, &lock );
	if ( rc ) {
		bdb_cache_entryinfo_unlock( eip );
		return rc;
	}

#ifdef BDB_HIER
	/* Trim the full DN down to just the RDN (up to the first comma) */
	if ( nrdn->bv_len != e->e_nname.bv_len ) {
		char *ptr = ber_bvchr( &rdn, ',' );
		assert( ptr != NULL );
		rdn.bv_len = ptr - rdn.bv_val;
	}
	ber_dupbv( &ei.bei_rdn, &rdn );
	if ( eip->bei_dkids ) eip->bei_dkids++;
#endif

	rc = bdb_entryinfo_add_internal( bdb, &ei, &new );
	/* bdb_csn_commit can cause this when adding the database root entry */
	if ( new->bei_e ) {
		/* Discard the stale cached copy before installing e */
		new->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
		bdb_entry_return( bdb, new->bei_e, new->bei_zseq );
#else
		bdb_entry_return( new->bei_e );
#endif
	}
	new->bei_e = e;
	e->e_private = new;
	/* A freshly added entry cannot have children yet */
	new->bei_state = CACHE_ENTRY_NO_KIDS | CACHE_ENTRY_NO_GRANDKIDS;
	eip->bei_state &= ~CACHE_ENTRY_NO_KIDS;
	if (eip->bei_parent) {
		eip->bei_parent->bei_state &= ~CACHE_ENTRY_NO_GRANDKIDS;
	}
	bdb_cache_entryinfo_unlock( eip );

	/* NOTE(review): c_rwlock is write-unlocked here without a matching
	 * wlock in this function — presumably acquired inside
	 * bdb_entryinfo_add_internal; confirm before restructuring.
	 */
	++bdb->bi_cache.c_cursize;
	ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );

	/* set lru mutex */
	ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_head_mutex );

	/* lru_head_mutex is unlocked for us */
	bdb_cache_lru_add( bdb, new );

	return rc;
}
996
997 int
998 bdb_cache_modify(
999         Entry *e,
1000         Attribute *newAttrs,
1001         DB_ENV *env,
1002         u_int32_t locker,
1003         DB_LOCK *lock )
1004 {
1005         EntryInfo *ei = BEI(e);
1006         int rc;
1007         /* Get write lock on data */
1008         rc = bdb_cache_entry_db_relock( env, locker, ei, 1, 0, lock );
1009
1010         /* If we've done repeated mods on a cached entry, then e_attrs
1011          * is no longer contiguous with the entry, and must be freed.
1012          */
1013         if ( ! rc ) {
1014                 if ( (void *)e->e_attrs != (void *)(e+1) ) {
1015                         attrs_free( e->e_attrs ); 
1016                 }
1017                 e->e_attrs = newAttrs;
1018         }
1019         return rc;
1020 }
1021
/*
 * Change the rdn in the entryinfo. Also move to a new parent if needed.
 *
 * e    - the cached entry being renamed
 * nrdn - the new normalized RDN
 * new  - a temporary entry carrying the new name/attrs; its fields
 *        are moved into e (shallow) — caller retains the shell
 * ein  - new parent EntryInfo, or NULL to keep the current parent
 *
 * Holds the entry's data write lock and the affected parents'
 * entryinfo mutexes for the duration of the tree surgery.
 */
int
bdb_cache_modrdn(
	struct bdb_info *bdb,
	Entry *e,
	struct berval *nrdn,
	Entry *new,
	EntryInfo *ein,
	u_int32_t locker,
	DB_LOCK *lock )
{
	EntryInfo *ei = BEI(e), *pei;
	int rc;
#ifdef BDB_HIER
	struct berval rdn;
#endif

	/* Get write lock on data */
	rc =  bdb_cache_entry_db_relock( bdb->bi_dbenv, locker, ei, 1, 0, lock );
	if ( rc ) return rc;

	/* If we've done repeated mods on a cached entry, then e_attrs
	 * is no longer contiguous with the entry, and must be freed.
	 */
	if ( (void *)e->e_attrs != (void *)(e+1) ) {
		attrs_free( e->e_attrs );
	}
	e->e_attrs = new->e_attrs;
	/* Free the old name strings only if they are not embedded in
	 * the entry's contiguous buffer (e_bv).
	 */
	if( e->e_nname.bv_val < e->e_bv.bv_val ||
		e->e_nname.bv_val > e->e_bv.bv_val + e->e_bv.bv_len )
	{
		ch_free(e->e_name.bv_val);
		ch_free(e->e_nname.bv_val);
	}
	e->e_name = new->e_name;
	e->e_nname = new->e_nname;

	/* Lock the parent's kids AVL tree */
	pei = ei->bei_parent;
	bdb_cache_entryinfo_lock( pei );
	avl_delete( &pei->bei_kids, (caddr_t) ei, bdb_rdn_cmp );
	free( ei->bei_nrdn.bv_val );
	ber_dupbv( &ei->bei_nrdn, nrdn );
#ifdef BDB_HIER
	free( ei->bei_rdn.bv_val );

	/* Re-derive the (non-normalized) RDN from the new DN */
	rdn = e->e_name;
	if ( nrdn->bv_len != e->e_nname.bv_len ) {
		char *ptr = ber_bvchr(&rdn, ',');
		assert( ptr != NULL );
		rdn.bv_len = ptr - rdn.bv_val;
	}
	ber_dupbv( &ei->bei_rdn, &rdn );
#endif

	/* Same parent: reinsert under pei (still locked). New parent:
	 * hand off — unlock the old parent, lock the new one.
	 */
	if (!ein) {
		ein = ei->bei_parent;
	} else {
		ei->bei_parent = ein;
		bdb_cache_entryinfo_unlock( pei );
		bdb_cache_entryinfo_lock( ein );
	}
#ifdef BDB_HIER
	{
		/* Record the generation number of this change */
		ldap_pvt_thread_mutex_lock( &bdb->bi_modrdns_mutex );
		bdb->bi_modrdns++;
		ei->bei_modrdns = bdb->bi_modrdns;
		ldap_pvt_thread_mutex_unlock( &bdb->bi_modrdns_mutex );
	}
#endif
	avl_insert( &ein->bei_kids, ei, bdb_rdn_cmp, avl_dup_error );
	/* When ein == pei this also releases the lock taken above */
	bdb_cache_entryinfo_unlock( ein );
	return rc;
}
/*
 * cache_delete - delete the entry e from the cache.
 *
 * returns:	0	e was deleted ok
 *		1	e was not in the cache
 *		-1	something bad happened
 *
 * On return the entry's EntryInfo mutex is still held; the caller
 * is expected to release it (see bdb_cache_delete_cleanup).
 */
int
bdb_cache_delete(
    Cache	*cache,
    Entry		*e,
    DB_ENV	*env,
    u_int32_t	locker,
    DB_LOCK	*lock )
{
	EntryInfo *ei = BEI(e);
	int	rc;

	assert( e->e_private != NULL );

	/* Set this early, warn off any queriers */
	ei->bei_state |= CACHE_ENTRY_DELETED;

	/* Lock the entry's info */
	bdb_cache_entryinfo_lock( ei );

	/* Get write lock on the data */
	rc = bdb_cache_entry_db_relock( env, locker, ei, 1, 0, lock );
	if ( rc ) {
		/* couldn't lock, undo and give up */
		ei->bei_state ^= CACHE_ENTRY_DELETED;
		bdb_cache_entryinfo_unlock( ei );
		return rc;
	}

	Debug( LDAP_DEBUG_TRACE, "====> bdb_cache_delete( %ld )\n",
		e->e_id, 0, 0 );

	/* set lru mutex */
	ldap_pvt_thread_mutex_lock( &cache->lru_tail_mutex );

	/* set cache write lock */
	ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );

	rc = bdb_cache_delete_internal( cache, e->e_private, 1 );

	/* free cache write lock */
	ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );

	/* free lru mutex */
	ldap_pvt_thread_mutex_unlock( &cache->lru_tail_mutex );

	/* Leave entry info locked */

	return( rc );
}
1155
/* Release the resources of a deleted entry's EntryInfo and push
 * the EntryInfo onto the cache's free list for reuse.
 *
 * Called with ei's mutex held (as left by bdb_cache_delete);
 * it is released on return.
 */
void
bdb_cache_delete_cleanup(
	Cache *cache,
	EntryInfo *ei )
{
	/* Detach and return the cached Entry, if any */
	if ( ei->bei_e ) {
		ei->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
		bdb_entry_return( ei->bei_bdb, ei->bei_e, ei->bei_zseq );
#else
		bdb_entry_return( ei->bei_e );
#endif
		ei->bei_e = NULL;
	}

	free( ei->bei_nrdn.bv_val );
	ei->bei_nrdn.bv_val = NULL;
#ifdef BDB_HIER
	free( ei->bei_rdn.bv_val );
	ei->bei_rdn.bv_val = NULL;
	ei->bei_modrdns = 0;
	ei->bei_ckids = 0;
	ei->bei_dkids = 0;
#endif
	ei->bei_parent = NULL;
	ei->bei_kids = NULL;
	ei->bei_lruprev = NULL;

	/* Push onto the free list; bei_lrunext doubles as the free-list
	 * link (see bdb_cache_entryinfo_new).
	 */
	ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
	ei->bei_lrunext = cache->c_eifree;
	cache->c_eifree = ei;
	ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
	bdb_cache_entryinfo_unlock( ei );
}
1190
/* Unlink EntryInfo e from the parent's kids tree, the id tree and
 * the LRU list, adjusting the cache counters.
 *
 * Caller must hold the cache write lock and the lru mutex (see
 * bdb_cache_delete). Returns 0 on success, -1 if e was not found
 * in one of the trees.
 */
static int
bdb_cache_delete_internal(
    Cache	*cache,
    EntryInfo		*e,
    int		decr )
{
	int rc = 0;	/* return code */

	/* Lock the parent's kids tree */
	bdb_cache_entryinfo_lock( e->bei_parent );

#ifdef BDB_HIER
	e->bei_parent->bei_ckids--;
	if ( decr && e->bei_parent->bei_dkids ) e->bei_parent->bei_dkids--;
#endif
	/* dn tree */
	if ( avl_delete( &e->bei_parent->bei_kids, (caddr_t) e, bdb_rdn_cmp )
		== NULL )
	{
		rc = -1;
	}
	/* If the parent still has other kids, a leaf was lost; otherwise
	 * the parent itself presumably becomes a leaf and the count is
	 * unchanged — NOTE(review): confirm c_leaves semantics.
	 */
	if ( e->bei_parent->bei_kids )
		cache->c_leaves--;

	/* id tree */
	if ( avl_delete( &cache->c_idtree, (caddr_t) e, bdb_id_cmp ) == NULL ) {
		rc = -1;
	}

	if ( rc == 0 ){
		cache->c_eiused--;

		/* lru */
		LRU_DELETE( cache, e );
		/* c_cursize counts only EntryInfos with a cached Entry */
		if ( e->bei_e ) cache->c_cursize--;
	}

	bdb_cache_entryinfo_unlock( e->bei_parent );

	return( rc );
}
1232
1233 static void
1234 bdb_entryinfo_release( void *data )
1235 {
1236         EntryInfo *ei = (EntryInfo *)data;
1237         if ( ei->bei_kids ) {
1238                 avl_free( ei->bei_kids, NULL );
1239         }
1240         if ( ei->bei_e ) {
1241                 ei->bei_e->e_private = NULL;
1242 #ifdef SLAP_ZONE_ALLOC
1243                 bdb_entry_return( ei->bei_bdb, ei->bei_e, ei->bei_zseq );
1244 #else
1245                 bdb_entry_return( ei->bei_e );
1246 #endif
1247         }
1248         bdb_cache_entryinfo_destroy( ei );
1249 }
1250
/* Free the entire entry cache: the DN tree, the ID tree (returning
 * all cached entries), and the EntryInfo free list, then reset all
 * counters and list heads. Used at shutdown/invalidation.
 */
void
bdb_cache_release_all( Cache *cache )
{
	/* set cache write lock */
	ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
	/* set lru mutex */
	ldap_pvt_thread_mutex_lock( &cache->lru_tail_mutex );

	Debug( LDAP_DEBUG_TRACE, "====> bdb_cache_release_all\n", 0, 0, 0 );

	avl_free( cache->c_dntree.bei_kids, NULL );
	avl_free( cache->c_idtree, bdb_entryinfo_release );
	/* Drain the free list; c_lruhead is reused here as a scratch
	 * "next" pointer and is reset to NULL below.
	 */
	for (;cache->c_eifree;cache->c_eifree = cache->c_lruhead) {
		cache->c_lruhead = cache->c_eifree->bei_lrunext;
		bdb_cache_entryinfo_destroy(cache->c_eifree);
	}
	cache->c_cursize = 0;
	cache->c_eiused = 0;
	cache->c_leaves = 0;
	cache->c_idtree = NULL;
	cache->c_lruhead = NULL;
	cache->c_lrutail = NULL;
	cache->c_dntree.bei_kids = NULL;

	/* free lru mutex */
	ldap_pvt_thread_mutex_unlock( &cache->lru_tail_mutex );
	/* free cache write lock */
	ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
}
1280
#ifdef LDAP_DEBUG
#ifdef SLAPD_UNUSED
/* Debug helper: dump the LRU list in both directions so broken
 * prev/next linkage is visible.
 */
static void
bdb_lru_print( Cache *cache )
{
	EntryInfo	*e;

	fprintf( stderr, "LRU queue (head to tail):\n" );
	for ( e = cache->c_lruhead; e; e = e->bei_lrunext )
		fprintf( stderr, "\trdn \"%20s\" id %ld\n",
			e->bei_nrdn.bv_val, e->bei_id );

	fprintf( stderr, "LRU queue (tail to head):\n" );
	for ( e = cache->c_lrutail; e; e = e->bei_lruprev )
		fprintf( stderr, "\trdn \"%20s\" id %ld\n",
			e->bei_nrdn.bv_val, e->bei_id );
}
#endif
#endif
1301
#ifdef BDB_REUSE_LOCKERS
/* Thread-pool key destructor: free the BDB locker ID cached for a
 * thread. If BDB reports EINVAL (the locker still holds locks),
 * release all of its locks and retry the free.
 */
static void
bdb_locker_id_free( void *key, void *data )
{
	DB_ENV *env = key;
	u_int32_t lockid = (long)data;	/* locker ID was stashed as a pointer */
	int rc;

	rc = XLOCK_ID_FREE( env, lockid );
	if ( rc == EINVAL ) {
		DB_LOCKREQ lr;
		Debug( LDAP_DEBUG_ANY,
			"bdb_locker_id_free: %lu err %s(%d)\n",
			(unsigned long) lockid, db_strerror(rc), rc );
		/* release all locks held by this locker. */
		lr.op = DB_LOCK_PUT_ALL;
		lr.obj = NULL;
		env->lock_vec( env, lockid, 0, &lr, 1, NULL );
		XLOCK_ID_FREE( env, lockid );
	}
}
1323
1324 int
1325 bdb_locker_id( Operation *op, DB_ENV *env, u_int32_t *locker )
1326 {
1327         int i, rc;
1328         u_int32_t lockid;
1329         void *data;
1330         void *ctx;
1331
1332         if ( !env || !locker ) return -1;
1333
1334         /* If no op was provided, try to find the ctx anyway... */
1335         if ( op ) {
1336                 ctx = op->o_threadctx;
1337         } else {
1338                 ctx = ldap_pvt_thread_pool_context();
1339         }
1340
1341         /* Shouldn't happen unless we're single-threaded */
1342         if ( !ctx ) {
1343                 *locker = 0;
1344                 return 0;
1345         }
1346
1347         if ( ldap_pvt_thread_pool_getkey( ctx, env, &data, NULL ) ) {
1348                 for ( i=0, rc=1; rc != 0 && i<4; i++ ) {
1349                         rc = XLOCK_ID( env, &lockid );
1350                         if (rc) ldap_pvt_thread_yield();
1351                 }
1352                 if ( rc != 0) {
1353                         return rc;
1354                 }
1355                 data = (void *)((long)lockid);
1356                 if ( ( rc = ldap_pvt_thread_pool_setkey( ctx, env,
1357                         data, bdb_locker_id_free ) ) ) {
1358                         XLOCK_ID_FREE( env, lockid );
1359                         Debug( LDAP_DEBUG_ANY, "bdb_locker_id: err %s(%d)\n",
1360                                 db_strerror(rc), rc, 0 );
1361
1362                         return rc;
1363                 }
1364         } else {
1365                 lockid = (long)data;
1366         }
1367         *locker = lockid;
1368         return 0;
1369 }
1370 #endif /* BDB_REUSE_LOCKERS */
1371
/* Evict the cached Entry from an EntryInfo (keeping the EntryInfo
 * itself), used to shrink the cache. The info is skipped if the
 * entry's data lock cannot be obtained or the entry is not linked.
 */
void
bdb_cache_delete_entry(
	struct bdb_info *bdb,
	EntryInfo *ei,
	u_int32_t locker,
	DB_LOCK *lock )
{
	ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
	/* NOTE(review): the final flag to bdb_cache_entry_db_lock differs
	 * from other call sites (1 vs 0) — presumably a non-blocking
	 * trylock so eviction never stalls; confirm against its definition.
	 */
	if ( bdb_cache_entry_db_lock( bdb->bi_dbenv, bdb->bi_cache.c_locker, ei, 1, 1, lock ) == 0 )
	{
		if ( ei->bei_e && !(ei->bei_state & CACHE_ENTRY_NOT_LINKED )) {
			LRU_DELETE( &bdb->bi_cache, ei );
			ei->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
			bdb_entry_return( bdb, ei->bei_e, ei->bei_zseq );
#else
			bdb_entry_return( ei->bei_e );
#endif
			ei->bei_e = NULL;
			--bdb->bi_cache.c_cursize;
		}
		bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
	}
	ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
}