]> git.sur5r.net Git - openldap/blob - servers/slapd/back-bdb/cache.c
use slab memory for proxyauthz
[openldap] / servers / slapd / back-bdb / cache.c
1 /* cache.c - routines to maintain an in-core cache of entries */
2 /* $OpenLDAP$ */
3 /* This work is part of OpenLDAP Software <http://www.openldap.org/>.
4  *
5  * Copyright 2000-2006 The OpenLDAP Foundation.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted only as authorized by the OpenLDAP
10  * Public License.
11  *
12  * A copy of this license is available in the file LICENSE in the
13  * top-level directory of the distribution or, alternatively, at
14  * <http://www.OpenLDAP.org/license.html>.
15  */
16
17 #include "portable.h"
18
19 #include <stdio.h>
20
21 #include <ac/errno.h>
22 #include <ac/string.h>
23 #include <ac/socket.h>
24
25 #include "slap.h"
26
27 #include "back-bdb.h"
28
29 #include "ldap_rq.h"
30
31 #ifdef BDB_HIER
32 #define bdb_cache_lru_add       hdb_cache_lru_add
33 #endif
34 static void bdb_cache_lru_add( struct bdb_info *bdb, EntryInfo *ei );
35
36 static int      bdb_cache_delete_internal(Cache *cache, EntryInfo *e, int decr);
37 #ifdef LDAP_DEBUG
38 #ifdef SLAPD_UNUSED
39 static void     bdb_lru_print(Cache *cache);
40 #endif
41 #endif
42
43 static EntryInfo *
44 bdb_cache_entryinfo_new( Cache *cache )
45 {
46         EntryInfo *ei = NULL;
47
48         if ( cache->c_eifree ) {
49                 ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
50                 if ( cache->c_eifree ) {
51                         ei = cache->c_eifree;
52                         cache->c_eifree = ei->bei_lrunext;
53                 }
54                 ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
55         }
56         if ( ei ) {
57                 ei->bei_lrunext = NULL;
58                 ei->bei_state = 0;
59         } else {
60                 ei = ch_calloc(1, sizeof(struct bdb_entry_info));
61                 ldap_pvt_thread_mutex_init( &ei->bei_kids_mutex );
62         }
63
64         return ei;
65 }
66
/* Atomically release and reacquire a lock: both operations are
 * submitted in a single lock_vec batch so no other locker can
 * intervene between the PUT and the GET.
 *
 * env     - BDB environment
 * locker  - lock owner id
 * ei      - entry whose ID names the lock object
 * rw      - nonzero requests a write lock, zero a read lock
 * tryOnly - if set, use DB_LOCK_NOWAIT instead of blocking
 * lock    - in/out: currently held lock, replaced by the new lock
 *
 * Returns 0 on success or a BDB error code.
 */
int
bdb_cache_entry_db_relock(
	DB_ENV *env,
	u_int32_t locker,
	EntryInfo *ei,
	int rw,
	int tryOnly,
	DB_LOCK *lock )
{
#ifdef NO_THREADS
	return 0;
#else
	int	rc;
	DBT	lockobj;
	DB_LOCKREQ list[2];

	if ( !lock ) return 0;

	/* NOTE(review): size is sizeof(id)+1, presumably to keep these
	 * lock objects distinct from other users of the bare ID — confirm.
	 */
	lockobj.data = &ei->bei_id;
	lockobj.size = sizeof(ei->bei_id) + 1;

	/* Release the old lock and acquire the new one in one batch */
	list[0].op = DB_LOCK_PUT;
	list[0].lock = *lock;
	list[1].op = DB_LOCK_GET;
	list[1].lock = *lock;
	list[1].mode = rw ? DB_LOCK_WRITE : DB_LOCK_READ;
	list[1].obj = &lockobj;
	rc = env->lock_vec(env, locker, tryOnly ? DB_LOCK_NOWAIT : 0,
		list, 2, NULL );

	if (rc && !tryOnly) {
		Debug( LDAP_DEBUG_TRACE,
			"bdb_cache_entry_db_relock: entry %ld, rw %d, rc %d\n",
			ei->bei_id, rw, rc );
	} else {
		/* Success, or a nonblocking attempt: hand back the
		 * lock produced by the GET request.
		 */
		*lock = list[1].lock;
	}
	return rc;
#endif
}
108
109 static int
110 bdb_cache_entry_db_lock( DB_ENV *env, u_int32_t locker, EntryInfo *ei,
111         int rw, int tryOnly, DB_LOCK *lock )
112 {
113 #ifdef NO_THREADS
114         return 0;
115 #else
116         int       rc;
117         DBT       lockobj;
118         int       db_rw;
119
120         if ( !lock ) return 0;
121
122         if (rw)
123                 db_rw = DB_LOCK_WRITE;
124         else
125                 db_rw = DB_LOCK_READ;
126
127         lockobj.data = &ei->bei_id;
128         lockobj.size = sizeof(ei->bei_id) + 1;
129
130         rc = LOCK_GET(env, locker, tryOnly ? DB_LOCK_NOWAIT : 0,
131                                         &lockobj, db_rw, lock);
132         if (rc && !tryOnly) {
133                 Debug( LDAP_DEBUG_TRACE,
134                         "bdb_cache_entry_db_lock: entry %ld, rw %d, rc %d\n",
135                         ei->bei_id, rw, rc );
136         }
137         return rc;
138 #endif /* NO_THREADS */
139 }
140
141 int
142 bdb_cache_entry_db_unlock ( DB_ENV *env, DB_LOCK *lock )
143 {
144 #ifdef NO_THREADS
145         return 0;
146 #else
147         int rc;
148
149         if ( !lock ) return 0;
150
151         rc = LOCK_PUT ( env, lock );
152         return rc;
153 #endif
154 }
155
156 static int
157 bdb_cache_entryinfo_destroy( EntryInfo *e )
158 {
159         ldap_pvt_thread_mutex_destroy( &e->bei_kids_mutex );
160         free( e->bei_nrdn.bv_val );
161 #ifdef BDB_HIER
162         free( e->bei_rdn.bv_val );
163 #endif
164         free( e );
165         return 0;
166 }
167
/* Unlink (ei) from the cache's doubly-linked LRU list, fixing up
 * c_lruhead/c_lrutail when (ei) is first/last, and NULLing (ei)'s
 * own link pointers. Callers in this file hold an LRU mutex while
 * using this.
 */
#define LRU_DELETE( cache, ei ) do { \
	if ( (ei)->bei_lruprev != NULL ) { \
		(ei)->bei_lruprev->bei_lrunext = (ei)->bei_lrunext; \
	} else { \
		(cache)->c_lruhead = (ei)->bei_lrunext; \
	} \
	if ( (ei)->bei_lrunext != NULL ) { \
		(ei)->bei_lrunext->bei_lruprev = (ei)->bei_lruprev; \
	} else { \
		(cache)->c_lrutail = (ei)->bei_lruprev; \
	} \
	(ei)->bei_lrunext = (ei)->bei_lruprev = NULL; \
} while(0)
181
/* Push (ei) onto the head of the LRU list (most-recently-used end),
 * setting the tail as well when the list was empty. Callers in this
 * file hold lru_head_mutex while using this.
 */
#define LRU_ADD( cache, ei ) do { \
	(ei)->bei_lrunext = (cache)->c_lruhead; \
	if ( (ei)->bei_lrunext != NULL ) { \
		(ei)->bei_lrunext->bei_lruprev = (ei); \
	} \
	(cache)->c_lruhead = (ei); \
	(ei)->bei_lruprev = NULL; \
	if ( (cache)->c_lrutail == NULL ) { \
		(cache)->c_lrutail = (ei); \
	} \
} while(0)
193
194 /* Do a length-ordered sort on normalized RDNs */
195 static int
196 bdb_rdn_cmp( const void *v_e1, const void *v_e2 )
197 {
198         const EntryInfo *e1 = v_e1, *e2 = v_e2;
199         int rc = e1->bei_nrdn.bv_len - e2->bei_nrdn.bv_len;
200         if (rc == 0) {
201                 rc = strncmp( e1->bei_nrdn.bv_val, e2->bei_nrdn.bv_val,
202                         e1->bei_nrdn.bv_len );
203         }
204         return rc;
205 }
206
207 static int
208 bdb_id_cmp( const void *v_e1, const void *v_e2 )
209 {
210         const EntryInfo *e1 = v_e1, *e2 = v_e2;
211         return e1->bei_id - e2->bei_id;
212 }
213
/* Create an entryinfo in the cache. Caller must release the locks later.
 * On return, the cache-wide c_rwlock (write) and the parent's kids
 * mutex are both held, and *res points at the cached EntryInfo —
 * either the newly created node or a pre-existing node for the same ID.
 * Always returns 0.
 */
static int
bdb_entryinfo_add_internal(
	struct bdb_info *bdb,
	EntryInfo *ei,
	EntryInfo **res )
{
	EntryInfo *ei2 = NULL;

	*res = NULL;

	/* Get a node before taking any locks */
	ei2 = bdb_cache_entryinfo_new( &bdb->bi_cache );

	ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
	bdb_cache_entryinfo_lock( ei->bei_parent );

	/* Copy identifying fields from the caller's template */
	ei2->bei_id = ei->bei_id;
	ei2->bei_parent = ei->bei_parent;
#ifdef BDB_HIER
	ei2->bei_rdn = ei->bei_rdn;
#endif
#ifdef SLAP_ZONE_ALLOC
	ei2->bei_bdb = bdb;
#endif

	/* Add to cache ID tree */
	if (avl_insert( &bdb->bi_cache.c_idtree, ei2, bdb_id_cmp, avl_dup_error )) {
		/* Duplicate ID: discard our node, use the existing one */
		EntryInfo *eix;
		eix = avl_find( bdb->bi_cache.c_idtree, ei2, bdb_id_cmp );
		bdb_cache_entryinfo_destroy( ei2 );
		ei2 = eix;
#ifdef BDB_HIER
		/* It got freed above because its value was
		 * assigned to ei2.
		 */
		ei->bei_rdn.bv_val = NULL;
#endif
	} else {
		bdb->bi_cache.c_eiused++;
		ber_dupbv( &ei2->bei_nrdn, &ei->bei_nrdn );

		/* This is a new leaf node. But if parent had no kids, then it was
		 * a leaf and we would be decrementing that. So, only increment if
		 * the parent already has kids.
		 */
		if ( ei->bei_parent->bei_kids || !ei->bei_parent->bei_id )
			bdb->bi_cache.c_leaves++;
		avl_insert( &ei->bei_parent->bei_kids, ei2, bdb_rdn_cmp,
			avl_dup_error );
#ifdef BDB_HIER
		ei->bei_parent->bei_ckids++;
#endif
	}

	*res = ei2;
	return 0;
}
272
/* Find the EntryInfo for the requested DN. If the DN cannot be found, return
 * the info for its closest ancestor. *res should be NULL to process a
 * complete DN starting from the tree root. Otherwise *res must be the
 * immediate parent of the requested DN, and only the RDN will be searched.
 * The EntryInfo is locked upon return and must be unlocked by the caller.
 *
 * Returns LDAP_SUCCESS/0 when found, or the bdb_dn2id error code
 * (e.g. DB_NOTFOUND) when a component does not exist; *res is set
 * to the deepest node reached in every case.
 */
int
bdb_cache_find_ndn(
	Operation	*op,
	DB_TXN		*txn,
	struct berval	*ndn,
	EntryInfo	**res )
{
	struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
	EntryInfo	ei, *eip, *ei2;
	int rc = 0;
	char *ptr;

	/* this function is always called with normalized DN */
	if ( *res ) {
		/* we're doing a onelevel search for an RDN */
		ei.bei_nrdn.bv_val = ndn->bv_val;
		ei.bei_nrdn.bv_len = dn_rdnlen( op->o_bd, ndn );
		eip = *res;
	} else {
		/* we're searching a full DN from the root */
		ptr = ndn->bv_val + ndn->bv_len - op->o_bd->be_nsuffix[0].bv_len;
		ei.bei_nrdn.bv_val = ptr;
		ei.bei_nrdn.bv_len = op->o_bd->be_nsuffix[0].bv_len;
		/* Skip to next rdn if suffix is empty */
		if ( ei.bei_nrdn.bv_len == 0 ) {
			/* Scan backwards for the previous DN separator */
			for (ptr = ei.bei_nrdn.bv_val - 2; ptr > ndn->bv_val
				&& !DN_SEPARATOR(*ptr); ptr--) /* empty */;
			if ( ptr >= ndn->bv_val ) {
				if (DN_SEPARATOR(*ptr)) ptr++;
				ei.bei_nrdn.bv_len = ei.bei_nrdn.bv_val - ptr;
				ei.bei_nrdn.bv_val = ptr;
			}
		}
		eip = &bdb->bi_cache.c_dntree;
	}
	
	/* Walk down the tree one RDN at a time. eip is the current
	 * (locked) parent; ei.bei_nrdn holds the RDN being looked up
	 * among eip's kids.
	 */
	for ( bdb_cache_entryinfo_lock( eip ); eip; ) {
		ei.bei_parent = eip;
		ei2 = (EntryInfo *)avl_find( eip->bei_kids, &ei, bdb_rdn_cmp );
		if ( !ei2 ) {
			/* RDN not cached yet */
			int len = ei.bei_nrdn.bv_len;
				
			if ( BER_BVISEMPTY( ndn )) {
				/* Empty DN: the current node is the answer,
				 * returned locked per the contract above.
				 */
				*res = eip;
				return LDAP_SUCCESS;
			}

			/* Look up the remaining DN suffix in dn2id;
			 * drop eip's lock across the database access.
			 */
			ei.bei_nrdn.bv_len = ndn->bv_len -
				(ei.bei_nrdn.bv_val - ndn->bv_val);
			bdb_cache_entryinfo_unlock( eip );

			rc = bdb_dn2id( op, txn, &ei.bei_nrdn, &ei );
			if (rc) {
				/* Not in the DB either: return the closest
				 * ancestor, re-locked as promised.
				 */
				bdb_cache_entryinfo_lock( eip );
				*res = eip;
				return rc;
			}

			/* DN exists but needs to be added to cache */
			ei.bei_nrdn.bv_len = len;
			rc = bdb_entryinfo_add_internal( bdb, &ei, &ei2 );
			/* add_internal left eip and c_rwlock locked */
			ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
			if ( rc ) {
				*res = eip;
				return rc;
			}
		} else if ( ei2->bei_state & CACHE_ENTRY_DELETED ) {
			/* In the midst of deleting? Give it a chance to
			 * complete.
			 */
			bdb_cache_entryinfo_unlock( eip );
			ldap_pvt_thread_yield();
			bdb_cache_entryinfo_lock( eip );
			*res = eip;
			return DB_NOTFOUND;
		}
		/* Step down: release the parent, lock the child */
		bdb_cache_entryinfo_unlock( eip );
		bdb_cache_entryinfo_lock( ei2 );

		eip = ei2;

		/* Advance to next lower RDN */
		for (ptr = ei.bei_nrdn.bv_val - 2; ptr > ndn->bv_val
			&& !DN_SEPARATOR(*ptr); ptr--) /* empty */;
		if ( ptr >= ndn->bv_val ) {
			if (DN_SEPARATOR(*ptr)) ptr++;
			ei.bei_nrdn.bv_len = ei.bei_nrdn.bv_val - ptr - 1;
			ei.bei_nrdn.bv_val = ptr;
		}
		if ( ptr < ndn->bv_val ) {
			/* Whole DN consumed; eip is the target, still locked */
			*res = eip;
			break;
		}
	}

	return rc;
}
377
378 #ifdef BDB_HIER
/* Walk up the tree from a child node, looking for an ID that's already
 * been linked into the cache. Allocates EntryInfo nodes for each
 * ancestor visited along the way and links them into the DN tree
 * once a cached ancestor (or the root) is reached. On success *res
 * is the EntryInfo for the requested id, locked; returns 0, or the
 * hdb_dn2id_parent error code on failure.
 */
int
hdb_cache_find_parent(
	Operation *op,
	DB_TXN *txn,
	u_int32_t	locker,
	ID id,
	EntryInfo **res )
{
	struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
	EntryInfo ei, eip, *ei2 = NULL, *ein = NULL, *eir = NULL;
	int rc;
	int addlru = 0;

	ei.bei_id = id;
	ei.bei_kids = NULL;
	ei.bei_ckids = 0;

	for (;;) {
		/* Get the parent's ID; also fills in ei's rdn fields */
		rc = hdb_dn2id_parent( op, txn, locker, &ei, &eip.bei_id );
		if ( rc ) break;

		/* Save the previous node, if any */
		ei2 = ein;

		/* Create a new node for the current ID */
		ein = bdb_cache_entryinfo_new( &bdb->bi_cache );
		ein->bei_id = ei.bei_id;
		ein->bei_kids = ei.bei_kids;
		ein->bei_nrdn = ei.bei_nrdn;
		ein->bei_rdn = ei.bei_rdn;
		ein->bei_ckids = ei.bei_ckids;
#ifdef SLAP_ZONE_ALLOC
		ein->bei_bdb = bdb;
#endif
		ei.bei_ckids = 0;
		
		/* This node is not fully connected yet */
		ein->bei_state = CACHE_ENTRY_NOT_LINKED;

		/* Insert this node into the ID tree */
		ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
		if ( avl_insert( &bdb->bi_cache.c_idtree, (caddr_t)ein,
			bdb_id_cmp, avl_dup_error ) ) {

			/* Someone else created this node just before us.
			 * Free our new copy and use the existing one.
			 */
			bdb_cache_entryinfo_destroy( ein );
			ein = (EntryInfo *)avl_find( bdb->bi_cache.c_idtree,
				(caddr_t) &ei, bdb_id_cmp );
			
			/* Link in any kids we've already processed */
			if ( ei2 ) {
				bdb_cache_entryinfo_lock( ein );
				avl_insert( &ein->bei_kids, (caddr_t)ei2,
					bdb_rdn_cmp, avl_dup_error );
				ein->bei_ckids++;
				bdb_cache_entryinfo_unlock( ein );
			}
			/* Existing node: don't put it on the LRU list again */
			addlru = 0;

		}

		/* If this is the first time, save this node
		 * to be returned later.
		 */
		if ( eir == NULL ) eir = ein;

		/* If there was a previous node, link it to this one */
		if ( ei2 ) ei2->bei_parent = ein;

		/* Look for this node's parent */
		if ( eip.bei_id ) {
			ei2 = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
					(caddr_t) &eip, bdb_id_cmp );
		} else {
			/* Parent ID 0: parent is the DN-tree root */
			ei2 = &bdb->bi_cache.c_dntree;
		}
		bdb->bi_cache.c_eiused++;
		if ( ei2 && ( ei2->bei_kids || !ei2->bei_id ))
				bdb->bi_cache.c_leaves++;
		ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );

		/* addlru starts 0, so the node made on the first pass is
		 * skipped here; bdb_cache_lru_add releases lru_head_mutex.
		 */
		if ( addlru ) {
			ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_head_mutex );
			bdb_cache_lru_add( bdb, ein );
		}
		addlru = 1;

		/* Got the parent, link in and we're done. */
		if ( ei2 ) {
			bdb_cache_entryinfo_lock( ei2 );
			ein->bei_parent = ei2;
			avl_insert( &ei2->bei_kids, (caddr_t)ein, bdb_rdn_cmp,
				avl_dup_error);
			ei2->bei_ckids++;
			bdb_cache_entryinfo_unlock( ei2 );
			bdb_cache_entryinfo_lock( eir );

			/* Reset all the state info */
			for (ein = eir; ein != ei2; ein=ein->bei_parent)
				ein->bei_state &= ~CACHE_ENTRY_NOT_LINKED;
			*res = eir;
			break;
		}
		/* Parent not cached: continue walking up with its ID,
		 * carrying the current node as its only known kid.
		 */
		ei.bei_kids = NULL;
		ei.bei_id = eip.bei_id;
		ei.bei_ckids = 1;
		avl_insert( &ei.bei_kids, (caddr_t)ein, bdb_rdn_cmp,
			avl_dup_error );
	}
	return rc;
}
495
/* Used by hdb_dn2idl when loading the EntryInfo for all the children
 * of a given node. ei is a template whose parent is already cached;
 * *res is set to the cached EntryInfo (existing or newly added).
 * Returns 0 on success.
 */
int hdb_cache_load(
	struct bdb_info *bdb,
	EntryInfo *ei,
	EntryInfo **res )
{
	EntryInfo *ei2;
	int rc;

	/* See if we already have this one */
	bdb_cache_entryinfo_lock( ei->bei_parent );
	ei2 = (EntryInfo *)avl_find( ei->bei_parent->bei_kids, ei, bdb_rdn_cmp );
	bdb_cache_entryinfo_unlock( ei->bei_parent );

	if ( !ei2 ) {
		/* Not found, add it */
		struct berval bv;

		/* bei_rdn was not malloc'd before, do it now */
		ber_dupbv( &bv, &ei->bei_rdn );
		ei->bei_rdn = bv;

		/* add_internal returns with the parent's mutex and the
		 * cache c_rwlock held; release both before returning.
		 */
		rc = bdb_entryinfo_add_internal( bdb, ei, res );
		bdb_cache_entryinfo_unlock( ei->bei_parent );
		ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
	} else {
		/* Found, return it */
		*res = ei2;
		return 0;
	}
	return rc;
}
530 #endif
531
/* Reclaim idle entries from the tail (least-recently-used end) of
 * the LRU list. Frees cached Entry data in batches of c_minfree and
 * stops once the cache has dropped at least c_minfree below
 * c_maxsize. Runs inline in tool mode (ctx == NULL) or as a
 * runqueue task; arg is the re_s task whose arg is the bdb_info.
 */
static void *
bdb_cache_lru_purge(void *ctx, void *arg)
{
	struct re_s *rtask = arg;
	struct bdb_info *bdb = rtask->arg;
	DB_LOCK		lock, *lockp;
	EntryInfo *elru, *elprev;
	int count = 0;

	/* Only take per-entry DB locks when a cache locker id exists */
	if ( bdb->bi_cache.c_locker ) {
		lockp = &lock;
	} else {
		lockp = NULL;
	}

	ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_tail_mutex );

	/* Look for an unused entry to remove */
	for (elru = bdb->bi_cache.c_lrutail; elru; elru = elprev ) {
		elprev = elru->bei_lruprev;

		/* If we can successfully writelock it, then
		 * the object is idle. (tryOnly set, so no blocking.)
		 */
		if ( bdb_cache_entry_db_lock( bdb->bi_dbenv,
				bdb->bi_cache.c_locker, elru, 1, 1, lockp ) == 0 ) {

			int stop = 0;

			/* If this node is in the process of linking into the cache,
			 * or this node is being deleted, skip it.
			 */
			if ( elru->bei_state &
				( CACHE_ENTRY_NOT_LINKED | CACHE_ENTRY_DELETED )) {
				bdb_cache_entry_db_unlock( bdb->bi_dbenv, lockp );
				continue;
			}
			/* Free entry for this node if it's present */
			if ( elru->bei_e ) {
				elru->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
				bdb_entry_return( bdb, elru->bei_e, elru->bei_zseq );
#else
				bdb_entry_return( elru->bei_e );
#endif
				elru->bei_e = NULL;
				count++;
			}
			/* ITS#4010 if we're in slapcat, and this node is a leaf
			 * node, free it.
			 *
			 * FIXME: we need to do this for slapd as well, (which is
			 * why we compute bi_cache.c_leaves now) but at the moment
			 * we can't because it causes unresolvable deadlocks. 
			 */
			if ( slapMode & SLAP_TOOL_READONLY ) {
				if ( !elru->bei_kids ) {
					/* This does LRU_DELETE for us */
					bdb_cache_delete_internal( &bdb->bi_cache, elru, 0 );
					bdb_cache_delete_cleanup( &bdb->bi_cache, elru );
				}
				/* Leave node on LRU list for a future pass */
			} else {
				LRU_DELETE( &bdb->bi_cache, elru );
			}
			bdb_cache_entry_db_unlock( bdb->bi_dbenv, lockp );

			/* After each batch of c_minfree frees, update the
			 * cache size and see whether we can stop.
			 */
			if ( count == bdb->bi_cache.c_minfree ) {
				ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
				bdb->bi_cache.c_cursize -= bdb->bi_cache.c_minfree;
				if ( bdb->bi_cache.c_maxsize - bdb->bi_cache.c_cursize >=
					bdb->bi_cache.c_minfree )
					stop = 1;
				count = 0;
				ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
			}
			if (stop) break;
		}
	}

	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_tail_mutex );

	/* If we're running as a task, drop the task */
	if ( ctx ) {
		ldap_pvt_thread_mutex_lock( &slapd_rq.rq_mutex );
		ldap_pvt_runqueue_stoptask( &slapd_rq, rtask );
		/* Defer processing till we're needed again */
		ldap_pvt_runqueue_resched( &slapd_rq, rtask, 1 );
		ldap_pvt_thread_mutex_unlock( &slapd_rq.rq_mutex );
	}

	return NULL;
}
625
/* caller must have lru_head_mutex locked. mutex
 * will be unlocked on return.
 *
 * Puts ei at the head of the LRU list; if the cache has grown past
 * c_maxsize, triggers a purge — synchronously in tool mode,
 * otherwise by (re)scheduling the bi_cache_task runqueue task.
 */
static void
bdb_cache_lru_add(
	struct bdb_info *bdb,
	EntryInfo *ei )
{
	LRU_ADD( &bdb->bi_cache, ei );
	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );

	/* See if we're above the cache size limit */
	if ( bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize ) {
		if ( slapMode & SLAP_TOOL_MODE ) {
			struct re_s rtask;

			/* Tool mode: run the purge directly on this thread */
			rtask.arg = bdb;
			bdb_cache_lru_purge( NULL, &rtask );
		} else {
			ldap_pvt_thread_mutex_lock( &slapd_rq.rq_mutex );
			if ( bdb->bi_cache_task ) {
				if ( !ldap_pvt_runqueue_isrunning( &slapd_rq,
					bdb->bi_cache_task )) {
					/* We want it to start right now */
					bdb->bi_cache_task->interval.tv_sec = 0;
					ldap_pvt_runqueue_resched( &slapd_rq, bdb->bi_cache_task,
						0 );
					/* But don't try to reschedule it while it's running */
					bdb->bi_cache_task->interval.tv_sec = 3600;
				}
			} else {
				/* First time: create the purge task */
				bdb->bi_cache_task = ldap_pvt_runqueue_insert( &slapd_rq, 3600,
					bdb_cache_lru_purge, bdb, "bdb_cache_lru_purge",
					bdb->bi_dbenv_home );
			}
			ldap_pvt_thread_mutex_unlock( &slapd_rq.rq_mutex );
		}
	}
}
665
666 EntryInfo *
667 bdb_cache_find_info(
668         struct bdb_info *bdb,
669         ID id )
670 {
671         EntryInfo       ei = { 0 },
672                         *ei2;
673
674         ei.bei_id = id;
675
676         ldap_pvt_thread_rdwr_rlock( &bdb->bi_cache.c_rwlock );
677         ei2 = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
678                                         (caddr_t) &ei, bdb_id_cmp );
679         ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
680         return ei2;
681 }
682
683 /*
684  * cache_find_id - find an entry in the cache, given id.
685  * The entry is locked for Read upon return. Call with islocked TRUE if
686  * the supplied *eip was already locked.
687  */
688
689 int
690 bdb_cache_find_id(
691         Operation *op,
692         DB_TXN  *tid,
693         ID                              id,
694         EntryInfo       **eip,
695         int             islocked,
696         u_int32_t       locker,
697         DB_LOCK         *lock )
698 {
699         struct bdb_info *bdb = (struct bdb_info *) op->o_bd->be_private;
700         Entry   *ep = NULL;
701         int     rc = 0, load = 0;
702         EntryInfo ei = { 0 };
703
704         ei.bei_id = id;
705
706 #ifdef SLAP_ZONE_ALLOC
707         slap_zh_rlock(bdb->bi_cache.c_zctx);
708 #endif
709         /* If we weren't given any info, see if we have it already cached */
710         if ( !*eip ) {
711 again:  ldap_pvt_thread_rdwr_rlock( &bdb->bi_cache.c_rwlock );
712                 *eip = (EntryInfo *) avl_find( bdb->bi_cache.c_idtree,
713                         (caddr_t) &ei, bdb_id_cmp );
714                 if ( *eip ) {
715                         /* If the lock attempt fails, the info is in use */
716                         if ( ldap_pvt_thread_mutex_trylock(
717                                         &(*eip)->bei_kids_mutex )) {
718                                 ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
719                                 /* If this node is being deleted, treat
720                                  * as if the delete has already finished
721                                  */
722                                 if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
723                                         return DB_NOTFOUND;
724                                 }
725                                 /* otherwise, wait for the info to free up */
726                                 ldap_pvt_thread_yield();
727                                 goto again;
728                         }
729                         /* If this info isn't hooked up to its parent yet,
730                          * unlock and wait for it to be fully initialized
731                          */
732                         if ( (*eip)->bei_state & CACHE_ENTRY_NOT_LINKED ) {
733                                 bdb_cache_entryinfo_unlock( *eip );
734                                 ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
735                                 ldap_pvt_thread_yield();
736                                 goto again;
737                         }
738                         islocked = 1;
739                 }
740                 ldap_pvt_thread_rdwr_runlock( &bdb->bi_cache.c_rwlock );
741         }
742
743         /* See if the ID exists in the database; add it to the cache if so */
744         if ( !*eip ) {
745 #ifndef BDB_HIER
746                 rc = bdb_id2entry( op->o_bd, tid, locker, id, &ep );
747                 if ( rc == 0 ) {
748                         rc = bdb_cache_find_ndn( op, tid,
749                                 &ep->e_nname, eip );
750                         if ( *eip ) islocked = 1;
751                         if ( rc ) {
752 #ifdef SLAP_ZONE_ALLOC
753                                 bdb_entry_return( bdb, ep, (*eip)->bei_zseq );
754 #else
755                                 bdb_entry_return( ep );
756 #endif
757                                 ep = NULL;
758                         }
759                 }
760 #else
761                 rc = hdb_cache_find_parent(op, tid, locker, id, eip );
762                 if ( rc == 0 && *eip ) islocked = 1;
763 #endif
764         }
765
766         /* Ok, we found the info, do we have the entry? */
767         if ( *eip && rc == 0 ) {
768                 if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
769                         rc = DB_NOTFOUND;
770                 } else {
771                         /* Make sure only one thread tries to load the entry */
772 load1:
773 #ifdef SLAP_ZONE_ALLOC
774                         if ((*eip)->bei_e && !slap_zn_validate(
775                                         bdb->bi_cache.c_zctx, (*eip)->bei_e, (*eip)->bei_zseq)) {
776                                 (*eip)->bei_e = NULL;
777                                 (*eip)->bei_zseq = 0;
778                         }
779 #endif
780                         if ( !(*eip)->bei_e && !((*eip)->bei_state & CACHE_ENTRY_LOADING)) {
781                                 load = 1;
782                                 (*eip)->bei_state |= CACHE_ENTRY_LOADING;
783                         }
784                         if ( islocked ) {
785                                 bdb_cache_entryinfo_unlock( *eip );
786                                 islocked = 0;
787                         }
788                         rc = bdb_cache_entry_db_lock( bdb->bi_dbenv, locker, *eip, 0, 0, lock );
789                         if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
790                                 rc = DB_NOTFOUND;
791                                 bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
792                         } else if ( rc == 0 ) {
793                                 if ( load ) {
794                                         /* Give up original read lock, obtain write lock
795                                          */
796                                     if ( rc == 0 ) {
797                                                 rc = bdb_cache_entry_db_relock( bdb->bi_dbenv, locker,
798                                                         *eip, 1, 0, lock );
799                                         }
800                                         if ( rc == 0 && !ep) {
801                                                 rc = bdb_id2entry( op->o_bd, tid, locker, id, &ep );
802                                         }
803                                         if ( rc == 0 ) {
804                                                 ep->e_private = *eip;
805 #ifdef BDB_HIER
806                                                 bdb_fix_dn( ep, 0 );
807 #endif
808                                                 (*eip)->bei_e = ep;
809 #ifdef SLAP_ZONE_ALLOC
810                                                 (*eip)->bei_zseq = *((ber_len_t *)ep - 2);
811 #endif
812                                                 ep = NULL;
813                                         }
814                                         (*eip)->bei_state ^= CACHE_ENTRY_LOADING;
815                                         if ( rc == 0 ) {
816                                                 /* If we succeeded, downgrade back to a readlock. */
817                                                 rc = bdb_cache_entry_db_relock( bdb->bi_dbenv, locker,
818                                                         *eip, 0, 0, lock );
819                                         } else {
820                                                 /* Otherwise, release the lock. */
821                                                 bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
822                                         }
823                                 } else if ( !(*eip)->bei_e ) {
824                                         /* Some other thread is trying to load the entry,
825                                          * give it a chance to finish.
826                                          */
827                                         bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
828                                         ldap_pvt_thread_yield();
829                                         bdb_cache_entryinfo_lock( *eip );
830                                         islocked = 1;
831                                         goto load1;
832 #ifdef BDB_HIER
833                                 } else {
834                                         /* Check for subtree renames
835                                          */
836                                         rc = bdb_fix_dn( (*eip)->bei_e, 1 );
837                                         if ( rc ) {
838                                                 bdb_cache_entry_db_relock( bdb->bi_dbenv,
839                                                         locker, *eip, 1, 0, lock );
840                                                 /* check again in case other modifier did it already */
841                                                 if ( bdb_fix_dn( (*eip)->bei_e, 1 ) )
842                                                         rc = bdb_fix_dn( (*eip)->bei_e, 2 );
843                                                 bdb_cache_entry_db_relock( bdb->bi_dbenv,
844                                                         locker, *eip, 0, 0, lock );
845                                         }
846 #endif
847                                 }
848
849                         }
850                 }
851         }
852         if ( islocked ) {
853                 bdb_cache_entryinfo_unlock( *eip );
854         }
855         if ( ep ) {
856 #ifdef SLAP_ZONE_ALLOC
857                 bdb_entry_return( bdb, ep, (*eip)->bei_zseq );
858 #else
859                 bdb_entry_return( ep );
860 #endif
861         }
862         if ( rc == 0 ) {
863
864                 if ( load ) {
865                         ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
866                         bdb->bi_cache.c_cursize++;
867                         ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
868                 }
869
870                 ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_head_mutex );
871
872                 /* If the LRU list has only one entry and this is it, it
873                  * doesn't need to be added again.
874                  */
875                 if ( bdb->bi_cache.c_lruhead == bdb->bi_cache.c_lrutail &&
876                         bdb->bi_cache.c_lruhead == *eip ) {
877                         ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );
878                 } else {
879                         /* if entry is on LRU list, remove from old spot */
880                         if ( (*eip)->bei_lrunext || (*eip)->bei_lruprev ) {
881                                 ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_tail_mutex );
882                                 LRU_DELETE( &bdb->bi_cache, *eip );
883                                 ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_tail_mutex );
884                         }
885                         /* lru_head_mutex is unlocked for us */
886                         bdb_cache_lru_add( bdb, *eip );
887                 }
888         }
889
890 #ifdef SLAP_ZONE_ALLOC
891         if (rc == 0 && (*eip)->bei_e) {
892                 slap_zn_rlock(bdb->bi_cache.c_zctx, (*eip)->bei_e);
893         }
894         slap_zh_runlock(bdb->bi_cache.c_zctx);
895 #endif
896         return rc;
897 }
898
899 int
900 bdb_cache_children(
901         Operation *op,
902         DB_TXN *txn,
903         Entry *e )
904 {
905         int rc;
906
907         if ( BEI(e)->bei_kids ) {
908                 return 0;
909         }
910         if ( BEI(e)->bei_state & CACHE_ENTRY_NO_KIDS ) {
911                 return DB_NOTFOUND;
912         }
913         rc = bdb_dn2id_children( op, txn, e );
914         if ( rc == DB_NOTFOUND ) {
915                 BEI(e)->bei_state |= CACHE_ENTRY_NO_KIDS | CACHE_ENTRY_NO_GRANDKIDS;
916         }
917         return rc;
918 }
919
/* Update the cache after a successful database Add.
 * eip is the parent's EntryInfo and must be held locked by the
 * caller; it is unlocked here.  On success the new entry e is
 * owned by the cache (linked via e->e_private).
 */
int
bdb_cache_add(
	struct bdb_info *bdb,
	EntryInfo *eip,		/* parent EntryInfo, locked on entry */
	Entry *e,		/* the newly added entry */
	struct berval *nrdn,	/* normalized RDN of e */
	u_int32_t locker )	/* BDB locker ID for the entry write lock */
{
	EntryInfo *new, ei;
	DB_LOCK lock;
	int rc;
#ifdef BDB_HIER
	struct berval rdn = e->e_name;
#endif

	/* Build a stack-local EntryInfo just to identify the entry for
	 * locking; the real heap EntryInfo is created below by
	 * bdb_entryinfo_add_internal().
	 */
	ei.bei_id = e->e_id;
	ei.bei_parent = eip;
	ei.bei_nrdn = *nrdn;
	ei.bei_lockpad = 0;

	/* Lock this entry so that bdb_add can run to completion.
	 * It can only fail if BDB has run out of lock resources.
	 */
	rc = bdb_cache_entry_db_lock( bdb->bi_dbenv, locker, &ei, 1, 0, &lock );
	if ( rc ) {
		bdb_cache_entryinfo_unlock( eip );
		return rc;
	}

#ifdef BDB_HIER
	/* If e is not a suffix entry, keep only the RDN portion of its DN */
	if ( nrdn->bv_len != e->e_nname.bv_len ) {
		char *ptr = ber_bvchr( &rdn, ',' );
		assert( ptr != NULL );
		rdn.bv_len = ptr - rdn.bv_val;
	}
	ber_dupbv( &ei.bei_rdn, &rdn );
	if ( eip->bei_dkids ) eip->bei_dkids++;
#endif

	/* NOTE(review): this appears to return with c_rwlock write-locked;
	 * the wunlock below pairs with it -- confirm against
	 * bdb_entryinfo_add_internal().
	 */
	rc = bdb_entryinfo_add_internal( bdb, &ei, &new );
	/* bdb_csn_commit can cause this when adding the database root entry */
	if ( new->bei_e ) {
		/* Discard any entry already attached to the EntryInfo */
		new->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
		bdb_entry_return( bdb, new->bei_e, new->bei_zseq );
#else
		bdb_entry_return( new->bei_e );
#endif
	}
	new->bei_e = e;
	e->e_private = new;
	/* A freshly added entry cannot have children yet... */
	new->bei_state = CACHE_ENTRY_NO_KIDS | CACHE_ENTRY_NO_GRANDKIDS;
	/* ...but its parent now certainly has at least one child */
	eip->bei_state &= ~CACHE_ENTRY_NO_KIDS;
	if (eip->bei_parent) {
		eip->bei_parent->bei_state &= ~CACHE_ENTRY_NO_GRANDKIDS;
	}
	bdb_cache_entryinfo_unlock( eip );

	++bdb->bi_cache.c_cursize;
	ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );

	/* set lru mutex */
	ldap_pvt_thread_mutex_lock( &bdb->bi_cache.lru_head_mutex );

	/* lru_head_mutex is unlocked for us */
	bdb_cache_lru_add( bdb, new );

	return rc;
}
990
991 int
992 bdb_cache_modify(
993         Entry *e,
994         Attribute *newAttrs,
995         DB_ENV *env,
996         u_int32_t locker,
997         DB_LOCK *lock )
998 {
999         EntryInfo *ei = BEI(e);
1000         int rc;
1001         /* Get write lock on data */
1002         rc = bdb_cache_entry_db_relock( env, locker, ei, 1, 0, lock );
1003
1004         /* If we've done repeated mods on a cached entry, then e_attrs
1005          * is no longer contiguous with the entry, and must be freed.
1006          */
1007         if ( ! rc ) {
1008                 if ( (void *)e->e_attrs != (void *)(e+1) ) {
1009                         attrs_free( e->e_attrs ); 
1010                 }
1011                 e->e_attrs = newAttrs;
1012         }
1013         return rc;
1014 }
1015
/*
 * Change the rdn in the entryinfo. Also move to a new parent if needed.
 * Takes a write lock on the entry data (left held via *lock).
 */
int
bdb_cache_modrdn(
	struct bdb_info *bdb,
	Entry *e,		/* cached entry being renamed */
	struct berval *nrdn,	/* new normalized RDN */
	Entry *new,		/* entry carrying the new name/attrs */
	EntryInfo *ein,		/* new parent EntryInfo, or NULL to keep parent */
	u_int32_t locker,
	DB_LOCK *lock )
{
	EntryInfo *ei = BEI(e), *pei;
	int rc;
#ifdef BDB_HIER
	struct berval rdn;
#endif

	/* Get write lock on data */
	rc =  bdb_cache_entry_db_relock( bdb->bi_dbenv, locker, ei, 1, 0, lock );
	if ( rc ) return rc;

	/* If we've done repeated mods on a cached entry, then e_attrs
	 * is no longer contiguous with the entry, and must be freed.
	 */
	if ( (void *)e->e_attrs != (void *)(e+1) ) {
		attrs_free( e->e_attrs );
	}
	e->e_attrs = new->e_attrs;
	/* Free the old DN strings only if they live outside the entry's
	 * flattened buffer (e_bv); otherwise they go away with the entry.
	 */
	if( e->e_nname.bv_val < e->e_bv.bv_val ||
		e->e_nname.bv_val > e->e_bv.bv_val + e->e_bv.bv_len )
	{
		ch_free(e->e_name.bv_val);
		ch_free(e->e_nname.bv_val);
	}
	e->e_name = new->e_name;
	e->e_nname = new->e_nname;

	/* Lock the parent's kids AVL tree */
	pei = ei->bei_parent;
	bdb_cache_entryinfo_lock( pei );
	avl_delete( &pei->bei_kids, (caddr_t) ei, bdb_rdn_cmp );
	free( ei->bei_nrdn.bv_val );
	ber_dupbv( &ei->bei_nrdn, nrdn );
#ifdef BDB_HIER
	free( ei->bei_rdn.bv_val );

	/* Extract the (un-normalized) RDN from the new DN */
	rdn = e->e_name;
	if ( nrdn->bv_len != e->e_nname.bv_len ) {
		char *ptr = ber_bvchr(&rdn, ',');
		assert( ptr != NULL );
		rdn.bv_len = ptr - rdn.bv_val;
	}
	ber_dupbv( &ei->bei_rdn, &rdn );
#endif

	if (!ein) {
		/* Same parent: ein aliases pei, so the unlock of ein at
		 * the bottom releases the lock taken on pei above.
		 */
		ein = ei->bei_parent;
	} else {
		/* Moving to a new parent: swap locks old -> new */
		ei->bei_parent = ein;
		bdb_cache_entryinfo_unlock( pei );
		bdb_cache_entryinfo_lock( ein );
	}
#ifdef BDB_HIER
	{
		/* Record the generation number of this change */
		ldap_pvt_thread_mutex_lock( &bdb->bi_modrdns_mutex );
		bdb->bi_modrdns++;
		ei->bei_modrdns = bdb->bi_modrdns;
		ldap_pvt_thread_mutex_unlock( &bdb->bi_modrdns_mutex );
	}
#endif
	/* Re-insert under the (possibly new) parent with the new RDN */
	avl_insert( &ein->bei_kids, ei, bdb_rdn_cmp, avl_dup_error );
	bdb_cache_entryinfo_unlock( ein );
	return rc;
}
/*
 * cache_delete - delete the entry e from the cache. 
 *
 * returns:	0	e was deleted ok
 *		1	e was not in the cache
 *		-1	something bad happened
 *
 * On success the EntryInfo is left locked; the caller is expected
 * to finish with bdb_cache_delete_cleanup(), which unlocks it.
 */
int
bdb_cache_delete(
    Cache	*cache,
    Entry		*e,
    DB_ENV	*env,
    u_int32_t	locker,
    DB_LOCK	*lock )
{
	EntryInfo *ei = BEI(e);
	int	rc;

	assert( e->e_private != NULL );

	/* Set this early, warn off any queriers */
	ei->bei_state |= CACHE_ENTRY_DELETED;

	/* Lock the entry's info */
	bdb_cache_entryinfo_lock( ei );

	/* Get write lock on the data */
	rc = bdb_cache_entry_db_relock( env, locker, ei, 1, 0, lock );
	if ( rc ) {
		/* couldn't lock, undo the DELETED flag and give up */
		ei->bei_state ^= CACHE_ENTRY_DELETED;
		bdb_cache_entryinfo_unlock( ei );
		return rc;
	}

	Debug( LDAP_DEBUG_TRACE, "====> bdb_cache_delete( %ld )\n",
		e->e_id, 0, 0 );

	/* set lru mutex */
	ldap_pvt_thread_mutex_lock( &cache->lru_tail_mutex );

	/* set cache write lock */
	ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );

	/* Unlink from the DN tree, ID tree and LRU list while holding
	 * both the cache write lock and the LRU mutex.
	 */
	rc = bdb_cache_delete_internal( cache, e->e_private, 1 );

	/* free cache write lock */
	ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );

	/* free lru mutex */
	ldap_pvt_thread_mutex_unlock( &cache->lru_tail_mutex );

	/* Leave entry info locked */

	return( rc );
}
1149
/* Free the entry attached to ei and recycle ei onto the cache's
 * free list.  Called after bdb_cache_delete(), which leaves the
 * EntryInfo locked; the matching unlock is performed here at the end.
 */
void
bdb_cache_delete_cleanup(
	Cache *cache,
	EntryInfo *ei )
{
	if ( ei->bei_e ) {
		/* Sever the back-pointer, then return the entry */
		ei->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
		bdb_entry_return( ei->bei_bdb, ei->bei_e, ei->bei_zseq );
#else
		bdb_entry_return( ei->bei_e );
#endif
		ei->bei_e = NULL;
	}

	/* Release name strings and reset bookkeeping so this EntryInfo
	 * can be reused by bdb_cache_entryinfo_new().
	 */
	free( ei->bei_nrdn.bv_val );
	ei->bei_nrdn.bv_val = NULL;
#ifdef BDB_HIER
	free( ei->bei_rdn.bv_val );
	ei->bei_rdn.bv_val = NULL;
	ei->bei_modrdns = 0;
	ei->bei_ckids = 0;
	ei->bei_dkids = 0;
#endif
	ei->bei_parent = NULL;
	ei->bei_kids = NULL;
	ei->bei_lruprev = NULL;

	/* Push onto the free list under the cache write lock;
	 * bei_lrunext doubles as the free-list link.
	 */
	ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
	ei->bei_lrunext = cache->c_eifree;
	cache->c_eifree = ei;
	ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
	bdb_cache_entryinfo_unlock( ei );
}
1184
/* Unlink EntryInfo e from the cache's DN tree, ID tree and LRU list.
 * Caller must hold the cache write lock and the LRU tail mutex
 * (see bdb_cache_delete()); the parent's kids lock is taken here.
 * Returns 0 on success, -1 if e was missing from either tree.
 */
static int
bdb_cache_delete_internal(
    Cache	*cache,
    EntryInfo		*e,
    int		decr )	/* nonzero: also decrement parent's dkids count */
{
	int rc = 0;	/* return code */

	/* Lock the parent's kids tree */
	bdb_cache_entryinfo_lock( e->bei_parent );

#ifdef BDB_HIER
	e->bei_parent->bei_ckids--;
	if ( decr && e->bei_parent->bei_dkids ) e->bei_parent->bei_dkids--;
#endif
	/* dn tree */
	if ( avl_delete( &e->bei_parent->bei_kids, (caddr_t) e, bdb_rdn_cmp )
		== NULL )
	{
		rc = -1;
	}
	/* Leaf accounting (assumes e itself was a leaf -- TODO confirm):
	 * if the parent still has other kids the leaf count drops by one;
	 * otherwise the parent becomes a leaf itself, net change zero.
	 */
	if ( e->bei_parent->bei_kids )
		cache->c_leaves--;

	/* id tree */
	if ( avl_delete( &cache->c_idtree, (caddr_t) e, bdb_id_cmp ) == NULL ) {
		rc = -1;
	}

	if ( rc == 0 ){
		cache->c_eiused--;

		/* lru -- only counts toward c_cursize if an Entry is attached */
		LRU_DELETE( cache, e );
		if ( e->bei_e ) cache->c_cursize--;
	}

	bdb_cache_entryinfo_unlock( e->bei_parent );

	return( rc );
}
1226
1227 static void
1228 bdb_entryinfo_release( void *data )
1229 {
1230         EntryInfo *ei = (EntryInfo *)data;
1231         if ( ei->bei_kids ) {
1232                 avl_free( ei->bei_kids, NULL );
1233         }
1234         if ( ei->bei_e ) {
1235                 ei->bei_e->e_private = NULL;
1236 #ifdef SLAP_ZONE_ALLOC
1237                 bdb_entry_return( ei->bei_bdb, ei->bei_e, ei->bei_zseq );
1238 #else
1239                 bdb_entry_return( ei->bei_e );
1240 #endif
1241         }
1242         bdb_cache_entryinfo_destroy( ei );
1243 }
1244
/* Discard every cached entry and EntryInfo, leaving an empty cache. */
void
bdb_cache_release_all( Cache *cache )
{
	/* set cache write lock */
	ldap_pvt_thread_rdwr_wlock( &cache->c_rwlock );
	/* set lru mutex */
	ldap_pvt_thread_mutex_lock( &cache->lru_tail_mutex );

	Debug( LDAP_DEBUG_TRACE, "====> bdb_cache_release_all\n", 0, 0, 0 );

	/* Free the DN tree's top level, then every EntryInfo (and its
	 * attached entry) via the ID tree's release callback.
	 */
	avl_free( cache->c_dntree.bei_kids, NULL );
	avl_free( cache->c_idtree, bdb_entryinfo_release );
	/* Drain the free list; c_lruhead is borrowed as scratch to hold
	 * the next pointer while each node is destroyed.  It is reset
	 * to NULL just below.
	 */
	for (;cache->c_eifree;cache->c_eifree = cache->c_lruhead) {
		cache->c_lruhead = cache->c_eifree->bei_lrunext;
		bdb_cache_entryinfo_destroy(cache->c_eifree);
	}
	/* Reset all counters and list/tree heads to the empty state */
	cache->c_cursize = 0;
	cache->c_eiused = 0;
	cache->c_leaves = 0;
	cache->c_idtree = NULL;
	cache->c_lruhead = NULL;
	cache->c_lrutail = NULL;
	cache->c_dntree.bei_kids = NULL;

	/* free lru mutex */
	ldap_pvt_thread_mutex_unlock( &cache->lru_tail_mutex );
	/* free cache write lock */
	ldap_pvt_thread_rdwr_wunlock( &cache->c_rwlock );
}
1274
#ifdef LDAP_DEBUG
#ifdef SLAPD_UNUSED
/* Debugging aid: dump the LRU list to stderr in both directions,
 * so broken forward/backward links show up as asymmetric output.
 */
static void
bdb_lru_print( Cache *cache )
{
	EntryInfo	*e;

	fprintf( stderr, "LRU queue (head to tail):\n" );
	e = cache->c_lruhead;
	while ( e != NULL ) {
		fprintf( stderr, "\trdn \"%20s\" id %ld\n",
			e->bei_nrdn.bv_val, e->bei_id );
		e = e->bei_lrunext;
	}

	fprintf( stderr, "LRU queue (tail to head):\n" );
	e = cache->c_lrutail;
	while ( e != NULL ) {
		fprintf( stderr, "\trdn \"%20s\" id %ld\n",
			e->bei_nrdn.bv_val, e->bei_id );
		e = e->bei_lruprev;
	}
}
#endif
#endif
1295
1296 #ifdef BDB_REUSE_LOCKERS
1297 static void
1298 bdb_locker_id_free( void *key, void *data )
1299 {
1300         DB_ENV *env = key;
1301         u_int32_t lockid = (long)data;
1302         int rc;
1303
1304         rc = XLOCK_ID_FREE( env, lockid );
1305         if ( rc == EINVAL ) {
1306                 DB_LOCKREQ lr;
1307                 Debug( LDAP_DEBUG_ANY,
1308                         "bdb_locker_id_free: %lu err %s(%d)\n",
1309                         (unsigned long) lockid, db_strerror(rc), rc );
1310                 /* release all locks held by this locker. */
1311                 lr.op = DB_LOCK_PUT_ALL;
1312                 lr.obj = NULL;
1313                 env->lock_vec( env, lockid, 0, &lr, 1, NULL );
1314                 XLOCK_ID_FREE( env, lockid );
1315         }
1316 }
1317
/* Return (in *locker) the BDB locker ID associated with the current
 * thread, allocating one and caching it in the thread-pool context
 * on first use.  Returns 0 on success, -1 on bad arguments, or a
 * BDB/pool error code.
 */
int
bdb_locker_id( Operation *op, DB_ENV *env, u_int32_t *locker )
{
	int i, rc;
	u_int32_t lockid;
	void *data;
	void *ctx;

	if ( !env || !locker ) return -1;

	/* If no op was provided, try to find the ctx anyway... */
	if ( op ) {
		ctx = op->o_threadctx;
	} else {
		ctx = ldap_pvt_thread_pool_context();
	}

	/* Shouldn't happen unless we're single-threaded */
	if ( !ctx ) {
		*locker = 0;
		return 0;
	}

	/* Nonzero from getkey means no locker is cached for this thread */
	if ( ldap_pvt_thread_pool_getkey( ctx, env, &data, NULL ) ) {
		/* Allocate a fresh locker ID, retrying a few times with a
		 * yield in case lock resources are momentarily exhausted.
		 */
		for ( i=0, rc=1; rc != 0 && i<4; i++ ) {
			rc = XLOCK_ID( env, &lockid );
			if (rc) ldap_pvt_thread_yield();
		}
		if ( rc != 0) {
			return rc;
		}
		/* Stash the ID as a fake pointer; bdb_locker_id_free()
		 * releases it when the thread context is destroyed.
		 */
		data = (void *)((long)lockid);
		if ( ( rc = ldap_pvt_thread_pool_setkey( ctx, env,
			data, bdb_locker_id_free ) ) ) {
			/* Couldn't cache it: give the locker back */
			XLOCK_ID_FREE( env, lockid );
			Debug( LDAP_DEBUG_ANY, "bdb_locker_id: err %s(%d)\n",
				db_strerror(rc), rc, 0 );

			return rc;
		}
	} else {
		/* Cached hit: recover the ID from the stored pointer */
		lockid = (long)data;
	}
	*locker = lockid;
	return 0;
}
1364 #endif /* BDB_REUSE_LOCKERS */
1365
/* Evict the Entry attached to ei from the cache (LRU reclamation).
 * Only the Entry is freed; the EntryInfo stays in the trees.
 */
void
bdb_cache_delete_entry(
	struct bdb_info *bdb,
	EntryInfo *ei,
	u_int32_t locker,	/* NOTE(review): unused; the cache-wide
				 * c_locker is used below -- confirm intended */
	DB_LOCK *lock )
{
	ldap_pvt_thread_rdwr_wlock( &bdb->bi_cache.c_rwlock );
	/* Write-lock with the cache-wide locker; the extra flag presumably
	 * makes this nonblocking (skip eviction if the entry is busy) --
	 * TODO confirm against bdb_cache_entry_db_lock().
	 */
	if ( bdb_cache_entry_db_lock( bdb->bi_dbenv, bdb->bi_cache.c_locker, ei, 1, 1, lock ) == 0 )
	{
		/* Only evict entries actually linked into the LRU list */
		if ( ei->bei_e && !(ei->bei_state & CACHE_ENTRY_NOT_LINKED )) {
			LRU_DELETE( &bdb->bi_cache, ei );
			ei->bei_e->e_private = NULL;
#ifdef SLAP_ZONE_ALLOC
			bdb_entry_return( bdb, ei->bei_e, ei->bei_zseq );
#else
			bdb_entry_return( ei->bei_e );
#endif
			ei->bei_e = NULL;
			--bdb->bi_cache.c_cursize;
		}
		bdb_cache_entry_db_unlock( bdb->bi_dbenv, lock );
	}
	ldap_pvt_thread_rdwr_wunlock( &bdb->bi_cache.c_rwlock );
}