/* idl.c - ldap id list handling routines */
/* $OpenLDAP$ */
-/*
- * Copyright 1998-2002 The OpenLDAP Foundation, All Rights Reserved.
- * COPYING RESTRICTIONS APPLY, see COPYRIGHT file
+/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
+ *
+ * Copyright 2000-2005 The OpenLDAP Foundation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted only as authorized by the OpenLDAP
+ * Public License.
+ *
+ * A copy of this license is available in the file LICENSE in the
+ * top-level directory of the distribution or, alternatively, at
+ * <http://www.OpenLDAP.org/license.html>.
*/
#include "portable.h"
#define IDL_CMP(x,y) ( x < y ? -1 : ( x > y ? 1 : 0 ) )
-#ifdef SLAP_IDL_CACHE
#define IDL_LRU_DELETE( bdb, e ) do { \
if ( e->idl_lru_prev != NULL ) { \
e->idl_lru_prev->idl_lru_next = e->idl_lru_next; \
const bdb_idl_cache_entry_t *idl1 = v_idl1, *idl2 = v_idl2;
int rc;
- if ((rc = idl1->db - idl2->db )) return rc;
+ if ((rc = SLAP_PTRCMP( idl1->db, idl2->db ))) return rc;
if ((rc = idl1->kstr.bv_len - idl2->kstr.bv_len )) return rc;
return ( memcmp ( idl1->kstr.bv_val, idl2->kstr.bv_val , idl1->kstr.bv_len ) );
}
-#endif
#if IDL_DEBUG > 0
static void idl_check( ID *ids )
int bdb_idl_insert( ID *ids, ID id )
{
- unsigned x = bdb_idl_search( ids, id );
+ unsigned x;
#if IDL_DEBUG > 1
#ifdef NEW_LOGGING
idl_check( ids );
#endif
+ if (BDB_IDL_IS_RANGE( ids )) {
+ /* if already in range, treat as a dup */
+ if (id >= BDB_IDL_FIRST(ids) && id <= BDB_IDL_LAST(ids))
+ return -1;
+ if (id < BDB_IDL_FIRST(ids))
+ ids[1] = id;
+ else if (id > BDB_IDL_LAST(ids))
+ ids[2] = id;
+ return 0;
+ }
+
+ x = bdb_idl_search( ids, id );
assert( x > 0 );
if( x < 1 ) {
DBT *key,
char *buf )
{
- if ( key->size == sizeof( ID ) ) {
+ if ( key->size == 4 /* LUTIL_HASH_BYTES */ ) {
unsigned char *c = key->data;
sprintf( buf, "[%02x%02x%02x%02x]", c[0], c[1], c[2], c[3] );
return buf;
}
}
+/* Find a db/key pair in the IDL cache. If ids is non-NULL,
+ * copy the cached IDL into it, otherwise just return the status.
+ */
+int
+bdb_idl_cache_get(
+	struct bdb_info	*bdb,
+	DB			*db,
+	DBT			*key,
+	ID			*ids )
+{
+	bdb_idl_cache_entry_t idl_tmp;
+	bdb_idl_cache_entry_t *matched_idl_entry;
+	int rc = LDAP_NO_SUCH_OBJECT;
+
+	/* Build the lookup key: the raw DBT bytes plus the owning DB handle */
+	DBT2bv( key, &idl_tmp.kstr );
+	idl_tmp.db = db;
+	/* A read lock suffices for the AVL lookup; LRU links are protected
+	 * separately by bi_idl_tree_lrulock below. */
+	ldap_pvt_thread_rdwr_rlock( &bdb->bi_idl_tree_rwlock );
+	matched_idl_entry = avl_find( bdb->bi_idl_tree, &idl_tmp,
+				      bdb_idl_entry_cmp );
+	if ( matched_idl_entry != NULL ) {
+		if ( matched_idl_entry->idl && ids )
+			BDB_IDL_CPY( ids, matched_idl_entry->idl );
+		/* Touch the entry: move it to the head of the LRU list */
+		ldap_pvt_thread_mutex_lock( &bdb->bi_idl_tree_lrulock );
+		IDL_LRU_DELETE( bdb, matched_idl_entry );
+		IDL_LRU_ADD( bdb, matched_idl_entry );
+		ldap_pvt_thread_mutex_unlock( &bdb->bi_idl_tree_lrulock );
+		/* A NULL idl is a cached negative (DB_NOTFOUND) result */
+		if ( matched_idl_entry->idl )
+			rc = LDAP_SUCCESS;
+		else
+			rc = DB_NOTFOUND;
+	}
+	ldap_pvt_thread_rdwr_runlock( &bdb->bi_idl_tree_rwlock );
+
+	return rc;
+}
+
+/* Insert the IDL for db/key into the cache. If rc == DB_NOTFOUND a
+ * negative result is cached (entry with a NULL idl). If the key is
+ * already cached the new entry is discarded (avl_dup_error). When the
+ * cache exceeds bi_idl_cache_max_size, up to 10 of the least recently
+ * used entries are evicted.
+ */
+void
+bdb_idl_cache_put(
+	struct bdb_info	*bdb,
+	DB			*db,
+	DBT			*key,
+	ID			*ids,
+	int			rc )
+{
+	bdb_idl_cache_entry_t idl_tmp;
+	bdb_idl_cache_entry_t *ee;
+
+	DBT2bv( key, &idl_tmp.kstr );
+
+	ee = (bdb_idl_cache_entry_t *) ch_malloc(
+		sizeof( bdb_idl_cache_entry_t ) );
+	ee->db = db;
+	if ( rc == DB_NOTFOUND) {
+		/* negative cache entry: remember there is no IDL for this key */
+		ee->idl = NULL;
+	} else {
+		ee->idl = (ID*) ch_malloc( BDB_IDL_SIZEOF ( ids ) );
+		BDB_IDL_CPY( ee->idl, ids );
+	}
+	ee->idl_lru_prev = NULL;
+	ee->idl_lru_next = NULL;
+	ber_dupbv( &ee->kstr, &idl_tmp.kstr );
+	/* Write lock for tree modification; LRU mutex taken inside */
+	ldap_pvt_thread_rdwr_wlock( &bdb->bi_idl_tree_rwlock );
+	if ( avl_insert( &bdb->bi_idl_tree, (caddr_t) ee,
+		bdb_idl_entry_cmp, avl_dup_error ))
+	{
+		/* duplicate key already cached; drop the new entry.
+		 * ch_free(NULL) for a negative entry's idl is assumed safe. */
+		ch_free( ee->kstr.bv_val );
+		ch_free( ee->idl );
+		ch_free( ee );
+		ldap_pvt_thread_rdwr_wunlock( &bdb->bi_idl_tree_rwlock );
+		return;
+	}
+	ldap_pvt_thread_mutex_lock( &bdb->bi_idl_tree_lrulock );
+	IDL_LRU_ADD( bdb, ee );
+	if ( ++bdb->bi_idl_cache_size > bdb->bi_idl_cache_max_size ) {
+		/* Evict at most 10 tail (least recently used) entries */
+		int i = 0;
+		while ( bdb->bi_idl_lru_tail != NULL && i < 10 ) {
+			ee = bdb->bi_idl_lru_tail;
+			if ( avl_delete( &bdb->bi_idl_tree, (caddr_t) ee,
+				    bdb_idl_entry_cmp ) == NULL ) {
+#ifdef NEW_LOGGING
+				LDAP_LOG( INDEX, ERR, 
+					"bdb_idl_cache_put: AVL delete failed\n", 
+					0, 0, 0 );
+#else
+				Debug( LDAP_DEBUG_ANY, "=> bdb_idl_cache_put: "
+					"AVL delete failed\n",
+					0, 0, 0 );
+#endif
+			}
+			IDL_LRU_DELETE( bdb, ee );
+			i++;
+			--bdb->bi_idl_cache_size;
+			ch_free( ee->kstr.bv_val );
+			ch_free( ee->idl );
+			ch_free( ee );
+		}
+	}
+
+	ldap_pvt_thread_mutex_unlock( &bdb->bi_idl_tree_lrulock );
+	ldap_pvt_thread_rdwr_wunlock( &bdb->bi_idl_tree_rwlock );
+}
+
+/* Remove the cached IDL (positive or negative) for db/key, if any,
+ * unlinking it from both the AVL tree and the LRU list and freeing it.
+ */
+void
+bdb_idl_cache_del(
+	struct bdb_info	*bdb,
+	DB			*db,
+	DBT			*key )
+{
+	bdb_idl_cache_entry_t *matched_idl_entry, idl_tmp;
+	DBT2bv( key, &idl_tmp.kstr );
+	idl_tmp.db = db;
+	/* Write lock: we may modify the tree */
+	ldap_pvt_thread_rdwr_wlock( &bdb->bi_idl_tree_rwlock );
+	matched_idl_entry = avl_find( bdb->bi_idl_tree, &idl_tmp,
+				      bdb_idl_entry_cmp );
+	if ( matched_idl_entry != NULL ) {
+		if ( avl_delete( &bdb->bi_idl_tree, (caddr_t) matched_idl_entry,
+				    bdb_idl_entry_cmp ) == NULL ) {
+#ifdef NEW_LOGGING
+			LDAP_LOG( INDEX, ERR, 
+				"bdb_idl_cache_del: AVL delete failed\n", 
+				0, 0, 0 );
+#else
+			Debug( LDAP_DEBUG_ANY, "=> bdb_idl_cache_del: "
+				"AVL delete failed\n",
+				0, 0, 0 );
+#endif
+		}
+		--bdb->bi_idl_cache_size;
+		ldap_pvt_thread_mutex_lock( &bdb->bi_idl_tree_lrulock );
+		IDL_LRU_DELETE( bdb, matched_idl_entry );
+		ldap_pvt_thread_mutex_unlock( &bdb->bi_idl_tree_lrulock );
+		free( matched_idl_entry->kstr.bv_val );
+		/* idl is NULL for cached negative results */
+		if ( matched_idl_entry->idl )
+			free( matched_idl_entry->idl );
+		free( matched_idl_entry );
+	}
+	ldap_pvt_thread_rdwr_wunlock( &bdb->bi_idl_tree_rwlock );
+}
+
int
bdb_idl_fetch_key(
BackendDB *be,
size_t len;
int rc2;
int flags = bdb->bi_db_opflags | DB_MULTIPLE;
-#ifdef SLAP_IDL_CACHE
- bdb_idl_cache_entry_t idl_tmp;
-#endif
- /* buf must be large enough to grab the entire IDL in one
- * get(), otherwise BDB 4 will leak resources on subsequent
- * get's. We can safely call get() twice - once for the data,
- * and once to get the DB_NOTFOUND result meaning there's
- * no more data. See ITS#2040 for details. This bug is fixed
- * in BDB 4.1 so a smaller buffer will work if stack space is
- * too limited.
+ /* If using BerkeleyDB 4.0, the buf must be large enough to
+ * grab the entire IDL in one get(), otherwise BDB will leak
+ * resources on subsequent get's. We can safely call get()
+ * twice - once for the data, and once to get the DB_NOTFOUND
+ * result meaning there's no more data. See ITS#2040 for details.
+ * This bug is fixed in BDB 4.1 so a smaller buffer will work if
+ * stack space is too limited.
+ *
+ * configure now requires Berkeley DB 4.1.
*/
- ID buf[BDB_IDL_DB_SIZE*5];
+#if (DB_VERSION_MAJOR == 4) && (DB_VERSION_MINOR == 0)
+# define BDB_ENOUGH 5
+#else
+# define BDB_ENOUGH 1
+#endif
+ ID buf[BDB_IDL_DB_SIZE*BDB_ENOUGH];
+
+ char keybuf[16];
- {
- char buf[16];
#ifdef NEW_LOGGING
- LDAP_LOG( INDEX, ARGS,
- "bdb_idl_fetch_key: %s\n",
- bdb_show_key( key, buf ), 0, 0 );
+ LDAP_LOG( INDEX, ARGS,
+ "bdb_idl_fetch_key: %s\n",
+ bdb_show_key( key, keybuf ), 0, 0 );
#else
- Debug( LDAP_DEBUG_ARGS,
- "bdb_idl_fetch_key: %s\n",
- bdb_show_key( key, buf ), 0, 0 );
+ Debug( LDAP_DEBUG_ARGS,
+ "bdb_idl_fetch_key: %s\n",
+ bdb_show_key( key, keybuf ), 0, 0 );
#endif
- }
+
assert( ids != NULL );
-#ifdef SLAP_IDL_CACHE
- if ( bdb->bi_idl_cache_max_size ) {
- bdb_idl_cache_entry_t *matched_idl_entry;
- DBT2bv( key, &idl_tmp.kstr );
- idl_tmp.db = db;
- ldap_pvt_thread_mutex_lock( &bdb->bi_idl_tree_mutex );
- matched_idl_entry = avl_find( bdb->bi_idl_tree, &idl_tmp,
- bdb_idl_entry_cmp );
- if ( matched_idl_entry != NULL ) {
- BDB_IDL_CPY( ids, matched_idl_entry->idl );
- IDL_LRU_DELETE( bdb, matched_idl_entry );
- IDL_LRU_ADD( bdb, matched_idl_entry );
- ldap_pvt_thread_mutex_unlock( &bdb->bi_idl_tree_mutex );
- return LDAP_SUCCESS;
- }
- ldap_pvt_thread_mutex_unlock( &bdb->bi_idl_tree_mutex );
+ if ( bdb->bi_idl_cache_size ) {
+ rc = bdb_idl_cache_get( bdb, db, key, ids );
+ if ( rc != LDAP_NO_SUCH_OBJECT ) return rc;
}
-#endif
DBTzero( &data );
data.ulen = sizeof(buf);
data.flags = DB_DBT_USERMEM;
- if ( tid )
- flags |= DB_RMW;
+ if ( tid ) flags |= DB_RMW;
rc = db->cursor( db, tid, &cursor, bdb->bi_db_opflags );
if( rc != 0 ) {
#endif
return rc;
}
+
rc = cursor->c_get( cursor, key, &data, flags | DB_SET );
if (rc == 0) {
i = ids;
}
data.size = BDB_IDL_SIZEOF(ids);
}
+
rc2 = cursor->c_close( cursor );
if (rc2) {
#ifdef NEW_LOGGING
#endif
return rc2;
}
+
if( rc == DB_NOTFOUND ) {
return rc;
return -1;
}
-#ifdef SLAP_IDL_CACHE
if ( bdb->bi_idl_cache_max_size ) {
- bdb_idl_cache_entry_t *ee;
- ee = (bdb_idl_cache_entry_t *) malloc( sizeof( bdb_idl_cache_entry_t ) );
- ee->db = db;
- ee->idl = (ID*) malloc ( BDB_IDL_SIZEOF ( ids ) );
- ee->idl_lru_prev = NULL;
- ee->idl_lru_next = NULL;
- BDB_IDL_CPY( ee->idl, ids );
- ber_dupbv( &ee->kstr, &idl_tmp.kstr );
- ldap_pvt_thread_mutex_lock( &bdb->bi_idl_tree_mutex );
- if ( avl_insert( &bdb->bi_idl_tree, (caddr_t) ee,
- bdb_idl_entry_cmp, avl_dup_error )) {
- free( ee->kstr.bv_val );
- free( ee->idl );
- free( ee );
- } else {
- IDL_LRU_ADD( bdb, ee );
- if ( ++bdb->bi_idl_cache_size > bdb->bi_idl_cache_max_size ) {
- int i = 0;
- while ( bdb->bi_idl_lru_tail != NULL && i < 10 ) {
- ee = bdb->bi_idl_lru_tail;
- avl_delete( &bdb->bi_idl_tree, (caddr_t) ee,
- bdb_idl_entry_cmp );
- IDL_LRU_DELETE( bdb, ee );
- i++;
- --bdb->bi_idl_cache_size;
- free( ee->kstr.bv_val );
- free( ee->idl );
- free( ee );
- }
- }
- }
- ldap_pvt_thread_mutex_unlock( &bdb->bi_idl_tree_mutex );
+ bdb_idl_cache_put( bdb, db, key, ids, rc );
}
-#endif
return rc;
}
assert( id != NOID );
-#ifdef SLAP_IDL_CACHE
if ( bdb->bi_idl_cache_size ) {
- bdb_idl_cache_entry_t *matched_idl_entry, idl_tmp;
- DBT2bv( key, &idl_tmp.kstr );
- idl_tmp.db = db;
- ldap_pvt_thread_mutex_lock( &bdb->bi_idl_tree_mutex );
- matched_idl_entry = avl_find( bdb->bi_idl_tree, &idl_tmp,
- bdb_idl_entry_cmp );
- if ( matched_idl_entry != NULL ) {
- avl_delete( &bdb->bi_idl_tree, (caddr_t) matched_idl_entry,
- bdb_idl_entry_cmp );
- --bdb->bi_idl_cache_size;
- IDL_LRU_DELETE( bdb, matched_idl_entry );
- free( matched_idl_entry->kstr.bv_val );
- free( matched_idl_entry->idl );
- free( matched_idl_entry );
- }
- ldap_pvt_thread_mutex_unlock( &bdb->bi_idl_tree_mutex );
+ bdb_idl_cache_del( bdb, db, key );
}
-#endif
DBTzero( &data );
data.size = sizeof( ID );
}
assert( id != NOID );
-#ifdef SLAP_IDL_CACHE
if ( bdb->bi_idl_cache_max_size ) {
- bdb_idl_cache_entry_t *matched_idl_entry, idl_tmp;
- DBT2bv( key, &idl_tmp.kstr );
- idl_tmp.db = db;
- ldap_pvt_thread_mutex_lock( &bdb->bi_idl_tree_mutex );
- matched_idl_entry = avl_find( bdb->bi_idl_tree, &idl_tmp,
- bdb_idl_entry_cmp );
- if ( matched_idl_entry != NULL ) {
- avl_delete( &bdb->bi_idl_tree, (caddr_t) matched_idl_entry,
- bdb_idl_entry_cmp );
- --bdb->bi_idl_cache_size;
- IDL_LRU_DELETE( bdb, matched_idl_entry );
- free( matched_idl_entry->kstr.bv_val );
- free( matched_idl_entry->idl );
- free( matched_idl_entry );
- }
- ldap_pvt_thread_mutex_unlock( &bdb->bi_idl_tree_mutex );
+ bdb_idl_cache_del( bdb, db, key );
}
-#endif
DBTzero( &data );
data.data = &tmp;
/* It's a range, see if we need to rewrite
* the boundaries
*/
- hi = 0;
data.data = &lo;
rc = cursor->c_get( cursor, key, &data, DB_NEXT_DUP );
if ( rc != 0 ) {
err = "c_get lo";
goto fail;
}
- if ( id > lo ) {
- data.data = &hi;
- rc = cursor->c_get( cursor, key, &data, DB_NEXT_DUP );
- if ( rc != 0 ) {
- err = "c_get hi";
- goto fail;
- }
+ data.data = &hi;
+ rc = cursor->c_get( cursor, key, &data, DB_NEXT_DUP );
+ if ( rc != 0 ) {
+ err = "c_get hi";
+ goto fail;
}
if ( id == lo || id == hi ) {
if ( id == lo ) {
goto fail;
}
} else {
+ if ( id == lo ) {
+ /* reposition on lo slot */
+ data.data = &lo;
+ cursor->c_get( cursor, key, &data, DB_PREV );
+ lo = id;
+ }
rc = cursor->c_del( cursor, 0 );
if ( rc != 0 ) {
err = "c_del";