X-Git-Url: https://git.sur5r.net/?a=blobdiff_plain;f=libraries%2Flibldap_r%2Ftpool.c;h=e23f9ad09cf8b252c31e89308af11490431b2a41;hb=78a9d66e53923e399978e8af066e3f085ca39b53;hp=0910124b489a3f83177fb3dc3adc33db4e38cabb;hpb=fabbbafde9104c89e30b64258745a8ada4d0967b;p=openldap

diff --git a/libraries/libldap_r/tpool.c b/libraries/libldap_r/tpool.c
index 0910124b48..e23f9ad09c 100644
--- a/libraries/libldap_r/tpool.c
+++ b/libraries/libldap_r/tpool.c
@@ -1,12 +1,16 @@
 /* $OpenLDAP$ */
-/*
- * Copyright 1998-2003 The OpenLDAP Foundation, Redwood City, California, USA
+/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
+ *
+ * Copyright 1998-2006 The OpenLDAP Foundation.
  * All rights reserved.
  *
- * Redistribution and use in source and binary forms are permitted only
- * as authorized by the OpenLDAP Public License. A copy of this
- * license is available at http://www.OpenLDAP.org/license.html or
- * in file LICENSE in the top-level directory of the distribution.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted only as authorized by the OpenLDAP
+ * Public License.
+ *
+ * A copy of this license is available in file LICENSE in the
+ * top-level directory of the distribution or, alternatively, at
+ * <http://www.OpenLDAP.org/license.html>.
  */
 
 #include "portable.h"
@@ -20,16 +24,19 @@
 #include <ac/errno.h>
 
 #include "ldap-int.h"
-#include "ldap_pvt_thread.h"
+#include "ldap_pvt_thread.h"	/* Get the thread interface */
 #include "ldap_queue.h"
+#define LDAP_THREAD_POOL_IMPLEMENTATION
+#include "ldap_thr_debug.h"  /* May rename symbols defined below */
 
 #ifndef LDAP_THREAD_HAVE_TPOOL
 
-enum ldap_int_thread_pool_state {
+typedef enum ldap_int_thread_pool_state_e {
 	LDAP_INT_THREAD_POOL_RUNNING,
 	LDAP_INT_THREAD_POOL_FINISHING,
-	LDAP_INT_THREAD_POOL_STOPPING
-};
+	LDAP_INT_THREAD_POOL_STOPPING,
+	LDAP_INT_THREAD_POOL_PAUSING
+} ldap_int_thread_pool_state_t;
 
 typedef struct ldap_int_thread_key_s {
 	void *ltk_key;
@@ -41,6 +48,15 @@ typedef struct ldap_int_thread_key_s {
  * We don't expect to use many...
 */
 #define MAXKEYS	32
+#define LDAP_MAXTHR	1024	/* must be a power of 2 */
+
+static ldap_pvt_thread_t tid_zero;
+
+static struct {
+	ldap_pvt_thread_t id;
+	ldap_int_thread_key_t *ctx;
+} thread_keys[LDAP_MAXTHR];
+
 
 typedef struct ldap_int_thread_ctx_s {
 	union {
@@ -50,18 +66,17 @@ typedef struct ldap_int_thread_ctx_s {
 	} ltc_next;
 	ldap_pvt_thread_start_t *ltc_start_routine;
 	void *ltc_arg;
-	ldap_pvt_thread_t ltc_thread_id;
-	ldap_int_thread_key_t ltc_key[MAXKEYS];
 } ldap_int_thread_ctx_t;
 
 struct ldap_int_thread_pool_s {
 	LDAP_STAILQ_ENTRY(ldap_int_thread_pool_s) ltp_next;
 	ldap_pvt_thread_mutex_t ltp_mutex;
 	ldap_pvt_thread_cond_t ltp_cond;
+	ldap_pvt_thread_cond_t ltp_pcond;
 	LDAP_STAILQ_HEAD(tcq, ldap_int_thread_ctx_s) ltp_pending_list;
 	LDAP_SLIST_HEAD(tcl, ldap_int_thread_ctx_s) ltp_free_list;
 	LDAP_SLIST_HEAD(tclq, ldap_int_thread_ctx_s) ltp_active_list;
-	long ltp_state;
+	ldap_int_thread_pool_state_t ltp_state;
 	long ltp_max_count;
 	long ltp_max_pending;
 	long ltp_pending_count;
@@ -78,9 +93,15 @@ static ldap_pvt_thread_mutex_t ldap_pvt_thread_pool_mutex;
 static void *ldap_int_thread_pool_wrapper( void *pool );
 
+static ldap_pvt_thread_t ldap_int_main_tid;
+
+static ldap_int_thread_key_t ldap_int_main_thrctx[LDAP_MAXTHR];
+
 int
 ldap_int_thread_pool_startup ( void )
 {
+	ldap_int_main_tid = ldap_pvt_thread_self();
+
 	return ldap_pvt_thread_mutex_init(&ldap_pvt_thread_pool_mutex);
 }
 
@@ -91,12 +112,137 @@ ldap_int_thread_pool_shutdown ( void )
 	while ((pool = LDAP_STAILQ_FIRST(&ldap_int_thread_pool_list)) != NULL) {
 		LDAP_STAILQ_REMOVE_HEAD(&ldap_int_thread_pool_list, ltp_next);
-		ldap_pvt_thread_pool_destroy( &pool, 0);
+		(ldap_pvt_thread_pool_destroy)(&pool, 0); /* ignore thr_debug macro */
 	}
 	ldap_pvt_thread_mutex_destroy(&ldap_pvt_thread_pool_mutex);
 	return(0);
 }
 
+typedef struct ldap_lazy_sem_t {
+	ldap_pvt_thread_mutex_t ls_mutex;
+	ldap_pvt_thread_cond_t ls_cond;
+	int ls_sem_value;
+	/*
+	 * When more than ls_lazy_count resources become available,
+	 * the thread waiting for the resources is woken up, in order
+	 * to prevent frequent blocking/waking-up.
+	 */
+	unsigned int ls_lazy_count;
+	/*
+	 * Only one thread (the listener) will wait on this semaphore,
+	 * so a flag is used instead of a list.
+	 */
+	int ls_wait;
+} ldap_lazy_sem_t;
+
+ldap_lazy_sem_t* thread_pool_sem = NULL;
+
+int
+ldap_lazy_sem_init( unsigned int value, unsigned int lazyness )
+{
+	thread_pool_sem = (ldap_lazy_sem_t*) LDAP_CALLOC(1,
+		sizeof( ldap_lazy_sem_t ));
+
+	if( thread_pool_sem == NULL ) return -1;
+
+	ldap_pvt_thread_mutex_init( &thread_pool_sem->ls_mutex );
+	ldap_pvt_thread_cond_init( &thread_pool_sem->ls_cond );
+	thread_pool_sem->ls_sem_value = value;
+	thread_pool_sem->ls_lazy_count = lazyness;
+	thread_pool_sem->ls_wait = 0;
+
+	return 0;
+}
+
+/* FIXME: move to some appropriate header */
+int ldap_lazy_sem_dec( ldap_lazy_sem_t* ls );
+int ldap_lazy_sem_wait ( ldap_lazy_sem_t* ls );
+
+/*
+ * ldap_lazy_sem_wait is used if a caller is blockable (the listener).
+ * Otherwise use ldap_lazy_sem_dec (worker).
+ */
+int
+ldap_lazy_sem_op_submit( ldap_lazy_sem_t* ls )
+{
+	if ( ls == NULL ) return -1;
+
+	/* only a worker thread has its thread ctx */
+	if ( ldap_pvt_thread_pool_context() ) {
+		/* worker thread */
+		return ldap_lazy_sem_dec( ls );
+	} else {
+		/* listener */
+		return ldap_lazy_sem_wait( ls );
+	}
+}
+
+/*
+ * Test if the given semaphore's count is zero.
+ * If it is zero, the caller is blocked;
+ * if not, the count is decremented.
+ */
+int
+ldap_lazy_sem_wait ( ldap_lazy_sem_t* ls )
+{
+	ldap_pvt_thread_mutex_lock( &ls->ls_mutex );
+
+lazy_sem_retry:
+	if ( ls->ls_sem_value <= 0 ) {
+		/* no more available resources */
+		ls->ls_wait = 1;
+		ldap_pvt_thread_cond_wait( &ls->ls_cond, &ls->ls_mutex );
+		goto lazy_sem_retry;
+	} else {
+		/* available resources */
+		ls->ls_sem_value--;
+	}
+
+	ldap_pvt_thread_mutex_unlock( &ls->ls_mutex );
+
+	return 0;
+}
+
+/*
+ * Decrement the count without blocking,
+ * even when the count becomes less than or equal to 0.
+ */
+int
+ldap_lazy_sem_dec( ldap_lazy_sem_t* ls )
+{
+	ldap_pvt_thread_mutex_lock( &ls->ls_mutex );
+
+	ls->ls_sem_value--;
+
+	ldap_pvt_thread_mutex_unlock( &ls->ls_mutex );
+
+	return 0;
+}
+
+/*
+ * Increment the count by one and test if it is greater than or
+ * equal to the laziness threshold. If it is, wake up a blocked thread.
+ */
+int
+ldap_lazy_sem_post( ldap_lazy_sem_t* ls )
+{
+	if( ls == NULL ) return (-1);
+
+	ldap_pvt_thread_mutex_lock( &ls->ls_mutex );
+
+	ls->ls_sem_value++;
+	if ( ls->ls_wait ) {
+		if ( ls->ls_sem_value >= ls->ls_lazy_count ) {
+			ls->ls_wait = 0;
+			ldap_pvt_thread_cond_signal( &ls->ls_cond );
+		}
+	}
+
+	ldap_pvt_thread_mutex_unlock( &ls->ls_mutex );
+
+	return 0;
+}
+
 int
 ldap_pvt_thread_pool_init (
 	ldap_pvt_thread_pool_t *tpool,
@@ -116,6 +262,9 @@ ldap_pvt_thread_pool_init (
 	if (rc != 0)
 		return(rc);
 	rc = ldap_pvt_thread_cond_init(&pool->ltp_cond);
+	if (rc != 0)
+		return(rc);
+	rc = ldap_pvt_thread_cond_init(&pool->ltp_pcond);
 	if (rc != 0)
 		return(rc);
 	pool->ltp_state = LDAP_INT_THREAD_POOL_RUNNING;
@@ -154,6 +303,7 @@ ldap_pvt_thread_pool_init (
 	LDAP_STAILQ_REMOVE(ldap_int_thread_pool_list, pool,
 		ldap_int_thread_pool_s, ltp_next);
 	ldap_pvt_thread_mutex_unlock(&ldap_pvt_thread_pool_mutex);
+	ldap_pvt_thread_cond_destroy(&pool->ltp_pcond);
 	ldap_pvt_thread_cond_destroy(&pool->ltp_cond);
 	ldap_pvt_thread_mutex_destroy(&pool->ltp_mutex);
 	LDAP_FREE(pool);
@@ -165,6 +315,10 @@ ldap_pvt_thread_pool_init (
 	return(0);
 }
 
+#define TID_HASH(tid, hash) do { unsigned i; \
+	unsigned char *ptr = (unsigned char *)&(tid); \
+	for (i=0, hash=0; i<sizeof(tid); i++) hash += ptr[i]; } while(0)
+
@@ ... @@ ldap_pvt_thread_pool_submit (
 	ldap_pvt_thread_mutex_lock(&pool->ltp_mutex);
 
-	if (pool->ltp_state != LDAP_INT_THREAD_POOL_RUNNING)
+	if ((pool->ltp_state != LDAP_INT_THREAD_POOL_RUNNING &&
+		pool->ltp_state != LDAP_INT_THREAD_POOL_PAUSING)
 		|| (pool->ltp_max_pending > 0
 			&& pool->ltp_pending_count >= pool->ltp_max_pending))
 	{
@@ -195,16 +350,12 @@ ldap_pvt_thread_pool_submit (
 	if (ctx) {
 		LDAP_SLIST_REMOVE_HEAD(&pool->ltp_free_list, ltc_next.l);
 	} else {
-		int i;
 		ctx = (ldap_int_thread_ctx_t *) LDAP_MALLOC(
 			sizeof(ldap_int_thread_ctx_t));
 		if (ctx == NULL) {
 			ldap_pvt_thread_mutex_unlock(&pool->ltp_mutex);
 			return(-1);
 		}
-		for ( i=0; i<MAXKEYS; i++ ) {
-			ctx->ltc_key[i].ltk_key = NULL;
-		}
 	}
 
 	ctx->ltc_start_routine = start_routine;
@@ -212,14 +363,14 @@
 	pool->ltp_pending_count++;
 	LDAP_STAILQ_INSERT_TAIL(&pool->ltp_pending_list, ctx, ltc_next.q);
+	if (pool->ltp_state == LDAP_INT_THREAD_POOL_PAUSING) {
+		ldap_pvt_thread_mutex_unlock(&pool->ltp_mutex);
+		return(0);
+	}
 	ldap_pvt_thread_cond_signal(&pool->ltp_cond);
-	if ((pool->ltp_open_count <= 0
-#if 0
-		|| pool->ltp_pending_count > 1
-#endif
-		|| pool->ltp_open_count == pool->ltp_active_count)
-		&& (pool->ltp_max_count <= 0
-			|| pool->ltp_open_count < pool->ltp_max_count))
+	if (pool->ltp_open_count < pool->ltp_active_count + pool->ltp_pending_count
+		&& (pool->ltp_open_count < pool->ltp_max_count ||
+			pool->ltp_max_count <= 0 ))
 	{
 		pool->ltp_open_count++;
 		pool->ltp_starting++;
@@ -227,12 +378,30 @@
 	}
 	ldap_pvt_thread_mutex_unlock(&pool->ltp_mutex);
 
+#ifdef LDAP_PVT_THREAD_POOL_SEM_LOAD_CONTROL
+	ldap_lazy_sem_op_submit( thread_pool_sem );
+#endif
+
 	if (need_thread) {
-		int rc = ldap_pvt_thread_create( &thr, 1,
-			ldap_int_thread_pool_wrapper, pool );
+		int rc;
+		ldap_pvt_thread_mutex_lock(&pool->ltp_mutex);
+
+		rc = ldap_pvt_thread_create( &thr, 1,
+			ldap_int_thread_pool_wrapper, pool );
 		if (rc == 0) {
+			int hash;
 			pool->ltp_starting--;
+
+			/* assign this thread ID to a key slot; start
+			 * at the thread ID itself (mod LDAP_MAXTHR) and
+			 * look for an empty slot.
+			 */
+			TID_HASH(thr, hash);
+			for (rc = hash & (LDAP_MAXTHR-1);
+				!ldap_pvt_thread_equal(thread_keys[rc].id, tid_zero);
+				rc = (rc+1) & (LDAP_MAXTHR-1));
+			thread_keys[rc].id = thr;
 		} else {
 			/* couldn't create thread.  back out of
 			 * ltp_open_count and check for even worse things.
@@ -309,17 +478,6 @@ ldap_pvt_thread_pool_backload ( ldap_pvt_thread_pool_t *tpool )
 	return(count);
 }
 
-static void ldap_int_thread_pool_keyfree( ldap_int_thread_ctx_t *ctx )
-{
-	int i;
-	for ( i=0; i<MAXKEYS && ctx->ltc_key[i].ltk_key; i++ ) {
-		if (ctx->ltc_key[i].ltk_free)
-			ctx->ltc_key[i].ltk_free(
-				ctx->ltc_key[i].ltk_key,
-				ctx->ltc_key[i].ltk_data );
-	}
-}
-
 int
 ldap_pvt_thread_pool_destroy ( ldap_pvt_thread_pool_t *tpool, int run_pending )
 {
@@ -362,20 +520,24 @@ ldap_pvt_thread_pool_destroy ( ldap_pvt_thread_pool_t *tpool, int run_pending )
 	while ((ctx = LDAP_STAILQ_FIRST(&pool->ltp_pending_list)) != NULL) {
 		LDAP_STAILQ_REMOVE_HEAD(&pool->ltp_pending_list, ltc_next.q);
-		ldap_int_thread_pool_keyfree( ctx );
 		LDAP_FREE(ctx);
 	}
 
 	while ((ctx = LDAP_SLIST_FIRST(&pool->ltp_free_list)) != NULL) {
 		LDAP_SLIST_REMOVE_HEAD(&pool->ltp_free_list, ltc_next.l);
-		ldap_int_thread_pool_keyfree( ctx );
 		LDAP_FREE(ctx);
 	}
 
+	ldap_pvt_thread_cond_destroy(&pool->ltp_pcond);
 	ldap_pvt_thread_cond_destroy(&pool->ltp_cond);
 	ldap_pvt_thread_mutex_destroy(&pool->ltp_mutex);
 	LDAP_FREE(pool);
+#ifdef LDAP_PVT_THREAD_POOL_SEM_LOAD_CONTROL
+	if ( thread_pool_sem ) {
+		LDAP_FREE( thread_pool_sem );
+	}
+#endif
 	return(0);
 }
 
@@ -385,12 +547,29 @@ ldap_int_thread_pool_wrapper ( 
 {
 	struct ldap_int_thread_pool_s *pool = xpool;
 	ldap_int_thread_ctx_t *ctx;
+	ldap_int_thread_key_t ltc_key[MAXKEYS];
+	ldap_pvt_thread_t tid;
+	int i, keyslot, hash;
 
 	if (pool == NULL)
 		return NULL;
 
+	for ( i=0; i<MAXKEYS; i++ ) {
+		ltc_key[i].ltk_key = NULL;
+	}
+
+	tid = ldap_pvt_thread_self();
+
 	ldap_pvt_thread_mutex_lock(&pool->ltp_mutex);
+	/* store pointer to our keys */
+	TID_HASH(tid, hash);
+	for (i = hash & (LDAP_MAXTHR-1);
+		!ldap_pvt_thread_equal(thread_keys[i].id, tid);
+		i = (i+1) & (LDAP_MAXTHR-1));
+	thread_keys[i].ctx = ltc_key;
+	keyslot = i;
+
 	while (pool->ltp_state != LDAP_INT_THREAD_POOL_STOPPING) {
 		ctx = LDAP_STAILQ_FIRST(&pool->ltp_pending_list);
 		if (ctx) {
@@ -416,61 +595,132 @@ ldap_int_thread_pool_wrapper ( 
 			 * should be like this:
 			 *   if (pool->ltp_open_count > 1 && pool->ltp_starting == 0)
 			 *       check timer, leave thread (break;)
+			 *
+			 * Just use pthread_cond_timedwait if we want to
+			 * check idle time.
 			 */
-			if (pool->ltp_state == LDAP_INT_THREAD_POOL_RUNNING)
+			if (pool->ltp_state == LDAP_INT_THREAD_POOL_RUNNING
+				|| pool->ltp_state == LDAP_INT_THREAD_POOL_PAUSING)
+			{
 				ldap_pvt_thread_cond_wait(&pool->ltp_cond, &pool->ltp_mutex);
+			}
 			continue;
 		}
 
 		pool->ltp_pending_count--;
+		LDAP_SLIST_INSERT_HEAD(&pool->ltp_active_list, ctx, ltc_next.al);
 		pool->ltp_active_count++;
 		ldap_pvt_thread_mutex_unlock(&pool->ltp_mutex);
 
-		ctx->ltc_thread_id = ldap_pvt_thread_self();
-		ctx->ltc_start_routine(ctx, ctx->ltc_arg);
+		ctx->ltc_start_routine(ltc_key, ctx->ltc_arg);
+#ifdef LDAP_PVT_THREAD_POOL_SEM_LOAD_CONTROL
+		ldap_lazy_sem_post( thread_pool_sem );
+#endif
 
 		ldap_pvt_thread_mutex_lock(&pool->ltp_mutex);
 		LDAP_SLIST_REMOVE(&pool->ltp_active_list, ctx,
 			ldap_int_thread_ctx_s, ltc_next.al);
 		LDAP_SLIST_INSERT_HEAD(&pool->ltp_free_list, ctx, ltc_next.l);
 		pool->ltp_active_count--;
-		ldap_pvt_thread_mutex_unlock(&pool->ltp_mutex);
-
-		ldap_pvt_thread_yield();
-
-		/* if we use an idle timer, here's
-		 * a good place to update it
-		 */
+		if (pool->ltp_state == LDAP_INT_THREAD_POOL_PAUSING) {
+			if (pool->ltp_active_count < 2) {
+				ldap_pvt_thread_cond_signal(&pool->ltp_pcond);
+			}
+			ldap_pvt_thread_cond_wait(&pool->ltp_cond, &pool->ltp_mutex);
+		}
+	}
 
-		ldap_pvt_thread_mutex_lock(&pool->ltp_mutex);
-	}
 
+	for ( i=0; i<MAXKEYS && ltc_key[i].ltk_key; i++ ) {
+		if ( ltc_key[i].ltk_free )
+			ltc_key[i].ltk_free( ltc_key[i].ltk_key,
+				ltc_key[i].ltk_data );
+	}
+	thread_keys[keyslot].id = tid_zero;
+	thread_keys[keyslot].ctx = NULL;
+
 	pool->ltp_open_count--;
 	ldap_pvt_thread_mutex_unlock(&pool->ltp_mutex);
-
 	ldap_pvt_thread_exit(NULL);
 	return(NULL);
 }
 
+int
+ldap_pvt_thread_pool_pause (
+	ldap_pvt_thread_pool_t *tpool )
+{
+	struct ldap_int_thread_pool_s *pool;
+
+	if (tpool == NULL)
+		return(-1);
+
+	pool = *tpool;
+
+	if (pool == NULL)
+		return(0);
+
+	ldap_pvt_thread_mutex_lock(&pool->ltp_mutex);
+
+	/* If someone else has already requested a pause, we have to wait */
+	while (pool->ltp_state == LDAP_INT_THREAD_POOL_PAUSING) {
+		pool->ltp_pending_count++;
+		pool->ltp_active_count--;
+		ldap_pvt_thread_cond_wait(&pool->ltp_cond, &pool->ltp_mutex);
+		pool->ltp_pending_count--;
+		pool->ltp_active_count++;
+	}
+	/* Wait for everyone else to finish */
+	pool->ltp_state = LDAP_INT_THREAD_POOL_PAUSING;
+	while (pool->ltp_active_count > 1) {
+		ldap_pvt_thread_cond_wait(&pool->ltp_pcond, &pool->ltp_mutex);
+	}
+	ldap_pvt_thread_mutex_unlock(&pool->ltp_mutex);
+	return(0);
+}
+
+int
+ldap_pvt_thread_pool_resume (
+	ldap_pvt_thread_pool_t *tpool )
+{
+	struct ldap_int_thread_pool_s *pool;
+
+	if (tpool == NULL)
+		return(-1);
+
+	pool = *tpool;
+
+	if (pool == NULL)
+		return(0);
+
+	ldap_pvt_thread_mutex_lock(&pool->ltp_mutex);
+
+	pool->ltp_state = LDAP_INT_THREAD_POOL_RUNNING;
+	ldap_pvt_thread_cond_broadcast(&pool->ltp_cond);
+	ldap_pvt_thread_mutex_unlock(&pool->ltp_mutex);
+	return(0);
+}
+
 int ldap_pvt_thread_pool_getkey(
 	void *xctx,
 	void *key,
 	void **data,
 	ldap_pvt_thread_pool_keyfree_t **kfree )
 {
-	ldap_int_thread_ctx_t *ctx = xctx;
+	ldap_int_thread_key_t *ctx = xctx;
 	int i;
 
 	if ( !ctx || !data ) return EINVAL;
 
-	for ( i=0; i<MAXKEYS && ctx->ltc_key[i].ltk_key; i++ ) {
-		if ( ctx->ltc_key[i].ltk_key == key ) {
-			*data = ctx->ltc_key[i].ltk_data;
-			if ( kfree ) *kfree = ctx->ltc_key[i].ltk_free;
+	for ( i=0; i<MAXKEYS && ctx[i].ltk_key; i++ ) {
+		if ( ctx[i].ltk_key == key ) {
+			*data = ctx[i].ltk_data;
+			if ( kfree ) *kfree = ctx[i].ltk_free;
 			return 0;
 		}
 	}
 	return ENOENT;
 }
@@ ... @@ ldap_pvt_thread_pool_setkey(
 	for ( i=0; i<MAXKEYS; i++ ) {
-		if ( !ctx->ltc_key[i].ltk_key || ctx->ltc_key[i].ltk_key == key ) {
-			ctx->ltc_key[i].ltk_key = key;
-			ctx->ltc_key[i].ltk_data = data;
-			ctx->ltc_key[i].ltk_free = kfree;
+		if ( !ctx[i].ltk_key || ctx[i].ltk_key == key ) {
+			ctx[i].ltk_key = key;
+			ctx[i].ltk_data = data;
+			ctx[i].ltk_free = kfree;
 			return 0;
 		}
 	}
 	return ENOMEM;
 }
 
+/* Free all elements with this key, no matter which thread they're in.
+ * May only be called while the pool is paused.
+ */
+void ldap_pvt_thread_pool_purgekey( void *key )
+{
+	int i, j;
+	ldap_int_thread_key_t *ctx;
+
+	for ( i=0; i<LDAP_MAXTHR; i++ ) {
+		if ( thread_keys[i].ctx ) {
+			ctx = thread_keys[i].ctx;
+			for ( j=0; j<MAXKEYS; j++ ) {
+				if ( ctx[j].ltk_key == key ) {
+					if (ctx[j].ltk_free)
+						ctx[j].ltk_free( ctx[j].ltk_key,
+							ctx[j].ltk_data );
+					ctx[j].ltk_key = NULL;
+					break;
+				}
+			}
+		}
+	}
+}
+
@@ ... @@ ldap_pvt_thread_pool_context(
-	ldap_pvt_thread_mutex_lock(&pool->ltp_mutex);
-	LDAP_SLIST_FOREACH(ptr, &pool->ltp_active_list, ltc_next.al)
-		if (ptr != NULL && ptr->ltc_thread_id == tid) break;
-	if (ptr != NULL && ptr->ltc_thread_id != tid) {
-		ptr = NULL;
-	}
-	ldap_pvt_thread_mutex_unlock(&pool->ltp_mutex);
+	TID_HASH( tid, hash );
+	for (i = hash & (LDAP_MAXTHR-1);
+		!ldap_pvt_thread_equal(thread_keys[i].id, tid_zero) &&
+		!ldap_pvt_thread_equal(thread_keys[i].id, tid);
+		i = (i+1) & (LDAP_MAXTHR-1));
 
-	return ptr;
+	return thread_keys[i].ctx;
 }
 
-#endif /* LDAP_HAVE_THREAD_POOL */
+void ldap_pvt_thread_pool_context_reset( void *vctx )
+{
+	ldap_int_thread_key_t *ctx = vctx;
+	int i;
+
+	for ( i=0; i<MAXKEYS; i++ ) {
+		if ( !ctx[i].ltk_key ) break;
+		if ( ctx[i].ltk_free )
+			ctx[i].ltk_free( ctx[i].ltk_key, ctx[i].ltk_data );
+		ctx[i].ltk_key = NULL;
+	}
+}
+
+#endif /* LDAP_THREAD_HAVE_TPOOL */
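
Note on the ldap_lazy_sem_* additions above: their point is to batch wake-ups, so the single blocked listener is only signalled once ls_sem_value has climbed back up to ls_lazy_count, rather than on every individual post. The sketch below restates that idea as a self-contained example written against plain POSIX threads rather than the ldap_pvt_thread_* wrappers; the lazy_sem_* names are invented here for illustration and are not part of the patch.

/* Standalone sketch of the "lazy" semaphore idea from the patch above.
 * The waiter is only signalled once the count has climbed back to the
 * laziness threshold, so a single blocked listener is not woken for
 * every individual post. */
#include <pthread.h>

typedef struct lazy_sem {
	pthread_mutex_t mutex;
	pthread_cond_t  cond;
	int             value;      /* current count; dec() may drive it negative */
	unsigned int    lazy_count; /* wake threshold for the blocked waiter */
	int             waiting;    /* nonzero while the single waiter is blocked */
} lazy_sem;

static void lazy_sem_init(lazy_sem *ls, int value, unsigned int laziness)
{
	pthread_mutex_init(&ls->mutex, NULL);
	pthread_cond_init(&ls->cond, NULL);
	ls->value = value;
	ls->lazy_count = laziness;
	ls->waiting = 0;
}

/* Blocking acquire, used by the one "listener" thread. */
static void lazy_sem_wait(lazy_sem *ls)
{
	pthread_mutex_lock(&ls->mutex);
	while (ls->value <= 0) {
		ls->waiting = 1;
		pthread_cond_wait(&ls->cond, &ls->mutex);
	}
	ls->value--;
	pthread_mutex_unlock(&ls->mutex);
}

/* Non-blocking decrement, used by worker threads. */
static void lazy_sem_dec(lazy_sem *ls)
{
	pthread_mutex_lock(&ls->mutex);
	ls->value--;
	pthread_mutex_unlock(&ls->mutex);
}

/* Release: only signal the waiter once the count reaches the threshold. */
static void lazy_sem_post(lazy_sem *ls)
{
	pthread_mutex_lock(&ls->mutex);
	ls->value++;
	if (ls->waiting && ls->value >= (int)ls->lazy_count) {
		ls->waiting = 0;
		pthread_cond_signal(&ls->cond);
	}
	pthread_mutex_unlock(&ls->mutex);
}

In the patch, the same split is made by ldap_lazy_sem_op_submit(): the listener (no thread context) takes the blocking wait, while workers take the non-blocking decrement and post on completion.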
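
The reworked test in ldap_pvt_thread_pool_submit() replaces the old open_count/active_count comparison: a new thread is started only when the threads already open cannot cover the active plus pending work, and only while the configured maximum (if any) has not been reached. A small helper, pulled out purely for illustration (it is not a function in the patch), with worked numbers in the comment:

/* Spawn test from ldap_pvt_thread_pool_submit(), restated for clarity. */
static int need_new_thread(long open_count, long active_count,
	long pending_count, long max_count)
{
	return open_count < active_count + pending_count
		&& (open_count < max_count || max_count <= 0);
}

/* Example: open=4, active=4, pending=1, max=8  ->  4 < 5 and 4 < 8  -> spawn.
 * Example: open=4, active=2, pending=1, max=8  ->  4 < 3 fails      -> reuse
 * an idle thread instead of creating another one. */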
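
thread_keys[] is a fixed-size, open-addressed table mapping a thread ID to that thread's key array: TID_HASH sums the bytes of the opaque thread ID, and collisions are resolved by linear probing, which relies on LDAP_MAXTHR being a power of two so that "& (LDAP_MAXTHR-1)" wraps the index. Below is a standalone sketch of the same lookup against plain pthreads; tid_hash, slot_of and MAXTHR are illustrative names, and the zeroed-pthread_t "empty" convention simply mirrors the patch's static tid_zero.

/* Sketch of the thread_keys[] lookup scheme: byte-wise hash of the opaque
 * thread ID, then linear probing in a power-of-two table. */
#include <pthread.h>
#include <string.h>

#define MAXTHR 1024                 /* must stay a power of 2 */

static struct {
	pthread_t id;                   /* zero-initialized slot == empty */
	void     *ctx;                  /* per-thread key array */
} slots[MAXTHR];

static unsigned tid_hash(pthread_t tid)
{
	unsigned char buf[sizeof(tid)];
	unsigned h = 0, i;

	memcpy(buf, &tid, sizeof(tid)); /* same idea as TID_HASH(): sum the bytes */
	for (i = 0; i < sizeof(tid); i++)
		h += buf[i];
	return h;
}

/* Return the slot holding tid, or the first empty slot if it is absent. */
static unsigned slot_of(pthread_t tid)
{
	pthread_t zero;
	unsigned i;

	memset(&zero, 0, sizeof(zero)); /* mirrors the patch's tid_zero */
	for (i = tid_hash(tid) & (MAXTHR - 1);
	     !pthread_equal(slots[i].id, zero) && !pthread_equal(slots[i].id, tid);
	     i = (i + 1) & (MAXTHR - 1))
		;
	return i;
}

A worker claims a slot at start-up (submit() pre-registers the new thread ID), stores a pointer to its stack-resident key array there, and clears the slot again on exit, which is what lets ldap_pvt_thread_pool_context() find the caller's keys without taking the pool mutex.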
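
ldap_pvt_thread_pool_pause() waits until at most one task is still active and leaves the pool in the PAUSING state, where workers park on ltp_cond and submit() queues work without signalling; ldap_pvt_thread_pool_resume() sets the state back to RUNNING and broadcasts. Since ldap_pvt_thread_pool_purgekey() is documented above as legal only while the pool is paused, a typical caller would look like the sketch below. This assumes the new prototypes are exported through ldap_pvt_thread.h; the pool handle and key pointer are placeholders, and error handling is omitted.

/* Usage sketch for the pause/resume API added above. */
#include "ldap_pvt_thread.h"

static void drop_key_everywhere(ldap_pvt_thread_pool_t *pool, void *my_key)
{
	/* Stop dispatching and wait until at most one task is still active. */
	ldap_pvt_thread_pool_pause(pool);

	/* Safe only while paused: run this key's destructor in every slot. */
	ldap_pvt_thread_pool_purgekey(my_key);

	/* Let the workers pick up pending work again. */
	ldap_pvt_thread_pool_resume(pool);
}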
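
Per-thread key storage now lives in an ltc_key[MAXKEYS] array on the worker's stack rather than in the request ctx, and getkey/setkey/purgekey scan it linearly, treating a NULL ltk_key as the free/end-of-data marker. The standalone model below shows the same slot discipline with invented names (key_slot, key_get, key_set, MAX_KEYS); it is only meant to illustrate the data structure, not to reproduce the patch's functions.

/* Model of the per-thread key array: fixed slots, linear scan,
 * NULL key marks an unused slot. */
#include <stddef.h>

#define MAX_KEYS 32

typedef void (key_free_fn)(void *key, void *data);

typedef struct key_slot {
	void        *key;    /* NULL == unused slot */
	void        *data;
	key_free_fn *kfree;  /* destructor run when the key is purged */
} key_slot;

/* Look a key up; returns 0 and fills *data on success, -1 if absent. */
static int key_get(key_slot *slots, void *key, void **data)
{
	int i;
	for (i = 0; i < MAX_KEYS && slots[i].key; i++) {
		if (slots[i].key == key) {
			*data = slots[i].data;
			return 0;
		}
	}
	return -1;
}

/* Install or replace a key; returns -1 when all slots are in use. */
static int key_set(key_slot *slots, void *key, void *data, key_free_fn *kfree)
{
	int i;
	for (i = 0; i < MAX_KEYS; i++) {
		if (!slots[i].key || slots[i].key == key) {
			slots[i].key = key;
			slots[i].data = data;
			slots[i].kfree = kfree;
			return 0;
		}
	}
	return -1;
}

Because the array lives on the worker's stack, the destructors can be run by the worker itself on exit, and purgekey only needs the thread_keys[] pointers (hence the requirement that the pool be paused while it walks them).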