/*
 * Copyright 1998-2000 The OpenLDAP Foundation, Redwood City, California, USA
 *
 * Redistribution and use in source and binary forms are permitted only
 * as authorized by the OpenLDAP Public License.  A copy of this
 * license is available at http://www.OpenLDAP.org/license.html or
 * in file LICENSE in the top-level directory of the distribution.
 */
/* thr_lwp.c - wrappers around SunOS LWP threads */
/*
 * BUGS:
 * - slurpd calls the get_stack/free_stack functions.  Should be fixed so
 *   they can become static.
 */
#include "portable.h"

#if defined( HAVE_LWP )
/* This implementation NEEDS WORK.  It currently does not compile. */
#include <stdio.h>

#include <ac/time.h>
#include <ac/socket.h>

#include "ldap-int.h"
#include "ldap_pvt_thread.h"

#include <lwp/lwp.h>
#include <lwp/stackdep.h>
#define MAX_STACK	51200
#define MAX_THREADS	20
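/*
 * Each LWP gets a dedicated stack of MAX_STACK bytes (rounded up to
 * stkalign_t alignment), drawn from a fixed pool of MAX_THREADS slots.
 */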
/* helpers defined later in this file */
static stkalign_t *ldap_int_thread_get_stack( int *stacknop );
static void ldap_int_thread_free_stack( int stackno );
static void lwp_scheduler( int stackno );

/*
 * Initialize LWP by spinning off a scheduler thread
 */
int
ldap_int_thread_initialize( void )
{
	thread_t	tid;
	stkalign_t	*stack;
	int		stackno;

	if (( stack = ldap_int_thread_get_stack( &stackno )) == NULL ) {
		return -1;
	}

	lwp_create( &tid, lwp_scheduler, MINPRIO, 0, stack, 1, stackno );
	return 0;
}
int
ldap_int_thread_destroy( void )
{
	/* need to destroy the lwp_scheduler thread and clean up private
	 * variables */
	return 0;
}
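/*
 * SunOS lwp_create() requires the caller to supply the stack, so a small
 * fixed pool of stacks is kept here.  ldap_int_thread_get_stack() hands out
 * a free slot and ldap_int_thread_free_stack() recycles it; the stacks
 * themselves are allocated lazily and never released.
 */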
struct stackinfo {
	int		stk_inuse;
	stkalign_t	*stk_stack;
};

static struct stackinfo	*stacks;
static stkalign_t *
ldap_int_thread_get_stack( int *stacknop )
{
	int	i;

	if ( stacks == NULL ) {
		stacks = (struct stackinfo *) LDAP_CALLOC( 1, MAX_THREADS *
		    sizeof(struct stackinfo) );
		if ( stacks == NULL ) {
			Debug( LDAP_DEBUG_ANY, "stacks allocation failed",
			    0, 0, 0 );
			return NULL;
		}
	}

	for ( i = 0; i < MAX_THREADS; i++ ) {
		if ( stacks[i].stk_inuse == 0 ) {
			break;
		}
	}

	if ( i == MAX_THREADS ) {
		Debug( LDAP_DEBUG_ANY,
		    "no more stacks (max %d) - increase MAX_THREADS for more",
		    MAX_THREADS, 0, 0 );
		return NULL;
	}

	/* allocate this slot's stack lazily, on first use */
	if ( stacks[i].stk_stack == NULL ) {
		stacks[i].stk_stack = (stkalign_t *) LDAP_MALLOC(
		    (MAX_STACK / sizeof(stkalign_t) + 1 )
		    * sizeof(stkalign_t) );
		if ( stacks[i].stk_stack == NULL ) {
			Debug( LDAP_DEBUG_ANY, "stack allocation failed",
			    0, 0, 0 );
			return NULL;
		}
	}

	*stacknop = i;
	stacks[i].stk_inuse = 1;

	/* the stack grows downward, so hand back the high end of the block */
	return( stacks[i].stk_stack + MAX_STACK / sizeof(stkalign_t) );
}
static void
ldap_int_thread_free_stack( int stackno )
{
	if ( stackno < 0 || stackno >= MAX_THREADS ) {
		Debug( LDAP_DEBUG_ANY, "free_stack of bogus stack %d\n",
		    stackno, 0, 0 );
		return;
	}

	stacks[stackno].stk_inuse = 0;
}
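/*
 * Trampoline passed to lwp_create(): run the caller's start routine, then
 * return the stack slot to the pool once the routine finishes.
 */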
static void
lwp_create_stack( void *(*func)(), void *arg, int stackno )
{
	(*func)( arg );

	ldap_int_thread_free_stack( stackno );
}
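/*
 * Create a new thread: grab a stack from the pool and hand lwp_create() the
 * lwp_create_stack trampoline with three LWP-style arguments: the start
 * routine, its argument, and the stack slot number.
 */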
int
ldap_pvt_thread_create( ldap_pvt_thread_t * thread,
	int detach,
	void *(*start_routine)( void * ),
	void *arg )
{
	stkalign_t	*stack;
	int		stackno;

	if ( (stack = ldap_int_thread_get_stack( &stackno )) == NULL ) {
		return -1;
	}

	return( lwp_create( thread, lwp_create_stack, MINPRIO, 0,
	    stack, 3, start_routine, arg, stackno ) );
}
void
ldap_pvt_thread_exit( void *retval )
{
	lwp_destroy( SELF );
}
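/*
 * Sleeping is cooperative: the calling thread records its wake-up time on
 * the tsl_list (part of slurpd's sglob globals, which is one reason this
 * file does not build on its own) and suspends itself with lwp_suspend();
 * the lwp_scheduler thread below resumes it once the wake-up time has passed.
 */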
int
ldap_pvt_thread_sleep(
	unsigned int interval
)
{
	thread_t	mylwp;
	tl_t		*t, *nt;
	time_t		now;

	if ( lwp_self( &mylwp ) < 0 ) {
		return -1;
	}

	time( &now );

	mon_enter( &sglob->tsl_mon );

	if ( sglob->tsl_list != NULL ) {
		for ( t = sglob->tsl_list; t != NULL; t = t->tl_next ) {
			if ( SAMETHREAD( t->tl_tid, mylwp )) {
				/* We're already sleeping? */
				t->tl_wake = now + interval;
				mon_exit( &sglob->tsl_mon );
				lwp_suspend( mylwp );
				return 0;
			}
		}
	}

	nt = (tl_t *) LDAP_MALLOC( sizeof( tl_t ));
	if ( nt == NULL ) {
		mon_exit( &sglob->tsl_mon );
		return -1;
	}

	nt->tl_tid = mylwp;
	nt->tl_next = sglob->tsl_list;
	nt->tl_wake = now + interval;
	sglob->tsl_list = nt;

	mon_exit( &sglob->tsl_mon );

	lwp_suspend( mylwp );
	return 0;
}
/*
 * The lwp_scheduler thread periodically checks to see if any threads
 * are due to be resumed.  If there are, it resumes them.  Otherwise,
 * it computes the lesser of ( 1 second ) or ( the minimum time until
 * a thread needs to be resumed ) and puts itself to sleep for that
 * amount of time.
 */
static void
lwp_scheduler( int stackno )
{
	time_t		now, min;
	tl_t		*t;
	struct timeval	interval;
	while ( !sglob->slurpd_shutdown ) {
		mon_enter( &sglob->tsl_mon );

		time( &now );
		min = 0L;
		if ( sglob->tsl_list != NULL ) {
			for ( t = sglob->tsl_list; t != NULL; t = t->tl_next ) {
				if (( t->tl_wake > 0L ) && ( t->tl_wake < now )) {
					lwp_resume( t->tl_tid );
					t->tl_wake = 0L;
				}
				/* track the earliest future wake-up time */
				if (( t->tl_wake > now ) &&
				    ( min == 0L || t->tl_wake < min )) {
					min = t->tl_wake;
				}
			}
		}

		mon_exit( &sglob->tsl_mon );

		interval.tv_usec = 0L;
		if ( min == 0L ) {
			interval.tv_sec = 1L;
		} else {
			interval.tv_sec = min - now;
		}

		lwp_sleep( &interval );
	}

	/* shutting down: wake any remaining sleepers, then give back our stack */
	mon_enter( &sglob->tsl_mon );
	for ( t = sglob->tsl_list; t != NULL; t = t->tl_next ) {
		lwp_resume( t->tl_tid );
	}
	mon_exit( &sglob->tsl_mon );

	ldap_int_thread_free_stack( stackno );
}
int
ldap_pvt_thread_join( ldap_pvt_thread_t thread, void **thread_return )
{
	lwp_join( thread );	/* assumes the LWP library's lwp_join() */
	return 0;
}
int
ldap_pvt_thread_kill( ldap_pvt_thread_t thread, int signo )
{
	return 0;	/* no-op stub */
}
int
ldap_pvt_thread_yield( void )
{
	lwp_yield( SELF );
	return 0;
}
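/*
 * Condition variables are built on the SunOS LWP cv_* primitives.  Because
 * cv_create() needs the owning monitor, creation is deferred until the
 * first wait; see the note in ldap_pvt_thread_cond_init() below.
 */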
int
ldap_pvt_thread_cond_init( ldap_pvt_thread_cond_t *cond )
{
	/*
	 * lwp cv_create requires the monitor id be passed in
	 * when the cv is created; pthreads passes it when the
	 * condition is waited for.  So we fake the creation
	 * here and actually do it when the cv is waited for.
	 */
	cond->lcv_created = 0;

	return( 0 );
}
int
ldap_pvt_thread_cond_signal( ldap_pvt_thread_cond_t *cond )
{
	return( cond->lcv_created ? cv_notify( cond->lcv_cv ) : 0 );
}
int
ldap_pvt_thread_cond_wait( ldap_pvt_thread_cond_t *cond,
	ldap_pvt_thread_mutex_t *mutex )
{
	/* create the cv on first wait, now that the monitor is known */
	if ( ! cond->lcv_created ) {
		cv_create( &cond->lcv_cv, *mutex );
		cond->lcv_created = 1;
	}

	return( cv_wait( cond->lcv_cv ) );
}
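/*
 * Mutexes map one-to-one onto SunOS LWP monitors: mon_create()/mon_destroy()
 * for init/destroy, mon_enter()/mon_exit() for lock/unlock, and
 * mon_cond_enter() for the non-blocking trylock.
 */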
int
ldap_pvt_thread_mutex_init( ldap_pvt_thread_mutex_t *mutex )
{
	return( mon_create( mutex ) );
}

int
ldap_pvt_thread_mutex_destroy( ldap_pvt_thread_mutex_t *mutex )
{
	return( mon_destroy( *mutex ) );
}

int
ldap_pvt_thread_mutex_lock( ldap_pvt_thread_mutex_t *mutex )
{
	return( mon_enter( *mutex ) );
}

int
ldap_pvt_thread_mutex_unlock( ldap_pvt_thread_mutex_t *mutex )
{
	return( mon_exit( *mutex ) );
}

int
ldap_pvt_thread_mutex_trylock( ldap_pvt_thread_mutex_t *mp )
{
	return( mon_cond_enter( *mp ) );
}
int
ldap_pvt_thread_cond_destroy( ldap_pvt_thread_cond_t *cv )
{
	return( cv->lcv_created ? cv_destroy( cv->lcv_cv ) : 0 );
}

int
ldap_pvt_thread_cond_broadcast( ldap_pvt_thread_cond_t *cv )
{
	return( cv->lcv_created ? cv_broadcast( cv->lcv_cv ) : 0 );
}
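/*
 * A minimal usage sketch (hypothetical caller code, not part of this file),
 * showing how the wrappers above are intended to fit together:
 *
 *	static void *worker( void *arg ) { ... return NULL; }
 *
 *	ldap_pvt_thread_t	tid;
 *	ldap_pvt_thread_mutex_t	m;
 *
 *	ldap_int_thread_initialize();	(spins off the lwp_scheduler)
 *	ldap_pvt_thread_mutex_init( &m );
 *	ldap_pvt_thread_create( &tid, 0, worker, NULL );
 *	ldap_pvt_thread_join( tid, NULL );
 *	ldap_pvt_thread_mutex_destroy( &m );
 *	ldap_int_thread_destroy();
 */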
#endif /* HAVE_LWP */