2 * Copyright 1998,1999 The OpenLDAP Foundation, Redwood City, California, USA
5 * Redistribution and use in source and binary forms are permitted only
6 * as authorized by the OpenLDAP Public License. A copy of this
7 * license is available at http://www.OpenLDAP.org/license.html or
8 * in file LICENSE in the top-level directory of the distribution.
11 /* thr_lwp.c - wrappers around SunOS LWP threads */
14 * - slurpd calls the get_stack/free_stack functions. Should be fixed, so
15 * they can become static.
19 #include "ldap_pvt_thread.h"
21 #if defined( HAVE_LWP )
29 /* This implementation NEEDS WORK. It currently does not compile */
34 #include <ac/socket.h>
41 #include <lwp/stackdep.h>
43 #define MAX_STACK 51200
44 #define MAX_THREADS 20
47 * Initialize LWP by spinning off a scheduler
/*
 * ldap_pvt_thread_initialize: start the LWP package by launching the
 * lwp_scheduler thread on a stack taken from the stack pool.
 * NOTE(review): fragmentary source — declarations and return paths are
 * not visible here; presumably returns nonzero/-1 on stack failure.
 */
50 ldap_pvt_thread_initialize( void )
56 if (( stack = get_stack( &stackno )) == NULL ) {
	/* trailing args: 1 pass-through arg (stackno), handed to the
	 * scheduler so it can free its own stack slot on shutdown */
60 lwp_create( &tid, lwp_scheduler, MINPRIO, 0, stack, 1, stackno );
/*
 * ldap_pvt_thread_destroy: tear down the thread package
 * (counterpart of ldap_pvt_thread_initialize).
 */
65 ldap_pvt_thread_destroy( void )
67 /* need to destroy lwp_scheduler thread and clean up private
	/* base address of this slot's stack memory; allocated lazily by
	 * ldap_pvt_thread_get_stack() and kept for reuse, never freed */
74 stkalign_t *stk_stack;
/* lazily allocated table of MAX_THREADS stack slots (see get_stack) */
77 static struct stackinfo *stacks;
/*
 * ldap_pvt_thread_get_stack: hand out a stack for a new thread and
 * report its slot index through *stacknop.  Slots are reused; each
 * slot's memory is malloc'd once and retained.  Returns a pointer to
 * the HIGH end of the stack area (LWP stacks grow downward), or
 * (presumably) NULL when all MAX_THREADS slots are busy.
 * NOTE(review): fragmentary source — the loop `break', the
 * `*stacknop = i' store, and the malloc failure check are not visible.
 */
79 static stkalign_t * ldap_pvt_thread_get_stack( int *stacknop )
	/* allocate the slot table on first use */
83 if ( stacks == NULL ) {
84 stacks = (struct stackinfo *) ch_calloc( 1, MAX_THREADS *
85 sizeof(struct stackinfo) );
	/* find the first free slot */
88 for ( i = 0; i < MAX_THREADS; i++ ) {
89 if ( stacks[i].stk_inuse == 0 ) {
	/* every slot busy: log and fail */
94 if ( i == MAX_THREADS ) {
95 Debug( LDAP_DEBUG_ANY,
96 "no more stacks (max %d) - increase MAX_THREADS for more",
	/* first use of this slot: allocate MAX_STACK bytes rounded up to
	 * a whole number of stkalign_t units (malloc result unchecked) */
101 if ( stacks[i].stk_stack == NULL ) {
102 stacks[i].stk_stack = (stkalign_t *) malloc(
103 (MAX_STACK / sizeof(stkalign_t) + 1 )
104 * sizeof(stkalign_t) );
108 stacks[i].stk_inuse = 1;
	/* return the top of the stack area, MAX_STACK past the base */
109 return( stacks[i].stk_stack + MAX_STACK / sizeof(stkalign_t) );
113 ldap_pvt_thread_free_stack( int stackno )
115 if ( stackno < 0 || stackno > MAX_THREADS ) {
116 Debug( LDAP_DEBUG_ANY, "free_stack of bogus stack %d\n",
120 stacks[stackno].stk_inuse = 0;
/*
 * lwp_create_stack: trampoline used as the entry point of each new
 * LWP.  It runs the caller's start routine (the `(*func)( arg )' call
 * is not visible in this fragment) and then releases the thread's
 * stack slot back to the pool when the routine returns.
 */
124 lwp_create_stack( void *(*func)(), void *arg, int stackno )
	/* thread finished: recycle its stack slot */
128 ldap_pvt_thread_free_stack( stackno );
/*
 * ldap_pvt_thread_create: spawn a new thread running
 * start_routine(arg).  A stack slot is drawn from the pool and
 * lwp_create_stack is used as a trampoline so the slot is freed when
 * start_routine returns.  Returns the lwp_create() status;
 * (presumably) fails when no stack slot is available.
 */
132 ldap_pvt_thread_create( ldap_pvt_thread_t * thread,
134 void *(*start_routine)( void *),
140 if ( (stack = ldap_pvt_thread_get_stack( &stackno )) == NULL ) {
	/* 3 trailing args are forwarded to lwp_create_stack() */
143 return( lwp_create( thread, lwp_create_stack, MINPRIO, 0,
144 stack, 3, start_routine, arg, stackno ) );
/* Terminate the calling thread.  (Body not visible in this fragment;
 * retval is presumably unused — LWP has no join-return value.) */
148 ldap_pvt_thread_exit( void *retval )
/*
 * ldap_pvt_thread_sleep: block the calling LWP for `interval' seconds.
 * The thread records its wakeup time on sglob->tsl_list (guarded by
 * the tsl_mon monitor) and suspends itself; lwp_scheduler resumes it
 * once the wakeup time has passed.  NOTE(review): fragmentary source —
 * `now' is presumably time(NULL); parts of the new-entry setup and the
 * early-return path are not visible.
 */
154 ldap_pvt_thread_sleep(
155 unsigned int interval
163 if ( lwp_self( &mylwp ) < 0 ) {
169 mon_enter( &sglob->tsl_mon );
	/* if this thread already has a list entry, just refresh its
	 * wakeup time rather than adding a duplicate */
171 if ( sglob->tsl_list != NULL ) {
172 for ( t = sglob->tsl_list; t != NULL; t = t->tl_next ) {
173 if ( SAMETHREAD( t->tl_tid, mylwp )) {
174 /* We're already sleeping? */
175 t->tl_wake = now + interval;
176 mon_exit( &sglob->tsl_mon );
177 lwp_suspend( mylwp );
	/* not on the list: push a new entry on the front */
183 nt = (tl_t *) malloc( sizeof( tl_t ));
185 nt->tl_next = sglob->tsl_list;
186 nt->tl_wake = now + interval;
188 sglob->tsl_list = nt;
190 mon_exit( &sglob->tsl_mon );
	/* the scheduler will lwp_resume() us at (or after) tl_wake */
192 lwp_suspend( mylwp );
197 * The lwp_scheduler thread periodically checks to see if any threads
198 * are due to be resumed. If there are, it resumes them. Otherwise,
199 * it computes the lesser of ( 1 second ) or ( the minimum time until
200 * a thread needs to be resumed ) and puts itself to sleep for that amount
209 struct timeval interval;
	/* run until slurpd requests shutdown */
212 while ( !sglob->slurpd_shutdown ) {
213 mon_enter( &sglob->tsl_mon );
	/* resume every sleeper whose deadline has passed; track the
	 * earliest still-future deadline in `min' */
217 if ( sglob->tsl_list != NULL ) {
218 for ( t = sglob->tsl_list; t != NULL; t = t->tl_next ) {
219 if (( t->tl_wake > 0L ) && ( t->tl_wake < now )) {
220 lwp_resume( t->tl_tid );
224 if (( t->tl_wake > now ) && ( t->tl_wake < min )) {
230 mon_exit( &sglob->tsl_mon );
	/* sleep 1 second, or until the next deadline if that is sooner
	 * (NOTE(review): `min' here appears to be an absolute time —
	 * confirm the missing lines convert it to a delta) */
232 interval.tv_usec = 0L;
234 interval.tv_sec = 1L;
236 interval.tv_sec = min;
239 lwp_sleep( &interval );
	/* shutting down: wake everyone so sleepers can observe it */
242 mon_enter( &sglob->tsl_mon );
244 for ( t = sglob->tsl_list; t != NULL; t = t->tl_next ) {
245 lwp_resume( t->tl_tid );
248 mon_exit( &sglob->tsl_mon );
	/* release the scheduler's own stack slot before exiting */
250 free_stack( stackno );
/* Wait for `thread' to finish.  (Body not visible in this fragment;
 * thread_return is presumably unsupported under LWP.) */
254 ldap_pvt_thread_join( ldap_pvt_thread_t thread, void **thread_return )
/* Deliver signal `signo' to `thread'.  (Body not visible; LWP has no
 * direct per-thread signal primitive — presumably a stub.) */
261 ldap_pvt_thread_kill( ldap_pvt_thread_t thread, int signo )
/* Yield the processor to another runnable LWP.  (Body not visible.) */
267 ldap_pvt_thread_yield( void )
/*
 * ldap_pvt_thread_cond_init: initialize a condition variable.
 * Creation of the underlying LWP cv is deferred to the first wait
 * (see the comment below); here we only mark it not-yet-created.
 */
274 ldap_pvt_thread_cond_init( ldap_pvt_thread_cond_t *cond )
277 * lwp cv_create requires the monitor id be passed in
278 * when the cv is created, pthreads passes it when the
279 * condition is waited for. so, we fake the creation
280 * here and actually do it when the cv is waited for
284 cond->lcv_created = 0;
290 ldap_pvt_thread_cond_signal( ldap_pvt_thread_cond_t *cond )
292 return( cond->lcv_created ? cv_notify( cv->lcv_cv ) : 0 );
/*
 * ldap_pvt_thread_cond_wait: wait on `cond', associated with `mutex'
 * (an LWP monitor).  The LWP cv is created lazily on the first wait
 * because cv_create() needs the monitor id (see cond_init).
 * NOTE(review): presumably cv_wait() releases and reacquires the
 * monitor itself, as LWP cvs are monitor-bound — confirm.
 */
296 ldap_pvt_thread_cond_wait( ldap_pvt_thread_cond_t *cond,
297 ldap_pvt_thread_mutex_t *mutex )
	/* first waiter: create the cv bound to this monitor */
299 if ( ! cond->lcv_created ) {
300 cv_create( &cond->lcv_cv, *mutex );
301 cond->lcv_created = 1;
304 return( cv_wait( cond->lcv_cv ) );
/* Initialize a mutex: backed by an LWP monitor. */
308 ldap_pvt_thread_mutex_init( ldap_pvt_thread_mutex_t *mutex )
310 return( mon_create( mutex ) );
/* Destroy a mutex (its underlying LWP monitor). */
314 ldap_pvt_thread_mutex_destroy( ldap_pvt_thread_mutex_t *mutex )
316 return( mon_destroy( *mutex ) );
/* Acquire the mutex: enter its LWP monitor (blocks until free). */
320 ldap_pvt_thread_mutex_lock( ldap_pvt_thread_mutex_t *mutex )
322 return( mon_enter( *mutex ) );
/* Release the mutex: exit its LWP monitor. */
326 ldap_pvt_thread_mutex_unlock( ldap_pvt_thread_mutex_t *mutex )
328 return( mon_exit( *mutex ) );
/* Try to acquire the mutex without blocking: mon_cond_enter is the
 * LWP non-blocking monitor entry. */
332 ldap_pvt_thread_mutex_trylock( ldap_pvt_thread_mutex_t *mp )
334 return( mon_cond_enter( *mp ) );
/* Destroy a condition variable.  If the lazily-created LWP cv never
 * came into existence (no-one waited), there is nothing to destroy. */
338 ldap_pvt_thread_cond_destroy( ldap_pvt_thread_cond_t *cv )
340 return( cv->lcv_created ? cv_destroy( cv->lcv_cv ) : 0 );
/* Wake all threads waiting on the condition variable; a no-op when
 * the LWP cv was never lazily created (no waiters ever). */
344 ldap_pvt_thread_cond_broadcast( ldap_pvt_thread_cond_t *cv )
346 return( cv->lcv_created ? cv_broadcast( cv->lcv_cv ) : 0 );
349 #endif /* HAVE_LWP */