From e5a6f515ef48dfeba25dea301e759340cc41bb50 Mon Sep 17 00:00:00 2001
From: Kurt Zeilenga <kurt@openldap.org>
Date: Tue, 6 Apr 1999 20:19:40 +0000
Subject: [PATCH] Move tsleep back to slurpd... it relies on slurpd internals.

---
 servers/slurpd/tsleep.c | 160 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 160 insertions(+)
 create mode 100644 servers/slurpd/tsleep.c

diff --git a/servers/slurpd/tsleep.c b/servers/slurpd/tsleep.c
new file mode 100644
index 0000000000..97ad586424
--- /dev/null
+++ b/servers/slurpd/tsleep.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 1996 Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms are permitted
+ * provided that this notice is preserved and that due credit is given
+ * to the University of Michigan at Ann Arbor. The name of the University
+ * may not be used to endorse or promote products derived from this
+ * software without specific prior written permission. This software
+ * is provided ``as is'' without express or implied warranty.
+ */
+
+/*
+ * ldap_pvt_thread_sleep.c - allow a thread to sleep without putting
+ * the whole process (e.g. pod under lwp) to sleep.
+ *
+ * Contains platform-specific code to allow this:
+ *
+ * Under non-preemptive threads packages like SunOS lwp, tsleep() adds
+ * the thread to a list of sleepers.  The lwp_scheduler process takes
+ * care of resuming suspended threads.
+ *
+ * Under a fully-preemptive threads package, like Solaris threads,
+ * tsleep just calls sleep(), and there is no scheduler thread.  Life
+ * is so much simpler...
+ */
+
+#include "portable.h"
+
+#include <stdio.h>
+#include <ac/time.h>
+#include <ac/unistd.h>		/* get sleep() */
+
+#include "ldap_pvt_thread.h"
+#include "slurp.h"		/* tl_t sleeper-list entries */
+#include "globals.h"		/* sglob, slurpd's global state */
+
+#if !defined( HAVE_LWP )
+
+/*
+ * Here we assume we have fully preemptive threads and that sleep()
+ * does the right thing.
+ */
+unsigned int
+ldap_pvt_thread_sleep(
+	unsigned int interval
+)
+{
+	sleep( interval );
+	return 0;
+}
+
+#else
+
+unsigned int
+ldap_pvt_thread_sleep(
+	unsigned int interval
+)
+{
+	thread_t	mylwp;
+	tl_t		*t, *nt;
+	time_t		now;
+
+
+	if ( lwp_self( &mylwp ) < 0 ) {
+		return -1;
+	}
+
+	time( &now );
+
+	mon_enter( &sglob->tsl_mon );
+
+	if ( sglob->tsl_list != NULL ) {
+		for ( t = sglob->tsl_list; t != NULL; t = t->tl_next ) {
+			if ( SAMETHREAD( t->tl_tid, mylwp )) {
+				/* We're already sleeping? */
+				t->tl_wake = now + interval;
+				mon_exit( &sglob->tsl_mon );
+				lwp_suspend( mylwp );
+				return 0;
+			}
+		}
+	}
+
+	/* Not on the sleeper list yet: add ourselves, then suspend. */
+	nt = (tl_t *) malloc( sizeof( tl_t ));
+
+	nt->tl_next = sglob->tsl_list;
+	nt->tl_wake = now + interval;
+	nt->tl_tid = mylwp;
+	sglob->tsl_list = nt;
+
+	mon_exit( &sglob->tsl_mon );
+
+	lwp_suspend( mylwp );
+	return 0;
+}
+
+/*
+ * The lwp_scheduler thread periodically checks to see if any threads
+ * are due to be resumed.  If there are, it resumes them.  Otherwise,
+ * it computes the lesser of ( 1 second ) or ( the minimum time until
+ * a thread needs to be resumed ) and puts itself to sleep for that
+ * amount of time.
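+ *
+ * When slurpd_shutdown is set, the scheduler falls out of its loop,
+ * resumes every thread still on the sleeper list, and frees its stack.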
+ */
+void
+lwp_scheduler(
+	int stackno
+)
+{
+	time_t		now, min;
+	struct timeval	interval;
+	tl_t		*t;
+
+	while ( !sglob->slurpd_shutdown ) {
+		mon_enter( &sglob->tsl_mon );
+
+		time( &now );
+		min = 0L;
+		if ( sglob->tsl_list != NULL ) {
+			for ( t = sglob->tsl_list; t != NULL; t = t->tl_next ) {
+				if (( t->tl_wake > 0L ) && ( t->tl_wake < now )) {
+					lwp_resume( t->tl_tid );
+					t->tl_wake = 0L;
+				}
+
+				if (( t->tl_wake > now ) &&
+				    (( min == 0L ) || ( t->tl_wake < min ))) {
+					min = t->tl_wake;
+				}
+			}
+		}
+
+		mon_exit( &sglob->tsl_mon );
+
+		interval.tv_usec = 0L;
+		if (( min == 0L ) || ( min - now > 1L )) {
+			interval.tv_sec = 1L;
+		} else {
+			interval.tv_sec = min - now;
+		}
+
+		lwp_sleep( &interval );
+	}
+
+	mon_enter( &sglob->tsl_mon );
+
+	for ( t = sglob->tsl_list; t != NULL; t = t->tl_next ) {
+		lwp_resume( t->tl_tid );
+	}
+
+	mon_exit( &sglob->tsl_mon );
+
+	free_stack( stackno );
+}
+
+#endif /* HAVE_LWP */
-- 
2.39.5