/*
   Bacula® - The Network Backup Solution

   Copyright (C) 2001-2008 Free Software Foundation Europe e.V.

   The main author of Bacula is Kern Sibbald, with contributions from
   many others, a complete list can be found in the file AUTHORS.
   This program is Free Software; you can redistribute it and/or
   modify it under the terms of version two of the GNU General Public
   License as published by the Free Software Foundation and included
   in the file LICENSE.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   Bacula® is a registered trademark of John Walker.
   The licensor of Bacula is the Free Software Foundation Europe
   (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
   Switzerland, email:ftf@fsfeurope.org.
*/
/*
 * Bacula Thread Read/Write locking code. It permits
 * multiple readers but only one writer. Note, however,
 * that the writer thread is permitted to make multiple
 * nested write lock calls.
 *
 *   Kern Sibbald, January MMI
 *
 *   This code adapted from "Programming with POSIX Threads", by
 *   David R. Butenhof
 */

#include "bacula.h"
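
/*
 * A minimal usage sketch (illustration only, not part of Bacula; the
 * example_* names and the RWLOCK_USAGE_EXAMPLE guard are hypothetical).
 * All functions return 0 on success or an errno value, so callers
 * should check:
 */
#ifdef RWLOCK_USAGE_EXAMPLE
static brwlock_t example_lock;
static int example_value = 0;

static int example_setup(void)
{
   return rwl_init(&example_lock);    /* 0 on success, errno on failure */
}

static int example_read(void)
{
   int val;
   rwl_readlock(&example_lock);       /* many readers may hold the lock */
   val = example_value;
   rwl_readunlock(&example_lock);
   return val;
}

static void example_write(int val)
{
   rwl_writelock(&example_lock);      /* exclusive: blocks new readers */
   example_value = val;
   rwl_writeunlock(&example_lock);
}
#endif /* RWLOCK_USAGE_EXAMPLE */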
/*
 * Initialize a read/write lock
 *
 *  Returns: 0 on success
 *           errno on failure
 */
int rwl_init(brwlock_t *rwl)
{
   int stat;

   rwl->r_active = rwl->w_active = 0;
   rwl->r_wait = rwl->w_wait = 0;
   if ((stat = pthread_mutex_init(&rwl->mutex, NULL)) != 0) {
      return stat;                    /* nothing allocated yet */
   }
   if ((stat = pthread_cond_init(&rwl->read, NULL)) != 0) {
      pthread_mutex_destroy(&rwl->mutex);
      return stat;
   }
   if ((stat = pthread_cond_init(&rwl->write, NULL)) != 0) {
      pthread_cond_destroy(&rwl->read);
      pthread_mutex_destroy(&rwl->mutex);
      return stat;
   }
   rwl->valid = RWLOCK_VALID;
   return 0;
}
/*
 * Destroy a read/write lock
 *
 *  Returns: 0 on success
 *           errno on failure
 */
int rwl_destroy(brwlock_t *rwl)
{
   int stat, stat1, stat2;

   if (rwl->valid != RWLOCK_VALID) {
      return EINVAL;
   }
   if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) {
      return stat;
   }

   /*
    * If any threads are active, report EBUSY
    */
   if (rwl->r_active > 0 || rwl->w_active) {
      pthread_mutex_unlock(&rwl->mutex);
      return EBUSY;
   }

   /*
    * If any threads are waiting, report EBUSY
    */
   if (rwl->r_wait > 0 || rwl->w_wait > 0) {
      pthread_mutex_unlock(&rwl->mutex);
      return EBUSY;
   }

   rwl->valid = 0;                    /* mark the lock invalid */
   if ((stat = pthread_mutex_unlock(&rwl->mutex)) != 0) {
      return stat;
   }
   stat  = pthread_mutex_destroy(&rwl->mutex);
   stat1 = pthread_cond_destroy(&rwl->read);
   stat2 = pthread_cond_destroy(&rwl->write);
   return (stat != 0 ? stat : (stat1 != 0 ? stat1 : stat2));
}
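
/*
 * Teardown sketch (illustration only; example_teardown is hypothetical):
 * rwl_destroy() returns EBUSY while any thread still holds or waits on
 * the lock, so all users are joined before destroying it.
 */
#ifdef RWLOCK_USAGE_EXAMPLE
static int example_teardown(pthread_t *users, int n_users, brwlock_t *lock)
{
   int i;

   for (i = 0; i < n_users; i++) {
      pthread_join(users[i], NULL);   /* wait until no thread uses the lock */
   }
   return rwl_destroy(lock);          /* 0, or errno (EBUSY if still in use) */
}
#endif /* RWLOCK_USAGE_EXAMPLE */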
/*
 * Handle cleanup when the read lock condition variable
 * wait is released (e.g. the waiting thread is canceled).
 */
static void rwl_read_release(void *arg)
{
   brwlock_t *rwl = (brwlock_t *)arg;

   rwl->r_wait--;                     /* no longer waiting */
   pthread_mutex_unlock(&rwl->mutex); /* the wait reacquired the mutex */
}
/*
 * Handle cleanup when the write lock condition variable wait
 * is released (e.g. the waiting thread is canceled).
 */
static void rwl_write_release(void *arg)
{
   brwlock_t *rwl = (brwlock_t *)arg;

   rwl->w_wait--;                     /* no longer waiting */
   pthread_mutex_unlock(&rwl->mutex); /* the wait reacquired the mutex */
}
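
/*
 * Why these handlers exist (illustration only): pthread_cond_wait() is a
 * cancellation point, and a thread canceled there wakes up holding the
 * mutex. The handler pushed before the wait then corrects the wait count
 * and drops the mutex. The generic POSIX pattern is:
 */
#ifdef RWLOCK_USAGE_EXAMPLE
static void example_unlock_handler(void *arg)
{
   pthread_mutex_unlock((pthread_mutex_t *)arg);
}

static void example_guarded_wait(pthread_mutex_t *mp, pthread_cond_t *cp,
                                 int *done)
{
   pthread_mutex_lock(mp);
   pthread_cleanup_push(example_unlock_handler, (void *)mp);
   while (!*done) {
      pthread_cond_wait(cp, mp);      /* may be canceled here */
   }
   pthread_cleanup_pop(0);            /* 0 ==> pop without executing */
   pthread_mutex_unlock(mp);
}
#endif /* RWLOCK_USAGE_EXAMPLE */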
/*
 * Lock for read access, wait until locked (or error).
 */
int rwl_readlock(brwlock_t *rwl)
{
   int stat;

   if (rwl->valid != RWLOCK_VALID) {
      return EINVAL;
   }
   if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) {
      return stat;
   }
   if (rwl->w_active) {
      rwl->r_wait++;                  /* indicate that we are waiting */
      pthread_cleanup_push(rwl_read_release, (void *)rwl);
      while (rwl->w_active) {
         stat = pthread_cond_wait(&rwl->read, &rwl->mutex);
         if (stat != 0) {
            break;                    /* error, bail out */
         }
      }
      pthread_cleanup_pop(0);
      rwl->r_wait--;                  /* we are no longer waiting */
   }
   if (stat == 0) {
      rwl->r_active++;                /* we are running */
   }
   pthread_mutex_unlock(&rwl->mutex);
   return stat;
}
/*
 * Attempt to lock for read access, don't wait
 */
int rwl_readtrylock(brwlock_t *rwl)
{
   int stat, stat2;

   if (rwl->valid != RWLOCK_VALID) {
      return EINVAL;
   }
   if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) {
      return stat;
   }
   if (rwl->w_active) {
      stat = EBUSY;                   /* a writer holds the lock */
   } else {
      rwl->r_active++;                /* we are running */
   }
   stat2 = pthread_mutex_unlock(&rwl->mutex);
   return (stat == 0 ? stat2 : stat);
}
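
/*
 * Typical try-lock pattern (illustration only; example_try_read is
 * hypothetical): on EBUSY the caller goes off to do other work rather
 * than blocking.
 */
#ifdef RWLOCK_USAGE_EXAMPLE
static int example_try_read(brwlock_t *lock, const int *shared, int *out)
{
   int stat = rwl_readtrylock(lock);
   if (stat == EBUSY) {
      return 0;                       /* lock busy, try again later */
   }
   if (stat != 0) {
      return -1;                      /* real error, stat is an errno value */
   }
   *out = *shared;                    /* read under the shared lock */
   rwl_readunlock(lock);
   return 1;                          /* value stored in *out */
}
#endif /* RWLOCK_USAGE_EXAMPLE */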
/*
 * Unlock read access.
 */
int rwl_readunlock(brwlock_t *rwl)
{
   int stat, stat2;

   if (rwl->valid != RWLOCK_VALID) {
      return EINVAL;
   }
   if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) {
      return stat;
   }
   rwl->r_active--;                   /* one less reader */
   if (rwl->r_active == 0 && rwl->w_wait > 0) { /* if writers waiting */
      stat = pthread_cond_broadcast(&rwl->write);
   }
   stat2 = pthread_mutex_unlock(&rwl->mutex);
   return (stat == 0 ? stat2 : stat);
}
/*
 * Lock for write access, wait until locked (or error).
 *   Multiple nested write locking is permitted.
 */
int rwl_writelock(brwlock_t *rwl)
{
   int stat;

   if (rwl->valid != RWLOCK_VALID) {
      return EINVAL;
   }
   if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) {
      return stat;
   }
   if (rwl->w_active && pthread_equal(rwl->writer_id, pthread_self())) {
      rwl->w_active++;                /* nested lock by the owning thread */
      pthread_mutex_unlock(&rwl->mutex);
      return 0;
   }
   if (rwl->w_active || rwl->r_active > 0) {
      rwl->w_wait++;                  /* indicate that we are waiting */
      pthread_cleanup_push(rwl_write_release, (void *)rwl);
      while (rwl->w_active || rwl->r_active > 0) {
         if ((stat = pthread_cond_wait(&rwl->write, &rwl->mutex)) != 0) {
            break;                    /* error, bail out */
         }
      }
      pthread_cleanup_pop(0);
      rwl->w_wait--;                  /* we are no longer waiting */
   }
   if (stat == 0) {
      rwl->w_active++;                /* we are running */
      rwl->writer_id = pthread_self(); /* save writer thread's id */
   }
   pthread_mutex_unlock(&rwl->mutex);
   return stat;
}
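
/*
 * Nested write locking (illustration only; the nest_* names are
 * hypothetical): because rwl_writelock() recognizes the owning thread,
 * a routine holding the write lock may call another routine that takes
 * it again; every level must be matched by an rwl_writeunlock().
 */
#ifdef RWLOCK_USAGE_EXAMPLE
static brwlock_t nest_lock;

static void nest_inner(void)
{
   rwl_writelock(&nest_lock);         /* w_active 1 -> 2, no deadlock */
   /* ... modify shared state ... */
   rwl_writeunlock(&nest_lock);       /* w_active 2 -> 1, still held */
}

static void nest_outer(void)
{
   rwl_writelock(&nest_lock);         /* first level acquires the lock */
   nest_inner();
   rwl_writeunlock(&nest_lock);       /* last unlock releases the lock */
}
#endif /* RWLOCK_USAGE_EXAMPLE */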
/*
 * Attempt to lock for write access, don't wait
 */
int rwl_writetrylock(brwlock_t *rwl)
{
   int stat, stat2;

   if (rwl->valid != RWLOCK_VALID) {
      return EINVAL;
   }
   if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) {
      return stat;
   }
   if (rwl->w_active && pthread_equal(rwl->writer_id, pthread_self())) {
      rwl->w_active++;                /* nested lock by the owning thread */
      pthread_mutex_unlock(&rwl->mutex);
      return 0;
   }
   if (rwl->w_active || rwl->r_active > 0) {
      stat = EBUSY;                   /* someone else holds the lock */
   } else {
      rwl->w_active = 1;              /* we are running */
      rwl->writer_id = pthread_self(); /* save writer thread's id */
   }
   stat2 = pthread_mutex_unlock(&rwl->mutex);
   return (stat == 0 ? stat2 : stat);
}
/*
 * Unlock write access. When the last nested write lock is
 *   released, awaken waiting readers in preference to
 *   waiting writers.
 */
int rwl_writeunlock(brwlock_t *rwl)
{
   int stat, stat2;

   if (rwl->valid != RWLOCK_VALID) {
      return EINVAL;
   }
   if ((stat = pthread_mutex_lock(&rwl->mutex)) != 0) {
      return stat;
   }
   if (rwl->w_active <= 0) {
      pthread_mutex_unlock(&rwl->mutex);
      Jmsg0(NULL, M_ABORT, 0, _("rwl_writeunlock called too many times.\n"));
   }
   rwl->w_active--;                   /* pop one nesting level */
   if (!pthread_equal(pthread_self(), rwl->writer_id)) {
      pthread_mutex_unlock(&rwl->mutex);
      Jmsg0(NULL, M_ABORT, 0, _("rwl_writeunlock by non-owner.\n"));
   }
   if (rwl->w_active > 0) {
      stat = 0;                       /* writers still active */
   } else {
      /* No more writers, awaken someone */
      if (rwl->r_wait > 0) {          /* if readers waiting */
         stat = pthread_cond_broadcast(&rwl->read);
      } else if (rwl->w_wait > 0) {
         stat = pthread_cond_broadcast(&rwl->write);
      }
   }
   stat2 = pthread_mutex_unlock(&rwl->mutex);
   return (stat == 0 ? stat2 : stat);
}
#ifdef TEST_RWLOCK

#define THREADS    5
#define DATASIZE   15
#define ITERATIONS 10000
/*
 * Keep statistics for each thread.
 */
typedef struct thread_tag {
   int        thread_num;
   pthread_t  thread_id;
   int        writes;
   int        reads;
   int        interval;
} thread_t;

/*
 * Read/write lock and shared data.
 */
typedef struct data_tag {
   brwlock_t  lock;
   int        data;
   int        writes;
} data_t;

thread_t threads[THREADS];
data_t data[DATASIZE];
/*
 * Thread start routine that uses read/write locks.
 */
void *thread_routine(void *arg)
{
   thread_t *self = (thread_t *)arg;
   int repeats = 0;
   int iteration;
   int element = 0;
   int status;

   for (iteration=0; iteration < ITERATIONS; iteration++) {
      /*
       * Each "self->interval" iterations, perform an
       * update operation (write lock instead of read
       * lock).
       */
      if ((iteration % self->interval) == 0) {
         status = rwl_writelock(&data[element].lock);
         if (status != 0) {
            berrno be;
            Jmsg1(NULL, M_ABORT, 0, _("Write lock failed. ERR=%s\n"), be.bstrerror(status));
         }
         data[element].data = self->thread_num;
         data[element].writes++;
         self->writes++;
         status = rwl_writeunlock(&data[element].lock);
         if (status != 0) {
            berrno be;
            Jmsg1(NULL, M_ABORT, 0, _("Write unlock failed. ERR=%s\n"), be.bstrerror(status));
         }
      } else {
         /*
          * Look at the current data element to see whether
          * the current thread last updated it. Count the
          * times to report later.
          */
         status = rwl_readlock(&data[element].lock);
         if (status != 0) {
            berrno be;
            Jmsg1(NULL, M_ABORT, 0, _("Read lock failed. ERR=%s\n"), be.bstrerror(status));
         }
         self->reads++;
         if (data[element].data == self->thread_num) {
            repeats++;
         }
         status = rwl_readunlock(&data[element].lock);
         if (status != 0) {
            berrno be;
            Jmsg1(NULL, M_ABORT, 0, _("Read unlock failed. ERR=%s\n"), be.bstrerror(status));
         }
      }
      element++;
      if (element >= DATASIZE) {
         element = 0;
      }
   }
   if (repeats > 0) {
      Pmsg2(000, _("Thread %d found unchanged elements %d times\n"),
          self->thread_num, repeats);
   }
   return NULL;
}
int main (int argc, char *argv[])
{
   int count;
   int data_count;
   int status;
   unsigned int seed = 1;
   int thread_writes = 0;
   int data_writes = 0;

#ifdef sun
   /*
    * On Solaris 2.5, threads are not timesliced. To ensure
    * that our threads can run concurrently, we need to
    * increase the concurrency level to THREADS.
    */
   thr_setconcurrency (THREADS);
#endif

   /*
    * Initialize the shared data.
    */
   for (data_count = 0; data_count < DATASIZE; data_count++) {
      data[data_count].data = 0;
      data[data_count].writes = 0;
      status = rwl_init (&data[data_count].lock);
      if (status != 0) {
         berrno be;
         Jmsg1(NULL, M_ABORT, 0, _("Init rwlock failed. ERR=%s\n"), be.bstrerror(status));
      }
   }

   /*
    * Create THREADS threads to access shared data.
    */
   for (count = 0; count < THREADS; count++) {
      threads[count].thread_num = count + 1;
      threads[count].writes = 0;
      threads[count].reads = 0;
      threads[count].interval = rand_r (&seed) % 71;
      if (threads[count].interval == 0) {
         threads[count].interval = 1; /* avoid modulo by zero in thread_routine */
      }
      status = pthread_create (&threads[count].thread_id,
          NULL, thread_routine, (void*)&threads[count]);
      if (status != 0) {
         berrno be;
         Jmsg1(NULL, M_ABORT, 0, _("Create thread failed. ERR=%s\n"), be.bstrerror(status));
      }
   }

   /*
    * Wait for all threads to complete, and collect
    * statistics.
    */
   for (count = 0; count < THREADS; count++) {
      status = pthread_join (threads[count].thread_id, NULL);
      if (status != 0) {
         berrno be;
         Jmsg1(NULL, M_ABORT, 0, _("Join thread failed. ERR=%s\n"), be.bstrerror(status));
      }
      thread_writes += threads[count].writes;
      printf (_("%02d: interval %d, writes %d, reads %d\n"),
          count, threads[count].interval,
          threads[count].writes, threads[count].reads);
   }

   /*
    * Collect statistics for the data.
    */
   for (data_count = 0; data_count < DATASIZE; data_count++) {
      data_writes += data[data_count].writes;
      printf (_("data %02d: value %d, %d writes\n"),
          data_count, data[data_count].data, data[data_count].writes);
      rwl_destroy (&data[data_count].lock);
   }

   printf (_("Total: %d thread writes, %d data writes\n"),
       thread_writes, data_writes);
   return 0;
}

#endif /* TEST_RWLOCK */
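
/*
 * The self-test above compiles only when TEST_RWLOCK is defined. A
 * possible stand-alone build line (assumed; adjust include paths and
 * libraries to your source tree):
 *
 *   c++ -DTEST_RWLOCK -I../.. rwlock.c -lbac -lpthread -o rwlock_test
 */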
#ifdef TEST_RW_TRY_LOCK
/*
 * Demonstrate use of non-blocking read-write locks.
 *
 * Special notes: On a Solaris system, call thr_setconcurrency()
 * to allow interleaved thread execution, since threads are not
 * timesliced.
 */

#define THREADS         5
#define DATASIZE        15
#define ITERATIONS      1000
/*
 * Keep statistics for each thread.
 */
typedef struct thread_tag {
   int        thread_num;
   pthread_t  thread_id;
   int        r_collisions;
   int        w_collisions;
   int        updates;
   int        interval;
} thread_t;

/*
 * Read-write lock and shared data
 */
typedef struct data_tag {
   brwlock_t  lock;
   int        data;
   int        updates;
} data_t;

thread_t threads[THREADS];
data_t data[DATASIZE];
/*
 * Thread start routine that uses read-write locks
 */
void *thread_routine (void *arg)
{
   thread_t *self = (thread_t*)arg;
   int iteration;
   int element;
   int status;

   element = 0;                       /* Current data element */

   for (iteration = 0; iteration < ITERATIONS; iteration++) {
      if ((iteration % self->interval) == 0) {
         status = rwl_writetrylock (&data[element].lock);
         if (status == EBUSY) {
            self->w_collisions++;
         } else if (status == 0) {
            data[element].data++;
            data[element].updates++;
            self->updates++;
            rwl_writeunlock (&data[element].lock);
         } else {
            err_abort (status, _("Try write lock"));
         }
      } else {
         status = rwl_readtrylock (&data[element].lock);
         if (status == EBUSY) {
            self->r_collisions++;
         } else if (status != 0) {
            err_abort (status, _("Try read lock"));
         } else {
            if (data[element].data != data[element].updates) {
               printf ("%d: data[%d] %d != %d\n",
                   self->thread_num, element,
                   data[element].data, data[element].updates);
            }
            rwl_readunlock (&data[element].lock);
         }
      }

      element++;
      if (element >= DATASIZE) {
         element = 0;
      }
   }
   return NULL;
}
int main (int argc, char *argv[])
{
   int count, data_count;
   unsigned int seed = 1;
   int thread_updates = 0, data_updates = 0;
   int status;

#ifdef sun
   /*
    * On Solaris 2.5, threads are not timesliced. To ensure
    * that our threads can run concurrently, we need to
    * increase the concurrency level to THREADS.
    */
   DPRINTF (("Setting concurrency level to %d\n", THREADS));
   thr_setconcurrency (THREADS);
#endif

   /*
    * Initialize the shared data.
    */
   for (data_count = 0; data_count < DATASIZE; data_count++) {
      data[data_count].data = 0;
      data[data_count].updates = 0;
      rwl_init (&data[data_count].lock);
   }

   /*
    * Create THREADS threads to access shared data.
    */
   for (count = 0; count < THREADS; count++) {
      threads[count].thread_num = count;
      threads[count].r_collisions = 0;
      threads[count].w_collisions = 0;
      threads[count].updates = 0;
      threads[count].interval = rand_r (&seed) % ITERATIONS;
      if (threads[count].interval == 0) {
         threads[count].interval = 1; /* avoid modulo by zero in thread_routine */
      }
      status = pthread_create (&threads[count].thread_id,
          NULL, thread_routine, (void*)&threads[count]);
      if (status != 0) {
         err_abort (status, _("Create thread"));
      }
   }

   /*
    * Wait for all threads to complete, and collect
    * statistics.
    */
   for (count = 0; count < THREADS; count++) {
      status = pthread_join (threads[count].thread_id, NULL);
      if (status != 0) {
         err_abort (status, _("Join thread"));
      }
      thread_updates += threads[count].updates;
      printf (_("%02d: interval %d, updates %d, "
          "r_collisions %d, w_collisions %d\n"),
          count, threads[count].interval,
          threads[count].updates,
          threads[count].r_collisions, threads[count].w_collisions);
   }

   /*
    * Collect statistics for the data.
    */
   for (data_count = 0; data_count < DATASIZE; data_count++) {
      data_updates += data[data_count].updates;
      printf (_("data %02d: value %d, %d updates\n"),
          data_count, data[data_count].data, data[data_count].updates);
      rwl_destroy (&data[data_count].lock);
   }

   printf (_("%d thread updates, %d data updates\n"),
       thread_updates, data_updates);
   return 0;
}

#endif /* TEST_RW_TRY_LOCK */
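
/*
 * As above, this non-blocking self-test compiles only with
 * -DTEST_RW_TRY_LOCK. The err_abort() and DPRINTF() conveniences follow
 * the "errors.h" conventions of "Programming with POSIX Threads".
 */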