/* Locking in multithreaded situations.
2 Copyright (C) 2005-2006 Free Software Foundation, Inc.
3
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; either version 2, or (at your option)
7 any later version.
8
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
13
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software Foundation,
16 Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */
17
18 /* Written by Bruno Haible <bruno (at) clisp.org>, 2005.
19 Based on GCC's gthr-posix.h, gthr-posix95.h, gthr-solaris.h,
20 gthr-win32.h. */
21
22 #include <config.h>
23
24 #include "lock.h"
25
26 /* ========================================================================= */
27
28 #if USE_POSIX_THREADS
29
30 /* Use the POSIX threads library. */
31
32 # if PTHREAD_IN_USE_DETECTION_HARD
33
/* Trivial thread start routine: hands its argument straight back.
   Only used to probe whether real thread creation works.  */
static void *
dummy_thread_func (void *param)
{
  void *result = param;
  return result;
}
40
41 int
42 glthread_in_use (void)
43 {
44 static int tested;
45 static int result; /* 1: linked with -lpthread, 0: only with libc */
46
47 if (!tested)
48 {
49 pthread_t thread;
50
51 if (pthread_create (&thread, NULL, dummy_thread_func, NULL) != 0)
52 /* Thread creation failed. */
53 result = 0;
54 else
55 {
56 /* Thread creation works. */
57 void *retval;
58 if (pthread_join (thread, &retval) != 0)
59 abort ();
60 result = 1;
61 }
62 tested = 1;
63 }
64 return result;
65 }
66
67 # endif
68
69 /* -------------------------- gl_lock_t datatype -------------------------- */
70
71 /* ------------------------- gl_rwlock_t datatype ------------------------- */
72
73 # if HAVE_PTHREAD_RWLOCK
74
75 # if !defined PTHREAD_RWLOCK_INITIALIZER
76
/* Initialize *LOCK.  Used both directly and for on-demand initialization
   of statically allocated locks, since this branch is compiled only when
   PTHREAD_RWLOCK_INITIALIZER is unavailable.  */
void
glthread_rwlock_init (gl_rwlock_t *lock)
{
  if (pthread_rwlock_init (&lock->rwlock, NULL) != 0)
    abort ();
  /* Mark initialized last, after the rwlock is usable.  */
  lock->initialized = 1;
}
84
/* Acquire *LOCK for reading, initializing it first if it was statically
   allocated and never used.  */
void
glthread_rwlock_rdlock (gl_rwlock_t *lock)
{
  /* Lazy initialization, serialized through the statically initializable
     `guard' mutex.  NOTE(review): the outer unguarded read of
     lock->initialized is double-checked locking -- a longstanding idiom
     in this file, but formally a data race.  */
  if (!lock->initialized)
    {
      if (pthread_mutex_lock (&lock->guard) != 0)
        abort ();
      /* Re-check under the guard: another thread may have initialized
         the lock while we waited.  */
      if (!lock->initialized)
        glthread_rwlock_init (lock);
      if (pthread_mutex_unlock (&lock->guard) != 0)
        abort ();
    }
  if (pthread_rwlock_rdlock (&lock->rwlock) != 0)
    abort ();
}
100
/* Acquire *LOCK for writing, initializing it first if it was statically
   allocated and never used.  */
void
glthread_rwlock_wrlock (gl_rwlock_t *lock)
{
  /* Same lazy-initialization protocol as glthread_rwlock_rdlock.  */
  if (!lock->initialized)
    {
      if (pthread_mutex_lock (&lock->guard) != 0)
        abort ();
      if (!lock->initialized)
        glthread_rwlock_init (lock);
      if (pthread_mutex_unlock (&lock->guard) != 0)
        abort ();
    }
  if (pthread_rwlock_wrlock (&lock->rwlock) != 0)
    abort ();
}
116
/* Release *LOCK (reader or writer).  Unlocking a never-initialized lock
   is a usage error and aborts.  */
void
glthread_rwlock_unlock (gl_rwlock_t *lock)
{
  if (!lock->initialized)
    abort ();
  if (pthread_rwlock_unlock (&lock->rwlock) != 0)
    abort ();
}
125
/* Destroy *LOCK.  Aborts if the lock was never initialized.  */
void
glthread_rwlock_destroy (gl_rwlock_t *lock)
{
  if (!lock->initialized)
    abort ();
  if (pthread_rwlock_destroy (&lock->rwlock) != 0)
    abort ();
  /* Reset the flag so a later use aborts instead of touching a dead
     rwlock.  */
  lock->initialized = 0;
}
135
136 # endif
137
138 # else
139
/* Reader/writer lock emulated with a mutex and two condition variables,
   for platforms without pthread_rwlock_t.  Invariant: runcount == 0 when
   free, > 0 with that many active readers, == -1 with one writer.  */
void
glthread_rwlock_init (gl_rwlock_t *lock)
{
  if (pthread_mutex_init (&lock->lock, NULL) != 0)
    abort ();
  if (pthread_cond_init (&lock->waiting_readers, NULL) != 0)
    abort ();
  if (pthread_cond_init (&lock->waiting_writers, NULL) != 0)
    abort ();
  lock->waiting_writers_count = 0;
  lock->runcount = 0;
}
152
/* Acquire *LOCK for reading, waiting while a writer is active or any
   writer is queued.  */
void
glthread_rwlock_rdlock (gl_rwlock_t *lock)
{
  if (pthread_mutex_lock (&lock->lock) != 0)
    abort ();
  /* Test whether only readers are currently running, and whether the runcount
     field will not overflow.  */
  /* POSIX says: "It is implementation-defined whether the calling thread
     acquires the lock when a writer does not hold the lock and there are
     writers blocked on the lock."  Let's say, no: give the writers a higher
     priority.  */
  while (!(lock->runcount + 1 > 0 && lock->waiting_writers_count == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_readers.  The while loop also absorbs spurious wakeups
         from pthread_cond_wait.  */
      if (pthread_cond_wait (&lock->waiting_readers, &lock->lock) != 0)
        abort ();
    }
  lock->runcount++;
  if (pthread_mutex_unlock (&lock->lock) != 0)
    abort ();
}
175
/* Acquire *LOCK for writing, waiting until no readers and no other
   writer hold it.  */
void
glthread_rwlock_wrlock (gl_rwlock_t *lock)
{
  if (pthread_mutex_lock (&lock->lock) != 0)
    abort ();
  /* Test whether no readers or writers are currently running.  */
  while (!(lock->runcount == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_writers.  The count is maintained around the wait so
         that readers can see a writer is queued and defer to it.  */
      lock->waiting_writers_count++;
      if (pthread_cond_wait (&lock->waiting_writers, &lock->lock) != 0)
        abort ();
      lock->waiting_writers_count--;
    }
  lock->runcount--; /* runcount becomes -1 */
  if (pthread_mutex_unlock (&lock->lock) != 0)
    abort ();
}
195
/* Release *LOCK, whether held for reading or writing (decided from the
   sign of runcount), and wake the appropriate waiters when the lock
   becomes free.  */
void
glthread_rwlock_unlock (gl_rwlock_t *lock)
{
  if (pthread_mutex_lock (&lock->lock) != 0)
    abort ();
  if (lock->runcount < 0)
    {
      /* Drop a writer lock.  */
      if (!(lock->runcount == -1))
        abort ();
      lock->runcount = 0;
    }
  else
    {
      /* Drop a reader lock.  */
      if (!(lock->runcount > 0))
        abort ();
      lock->runcount--;
    }
  if (lock->runcount == 0)
    {
      /* POSIX recommends that "write locks shall take precedence over read
         locks", to avoid "writer starvation".  */
      if (lock->waiting_writers_count > 0)
        {
          /* Wake up one of the waiting writers.  */
          if (pthread_cond_signal (&lock->waiting_writers) != 0)
            abort ();
        }
      else
        {
          /* Wake up all waiting readers.  */
          if (pthread_cond_broadcast (&lock->waiting_readers) != 0)
            abort ();
        }
    }
  if (pthread_mutex_unlock (&lock->lock) != 0)
    abort ();
}
235
/* Destroy *LOCK.  Behavior is undefined (per POSIX) if threads are still
   blocked on the mutex or the condition variables.  */
void
glthread_rwlock_destroy (gl_rwlock_t *lock)
{
  if (pthread_mutex_destroy (&lock->lock) != 0)
    abort ();
  if (pthread_cond_destroy (&lock->waiting_readers) != 0)
    abort ();
  if (pthread_cond_destroy (&lock->waiting_writers) != 0)
    abort ();
}
246
247 # endif
248
249 /* --------------------- gl_recursive_lock_t datatype --------------------- */
250
251 # if HAVE_PTHREAD_MUTEX_RECURSIVE
252
253 # if !(defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER || defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP)
254
/* Initialize *LOCK as a natively recursive mutex.  Compiled only when no
   static initializer for recursive mutexes exists, so this also backs
   the lazy initialization in glthread_recursive_lock_lock.  */
void
glthread_recursive_lock_init (gl_recursive_lock_t *lock)
{
  pthread_mutexattr_t attributes;

  if (pthread_mutexattr_init (&attributes) != 0)
    abort ();
  /* PTHREAD_MUTEX_RECURSIVE lets the owning thread re-lock without
     deadlocking.  */
  if (pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE) != 0)
    abort ();
  if (pthread_mutex_init (&lock->recmutex, &attributes) != 0)
    abort ();
  /* The attribute object may be destroyed once the mutex is created.  */
  if (pthread_mutexattr_destroy (&attributes) != 0)
    abort ();
  lock->initialized = 1;
}
270
/* Acquire *LOCK (recursively), initializing it first if it was statically
   allocated and never used.  */
void
glthread_recursive_lock_lock (gl_recursive_lock_t *lock)
{
  /* Lazy initialization through the `guard' mutex; same double-checked
     pattern as the rwlock functions above.  */
  if (!lock->initialized)
    {
      if (pthread_mutex_lock (&lock->guard) != 0)
        abort ();
      if (!lock->initialized)
        glthread_recursive_lock_init (lock);
      if (pthread_mutex_unlock (&lock->guard) != 0)
        abort ();
    }
  if (pthread_mutex_lock (&lock->recmutex) != 0)
    abort ();
}
286
/* Release one level of *LOCK.  Aborts if the lock was never
   initialized.  */
void
glthread_recursive_lock_unlock (gl_recursive_lock_t *lock)
{
  if (!lock->initialized)
    abort ();
  if (pthread_mutex_unlock (&lock->recmutex) != 0)
    abort ();
}
295
/* Destroy *LOCK.  Aborts if the lock was never initialized.  */
void
glthread_recursive_lock_destroy (gl_recursive_lock_t *lock)
{
  if (!lock->initialized)
    abort ();
  if (pthread_mutex_destroy (&lock->recmutex) != 0)
    abort ();
  /* Allow the storage to be lazily re-initialized later.  */
  lock->initialized = 0;
}
305
306 # endif
307
308 # else
309
/* Recursive lock emulated with a plain mutex plus explicit owner/depth
   bookkeeping, for platforms without PTHREAD_MUTEX_RECURSIVE.  */
void
glthread_recursive_lock_init (gl_recursive_lock_t *lock)
{
  if (pthread_mutex_init (&lock->mutex, NULL) != 0)
    abort ();
  lock->owner = (pthread_t) 0;  /* no owner */
  lock->depth = 0;
}
318
/* Acquire *LOCK, recursively if the calling thread already owns it.  */
void
glthread_recursive_lock_lock (gl_recursive_lock_t *lock)
{
  pthread_t self = pthread_self ();
  /* NOTE(review): lock->owner is read without holding the mutex, and
     pthread_t values are compared with != rather than pthread_equal --
     both are longstanding assumptions of this file (owner can only
     equal `self' if this thread itself stored it); confirm they hold on
     the target platform.  */
  if (lock->owner != self)
    {
      if (pthread_mutex_lock (&lock->mutex) != 0)
        abort ();
      /* We now hold the mutex; record ourselves as owner.  */
      lock->owner = self;
    }
  if (++(lock->depth) == 0) /* wraparound? */
    abort ();
}
332
/* Release one level of *LOCK.  Aborts on unlock by a non-owner or
   without a matching lock.  */
void
glthread_recursive_lock_unlock (gl_recursive_lock_t *lock)
{
  if (lock->owner != pthread_self ())
    abort ();
  if (lock->depth == 0)
    abort ();
  if (--(lock->depth) == 0)
    {
      /* Outermost unlock: clear the owner before releasing the mutex so
         the next locker never sees a stale owner.  */
      lock->owner = (pthread_t) 0;
      if (pthread_mutex_unlock (&lock->mutex) != 0)
        abort ();
    }
}
347
/* Destroy *LOCK.  Aborts if some thread still owns it.  */
void
glthread_recursive_lock_destroy (gl_recursive_lock_t *lock)
{
  if (lock->owner != (pthread_t) 0)
    abort ();
  if (pthread_mutex_destroy (&lock->mutex) != 0)
    abort ();
}
356
357 # endif
358
359 /* -------------------------- gl_once_t datatype -------------------------- */
360
/* A pristine pthread_once_t: the byte pattern that marks an unused
   once_control.  */
static const pthread_once_t fresh_once = PTHREAD_ONCE_INIT;

/* Single-threaded once: returns 1 on the first call for a given
   once_control (marking it used), 0 afterwards.  */
int
glthread_once_singlethreaded (pthread_once_t *once_control)
{
  /* pthread_once_t's representation is opaque (it could be an integer, a
     pointer, or a struct), so only the first byte is compared against a
     freshly initialized object.  */
  const char fresh_byte = *(const char *) &fresh_once;
  char *marker = (char *) once_control;

  if (*marker != fresh_byte)
    return 0;
  /* First use: flip the first byte so later calls return 0.  */
  *marker = ~fresh_byte;
  return 1;
}
378
379 #endif
380
381 /* ========================================================================= */
382
383 #if USE_PTH_THREADS
384
385 /* Use the GNU Pth threads library. */
386
387 /* -------------------------- gl_lock_t datatype -------------------------- */
388
389 /* ------------------------- gl_rwlock_t datatype ------------------------- */
390
391 /* --------------------- gl_recursive_lock_t datatype --------------------- */
392
393 /* -------------------------- gl_once_t datatype -------------------------- */
394
/* Trampoline for pth_once: ARG points at the init-function pointer
   stashed by the gl_once macro; fetch it and invoke it.  */
void
glthread_once_call (void *arg)
{
  void (**fpp) (void) = (void (**) (void)) arg;
  (*fpp) ();
}
402
403 int
404 glthread_once_singlethreaded (pth_once_t *once_control)
405 {
406 /* We know that pth_once_t is an integer type. */
407 if (*once_control == PTH_ONCE_INIT)
408 {
409 /* First time use of once_control. Invert the marker. */
410 *once_control = ~ PTH_ONCE_INIT;
411 return 1;
412 }
413 else
414 return 0;
415 }
416
417 #endif
418
419 /* ========================================================================= */
420
421 #if USE_SOLARIS_THREADS
422
423 /* Use the old Solaris threads library. */
424
425 /* -------------------------- gl_lock_t datatype -------------------------- */
426
427 /* ------------------------- gl_rwlock_t datatype ------------------------- */
428
429 /* --------------------- gl_recursive_lock_t datatype --------------------- */
430
/* Recursive lock built on a (non-recursive) Solaris threads mutex, with
   explicit owner/depth bookkeeping.  */
void
glthread_recursive_lock_init (gl_recursive_lock_t *lock)
{
  if (mutex_init (&lock->mutex, USYNC_THREAD, NULL) != 0)
    abort ();
  lock->owner = (thread_t) 0;  /* no owner */
  lock->depth = 0;
}
439
/* Acquire *LOCK, recursively if the calling thread already owns it.  */
void
glthread_recursive_lock_lock (gl_recursive_lock_t *lock)
{
  thread_t self = thr_self ();
  /* NOTE(review): lock->owner is read without holding the mutex; safe
     only because owner can equal `self' solely when this thread stored
     it -- the same assumption as the POSIX fallback above.  */
  if (lock->owner != self)
    {
      if (mutex_lock (&lock->mutex) != 0)
        abort ();
      lock->owner = self;
    }
  if (++(lock->depth) == 0) /* wraparound? */
    abort ();
}
453
/* Release one level of *LOCK.  Aborts on unlock by a non-owner or
   without a matching lock.  */
void
glthread_recursive_lock_unlock (gl_recursive_lock_t *lock)
{
  if (lock->owner != thr_self ())
    abort ();
  if (lock->depth == 0)
    abort ();
  if (--(lock->depth) == 0)
    {
      /* Outermost unlock: clear the owner before releasing the mutex.  */
      lock->owner = (thread_t) 0;
      if (mutex_unlock (&lock->mutex) != 0)
        abort ();
    }
}
468
/* Destroy *LOCK.  Aborts if some thread still owns it.  */
void
glthread_recursive_lock_destroy (gl_recursive_lock_t *lock)
{
  if (lock->owner != (thread_t) 0)
    abort ();
  if (mutex_destroy (&lock->mutex) != 0)
    abort ();
}
477
478 /* -------------------------- gl_once_t datatype -------------------------- */
479
/* Call INITFUNCTION exactly once per once_control across all threads.  */
void
glthread_once (gl_once_t *once_control, void (*initfunction) (void))
{
  /* Fast path: skip the mutex once initialization has started.
     NOTE(review): this unguarded read of `inited' is double-checked
     locking; also note that `inited' is set *before* initfunction runs,
     so a thread passing the fast path here may proceed while
     initfunction is still executing -- longstanding behavior of this
     file, confirm it is acceptable for all callers.  */
  if (!once_control->inited)
    {
      /* Use the mutex to guarantee that if another thread is already calling
         the initfunction, this thread waits until it's finished.  */
      if (mutex_lock (&once_control->mutex) != 0)
        abort ();
      if (!once_control->inited)
        {
          once_control->inited = 1;
          initfunction ();
        }
      if (mutex_unlock (&once_control->mutex) != 0)
        abort ();
    }
}
498
499 int
500 glthread_once_singlethreaded (gl_once_t *once_control)
501 {
502 /* We know that gl_once_t contains an integer type. */
503 if (!once_control->inited)
504 {
505 /* First time use of once_control. Invert the marker. */
506 once_control->inited = ~ 0;
507 return 1;
508 }
509 else
510 return 0;
511 }
512
513 #endif
514
515 /* ========================================================================= */
516
517 #if USE_WIN32_THREADS
518
519 /* -------------------------- gl_lock_t datatype -------------------------- */
520
/* Initialize *LOCK.  */
void
glthread_lock_init (gl_lock_t *lock)
{
  InitializeCriticalSection (&lock->lock);
  /* Publish `done' only after the critical section is usable: lazy
     initializers in glthread_lock_lock spin on this flag.  */
  lock->guard.done = 1;
}
527
/* Acquire *LOCK, lazily initializing it on first use.  */
void
glthread_lock_lock (gl_lock_t *lock)
{
  if (!lock->guard.done)
    {
      /* NOTE(review): relies on guard.started being statically
         initialized (in lock.h) such that exactly one thread sees
         InterlockedIncrement return 0 -- confirm against the header.  */
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_lock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  EnterCriticalSection (&lock->lock);
}
544
/* Release *LOCK.  Unlocking a never-initialized lock is a usage error
   and aborts.  */
void
glthread_lock_unlock (gl_lock_t *lock)
{
  if (!lock->guard.done)
    abort ();
  LeaveCriticalSection (&lock->lock);
}
552
/* Destroy *LOCK.  Aborts if the lock was never initialized.  */
void
glthread_lock_destroy (gl_lock_t *lock)
{
  if (!lock->guard.done)
    abort ();
  DeleteCriticalSection (&lock->lock);
  /* Reset the guard so the storage could be lazily initialized again.  */
  lock->guard.done = 0;
}
561
562 /* ------------------------- gl_rwlock_t datatype ------------------------- */
563
564 static inline void
565 gl_waitqueue_init (gl_waitqueue_t *wq)
566 {
567 wq->array = NULL;
568 wq->count = 0;
569 wq->alloc = 0;
570 wq->offset = 0;
571 }
572
573 /* Enqueues the current thread, represented by an event, in a wait queue.
574 Returns INVALID_HANDLE_VALUE if an allocation failure occurs. */
575 static HANDLE
576 gl_waitqueue_add (gl_waitqueue_t *wq)
577 {
578 HANDLE event;
579 unsigned int index;
580
581 if (wq->count == wq->alloc)
582 {
583 unsigned int new_alloc = 2 * wq->alloc + 1;
584 HANDLE *new_array =
585 (HANDLE *) realloc (wq->array, new_alloc * sizeof (HANDLE));
586 if (new_array == NULL)
587 /* No more memory. */
588 return INVALID_HANDLE_VALUE;
589 /* Now is a good opportunity to rotate the array so that its contents
590 starts at offset 0. */
591 if (wq->offset > 0)
592 {
593 unsigned int old_count = wq->count;
594 unsigned int old_alloc = wq->alloc;
595 unsigned int old_offset = wq->offset;
596 unsigned int i;
597 if (old_offset + old_count > old_alloc)
598 {
599 unsigned int limit = old_offset + old_count - old_alloc;
600 for (i = 0; i < limit; i++)
601 new_array[old_alloc + i] = new_array[i];
602 }
603 for (i = 0; i < old_count; i++)
604 new_array[i] = new_array[old_offset + i];
605 wq->offset = 0;
606 }
607 wq->array = new_array;
608 wq->alloc = new_alloc;
609 }
610 event = CreateEvent (NULL, TRUE, FALSE, NULL);
611 if (event == INVALID_HANDLE_VALUE)
612 /* No way to allocate an event. */
613 return INVALID_HANDLE_VALUE;
614 index = wq->offset + wq->count;
615 if (index >= wq->alloc)
616 index -= wq->alloc;
617 wq->array[index] = event;
618 wq->count++;
619 return event;
620 }
621
/* Notifies the first thread from a wait queue and dequeues it.  */
static inline void
gl_waitqueue_notify_first (gl_waitqueue_t *wq)
{
  SetEvent (wq->array[wq->offset + 0]);
  wq->offset++;
  wq->count--;
  /* Reset offset when the queue empties, or when it has just walked off
     the end of the buffer (entries past that point were stored wrapped
     at index 0).  */
  if (wq->count == 0 || wq->offset == wq->alloc)
    wq->offset = 0;
}
632
633 /* Notifies all threads from a wait queue and dequeues them all. */
634 static inline void
635 gl_waitqueue_notify_all (gl_waitqueue_t *wq)
636 {
637 unsigned int i;
638
639 for (i = 0; i < wq->count; i++)
640 {
641 unsigned int index = wq->offset + i;
642 if (index >= wq->alloc)
643 index -= wq->alloc;
644 SetEvent (wq->array[index]);
645 }
646 wq->count = 0;
647 wq->offset = 0;
648 }
649
/* Initialize *LOCK.  runcount: 0 when free, > 0 with that many readers,
   -1 with one writer.  */
void
glthread_rwlock_init (gl_rwlock_t *lock)
{
  InitializeCriticalSection (&lock->lock);
  gl_waitqueue_init (&lock->waiting_readers);
  gl_waitqueue_init (&lock->waiting_writers);
  lock->runcount = 0;
  /* Publish `done' last; lazy initializers spin on it.  */
  lock->guard.done = 1;
}
659
/* Acquire *LOCK for reading, lazily initializing it on first use.  If a
   writer holds the lock, enqueue an event and block on it; the releasing
   thread performs the hand-off bookkeeping before signaling.  */
void
glthread_rwlock_rdlock (gl_rwlock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_rwlock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  EnterCriticalSection (&lock->lock);
  /* Test whether only readers are currently running, and whether the runcount
     field will not overflow.  */
  if (!(lock->runcount + 1 > 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_readers.  */
      HANDLE event = gl_waitqueue_add (&lock->waiting_readers);
      if (event != INVALID_HANDLE_VALUE)
        {
          DWORD result;
          LeaveCriticalSection (&lock->lock);
          /* Wait until another thread signals this event.  */
          result = WaitForSingleObject (event, INFINITE);
          if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
            abort ();
          CloseHandle (event);
          /* The thread which signalled the event already did the bookkeeping:
             removed us from the waiting_readers, incremented lock->runcount.  */
          if (!(lock->runcount > 0))
            abort ();
          return;
        }
      else
        {
          /* Allocation failure.  Weird.  Fall back to polling: retry the
             entry condition once per millisecond.  */
          do
            {
              LeaveCriticalSection (&lock->lock);
              Sleep (1);
              EnterCriticalSection (&lock->lock);
            }
          while (!(lock->runcount + 1 > 0));
        }
    }
  lock->runcount++;
  LeaveCriticalSection (&lock->lock);
}
712
/* Acquire *LOCK for writing, lazily initializing it on first use.  If
   readers or another writer hold the lock, enqueue an event and block on
   it; the releasing thread sets runcount = -1 on our behalf before
   signaling.  */
void
glthread_rwlock_wrlock (gl_rwlock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_rwlock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  EnterCriticalSection (&lock->lock);
  /* Test whether no readers or writers are currently running.  */
  if (!(lock->runcount == 0))
    {
      /* This thread has to wait for a while.  Enqueue it among the
         waiting_writers.  */
      HANDLE event = gl_waitqueue_add (&lock->waiting_writers);
      if (event != INVALID_HANDLE_VALUE)
        {
          DWORD result;
          LeaveCriticalSection (&lock->lock);
          /* Wait until another thread signals this event.  */
          result = WaitForSingleObject (event, INFINITE);
          if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
            abort ();
          CloseHandle (event);
          /* The thread which signalled the event already did the bookkeeping:
             removed us from the waiting_writers, set lock->runcount = -1.  */
          if (!(lock->runcount == -1))
            abort ();
          return;
        }
      else
        {
          /* Allocation failure.  Weird.  Fall back to polling: retry the
             entry condition once per millisecond.  */
          do
            {
              LeaveCriticalSection (&lock->lock);
              Sleep (1);
              EnterCriticalSection (&lock->lock);
            }
          while (!(lock->runcount == 0));
        }
    }
  lock->runcount--; /* runcount becomes -1 */
  LeaveCriticalSection (&lock->lock);
}
764
/* Release *LOCK (reader or writer, decided from the sign of runcount).
   When the lock becomes free, this thread does the bookkeeping for the
   waiters it wakes (hand-off), so awakened threads find runcount already
   updated.  */
void
glthread_rwlock_unlock (gl_rwlock_t *lock)
{
  if (!lock->guard.done)
    abort ();
  EnterCriticalSection (&lock->lock);
  if (lock->runcount < 0)
    {
      /* Drop a writer lock.  */
      if (!(lock->runcount == -1))
        abort ();
      lock->runcount = 0;
    }
  else
    {
      /* Drop a reader lock.  */
      if (!(lock->runcount > 0))
        abort ();
      lock->runcount--;
    }
  if (lock->runcount == 0)
    {
      /* POSIX recommends that "write locks shall take precedence over read
         locks", to avoid "writer starvation".  */
      if (lock->waiting_writers.count > 0)
        {
          /* Wake up one of the waiting writers.  Account for it here
             (runcount becomes -1) before signaling.  */
          lock->runcount--;
          gl_waitqueue_notify_first (&lock->waiting_writers);
        }
      else
        {
          /* Wake up all waiting readers.  Account for all of them here
             before signaling.  */
          lock->runcount += lock->waiting_readers.count;
          gl_waitqueue_notify_all (&lock->waiting_readers);
        }
    }
  LeaveCriticalSection (&lock->lock);
}
804
805 void
806 glthread_rwlock_destroy (gl_rwlock_t *lock)
807 {
808 if (!lock->guard.done)
809 abort ();
810 if (lock->runcount != 0)
811 abort ();
812 DeleteCriticalSection (&lock->lock);
813 if (lock->waiting_readers.array != NULL)
814 free (lock->waiting_readers.array);
815 if (lock->waiting_writers.array != NULL)
816 free (lock->waiting_writers.array);
817 lock->guard.done = 0;
818 }
819
820 /* --------------------- gl_recursive_lock_t datatype --------------------- */
821
/* Recursive lock built on a Win32 critical section, with explicit
   owner/depth bookkeeping.  */
void
glthread_recursive_lock_init (gl_recursive_lock_t *lock)
{
  lock->owner = 0;  /* no owner */
  lock->depth = 0;
  InitializeCriticalSection (&lock->lock);
  /* Publish `done' last; lazy initializers spin on it.  */
  lock->guard.done = 1;
}
830
/* Acquire *LOCK, recursively if the calling thread already owns it;
   lazily initializes the lock on first use.  */
void
glthread_recursive_lock_lock (gl_recursive_lock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        /* This thread is the first one to need this lock.  Initialize it.  */
        glthread_recursive_lock_init (lock);
      else
        /* Yield the CPU while waiting for another thread to finish
           initializing this lock.  */
        while (!lock->guard.done)
          Sleep (0);
    }
  {
    /* NOTE(review): lock->owner is read without holding the critical
       section; safe only because owner can equal `self' solely when this
       thread stored it -- same assumption as the POSIX fallback.  */
    DWORD self = GetCurrentThreadId ();
    if (lock->owner != self)
      {
        EnterCriticalSection (&lock->lock);
        lock->owner = self;
      }
    if (++(lock->depth) == 0) /* wraparound? */
      abort ();
  }
}
856
/* Release one level of *LOCK.  Aborts on unlock by a non-owner or
   without a matching lock.  */
void
glthread_recursive_lock_unlock (gl_recursive_lock_t *lock)
{
  if (lock->owner != GetCurrentThreadId ())
    abort ();
  if (lock->depth == 0)
    abort ();
  if (--(lock->depth) == 0)
    {
      /* Outermost unlock: clear the owner before leaving the critical
         section so the next locker never sees a stale owner.  */
      lock->owner = 0;
      LeaveCriticalSection (&lock->lock);
    }
}
870
/* Destroy *LOCK.  Aborts if some thread still owns it.  */
void
glthread_recursive_lock_destroy (gl_recursive_lock_t *lock)
{
  if (lock->owner != 0)
    abort ();
  DeleteCriticalSection (&lock->lock);
  /* Reset the guard so the storage could be lazily initialized again.  */
  lock->guard.done = 0;
}
879
880 /* -------------------------- gl_once_t datatype -------------------------- */
881
/* Call INITFUNCTION exactly once per once_control across all threads.
   Protocol: the first thread to increment `started' (NOTE(review):
   presumably statically initialized to -1 in lock.h, so it alone sees 0
   -- confirm against the header) creates the critical section, runs
   INITFUNCTION while holding it, and sets `inited' to 1.  Late arrivals
   spin until `inited' >= 0 (i.e. the critical section exists), then
   block on it until initialization is complete.  */
void
glthread_once (gl_once_t *once_control, void (*initfunction) (void))
{
  if (once_control->inited <= 0)
    {
      if (InterlockedIncrement (&once_control->started) == 0)
        {
          /* This thread is the first one to come to this once_control.  */
          InitializeCriticalSection (&once_control->lock);
          EnterCriticalSection (&once_control->lock);
          /* inited = 0 tells late arrivals the critical section now
             exists, while still marking initialization as unfinished.  */
          once_control->inited = 0;
          initfunction ();
          once_control->inited = 1;
          LeaveCriticalSection (&once_control->lock);
        }
      else
        {
          /* Undo last operation.  */
          InterlockedDecrement (&once_control->started);
          /* Some other thread has already started the initialization.
             Yield the CPU while waiting for the other thread to finish
             initializing and taking the lock.  */
          while (once_control->inited < 0)
            Sleep (0);
          if (once_control->inited <= 0)
            {
              /* Take the lock.  This blocks until the other thread has
                 finished calling the initfunction.  */
              EnterCriticalSection (&once_control->lock);
              LeaveCriticalSection (&once_control->lock);
              if (!(once_control->inited > 0))
                abort ();
            }
        }
    }
}
918
919 #endif
920
921 /* ========================================================================= */
922