/* Threads.c - revision 249c3046 */
/************************************************************
Copyright (c) 1993, Oracle and/or its affiliates. All rights reserved.

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice (including the next
paragraph) shall be included in all copies or substantial portions of the
Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

********************************************************/

/*

Copyright 1994, 1998  The Open Group

Permission to use, copy, modify, distribute, and sell this software and its
documentation for any purpose is hereby granted without fee, provided that
the above copyright notice appear in all copies and that both that
copyright notice and this permission notice appear in supporting
documentation.

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

Except as contained in this notice, the name of The Open Group shall not be
used in advertising or otherwise to promote the sale, use or other dealings
in this Software without prior written authorization from The Open Group.

*/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "IntrinsicI.h"

#ifdef XTHREADS

#define xmalloc __XtMalloc
#define xfree XtFree
#include <X11/Xthreads.h>

#ifndef NDEBUG
#define NDEBUG
#endif
#include <assert.h>
#include <stdio.h>

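/*
 * ThreadStack records the threads that have temporarily yielded an
 * application-context lock (see YieldAppLock below), each with its own
 * condition variable so they can be woken again in LIFO order.
 * LockRec is the recursive lock itself: when the underlying xmutex_t
 * does not nest (_XMUTEX_NESTS undefined), the holder id, nesting level
 * and condition variable are used to build the recursion by hand.
 */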
typedef struct _ThreadStack {
    unsigned int size;
    int sp;
    struct _Tstack {
        xthread_t t;
        xcondition_t c;
    } *st;
} ThreadStack;

typedef struct _LockRec {
    xmutex_t mutex;
    int level;
    ThreadStack stack;
#ifndef _XMUTEX_NESTS
    xthread_t holder;
    xcondition_t cond;
#endif
} LockRec;


#define STACK_INCR 16

static LockPtr process_lock = NULL;

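/*
 * Lazily allocate and initialize the single global lock used by
 * XtProcessLock/XtProcessUnlock.  Called from XtToolkitThreadInitialize
 * before the locking hooks are installed.
 */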
static void
InitProcessLock(void)
{
    if(!process_lock) {
        process_lock = XtNew(LockRec);
        process_lock->mutex = xmutex_malloc();
        xmutex_init(process_lock->mutex);
        process_lock->level = 0;
#ifndef _XMUTEX_NESTS
        process_lock->cond = xcondition_malloc();
        xcondition_init(process_lock->cond);
        xthread_clear_id(process_lock->holder);
#endif
    }
}

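/*
 * Acquire the global process lock, recursively.  With nesting mutexes
 * this is a plain lock plus a level count; otherwise the holder id and
 * condition variable supply the recursion: the first caller records
 * itself as holder, re-entry by the holder only bumps the level, and
 * any other thread waits on the condition until the holder clears.
 */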
static void
ProcessLock(void)
{
#ifdef _XMUTEX_NESTS
    xmutex_lock(process_lock->mutex);
    process_lock->level++;
#else
    xthread_t this_thread = xthread_self();

    xmutex_lock(process_lock->mutex);

    if (!xthread_have_id(process_lock->holder)) {
        process_lock->holder = this_thread;
        xmutex_unlock(process_lock->mutex);
        return;
    }

    if (xthread_equal(process_lock->holder, this_thread)) {
        process_lock->level++;
        xmutex_unlock(process_lock->mutex);
        return;
    }

    while(xthread_have_id(process_lock->holder))
        xcondition_wait(process_lock->cond, process_lock->mutex);

    process_lock->holder = this_thread;
    assert(xthread_equal(process_lock->holder, this_thread));
    xmutex_unlock(process_lock->mutex);
#endif
}

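/*
 * Release one level of the process lock.  In the hand-rolled recursive
 * case the level only reaches zero on the outermost release, at which
 * point the holder id is cleared and one waiter is signalled.
 */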
static void
ProcessUnlock(void)
{
#ifdef _XMUTEX_NESTS
    process_lock->level--;
    xmutex_unlock(process_lock->mutex);
#else
    xmutex_lock(process_lock->mutex);
    assert(xthread_equal(process_lock->holder, xthread_self()));
    if (process_lock->level != 0) {
        process_lock->level--;
        xmutex_unlock(process_lock->mutex);
        return;
    }

    xthread_clear_id(process_lock->holder);
    xcondition_signal(process_lock->cond);

    xmutex_unlock(process_lock->mutex);
#endif
}


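/*
 * Acquire the lock protecting one application context.  The logic
 * mirrors ProcessLock: recursion comes from the native mutex when it
 * nests, otherwise from the holder/level/condition fields of LockRec.
 */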
static void
AppLock(XtAppContext app)
{
    LockPtr app_lock = app->lock_info;
#ifdef _XMUTEX_NESTS
    xmutex_lock(app_lock->mutex);
    app_lock->level++;
#else
    xthread_t self = xthread_self();
    xmutex_lock(app_lock->mutex);
    if (!xthread_have_id(app_lock->holder)) {
        app_lock->holder = self;
        assert(xthread_equal(app_lock->holder, self));
        xmutex_unlock(app_lock->mutex);
        return;
    }
    if (xthread_equal(app_lock->holder, self)) {
        app_lock->level++;
        xmutex_unlock(app_lock->mutex);
        return;
    }
    while(xthread_have_id(app_lock->holder)) {
        xcondition_wait(app_lock->cond, app_lock->mutex);
    }
    app_lock->holder = self;
    assert(xthread_equal(app_lock->holder, self));
    xmutex_unlock(app_lock->mutex);
#endif
}

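/*
 * Release one level of the application-context lock; the outermost
 * release clears the holder and wakes one thread blocked in AppLock.
 */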
static void
AppUnlock(XtAppContext app)
{
    LockPtr app_lock = app->lock_info;
#ifdef _XMUTEX_NESTS
    app_lock->level--;
    xmutex_unlock(app_lock->mutex);
#else
    xthread_t self;

    self = xthread_self();
    xmutex_lock(app_lock->mutex);
    assert(xthread_equal(app_lock->holder, self));
    if (app_lock->level != 0) {
        app_lock->level--;
        xmutex_unlock(app_lock->mutex);
        return;
    }
    xthread_clear_id(app_lock->holder);
    xcondition_signal(app_lock->cond);
    xmutex_unlock(app_lock->mutex);
#endif
}

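/*
 * Temporarily give up the application-context lock so other threads can
 * use the application context while this one blocks (for example while
 * waiting for events).  The current nesting depth is saved in *level so
 * RestoreAppLock can rebuild it, and, if *push_thread is set, the thread
 * is pushed onto the wait stack so yielded threads reacquire the lock in
 * LIFO order.
 */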
static void
YieldAppLock(
    XtAppContext app,
    Boolean* push_thread,
    Boolean* pushed_thread,
    int* level)
{
    LockPtr app_lock = app->lock_info;
    xthread_t self = xthread_self();
#ifndef _XMUTEX_NESTS
    xmutex_lock(app_lock->mutex);
    assert(xthread_equal(app_lock->holder, self));
#endif
    *level = app_lock->level;
    if (*push_thread) {
        *push_thread = FALSE;
        *pushed_thread = TRUE;

        if(app_lock->stack.sp == (int)app_lock->stack.size - 1) {
            unsigned ii;
            app_lock->stack.st = (struct _Tstack *)
                XtRealloc ((char *)app_lock->stack.st,
                (app_lock->stack.size + STACK_INCR) * sizeof (struct _Tstack));
            ii = app_lock->stack.size;
            app_lock->stack.size += STACK_INCR;
            for ( ; ii < app_lock->stack.size; ii++) {
                app_lock->stack.st[ii].c = xcondition_malloc();
                xcondition_init(app_lock->stack.st[ii].c);
            }
        }
        app_lock->stack.st[++(app_lock->stack.sp)].t = self;
    }
#ifdef _XMUTEX_NESTS
    while (app_lock->level > 0) {
        app_lock->level--;
        xmutex_unlock(app_lock->mutex);
    }
#else
    xcondition_signal(app_lock->cond);
    app_lock->level = 0;
    xthread_clear_id(app_lock->holder);
    xmutex_unlock(app_lock->mutex);
#endif
}

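/*
 * Reacquire the application-context lock after YieldAppLock and restore
 * the saved nesting depth.  A thread that is not on top of the wait
 * stack sleeps on its own stack slot's condition until the threads above
 * it have popped; on the way out it pops itself and signals the thread
 * now on top of the stack.
 */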
static void
RestoreAppLock(
    XtAppContext app,
    int level,
    Boolean* pushed_thread)
{
    LockPtr app_lock = app->lock_info;
    xthread_t self = xthread_self();
    xmutex_lock(app_lock->mutex);
#ifdef _XMUTEX_NESTS
    app_lock->level++;
#else
    while(xthread_have_id(app_lock->holder)) {
        xcondition_wait(app_lock->cond, app_lock->mutex);
    }
#endif
    if (!xthread_equal(app_lock->stack.st[app_lock->stack.sp].t, self)) {
        int ii;
        for (ii = app_lock->stack.sp - 1; ii >= 0; ii--) {
            if (xthread_equal(app_lock->stack.st[ii].t, self)) {
                xcondition_wait(app_lock->stack.st[ii].c, app_lock->mutex);
                break;
            }
        }
#ifndef _XMUTEX_NESTS
        while(xthread_have_id(app_lock->holder)) {
            xcondition_wait(app_lock->cond, app_lock->mutex);
        }
#endif
    }
#ifdef _XMUTEX_NESTS
    while (app_lock->level < level) {
        xmutex_lock(app_lock->mutex);
        app_lock->level++;
    }
#else
    app_lock->holder = self;
    app_lock->level = level;
    assert(xthread_equal(app_lock->holder, self));
#endif
    if (*pushed_thread) {
        *pushed_thread = FALSE;
        (app_lock->stack.sp)--;
        if (app_lock->stack.sp >= 0) {
            xcondition_signal (app_lock->stack.st[app_lock->stack.sp].c);
        }
    }
#ifndef _XMUTEX_NESTS
    xmutex_unlock(app_lock->mutex);
#endif
}

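/*
 * Destroy the per-application lock and its wait-stack conditions when
 * the application context is destroyed.
 */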
static void
FreeAppLock(XtAppContext app)
{
    unsigned ii;
    LockPtr app_lock = app->lock_info;

    if(app_lock) {
        xmutex_clear(app_lock->mutex);
        xmutex_free(app_lock->mutex);
#ifndef _XMUTEX_NESTS
        xcondition_clear(app_lock->cond);
        xcondition_free(app_lock->cond);
#endif
        if(app_lock->stack.st != (struct _Tstack *)NULL) {
            for (ii = 0; ii < app_lock->stack.size; ii++) {
                xcondition_clear(app_lock->stack.st[ii].c);
                xcondition_free(app_lock->stack.st[ii].c);
            }
            XtFree((char *)app_lock->stack.st);
        }
        XtFree((char *)app_lock);
        app->lock_info = NULL;
    }
}

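/*
 * Install the locking methods into a newly created application context
 * and set up its LockRec and wait stack.  Installed as _XtInitAppLock
 * by XtToolkitThreadInitialize.
 */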
static void
InitAppLock(XtAppContext app)
{
    int ii;
    LockPtr app_lock;

    app->lock = AppLock;
    app->unlock = AppUnlock;
    app->yield_lock = YieldAppLock;
    app->restore_lock = RestoreAppLock;
    app->free_lock = FreeAppLock;

    app_lock = app->lock_info = XtNew(LockRec);
    app_lock->mutex = xmutex_malloc();
    xmutex_init(app_lock->mutex);
    app_lock->level = 0;
#ifndef _XMUTEX_NESTS
    app_lock->cond = xcondition_malloc();
    xcondition_init(app_lock->cond);
    xthread_clear_id(app_lock->holder);
#endif
    app_lock->stack.size = STACK_INCR;
    app_lock->stack.sp = -1;
    app_lock->stack.st =
        (struct _Tstack *)__XtMalloc(sizeof(struct _Tstack)*STACK_INCR);
    for (ii = 0; ii < STACK_INCR; ii++) {
        app_lock->stack.st[ii].c = xcondition_malloc();
        xcondition_init(app_lock->stack.st[ii].c);
    }
}

#endif /* defined(XTHREADS) */

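/*
 * Public entry points.  Each is a no-op until XtToolkitThreadInitialize
 * has installed the corresponding hook (and always when XTHREADS is not
 * compiled in).
 */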
void XtAppLock(XtAppContext app)
{
#ifdef XTHREADS
    if(app->lock)
        (*app->lock)(app);
#endif
}

void XtAppUnlock(XtAppContext app)
{
#ifdef XTHREADS
    if(app->unlock)
        (*app->unlock)(app);
#endif
}

void XtProcessLock(void)
{
#ifdef XTHREADS
    if(_XtProcessLock)
        (*_XtProcessLock)();
#endif
}

void XtProcessUnlock(void)
{
#ifdef XTHREADS
    if(_XtProcessUnlock)
        (*_XtProcessUnlock)();
#endif
}

Boolean XtToolkitThreadInitialize(void)
{
#ifdef XTHREADS
    if (_XtProcessLock == NULL) {
#ifdef xthread_init
        xthread_init();
#endif
        InitProcessLock();
        _XtProcessLock = ProcessLock;
        _XtProcessUnlock = ProcessUnlock;
        _XtInitAppLock = InitAppLock;
    }
    return True;
#else
    return False;
#endif
}
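/*
 * Illustrative sketch only (not part of this file): a multi-threaded
 * client is expected to call XtToolkitThreadInitialize before creating
 * any application context, and to bracket Intrinsics calls made from
 * secondary threads with XtAppLock/XtAppUnlock.  The names "app" and
 * "label" below are hypothetical.
 *
 *	if (!XtToolkitThreadInitialize())
 *	    fprintf(stderr, "this libXt build has no thread support\n");
 *	...
 *	XtAppLock(app);
 *	XtVaSetValues(label, XtNlabel, "done", NULL);
 *	XtAppUnlock(app);
 */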