/* Threads.c revision 0568f49b */
1/************************************************************
2Copyright (c) 1993, Oracle and/or its affiliates. All rights reserved.
3
4Permission is hereby granted, free of charge, to any person obtaining a
5copy of this software and associated documentation files (the "Software"),
6to deal in the Software without restriction, including without limitation
7the rights to use, copy, modify, merge, publish, distribute, sublicense,
8and/or sell copies of the Software, and to permit persons to whom the
9Software is furnished to do so, subject to the following conditions:
10
11The above copyright notice and this permission notice (including the next
12paragraph) shall be included in all copies or substantial portions of the
13Software.
14
15THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21DEALINGS IN THE SOFTWARE.
22
23********************************************************/
24
25/*
26
27Copyright 1994, 1998  The Open Group
28
29Permission to use, copy, modify, distribute, and sell this software and its
30documentation for any purpose is hereby granted without fee, provided that
31the above copyright notice appear in all copies and that both that
32copyright notice and this permission notice appear in supporting
33documentation.
34
35The above copyright notice and this permission notice shall be included in
36all copies or substantial portions of the Software.
37
38THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
39IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
40FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
41OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
42AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
43CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
44
45Except as contained in this notice, the name of The Open Group shall not be
46used in advertising or otherwise to promote the sale, use or other dealings
47in this Software without prior written authorization from The Open Group.
48
49*/
50
51#ifdef HAVE_CONFIG_H
52#include <config.h>
53#endif
54#include "IntrinsicI.h"
55
56#ifdef XTHREADS
57
58#define xmalloc __XtMalloc
59#define xfree XtFree
60#include <X11/Xthreads.h>
61
62#ifndef NDEBUG
63#define NDEBUG
64#endif
65#include <assert.h>
66#include <stdio.h>
67
/*
 * Stack of threads that have yielded an application lock via
 * YieldAppLock.  Each entry pairs a thread id with a private condition
 * variable the thread sleeps on in RestoreAppLock until it is its turn
 * to resume (threads resume in LIFO order).
 */
typedef struct _ThreadStack {
    unsigned int size;	/* number of allocated entries in 'st' */
    int sp;		/* index of the top entry; -1 when empty */
    struct _Tstack {
	xthread_t t;	/* id of the yielded thread */
	xcondition_t c;	/* condition this thread waits on to resume */
    } *st;		/* dynamically grown array of entries */
} ThreadStack;
76
/*
 * Recursive lock record used for both the process-wide lock and each
 * application context's lock.  When the mutex implementation nests
 * natively (_XMUTEX_NESTS), the mutex itself tracks recursion and only
 * 'level' counts the depth; otherwise 'holder' and 'cond' are used to
 * emulate a recursive mutex on top of a plain one.
 */
typedef struct _LockRec {
    xmutex_t mutex;	/* underlying mutex */
    int level;		/* recursion depth beyond the first acquire */
    ThreadStack stack;	/* threads parked by YieldAppLock */
#ifndef _XMUTEX_NESTS
    xthread_t holder;	/* current owner; cleared when the lock is free */
    xcondition_t cond;	/* signalled when ownership is dropped */
#endif
} LockRec;
86
87
/* Number of entries the yield stack is grown by at a time. */
#define STACK_INCR 16

/* The single, lazily created process-wide lock (see InitProcessLock). */
static LockPtr process_lock = NULL;
91
92static void
93InitProcessLock(void)
94{
95    if(!process_lock) {
96    	process_lock = XtNew(LockRec);
97    	process_lock->mutex = xmutex_malloc();
98    	xmutex_init(process_lock->mutex);
99    	process_lock->level = 0;
100#ifndef _XMUTEX_NESTS
101    	process_lock->cond = xcondition_malloc();
102    	xcondition_init(process_lock->cond);
103    	xthread_clear_id(process_lock->holder);
104#endif
105    }
106}
107
/*
 * Acquire the process-wide lock, recursively.
 *
 * With _XMUTEX_NESTS the mutex itself supports nested locking, so we
 * simply lock it and count the depth.  Otherwise the plain mutex only
 * guards the LockRec bookkeeping, and holder/level/cond implement a
 * hand-rolled recursive mutex on top of it.
 */
static void
ProcessLock(void)
{
#ifdef _XMUTEX_NESTS
    xmutex_lock(process_lock->mutex);
    process_lock->level++;
#else
    xthread_t this_thread = xthread_self();

    xmutex_lock(process_lock->mutex);

    /* Uncontended: take ownership at recursion depth 0. */
    if (!xthread_have_id(process_lock->holder)) {
	process_lock->holder = this_thread;
	xmutex_unlock(process_lock->mutex);
	return;
    }

    /* Recursive acquire by the current owner: just bump the depth. */
    if (xthread_equal(process_lock->holder,this_thread)) {
	process_lock->level++;
	xmutex_unlock(process_lock->mutex);
	return;
    }

    /* Contended: wait until ProcessUnlock clears the owner's id. */
    while(xthread_have_id(process_lock->holder))
	xcondition_wait(process_lock->cond, process_lock->mutex);

    process_lock->holder = this_thread;
    assert(xthread_equal(process_lock->holder, this_thread));
    xmutex_unlock(process_lock->mutex);
#endif
}
139
/*
 * Release one level of the process-wide lock.  Only when the outermost
 * acquire is released (level == 0) is ownership dropped and one waiter
 * woken.  Must be called by the thread that currently holds the lock.
 * (The assert is compiled out: NDEBUG is forced on above.)
 */
static void
ProcessUnlock(void)
{
#ifdef _XMUTEX_NESTS
    process_lock->level--;
    xmutex_unlock(process_lock->mutex);
#else
    xmutex_lock(process_lock->mutex);
    assert(xthread_equal(process_lock->holder, xthread_self()));
    /* Unwinding a recursive acquire: just decrement the depth. */
    if (process_lock->level != 0) {
	process_lock->level--;
	xmutex_unlock(process_lock->mutex);
	return;
    }

    /* Outermost release: drop ownership and wake one waiting thread. */
    xthread_clear_id(process_lock->holder);
    xcondition_signal(process_lock->cond);

    xmutex_unlock(process_lock->mutex);
#endif
}
161
162
/*
 * Acquire an application context's lock, recursively.  Same scheme as
 * ProcessLock, but per-XtAppContext: with _XMUTEX_NESTS the mutex
 * nests natively; otherwise holder/level/cond emulate a recursive
 * mutex on top of a plain one.
 */
static void
AppLock(XtAppContext app)
{
    LockPtr app_lock = app->lock_info;
#ifdef _XMUTEX_NESTS
    xmutex_lock(app_lock->mutex);
    app_lock->level++;
#else
    xthread_t self = xthread_self();
    xmutex_lock(app_lock->mutex);
    /* Uncontended: take ownership at recursion depth 0. */
    if (!xthread_have_id(app_lock->holder)) {
	app_lock->holder = self;
    	assert(xthread_equal(app_lock->holder, self));
	xmutex_unlock(app_lock->mutex);
	return;
    }
    /* Recursive acquire by the current owner: just bump the depth. */
    if (xthread_equal(app_lock->holder, self)) {
	app_lock->level++;
	xmutex_unlock(app_lock->mutex);
	return;
    }
    /* Contended: wait until AppUnlock clears the owner's id. */
    while(xthread_have_id(app_lock->holder)) {
	xcondition_wait(app_lock->cond, app_lock->mutex);
    }
    app_lock->holder = self;
    assert(xthread_equal(app_lock->holder, self));
    xmutex_unlock(app_lock->mutex);
#endif
}
192
/*
 * Release one level of an application context's lock; on the outermost
 * release (level == 0), drop ownership and wake one thread blocked in
 * AppLock.  Must be called by the thread that holds the lock.
 */
static void
AppUnlock(XtAppContext app)
{
    LockPtr app_lock = app->lock_info;
#ifdef _XMUTEX_NESTS
    app_lock->level--;
    xmutex_unlock(app_lock->mutex);
#else
    xthread_t self;
    self = xthread_self();
    (void)self;	/* 'self' is only read by the assert, which NDEBUG removes */

    xmutex_lock(app_lock->mutex);
    assert(xthread_equal(app_lock->holder, self));
    /* Unwinding a recursive acquire: just decrement the depth. */
    if (app_lock->level != 0) {
	app_lock->level--;
	xmutex_unlock(app_lock->mutex);
	return;
    }
    /* Outermost release: drop ownership and wake one waiting thread. */
    xthread_clear_id(app_lock->holder);
    xcondition_signal(app_lock->cond);
    xmutex_unlock(app_lock->mutex);
#endif
}
217
/*
 * Temporarily give up the app lock, however deeply it is held, so the
 * caller can block elsewhere (e.g. waiting for input).
 *
 * The current recursion depth is saved in *level so the matching
 * RestoreAppLock can re-establish it.  If *push_thread, the calling
 * thread is pushed on the per-lock stack so yielded threads resume in
 * LIFO order; *push_thread is cleared and *pushed_thread set so the
 * matching RestoreAppLock knows to pop the entry afterwards.
 */
static void
YieldAppLock(
    XtAppContext app,
    Boolean* push_thread,
    Boolean* pushed_thread,
    int* level)
{
    LockPtr app_lock = app->lock_info;
    xthread_t self = xthread_self();
#ifndef _XMUTEX_NESTS
    xmutex_lock(app_lock->mutex);
    assert(xthread_equal(app_lock->holder, self));
#endif
    *level = app_lock->level;
    if (*push_thread) {
	*push_thread = FALSE;
	*pushed_thread = TRUE;

	/* Stack full: grow it and init the newly added condition vars. */
	if(app_lock->stack.sp == (int)app_lock->stack.size - 1) {
	    unsigned ii;
	    app_lock->stack.st = (struct _Tstack *)
		XtRealloc ((char *)app_lock->stack.st,
		(Cardinal)((app_lock->stack.size + STACK_INCR) * sizeof (struct _Tstack)));
	    ii = app_lock->stack.size;
	    app_lock->stack.size += STACK_INCR;
	    for ( ; ii < app_lock->stack.size; ii++) {
		app_lock->stack.st[ii].c = xcondition_malloc();
		xcondition_init(app_lock->stack.st[ii].c);
	    }
	}
	app_lock->stack.st[++(app_lock->stack.sp)].t = self;
    }
#ifdef _XMUTEX_NESTS
    /* Unwind every nested acquire of the natively recursive mutex. */
    while (app_lock->level > 0) {
	app_lock->level--;
	xmutex_unlock(app_lock->mutex);
    }
#else
    /* Drop ownership entirely and wake one thread blocked in AppLock. */
    xcondition_signal(app_lock->cond);
    app_lock->level = 0;
    xthread_clear_id(app_lock->holder);
    xmutex_unlock(app_lock->mutex);
#endif
}
262
/*
 * Re-acquire the app lock after a YieldAppLock, restoring the saved
 * recursion depth 'level'.
 *
 * If another yielded thread is above us on the stack, we sleep on our
 * own per-entry condition variable until the threads above have
 * resumed and popped themselves (LIFO order).  If *pushed_thread, our
 * entry is popped and the next stacked thread (if any) is signalled.
 */
static void
RestoreAppLock(
    XtAppContext app,
    int level,
    Boolean* pushed_thread)
{
    LockPtr app_lock = app->lock_info;
    xthread_t self = xthread_self();
    xmutex_lock(app_lock->mutex);
#ifdef _XMUTEX_NESTS
    app_lock->level++;
#else
    /* Wait for the lock to become free before touching the stack. */
    while(xthread_have_id(app_lock->holder)) {
	xcondition_wait(app_lock->cond, app_lock->mutex);
    }
#endif
    if (!xthread_equal(app_lock->stack.st[app_lock->stack.sp].t, self)) {
	/* Not our turn yet: sleep on our own stack entry's condition. */
	int ii;
	for (ii = app_lock->stack.sp - 1; ii >= 0; ii--) {
	    if (xthread_equal(app_lock->stack.st[ii].t, self)) {
		xcondition_wait(app_lock->stack.st[ii].c, app_lock->mutex);
		break;
	    }
	}
#ifndef _XMUTEX_NESTS
	/* The lock may have been re-taken while we slept; wait again. */
	while(xthread_have_id(app_lock->holder)) {
	    xcondition_wait(app_lock->cond, app_lock->mutex);
	}
#endif
    }
#ifdef _XMUTEX_NESTS
    /* Re-acquire the native mutex until the saved depth is restored. */
    while (app_lock->level < level) {
	xmutex_lock(app_lock->mutex);
	app_lock->level++;
    }
#else
    app_lock->holder = self;
    app_lock->level = level;
    assert(xthread_equal(app_lock->holder, self));
#endif
    if (*pushed_thread) {
	/* Pop our entry and wake the next yielded thread, if any. */
	*pushed_thread = FALSE;
	(app_lock->stack.sp)--;
	if (app_lock->stack.sp >= 0) {
	    xcondition_signal (app_lock->stack.st[app_lock->stack.sp].c);
	}
    }
#ifndef _XMUTEX_NESTS
    xmutex_unlock(app_lock->mutex);
#endif
}
314
315static void
316FreeAppLock(XtAppContext app)
317{
318    unsigned ii;
319    LockPtr app_lock = app->lock_info;
320
321    if(app_lock) {
322	xmutex_clear(app_lock->mutex);
323	xmutex_free(app_lock->mutex);
324#ifndef _XMUTEX_NESTS
325	xcondition_clear(app_lock->cond);
326	xcondition_free(app_lock->cond);
327#endif
328	if(app_lock->stack.st != (struct _Tstack *)NULL) {
329	    for (ii = 0; ii < app_lock->stack.size; ii++) {
330		xcondition_clear(app_lock->stack.st[ii].c);
331		xcondition_free(app_lock->stack.st[ii].c);
332	    }
333	    XtFree((char *)app_lock->stack.st);
334	}
335	XtFree((char *)app_lock);
336	app->lock_info = NULL;
337    }
338}
339
340static void
341InitAppLock(XtAppContext app)
342{
343    int ii;
344    LockPtr app_lock;
345
346    app->lock = AppLock;
347    app->unlock = AppUnlock;
348    app->yield_lock = YieldAppLock;
349    app->restore_lock = RestoreAppLock;
350    app->free_lock = FreeAppLock;
351
352    app_lock = app->lock_info = XtNew(LockRec);
353    app_lock->mutex = xmutex_malloc();
354    xmutex_init(app_lock->mutex);
355    app_lock->level = 0;
356#ifndef _XMUTEX_NESTS
357    app_lock->cond = xcondition_malloc();
358    xcondition_init(app_lock->cond);
359    xthread_clear_id(app_lock->holder);
360#endif
361    app_lock->stack.size = STACK_INCR;
362    app_lock->stack.sp = -1;
363    app_lock->stack.st =
364	(struct _Tstack *)__XtMalloc(sizeof(struct _Tstack)*STACK_INCR);
365    for (ii = 0; ii < STACK_INCR; ii++) {
366	app_lock->stack.st[ii].c = xcondition_malloc();
367	xcondition_init(app_lock->stack.st[ii].c);
368    }
369}
370
371#endif /* defined(XTHREADS) */
372
373void XtAppLock(XtAppContext app)
374{
375#ifdef XTHREADS
376    if(app->lock)
377	(*app->lock)(app);
378#endif
379}
380
381void XtAppUnlock(XtAppContext app)
382{
383#ifdef XTHREADS
384    if(app->unlock)
385	(*app->unlock)(app);
386#endif
387}
388
/*
 * Public entry: take the process-wide lock.  A no-op until
 * XtToolkitThreadInitialize installs the hook (or always, without
 * XTHREADS).
 */
void XtProcessLock(void)
{
#ifdef XTHREADS
    if (_XtProcessLock != NULL) {
	_XtProcessLock();
    }
#endif
}
396
/*
 * Public entry: release the process-wide lock.  A no-op until
 * XtToolkitThreadInitialize installs the hook (or always, without
 * XTHREADS).
 */
void XtProcessUnlock(void)
{
#ifdef XTHREADS
    if (_XtProcessUnlock != NULL) {
	_XtProcessUnlock();
    }
#endif
}
404
405Boolean XtToolkitThreadInitialize(void)
406{
407#ifdef XTHREADS
408    if (_XtProcessLock == NULL) {
409#ifdef xthread_init
410	xthread_init();
411#endif
412	InitProcessLock();
413	_XtProcessLock = ProcessLock;
414	_XtProcessUnlock = ProcessUnlock;
415	_XtInitAppLock = InitAppLock;
416    }
417    return True;
418#else
419    return False;
420#endif
421}
422
423