/*	$NetBSD: pthread_cond.c,v 1.78 2025/03/31 14:07:10 riastradh Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_cond.c,v 1.78 2025/03/31 14:07:10 riastradh Exp $");

/* Need to use libc-private names for atomic operations. */
#include "../../common/lib/libc/atomic/atomic_op_namespace.h"

#include <stdatomic.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/time.h>
#include <sys/types.h>

#include "pthread.h"
#include "pthread_int.h"
#include "reentrant.h"

#define	atomic_load_relaxed(p) \
	atomic_load_explicit(p, memory_order_relaxed)

int	_sys___nanosleep50(const struct timespec *, struct timespec *);

int	_pthread_cond_has_waiters_np(pthread_cond_t *);

__weak_alias(pthread_cond_has_waiters_np,_pthread_cond_has_waiters_np)

__strong_alias(__libc_cond_init,pthread_cond_init)
__strong_alias(__libc_cond_signal,pthread_cond_signal)
__strong_alias(__libc_cond_broadcast,pthread_cond_broadcast)
__strong_alias(__libc_cond_wait,pthread_cond_wait)
__strong_alias(__libc_cond_timedwait,pthread_cond_timedwait)
__strong_alias(__libc_cond_destroy,pthread_cond_destroy)

/*
 * A dummy waiter that's used to flag that pthread_cond_signal() is in
 * progress and nobody else should try to modify the waiter list until
 * it completes.
 */
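/*
 * pthread_cond_signal() installs this sentinel as the waiter list head
 * with atomic_cas_ptr() while it unlinks one waiter; any thread that
 * finds it there (a new waiter, or a concurrent broadcast) spins with
 * sched_yield() until the real head is published again.
 */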
static struct pthread__waiter pthread__cond_dummy;

static clockid_t
pthread_cond_getclock(const pthread_cond_t *cond)
{

	pthread__error(EINVAL, "Invalid condition variable",
	    cond->ptc_magic == _PT_COND_MAGIC);

	return cond->ptc_private ?
	    *(clockid_t *)cond->ptc_private : CLOCK_REALTIME;
}

int
pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
{
	if (__predict_false(__uselibcstub))
		return __libc_cond_init_stub(cond, attr);

	pthread__error(EINVAL, "Invalid condition variable attribute",
	    (attr == NULL) || (attr->ptca_magic == _PT_CONDATTR_MAGIC));

	cond->ptc_magic = _PT_COND_MAGIC;
	cond->ptc_waiters = NULL;
	cond->ptc_mutex = NULL;
	if (attr && attr->ptca_private) {
		cond->ptc_private = malloc(sizeof(clockid_t));
		if (cond->ptc_private == NULL)
			return errno;
		*(clockid_t *)cond->ptc_private =
		    *(clockid_t *)attr->ptca_private;
	} else
		cond->ptc_private = NULL;

	return 0;
}
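
/*
 * Note: condition variables created with PTHREAD_COND_INITIALIZER do not
 * pass through pthread_cond_init(), so their ptc_private is expected to
 * be NULL and pthread_cond_getclock() falls back to CLOCK_REALTIME for
 * them.
 */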


int
pthread_cond_destroy(pthread_cond_t *cond)
{
	if (__predict_false(__uselibcstub))
		return __libc_cond_destroy_stub(cond);

	pthread__error(EINVAL, "Invalid condition variable",
	    cond->ptc_magic == _PT_COND_MAGIC);
	pthread__error(EBUSY, "Destroying condition variable in use",
	    cond->ptc_waiters == NULL);

	cond->ptc_magic = _PT_COND_DEAD;
	free(cond->ptc_private);

	return 0;
}

int
pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
    const struct timespec *abstime)
{
	struct pthread__waiter waiter, *next, *head;
	pthread_t self;
	int error, cancel;
	clockid_t clkid = pthread_cond_getclock(cond);

	if (__predict_false(__uselibcstub))
		return __libc_cond_timedwait_stub(cond, mutex, abstime);

	pthread__error(EINVAL, "Invalid condition variable",
	    cond->ptc_magic == _PT_COND_MAGIC);
	pthread__error(EINVAL, "Invalid mutex",
	    mutex->ptm_magic == _PT_MUTEX_MAGIC);
	pthread__error(EPERM, "Mutex not locked in condition wait",
	    mutex->ptm_owner != NULL);

	self = pthread__self();
	pthread__assert(self->pt_lid != 0);

	if (__predict_false(atomic_load_relaxed(&self->pt_cancel) &
	    PT_CANCEL_CANCELLED)) {
		membar_acquire();
		pthread__cancelled();
	}

	/* Note this thread as waiting on the CV. */
	cond->ptc_mutex = mutex;
	for (head = cond->ptc_waiters;; head = next) {
		/* Wait while pthread_cond_signal() in progress. */
		if (__predict_false(head == &pthread__cond_dummy)) {
			sched_yield();
			next = cond->ptc_waiters;
			continue;
		}
		waiter.lid = self->pt_lid;
		waiter.next = head;
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_producer();
#endif
		next = atomic_cas_ptr(&cond->ptc_waiters, head, &waiter);
		if (__predict_true(next == head)) {
			break;
		}
	}

	/* Drop the interlock and wait. */
	error = 0;
	cancel = 0;	/* loop below may not run if already woken */
	pthread_mutex_unlock(mutex);
	while (waiter.lid &&
	    !(cancel = atomic_load_relaxed(&self->pt_cancel) &
	        PT_CANCEL_CANCELLED)) {
		int rv = _lwp_park(clkid, TIMER_ABSTIME, __UNCONST(abstime),
		    0, NULL, NULL);
		if (rv == 0) {
			continue;
		}
		if (errno != EINTR && errno != EALREADY) {
			error = errno;
			break;
		}
	}
	pthread_mutex_lock(mutex);

	/*
	 * If this thread absorbed a wakeup from pthread_cond_signal() and
	 * cannot take the wakeup, we should ensure that another thread does.
	 *
	 * And if awoken early, we may still be on the waiter list and must
	 * remove self.
	 */
	if (__predict_false(cancel | error)) {
		pthread_cond_broadcast(cond);

		/*
		 * Might have raced with another thread to do the wakeup.
		 * Wait until released, otherwise "waiter" is still globally
		 * visible.
		 */
		pthread_mutex_unlock(mutex);
		while (__predict_false(waiter.lid)) {
			(void)_lwp_park(CLOCK_MONOTONIC, 0, NULL, 0, NULL,
			    NULL);
		}
		pthread_mutex_lock(mutex);
	} else {
		pthread__assert(!waiter.lid);
	}

	/*
	 * If cancelled then exit.  POSIX dictates that the mutex must be
	 * held if this happens.
	 */
	if (cancel) {
		membar_acquire();
		pthread__cancelled();
	}

	return error;
}
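
/*
 * Illustrative use (hypothetical names, not part of this file): the
 * deadline is absolute, measured against the condvar's clock, and the
 * wait belongs in a predicate loop because a thread may be woken early
 * or have its wakeup stolen.
 *
 *	struct timespec deadline;
 *	clock_gettime(CLOCK_REALTIME, &deadline);
 *	deadline.tv_sec += 5;
 *	pthread_mutex_lock(&mtx);
 *	while (!ready) {
 *		int e = pthread_cond_timedwait(&cv, &mtx, &deadline);
 *		if (e == ETIMEDOUT)
 *			break;
 *	}
 *	pthread_mutex_unlock(&mtx);
 */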

int
pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
	if (__predict_false(__uselibcstub))
		return __libc_cond_wait_stub(cond, mutex);

	return pthread_cond_timedwait(cond, mutex, NULL);
}

int
pthread_cond_signal(pthread_cond_t *cond)
{
	struct pthread__waiter *head, *next;
	pthread_mutex_t *mutex;
	pthread_t self;

	if (__predict_false(__uselibcstub))
		return __libc_cond_signal_stub(cond);

	pthread__error(EINVAL, "Invalid condition variable",
	    cond->ptc_magic == _PT_COND_MAGIC);

	/* Take ownership of one waiter. */
	self = pthread_self();
	mutex = cond->ptc_mutex;
	for (head = cond->ptc_waiters;; head = next) {
		/* Wait while pthread_cond_signal() in progress. */
		if (__predict_false(head == &pthread__cond_dummy)) {
			sched_yield();
			next = cond->ptc_waiters;
			continue;
		}
		if (head == NULL) {
			return 0;
		}
		/* Block concurrent access to the waiter list. */
		next = atomic_cas_ptr(&cond->ptc_waiters, head,
		    &pthread__cond_dummy);
		if (__predict_true(next == head)) {
			break;
		}
	}

	/* Now that list is locked, read pointer to next and then unlock. */
	membar_enter();
	cond->ptc_waiters = head->next;
	membar_producer();
	head->next = NULL;

	/* Now transfer waiter to the mutex. */
	pthread__mutex_deferwake(self, mutex, head);
	return 0;
}
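
/*
 * Typical caller pattern (illustrative, hypothetical names): update the
 * predicate and signal while holding the interlocking mutex.  The waiter
 * is handed to pthread__mutex_deferwake() above rather than woken here
 * directly, so a wakeup issued with the mutex held can be deferred to the
 * mutex release instead of waking a thread only to have it block again.
 *
 *	pthread_mutex_lock(&mtx);
 *	ready = 1;
 *	pthread_cond_signal(&cv);
 *	pthread_mutex_unlock(&mtx);
 */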

int
pthread_cond_broadcast(pthread_cond_t *cond)
{
	struct pthread__waiter *head, *next;
	pthread_mutex_t *mutex;
	pthread_t self;

	if (__predict_false(__uselibcstub))
		return __libc_cond_broadcast_stub(cond);

	pthread__error(EINVAL, "Invalid condition variable",
	    cond->ptc_magic == _PT_COND_MAGIC);

	if (cond->ptc_waiters == NULL)
		return 0;

	/* Take ownership of current set of waiters. */
	self = pthread_self();
	mutex = cond->ptc_mutex;
	for (head = cond->ptc_waiters;; head = next) {
		/* Wait while pthread_cond_signal() in progress. */
		if (__predict_false(head == &pthread__cond_dummy)) {
			sched_yield();
			next = cond->ptc_waiters;
			continue;
		}
		if (head == NULL) {
			return 0;
		}
		next = atomic_cas_ptr(&cond->ptc_waiters, head, NULL);
		if (__predict_true(next == head)) {
			break;
		}
	}
	membar_enter();

	/* Now transfer waiters to the mutex. */
	pthread__mutex_deferwake(self, mutex, head);
	return 0;
}
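
/*
 * Note that broadcast detaches the whole waiter list with one
 * atomic_cas_ptr() and passes the chain to pthread__mutex_deferwake(),
 * rather than waking each waiter on the condition variable itself.
 */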

int
_pthread_cond_has_waiters_np(pthread_cond_t *cond)
{

	return cond->ptc_waiters != NULL;
}
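
/*
 * The value returned above is only a snapshot: no lock is held, so the
 * waiter list can gain or lose entries before the caller acts on it.
 */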

int
pthread_condattr_init(pthread_condattr_t *attr)
{

	attr->ptca_magic = _PT_CONDATTR_MAGIC;
	attr->ptca_private = NULL;

	return 0;
}

int
pthread_condattr_setclock(pthread_condattr_t *attr, clockid_t clck)
{

	pthread__error(EINVAL, "Invalid condition variable attribute",
	    attr->ptca_magic == _PT_CONDATTR_MAGIC);

	switch (clck) {
	case CLOCK_MONOTONIC:
	case CLOCK_REALTIME:
		if (attr->ptca_private == NULL)
			attr->ptca_private = malloc(sizeof(clockid_t));
		if (attr->ptca_private == NULL)
			return errno;
		*(clockid_t *)attr->ptca_private = clck;
		return 0;
	default:
		return EINVAL;
	}
}
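
/*
 * Illustrative use (hypothetical names): time out against the monotonic
 * clock so that timed waits are unaffected by wall-clock steps.
 *
 *	pthread_condattr_t ca;
 *	pthread_condattr_init(&ca);
 *	pthread_condattr_setclock(&ca, CLOCK_MONOTONIC);
 *	pthread_cond_init(&cv, &ca);
 *	pthread_condattr_destroy(&ca);
 */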

int
pthread_condattr_getclock(const pthread_condattr_t *__restrict attr,
    clockid_t *__restrict clock_id)
{

	pthread__error(EINVAL, "Invalid condition variable attribute",
	    attr->ptca_magic == _PT_CONDATTR_MAGIC);

	if (attr == NULL || attr->ptca_private == NULL)
		return EINVAL;
	*clock_id = *(clockid_t *)attr->ptca_private;
	return 0;
}

int
pthread_condattr_destroy(pthread_condattr_t *attr)
{

	pthread__error(EINVAL, "Invalid condition variable attribute",
	    attr->ptca_magic == _PT_CONDATTR_MAGIC);

	attr->ptca_magic = _PT_CONDATTR_DEAD;
	free(attr->ptca_private);

	return 0;
}

#ifdef _PTHREAD_PSHARED
int
pthread_condattr_getpshared(const pthread_condattr_t * __restrict attr,
    int * __restrict pshared)
{

	pthread__error(EINVAL, "Invalid condition variable attribute",
	    attr->ptca_magic == _PT_CONDATTR_MAGIC);

	*pshared = PTHREAD_PROCESS_PRIVATE;
	return 0;
}

int
pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
{

	pthread__error(EINVAL, "Invalid condition variable attribute",
	    attr->ptca_magic == _PT_CONDATTR_MAGIC);

	switch(pshared) {
	case PTHREAD_PROCESS_PRIVATE:
		return 0;
	case PTHREAD_PROCESS_SHARED:
		return ENOSYS;
	}
	return EINVAL;
}
#endif