/*	$NetBSD: sys_futex.c,v 1.21 2025/01/18 07:26:06 riastradh Exp $	*/

/*-
 * Copyright (c) 2018, 2019, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell and Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_futex.c,v 1.21 2025/01/18 07:26:06 riastradh Exp $");

/*
 * Futexes
 *
 * The futex system call coordinates notifying threads waiting for
 * changes on a 32-bit word of memory.  The word can be managed by
 * CPU atomic operations in userland, without system calls, as long
 * as there is no contention.
 *
 * The simplest use case demonstrating the utility is:
 *
 *	// 32-bit word of memory shared among threads or
 *	// processes in userland.  lock & 1 means owned;
 *	// lock & 2 means there are waiters waiting.
 *	volatile int lock = 0;
 *
 *	int v;
 *
 *	// Acquire a lock.
 *	do {
 *		v = lock;
 *		if (v & 1) {
 *			// Lock is held.  Set a bit to say that
 *			// there are waiters, and wait for lock
 *			// to change to anything other than v;
 *			// then retry.
 *			if (atomic_cas_uint(&lock, v, v | 2) != v)
 *				continue;
 *			futex(FUTEX_WAIT, &lock, v | 2, NULL, NULL, 0);
 *			continue;
 *		}
 *	} while (atomic_cas_uint(&lock, v, v | 1) != v);
 *	membar_acquire();
 *
 *	...
 *
 *	// Release the lock.  Optimistically assume there are
 *	// no waiters first until demonstrated otherwise.
 *	membar_release();
 *	if (atomic_cas_uint(&lock, 1, 0) != 1) {
 *		// There may be waiters.
 *		v = atomic_swap_uint(&lock, 0);
 *		// If there are still waiters, wake one.
 *		if (v & 2)
 *			futex(FUTEX_WAKE, &lock, 1, NULL, NULL, 0);
 *	}
 *
 * The goal is to avoid the futex system call unless there is
 * contention; then if there is contention, to guarantee no missed
 * wakeups.
 *
 * For a simple implementation, futex(FUTEX_WAIT) could queue
 * itself to be woken, double-check the lock word, and then sleep;
 * spurious wakeups are generally a fact of life, so any
 * FUTEX_WAKE could just wake every FUTEX_WAIT in the system.
 *
 * If this were all there is to it, we could then increase
 * parallelism by refining the approximation: partition the
 * waiters into buckets by hashing the lock addresses to reduce
 * the incidence of spurious wakeups.  But this is not all.
 *
 * The futex(FUTEX_CMP_REQUEUE, &lock, n, &lock2, m, val)
 * operation not only wakes n waiters on lock if lock == val, but
 * also _transfers_ m additional waiters to lock2.  Unless wakeups
 * on lock2 also trigger wakeups on lock, we cannot move waiters
 * to lock2 if they merely share the same hash as waiters on lock.
 * Thus, we can't approximately distribute waiters into queues by
 * a hash function; we must distinguish futex queues exactly by
 * lock address.
 *
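 * For example (a sketch only, not taken from any particular
 * library; seq and mtx are hypothetical 32-bit words managed as in
 * the example above), a condition-variable broadcast can avoid a
 * thundering herd by waking one waiter and requeueing the rest
 * onto the mutex word:
 *
 *	// Waiter: sleep until the sequence number changes.
 *	v = seq;
 *	futex(FUTEX_WAIT, &seq, v, NULL, NULL, 0);
 *
 *	// Broadcaster: bump the sequence number; then wake one
 *	// waiter and move the rest to mtx, provided seq still
 *	// holds the value just stored.
 *	v = atomic_inc_uint_nv(&seq);
 *	futex(FUTEX_CMP_REQUEUE, &seq, 1, &mtx, INT_MAX, v);
 *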
 * For now, we use a global red/black tree to index futexes.  This
 * should be replaced by a lockless radix tree with a thread to
 * free entries no longer in use once all lookups on all CPUs have
 * completed.
 *
 * Specifically, we maintain two maps:
 *
 *	futex_tab.va[vmspace, va] for private futexes
 *	futex_tab.oa[uvm_voaddr] for shared futexes
 *
 * This implementation does not support priority inheritance.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/futex.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rbtree.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/timevar.h>

#include <sys/syscall.h>
#include <sys/syscallargs.h>
#include <sys/syscallvar.h>

#include <uvm/uvm_extern.h>

/*
 * Lock order:
 *
 *	futex_tab.lock
 *	futex::fx_qlock			ordered by kva of struct futex
 *	-> futex_wait::fw_lock		only one at a time
 *	futex_wait::fw_lock		only one at a time
 *	-> futex::fx_abortlock		only one at a time
 */

/*
 * union futex_key
 *
 *	A futex is addressed either by a vmspace+va (private) or by
 *	a uvm_voaddr (shared).
 */
union futex_key {
	struct {
		struct vmspace	*vmspace;
		vaddr_t		va;
	}			fk_private;
	struct uvm_voaddr	fk_shared;
};

/*
 * struct futex
 *
 *	Kernel state for a futex located at a particular address in a
 *	particular virtual address space.
 *
 *	N.B. fx_refcnt is an unsigned long because we need to be able
 *	to operate on it atomically on all systems while at the same
 *	time making it practically impossible for it to reach its max
 *	value.  In practice, we're limited by the number of LWPs that
 *	can be present on the system at any given time, and the
 *	assumption is that limit will be good enough on a 32-bit
 *	platform.  See futex_wake() for why overflow needs to be
 *	avoided.
 */
struct futex {
	union futex_key		fx_key;
	unsigned long		fx_refcnt;
	bool			fx_shared;
	bool			fx_on_tree;
	struct rb_node		fx_node;

	kmutex_t		fx_qlock;
	TAILQ_HEAD(, futex_wait) fx_queue;

	kmutex_t		fx_abortlock;
	LIST_HEAD(, futex_wait)	fx_abortlist;
	kcondvar_t		fx_abortcv;
};

/*
 * struct futex_wait
 *
 *	State for a thread to wait on a futex.  Threads wait on fw_cv
 *	for fw_bitset to be set to zero.  The thread may transition to
 *	a different futex queue at any time under the futex's lock.
 */
struct futex_wait {
	kmutex_t		fw_lock;
	kcondvar_t		fw_cv;
	struct futex		*fw_futex;
	TAILQ_ENTRY(futex_wait)	fw_entry;	/* queue lock */
	LIST_ENTRY(futex_wait)	fw_abort;	/* queue abortlock */
	int			fw_bitset;
	bool			fw_aborting;	/* fw_lock */
};

/*
 * futex_tab
 *
 *	Global trees of futexes by vmspace/va and VM object address.
 *
 *	XXX This obviously doesn't scale in parallel.  We could use a
 *	pserialize-safe data structure, but there may be a high cost to
 *	frequent deletion since we don't cache futexes after we're done
 *	with them.  We could use hashed locks.  But for now, just make
 *	sure userland can't DoS the serial performance, by using a
 *	balanced binary tree for lookup.
 *
 *	XXX We could use a per-process tree for the table indexed by
 *	virtual address to reduce contention between processes.
 */
static struct {
	kmutex_t	lock;
	struct rb_tree	va;
	struct rb_tree	oa;
} futex_tab __cacheline_aligned;

static int
compare_futex_key(void *cookie, const void *n, const void *k)
{
	const struct futex *fa = n;
	const union futex_key *fka = &fa->fx_key;
	const union futex_key *fkb = k;

	if ((uintptr_t)fka->fk_private.vmspace <
	    (uintptr_t)fkb->fk_private.vmspace)
		return -1;
	if ((uintptr_t)fka->fk_private.vmspace >
	    (uintptr_t)fkb->fk_private.vmspace)
		return +1;
	if (fka->fk_private.va < fkb->fk_private.va)
		return -1;
	if (fka->fk_private.va > fkb->fk_private.va)
		return +1;
	return 0;
}

static int
compare_futex(void *cookie, const void *na, const void *nb)
{
	const struct futex *fa = na;
	const struct futex *fb = nb;

	return compare_futex_key(cookie, fa, &fb->fx_key);
}

static const rb_tree_ops_t futex_rb_ops = {
	.rbto_compare_nodes = compare_futex,
	.rbto_compare_key = compare_futex_key,
	.rbto_node_offset = offsetof(struct futex, fx_node),
};

static int
compare_futex_shared_key(void *cookie, const void *n, const void *k)
{
	const struct futex *fa = n;
	const union futex_key *fka = &fa->fx_key;
	const union futex_key *fkb = k;

	return uvm_voaddr_compare(&fka->fk_shared, &fkb->fk_shared);
}

static int
compare_futex_shared(void *cookie, const void *na, const void *nb)
{
	const struct futex *fa = na;
	const struct futex *fb = nb;

	return compare_futex_shared_key(cookie, fa, &fb->fx_key);
}

static const rb_tree_ops_t futex_shared_rb_ops = {
	.rbto_compare_nodes = compare_futex_shared,
	.rbto_compare_key = compare_futex_shared_key,
	.rbto_node_offset = offsetof(struct futex, fx_node),
};

static void	futex_wait_dequeue(struct futex_wait *, struct futex *);

/*
 * futex_load(uaddr, kaddr)
 *
 *	Perform a single atomic load to read *uaddr, and return the
 *	result in *kaddr.  Return 0 on success, EFAULT if uaddr is not
 *	mapped.
 */
static inline int
futex_load(int *uaddr, int *kaddr)
{
	return ufetch_int((u_int *)uaddr, (u_int *)kaddr);
}

/*
 * futex_test(uaddr, expected)
 *
 *	True if *uaddr == expected.  False if *uaddr != expected, or if
 *	uaddr is not mapped.
 */
static bool
futex_test(int *uaddr, int expected)
{
	int val;
	int error;

	error = futex_load(uaddr, &val);
	if (error)
		return false;
	return val == expected;
}

/*
 * futex_sys_init()
 *
 *	Initialize the futex subsystem.
 */
void
futex_sys_init(void)
{

	mutex_init(&futex_tab.lock, MUTEX_DEFAULT, IPL_NONE);
	rb_tree_init(&futex_tab.va, &futex_rb_ops);
	rb_tree_init(&futex_tab.oa, &futex_shared_rb_ops);
}

/*
 * futex_sys_fini()
 *
 *	Finalize the futex subsystem.
 */
void
futex_sys_fini(void)
{

	KASSERT(RB_TREE_MIN(&futex_tab.oa) == NULL);
	KASSERT(RB_TREE_MIN(&futex_tab.va) == NULL);
	mutex_destroy(&futex_tab.lock);
}

/*
 * futex_queue_init(f)
 *
 *	Initialize the futex queue.  Caller must call futex_queue_fini
 *	when done.
 *
 *	Never sleeps.
 */
static void
futex_queue_init(struct futex *f)
{

	mutex_init(&f->fx_qlock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&f->fx_abortlock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&f->fx_abortcv, "fqabort");
	LIST_INIT(&f->fx_abortlist);
	TAILQ_INIT(&f->fx_queue);
}

/*
 * futex_queue_drain(f)
 *
 *	Wait for any aborting waiters in f; then empty the queue of
 *	any stragglers and wake them.  Caller must guarantee no new
 *	references to f.
 *
 *	May sleep.
 */
static void
futex_queue_drain(struct futex *f)
{
	struct futex_wait *fw, *fw_next;

	mutex_enter(&f->fx_abortlock);
	while (!LIST_EMPTY(&f->fx_abortlist))
		cv_wait(&f->fx_abortcv, &f->fx_abortlock);
	mutex_exit(&f->fx_abortlock);

	mutex_enter(&f->fx_qlock);
	TAILQ_FOREACH_SAFE(fw, &f->fx_queue, fw_entry, fw_next) {
		mutex_enter(&fw->fw_lock);
		futex_wait_dequeue(fw, f);
		cv_broadcast(&fw->fw_cv);
		mutex_exit(&fw->fw_lock);
	}
	mutex_exit(&f->fx_qlock);
}

/*
 * futex_queue_fini(f)
 *
 *	Finalize the futex queue initialized by futex_queue_init.  Queue
 *	must be empty.  Caller must not use f again until a subsequent
 *	futex_queue_init.
 */
static void
futex_queue_fini(struct futex *f)
{

	KASSERT(TAILQ_EMPTY(&f->fx_queue));
	KASSERT(LIST_EMPTY(&f->fx_abortlist));
	mutex_destroy(&f->fx_qlock);
	mutex_destroy(&f->fx_abortlock);
	cv_destroy(&f->fx_abortcv);
}

/*
 * futex_key_init(key, vm, va, shared)
 *
 *	Initialize a futex key for lookup, etc.
 */
static int
futex_key_init(union futex_key *fk, struct vmspace *vm, vaddr_t va, bool shared)
{
	int error = 0;

	if (__predict_false(shared)) {
		if (!uvm_voaddr_acquire(&vm->vm_map, va, &fk->fk_shared))
			error = EFAULT;
	} else {
		fk->fk_private.vmspace = vm;
		fk->fk_private.va = va;
	}

	return error;
}

/*
 * futex_key_fini(key, shared)
 *
 *	Release a futex key.
 */
static void
futex_key_fini(union futex_key *fk, bool shared)
{
	if (__predict_false(shared))
		uvm_voaddr_release(&fk->fk_shared);
	memset(fk, 0, sizeof(*fk));
}

/*
 * futex_create(fk, shared)
 *
 *	Create a futex.  Initial reference count is 1, representing the
 *	caller.  Returns NULL on failure.  Always takes ownership of the
 *	key, either transferring it to the newly-created futex, or
 *	releasing the key if creation fails.
 *
 *	Never sleeps for memory, but may sleep to acquire a lock.
 */
static struct futex *
futex_create(union futex_key *fk, bool shared)
{
	struct futex *f;

	f = kmem_alloc(sizeof(*f), KM_NOSLEEP);
	if (f == NULL) {
		futex_key_fini(fk, shared);
		return NULL;
	}
	f->fx_key = *fk;
	f->fx_refcnt = 1;
	f->fx_shared = shared;
	f->fx_on_tree = false;
	futex_queue_init(f);

	return f;
}

/*
 * futex_destroy(f)
 *
 *	Destroy a futex created with futex_create.  Reference count
 *	must be zero.
 *
 *	May sleep.
 */
static void
futex_destroy(struct futex *f)
{

	ASSERT_SLEEPABLE();

	KASSERT(atomic_load_relaxed(&f->fx_refcnt) == 0);
	KASSERT(!f->fx_on_tree);

	/* Drain and destroy the private queue. */
	futex_queue_drain(f);
	futex_queue_fini(f);

	futex_key_fini(&f->fx_key, f->fx_shared);

	kmem_free(f, sizeof(*f));
}

/*
 * futex_hold(f)
 *
 *	Attempt to acquire a reference to f.  Return 0 on success,
 *	ENFILE on too many references.
 *
 *	Never sleeps.
 */
static int
futex_hold(struct futex *f)
{
	unsigned long refcnt;

	do {
		refcnt = atomic_load_relaxed(&f->fx_refcnt);
		if (refcnt == ULONG_MAX)
			return ENFILE;
	} while (atomic_cas_ulong(&f->fx_refcnt, refcnt, refcnt + 1) != refcnt);

	return 0;
}

/*
 * futex_rele(f)
 *
 *	Release a reference to f acquired with futex_create or
 *	futex_hold.
 *
 *	May sleep to free f.
 */
static void
futex_rele(struct futex *f)
{
	unsigned long refcnt;

	ASSERT_SLEEPABLE();

	do {
		refcnt = atomic_load_relaxed(&f->fx_refcnt);
		if (refcnt == 1)
			goto trylast;
		membar_release();
	} while (atomic_cas_ulong(&f->fx_refcnt, refcnt, refcnt - 1) != refcnt);
	return;

trylast:
	mutex_enter(&futex_tab.lock);
	if (atomic_dec_ulong_nv(&f->fx_refcnt) == 0) {
		membar_acquire();
		if (f->fx_on_tree) {
			if (__predict_false(f->fx_shared))
				rb_tree_remove_node(&futex_tab.oa, f);
			else
				rb_tree_remove_node(&futex_tab.va, f);
			f->fx_on_tree = false;
		}
	} else {
		/* References remain -- don't destroy it. */
		f = NULL;
	}
	mutex_exit(&futex_tab.lock);
	if (f != NULL)
		futex_destroy(f);
}

/*
 * futex_rele_not_last(f)
 *
 *	Release a reference to f acquired with futex_create or
 *	futex_hold.
 *
 *	This version asserts that we are not dropping the last
 *	reference to f.
 */
static void
futex_rele_not_last(struct futex *f)
{
	unsigned long refcnt;

	do {
		refcnt = atomic_load_relaxed(&f->fx_refcnt);
		KASSERT(refcnt > 1);
	} while (atomic_cas_ulong(&f->fx_refcnt, refcnt, refcnt - 1) != refcnt);
}

/*
 * futex_lookup_by_key(key, shared, &f)
 *
 *	Try to find an existing futex matching the specified key.  On
 *	success, return 0, set f to the found futex or to NULL if not
 *	found, and increment f's reference count if found.
 *
 *	Return ENFILE if reference count too high.
 *
 *	Internal lookup routine shared by futex_lookup() and
 *	futex_lookup_create().
 */
static int
futex_lookup_by_key(union futex_key *fk, bool shared, struct futex **fp)
{
	struct futex *f;
	int error = 0;

	mutex_enter(&futex_tab.lock);
	if (__predict_false(shared)) {
		f = rb_tree_find_node(&futex_tab.oa, fk);
	} else {
		f = rb_tree_find_node(&futex_tab.va, fk);
	}
	if (f) {
		error = futex_hold(f);
		if (error)
			f = NULL;
	}
	*fp = f;
	mutex_exit(&futex_tab.lock);

	return error;
}

/*
 * futex_insert(f, fp)
 *
 *	Try to insert the futex f into the tree by va.  If there
 *	already is a futex for its va, acquire a reference to it, and
 *	store it in *fp; otherwise store f in *fp.
 *
 *	Return 0 on success, ENFILE if there already is a futex but its
 *	reference count is too high.
 */
static int
futex_insert(struct futex *f, struct futex **fp)
{
	struct futex *f0;
	int error;

	KASSERT(atomic_load_relaxed(&f->fx_refcnt) != 0);
	KASSERT(!f->fx_on_tree);

	mutex_enter(&futex_tab.lock);
	if (__predict_false(f->fx_shared))
		f0 = rb_tree_insert_node(&futex_tab.oa, f);
	else
		f0 = rb_tree_insert_node(&futex_tab.va, f);
	if (f0 == f) {
		f->fx_on_tree = true;
		error = 0;
	} else {
		KASSERT(atomic_load_relaxed(&f0->fx_refcnt) != 0);
		KASSERT(f0->fx_on_tree);
		error = futex_hold(f0);
		if (error)
			goto out;
	}
	*fp = f0;
out:	mutex_exit(&futex_tab.lock);

	return error;
}

/*
 * futex_lookup(uaddr, shared, &f)
 *
 *	Find a futex at the userland pointer uaddr in the current
 *	process's VM space.  On success, return the futex in f and
 *	increment its reference count.
 *
 *	Caller must call futex_rele when done.
 */
static int
futex_lookup(int *uaddr, bool shared, struct futex **fp)
{
	union futex_key fk;
	struct vmspace *vm = curproc->p_vmspace;
	vaddr_t va = (vaddr_t)uaddr;
	int error;

	/*
	 * Reject unaligned user pointers so we don't cross page
	 * boundaries and so atomics will work.
	 */
	if ((va & 3) != 0)
		return EINVAL;

	/* Look it up. */
	error = futex_key_init(&fk, vm, va, shared);
	if (error)
		return error;

	error = futex_lookup_by_key(&fk, shared, fp);
	futex_key_fini(&fk, shared);
	if (error)
		return error;

	KASSERT(*fp == NULL || (*fp)->fx_shared == shared);
	KASSERT(*fp == NULL || atomic_load_relaxed(&(*fp)->fx_refcnt) != 0);

	/*
	 * Success!  (Caller must still check whether we found
	 * anything, but nothing went _wrong_ like trying to use
	 * unmapped memory.)
	 */
	KASSERT(error == 0);

	return error;
}

/*
 * futex_lookup_create(uaddr, shared, &f)
 *
 *	Find or create a futex at the userland pointer uaddr in the
 *	current process's VM space.  On success, return the futex in f
 *	and increment its reference count.
 *
 *	Caller must call futex_rele when done.
 */
static int
futex_lookup_create(int *uaddr, bool shared, struct futex **fp)
{
	union futex_key fk;
	struct vmspace *vm = curproc->p_vmspace;
	struct futex *f = NULL;
	vaddr_t va = (vaddr_t)uaddr;
	int error;

	/*
	 * Reject unaligned user pointers so we don't cross page
	 * boundaries and so atomics will work.
	 */
	if ((va & 3) != 0)
		return EINVAL;

	error = futex_key_init(&fk, vm, va, shared);
	if (error)
		return error;

	/*
	 * Optimistically assume there already is one, and try to find
	 * it.
	 */
	error = futex_lookup_by_key(&fk, shared, fp);
	if (error || *fp != NULL) {
		/*
		 * We either found one, or there was an error.
		 * In either case, we are done with the key.
		 */
		futex_key_fini(&fk, shared);
		goto out;
	}

	/*
	 * Create a futex record.  This transfers ownership of the key
	 * in all cases.
	 */
	f = futex_create(&fk, shared);
	if (f == NULL) {
		error = ENOMEM;
		goto out;
	}

	/*
	 * Insert our new futex, or use existing if someone else beat
	 * us to it.
	 */
	error = futex_insert(f, fp);
	if (error)
		goto out;
	if (*fp == f)
		f = NULL;	/* don't release on exit */

	/* Success! */
	KASSERT(error == 0);

out:	if (f != NULL)
		futex_rele(f);
	KASSERT(error || *fp != NULL);
	KASSERT(error || atomic_load_relaxed(&(*fp)->fx_refcnt) != 0);
	return error;
}

/*
 * futex_wait_init(fw, bitset)
 *
 *	Initialize a record for a thread to wait on a futex matching
 *	the specified bit set.  Should be passed to futex_wait_enqueue
 *	before futex_wait, and should be passed to futex_wait_fini when
 *	done.
 */
static void
futex_wait_init(struct futex_wait *fw, int bitset)
{

	KASSERT(bitset);

	mutex_init(&fw->fw_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&fw->fw_cv, "futex");
	fw->fw_futex = NULL;
	fw->fw_bitset = bitset;
	fw->fw_aborting = false;
}

/*
 * futex_wait_fini(fw)
 *
 *	Finalize a record for a futex waiter.  Must not be on any
 *	futex's queue.
 */
static void
futex_wait_fini(struct futex_wait *fw)
{

	KASSERT(fw->fw_futex == NULL);

	cv_destroy(&fw->fw_cv);
	mutex_destroy(&fw->fw_lock);
}

/*
 * futex_wait_enqueue(fw, f)
 *
 *	Put fw on the futex queue.  Must be done before futex_wait.
 *	Caller must hold fw's lock and f's lock, and fw must not be on
 *	any existing futex's waiter list.
 */
static void
futex_wait_enqueue(struct futex_wait *fw, struct futex *f)
{

	KASSERT(mutex_owned(&f->fx_qlock));
	KASSERT(mutex_owned(&fw->fw_lock));
	KASSERT(fw->fw_futex == NULL);
	KASSERT(!fw->fw_aborting);

	fw->fw_futex = f;
	TAILQ_INSERT_TAIL(&f->fx_queue, fw, fw_entry);
}

/*
 * futex_wait_dequeue(fw, f)
 *
 *	Remove fw from the futex queue.  Precludes subsequent
 *	futex_wait until a futex_wait_enqueue.  Caller must hold fw's
 *	lock and f's lock, and fw must be on f.
 */
static void
futex_wait_dequeue(struct futex_wait *fw, struct futex *f)
{

	KASSERT(mutex_owned(&f->fx_qlock));
	KASSERT(mutex_owned(&fw->fw_lock));
	KASSERT(fw->fw_futex == f);

	TAILQ_REMOVE(&f->fx_queue, fw, fw_entry);
	fw->fw_futex = NULL;
}

/*
 * futex_wait_abort(fw)
 *
 *	Caller is no longer waiting for fw.  Remove it from any queue
 *	if it was on one.  Caller must hold fw->fw_lock.
 */
static void
futex_wait_abort(struct futex_wait *fw)
{
	struct futex *f;

	KASSERT(mutex_owned(&fw->fw_lock));

	/*
	 * Grab the futex.  It can't go away as long as we hold
	 * fw_lock.  However, we can't take the queue lock because
	 * that's a lock order reversal.
	 */
	f = fw->fw_futex;

	/* Put us on the abort list so that f won't go away. */
	mutex_enter(&f->fx_abortlock);
	LIST_INSERT_HEAD(&f->fx_abortlist, fw, fw_abort);
	mutex_exit(&f->fx_abortlock);

	/*
	 * Mark fw as aborting so it won't lose wakeups and won't be
	 * transferred to any other queue.
	 */
	fw->fw_aborting = true;

	/* f is now stable, so we can release fw_lock. */
	mutex_exit(&fw->fw_lock);

	/* Now we can remove fw under the queue lock. */
	mutex_enter(&f->fx_qlock);
	mutex_enter(&fw->fw_lock);
	futex_wait_dequeue(fw, f);
	mutex_exit(&fw->fw_lock);
	mutex_exit(&f->fx_qlock);

	/*
	 * Finally, remove us from the abort list and notify anyone
	 * waiting for the abort to complete if we were the last to go.
	 */
	mutex_enter(&f->fx_abortlock);
	LIST_REMOVE(fw, fw_abort);
	if (LIST_EMPTY(&f->fx_abortlist))
		cv_broadcast(&f->fx_abortcv);
	mutex_exit(&f->fx_abortlock);

	/*
	 * Release our reference to the futex now that we are not
	 * waiting for it.
	 */
	futex_rele(f);

	/*
	 * Reacquire the fw lock as caller expects.  Verify that we're
	 * aborting and no longer associated with a futex.
	 */
	mutex_enter(&fw->fw_lock);
	KASSERT(fw->fw_aborting);
	KASSERT(fw->fw_futex == NULL);
}

/*
 * futex_wait(fw, deadline, clkid)
 *
 *	fw must be a waiter on a futex's queue.  Wait until deadline on
 *	the clock clkid, or forever if deadline is NULL, for a futex
 *	wakeup.  Return 0 on explicit wakeup or destruction of futex,
 *	ETIMEDOUT on timeout, EINTR/ERESTART on signal.  Either way, fw
 *	will no longer be on a futex queue on return.
 */
static int
futex_wait(struct futex_wait *fw, const struct timespec *deadline,
    clockid_t clkid)
{
	int error = 0;

	/* Test and wait under the wait lock. */
	mutex_enter(&fw->fw_lock);

	for (;;) {
		/* If we're done, stop and report success. */
		if (fw->fw_bitset == 0 || fw->fw_futex == NULL) {
			error = 0;
			break;
		}

		/* If anything went wrong in the last iteration, stop. */
		if (error)
			break;

		/* Not done yet.  Wait. */
		if (deadline) {
			struct timespec ts;

			/* Check our watch. */
			error = clock_gettime1(clkid, &ts);
			if (error)
				break;

			/* If we're past the deadline, ETIMEDOUT. */
			if (timespeccmp(deadline, &ts, <=)) {
				error = ETIMEDOUT;
				break;
			}

			/* Count how much time is left. */
			timespecsub(deadline, &ts, &ts);

			/* Wait for that much time, allowing signals. */
			error = cv_timedwait_sig(&fw->fw_cv, &fw->fw_lock,
			    tstohz(&ts));
		} else {
			/* Wait indefinitely, allowing signals. */
			error = cv_wait_sig(&fw->fw_cv, &fw->fw_lock);
		}
	}

	/*
	 * If we were woken up, the waker will have removed fw from the
	 * queue.  But if anything went wrong, we must remove fw from
	 * the queue ourselves.  While here, convert EWOULDBLOCK to
	 * ETIMEDOUT.
	 */
	if (error) {
		futex_wait_abort(fw);
		if (error == EWOULDBLOCK)
			error = ETIMEDOUT;
	}

	mutex_exit(&fw->fw_lock);

	return error;
}

/*
 * futex_wake(f, nwake, f2, nrequeue, bitset)
 *
 *	Wake up to nwake waiters on f matching bitset; then, if f2 is
 *	provided, move up to nrequeue remaining waiters on f matching
 *	bitset to f2.  Return the number of waiters actually woken.
 *	Caller must hold the locks of f and f2, if provided.
 */
static unsigned
futex_wake(struct futex *f, unsigned nwake, struct futex *f2,
    unsigned nrequeue, int bitset)
{
	struct futex_wait *fw, *fw_next;
	unsigned nwoken = 0;
	int hold_error __diagused;

	KASSERT(mutex_owned(&f->fx_qlock));
	KASSERT(f2 == NULL || mutex_owned(&f2->fx_qlock));

	/* Wake up to nwake waiters, and count the number woken. */
	TAILQ_FOREACH_SAFE(fw, &f->fx_queue, fw_entry, fw_next) {
		if ((fw->fw_bitset & bitset) == 0)
			continue;
		if (nwake > 0) {
			mutex_enter(&fw->fw_lock);
			if (__predict_false(fw->fw_aborting)) {
				mutex_exit(&fw->fw_lock);
				continue;
			}
			futex_wait_dequeue(fw, f);
			fw->fw_bitset = 0;
			cv_broadcast(&fw->fw_cv);
			mutex_exit(&fw->fw_lock);
			nwake--;
			nwoken++;
			/*
			 * Drop the futex reference on behalf of the
			 * waiter.  We assert this is not the last
			 * reference on the futex (our caller should
			 * also have one).
			 */
			futex_rele_not_last(f);
		} else {
			break;
		}
	}

	if (f2) {
		/* Move up to nrequeue waiters from f's queue to f2's queue. */
		TAILQ_FOREACH_SAFE(fw, &f->fx_queue, fw_entry, fw_next) {
			if ((fw->fw_bitset & bitset) == 0)
				continue;
			if (nrequeue > 0) {
				mutex_enter(&fw->fw_lock);
				if (__predict_false(fw->fw_aborting)) {
					mutex_exit(&fw->fw_lock);
					continue;
				}
				futex_wait_dequeue(fw, f);
				futex_wait_enqueue(fw, f2);
				mutex_exit(&fw->fw_lock);
				nrequeue--;
				/*
				 * Transfer the reference from f to f2.
				 * As above, we assert that we are not
				 * dropping the last reference to f here.
				 *
				 * XXX futex_hold() could theoretically
				 * XXX fail here.
				 */
				futex_rele_not_last(f);
				hold_error = futex_hold(f2);
				KASSERT(hold_error == 0);
			} else {
				break;
			}
		}
	} else {
		KASSERT(nrequeue == 0);
	}

	/* Return the number of waiters woken. */
	return nwoken;
}

/*
 * futex_queue_lock(f)
 *
 *	Acquire the queue lock of f.  Pair with futex_queue_unlock.  Do
 *	not use if caller needs to acquire two locks; use
 *	futex_queue_lock2 instead.
 */
static void
futex_queue_lock(struct futex *f)
{
	mutex_enter(&f->fx_qlock);
}

/*
 * futex_queue_unlock(f)
 *
 *	Release the queue lock of f.
 */
static void
futex_queue_unlock(struct futex *f)
{
	mutex_exit(&f->fx_qlock);
}

/*
 * futex_queue_lock2(f, f2)
 *
 *	Acquire the queue locks of both f and f2, which may be null, or
 *	which may have the same underlying queue.  If they are
 *	distinct, an arbitrary total order is chosen on the locks.
 *
 *	Callers should only ever acquire multiple queue locks
 *	simultaneously using futex_queue_lock2.
 */
static void
futex_queue_lock2(struct futex *f, struct futex *f2)
{

	/*
	 * If both are null, do nothing; if one is null and the other
	 * is not, lock the other and be done with it.
	 */
	if (f == NULL && f2 == NULL) {
		return;
	} else if (f == NULL) {
		mutex_enter(&f2->fx_qlock);
		return;
	} else if (f2 == NULL) {
		mutex_enter(&f->fx_qlock);
		return;
	}

	/* If both futexes are the same, acquire only one. */
	if (f == f2) {
		mutex_enter(&f->fx_qlock);
		return;
	}

	/* Otherwise, use the ordering on the kva of the futex pointer. */
	if ((uintptr_t)f < (uintptr_t)f2) {
		mutex_enter(&f->fx_qlock);
		mutex_enter(&f2->fx_qlock);
	} else {
		mutex_enter(&f2->fx_qlock);
		mutex_enter(&f->fx_qlock);
	}
}

/*
 * futex_queue_unlock2(f, f2)
 *
 *	Release the queue locks of both f and f2, which may be null, or
 *	which may have the same underlying queue.
 */
static void
futex_queue_unlock2(struct futex *f, struct futex *f2)
{

	/*
	 * If both are null, do nothing; if one is null and the other
	 * is not, unlock the other and be done with it.
	 */
	if (f == NULL && f2 == NULL) {
		return;
	} else if (f == NULL) {
		mutex_exit(&f2->fx_qlock);
		return;
	} else if (f2 == NULL) {
		mutex_exit(&f->fx_qlock);
		return;
	}

	/* If both futexes are the same, release only one. */
	if (f == f2) {
		mutex_exit(&f->fx_qlock);
		return;
	}

	/* Otherwise, use the ordering on the kva of the futex pointer. */
	if ((uintptr_t)f < (uintptr_t)f2) {
		mutex_exit(&f2->fx_qlock);
		mutex_exit(&f->fx_qlock);
	} else {
		mutex_exit(&f->fx_qlock);
		mutex_exit(&f2->fx_qlock);
	}
}

/*
 * futex_func_wait(shared, uaddr, val, val3, timeout, clkid, clkflags,
 *	retval)
 *
 *	Implement futex(FUTEX_WAIT) and futex(FUTEX_WAIT_BITSET).
 */
static int
futex_func_wait(bool shared, int *uaddr, int val, int val3,
    const struct timespec *timeout, clockid_t clkid, int clkflags,
    register_t *retval)
{
	struct futex *f;
	struct futex_wait wait, *fw = &wait;
	struct timespec ts;
	const struct timespec *deadline;
	int error;

	/*
	 * If there's nothing to wait for, and nobody will ever wake
	 * us, then don't set anything up to wait -- just stop here.
	 */
	if (val3 == 0)
		return EINVAL;

	/* Optimistically test before anything else. */
	if (!futex_test(uaddr, val))
		return EAGAIN;

	/* Determine a deadline on the specified clock. */
	if (timeout == NULL || (clkflags & TIMER_ABSTIME) == TIMER_ABSTIME) {
		deadline = timeout;
	} else {
		error = clock_gettime1(clkid, &ts);
		if (error)
			return error;
		timespecadd(&ts, timeout, &ts);
		deadline = &ts;
	}

	/* Get the futex, creating it if necessary. */
	error = futex_lookup_create(uaddr, shared, &f);
	if (error)
		return error;
	KASSERT(f);

	/* Get ready to wait. */
	futex_wait_init(fw, val3);

	/*
	 * Under the queue lock, check the value again: if it has
	 * already changed, EAGAIN; otherwise enqueue the waiter.
	 * Since FUTEX_WAKE will use the same lock and be done after
	 * modifying the value, the order in which we check and enqueue
	 * is immaterial.
	 */
	futex_queue_lock(f);
	if (!futex_test(uaddr, val)) {
		futex_queue_unlock(f);
		error = EAGAIN;
		goto out;
	}
	mutex_enter(&fw->fw_lock);
	futex_wait_enqueue(fw, f);
	mutex_exit(&fw->fw_lock);
	futex_queue_unlock(f);

	/*
	 * We cannot drop our reference to the futex here, because
	 * we might be enqueued on a different one when we are awakened.
	 * The references will be managed on our behalf in the requeue
	 * and wake cases.
	 */
	f = NULL;

	/* Wait. */
	error = futex_wait(fw, deadline, clkid);
	if (error)
		goto out;

	/* Return 0 on success, error on failure. */
	*retval = 0;

out:	if (f != NULL)
		futex_rele(f);
	futex_wait_fini(fw);
	return error;
}

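/*
 * The bitset variants multiplex several wait channels on one futex
 * word: FUTEX_WAIT_BITSET records the bitset passed in val3, and
 * FUTEX_WAKE_BITSET wakes only waiters whose recorded bitset
 * intersects its own val3.  A sketch only of the userland pairing,
 * with a hypothetical word w and channel masks 0x1/0x2, using the
 * same argument order as the examples at the top of this file:
 *
 *	// Wait on channel 0x1 only, no timeout.  (v is the value
 *	// of w read beforehand.)
 *	futex(FUTEX_WAIT_BITSET, &w, v, NULL, NULL, 0x1);
 *
 *	// Wake everyone on channel 0x2; channel 0x1 waiters are
 *	// left asleep.
 *	futex(FUTEX_WAKE_BITSET, &w, INT_MAX, NULL, NULL, 0x2);
 */
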
/*
 * futex_func_wake(shared, uaddr, val, val3, retval)
 *
 *	Implement futex(FUTEX_WAKE) and futex(FUTEX_WAKE_BITSET).
 */
static int
futex_func_wake(bool shared, int *uaddr, int val, int val3, register_t *retval)
{
	struct futex *f;
	unsigned int nwoken = 0;
	int error = 0;

	/* Reject negative number of wakeups. */
	if (val < 0) {
		error = EINVAL;
		goto out;
	}

	/* Look up the futex, if any. */
	error = futex_lookup(uaddr, shared, &f);
	if (error)
		goto out;

	/* If there's no futex, there are no waiters to wake. */
	if (f == NULL)
		goto out;

	/*
	 * Under f's queue lock, wake the waiters and remember the
	 * number woken.
	 */
	futex_queue_lock(f);
	nwoken = futex_wake(f, val, NULL, 0, val3);
	futex_queue_unlock(f);

	/* Release the futex. */
	futex_rele(f);

out:
	/* Return the number of waiters woken. */
	*retval = nwoken;

	return error;
}

/*
 * futex_func_requeue(shared, op, uaddr, val, uaddr2, val2, val3, retval)
 *
 *	Implement futex(FUTEX_REQUEUE) and futex(FUTEX_CMP_REQUEUE).
 */
static int
futex_func_requeue(bool shared, int op, int *uaddr, int val, int *uaddr2,
    int val2, int val3, register_t *retval)
{
	struct futex *f = NULL, *f2 = NULL;
	unsigned nwoken = 0;	/* default to zero woken on early return */
	int error;

	/* Reject negative numbers of wakeups or requeues. */
	if (val < 0 || val2 < 0) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Look up or create the source futex.  For FUTEX_CMP_REQUEUE,
	 * we always create it, rather than bail if it has no waiters,
	 * because FUTEX_CMP_REQUEUE always tests the futex word in
	 * order to report EAGAIN.
	 */
	error = (op == FUTEX_CMP_REQUEUE
	    ? futex_lookup_create(uaddr, shared, &f)
	    : futex_lookup(uaddr, shared, &f));
	if (error)
		goto out;

	/* If there is none for FUTEX_REQUEUE, nothing to do. */
	if (f == NULL) {
		KASSERT(op != FUTEX_CMP_REQUEUE);
		goto out;
	}

	/*
	 * We may need to create the destination futex because it's
	 * entirely possible it does not currently have any waiters.
	 */
	error = futex_lookup_create(uaddr2, shared, &f2);
	if (error)
		goto out;

	/*
	 * Under the futexes' queue locks, check the value; if
	 * unchanged from val3, wake the waiters.
	 */
	futex_queue_lock2(f, f2);
	if (op == FUTEX_CMP_REQUEUE && !futex_test(uaddr, val3)) {
		error = EAGAIN;
	} else {
		error = 0;
		nwoken = futex_wake(f, val, f2, val2, FUTEX_BITSET_MATCH_ANY);
	}
	futex_queue_unlock2(f, f2);

out:
	/* Return the number of waiters woken. */
	*retval = nwoken;

	/* Release the futexes if we got them. */
	if (f2)
		futex_rele(f2);
	if (f)
		futex_rele(f);
	return error;
}

/*
 * futex_validate_op_cmp(val3)
 *
 *	Validate an op/cmp argument for FUTEX_WAKE_OP.
 */
static int
futex_validate_op_cmp(int val3)
{
	int op = __SHIFTOUT(val3, FUTEX_OP_OP_MASK);
	int cmp = __SHIFTOUT(val3, FUTEX_OP_CMP_MASK);

	if (op & FUTEX_OP_OPARG_SHIFT) {
		int oparg = __SHIFTOUT(val3, FUTEX_OP_OPARG_MASK);
		if (oparg < 0)
			return EINVAL;
		if (oparg >= 32)
			return EINVAL;
		op &= ~FUTEX_OP_OPARG_SHIFT;
	}

	switch (op) {
	case FUTEX_OP_SET:
	case FUTEX_OP_ADD:
	case FUTEX_OP_OR:
	case FUTEX_OP_ANDN:
	case FUTEX_OP_XOR:
		break;
	default:
		return EINVAL;
	}

	switch (cmp) {
	case FUTEX_OP_CMP_EQ:
	case FUTEX_OP_CMP_NE:
	case FUTEX_OP_CMP_LT:
	case FUTEX_OP_CMP_LE:
	case FUTEX_OP_CMP_GT:
	case FUTEX_OP_CMP_GE:
		break;
	default:
		return EINVAL;
	}

	return 0;
}

/*
 * futex_compute_op(oldval, val3)
 *
 *	Apply a FUTEX_WAKE_OP operation to oldval.
 */
static int
futex_compute_op(int oldval, int val3)
{
	int op = __SHIFTOUT(val3, FUTEX_OP_OP_MASK);
	int oparg = __SHIFTOUT(val3, FUTEX_OP_OPARG_MASK);

	if (op & FUTEX_OP_OPARG_SHIFT) {
		KASSERT(oparg >= 0);
		KASSERT(oparg < 32);
		oparg = 1u << oparg;
		op &= ~FUTEX_OP_OPARG_SHIFT;
	}

	switch (op) {
	case FUTEX_OP_SET:
		return oparg;

	case FUTEX_OP_ADD:
		/*
		 * Avoid signed arithmetic overflow by doing
		 * arithmetic unsigned and converting back to signed
		 * at the end.
		 */
		return (int)((unsigned)oldval + (unsigned)oparg);

	case FUTEX_OP_OR:
		return oldval | oparg;

	case FUTEX_OP_ANDN:
		return oldval & ~oparg;

	case FUTEX_OP_XOR:
		return oldval ^ oparg;

	default:
		panic("invalid futex op");
	}
}

/*
 * futex_compute_cmp(oldval, val3)
 *
 *	Apply a FUTEX_WAKE_OP comparison to oldval.
 */
static bool
futex_compute_cmp(int oldval, int val3)
{
	int cmp = __SHIFTOUT(val3, FUTEX_OP_CMP_MASK);
	int cmparg = __SHIFTOUT(val3, FUTEX_OP_CMPARG_MASK);

	switch (cmp) {
	case FUTEX_OP_CMP_EQ:
		return (oldval == cmparg);

	case FUTEX_OP_CMP_NE:
		return (oldval != cmparg);

	case FUTEX_OP_CMP_LT:
		return (oldval < cmparg);

	case FUTEX_OP_CMP_LE:
		return (oldval <= cmparg);

	case FUTEX_OP_CMP_GT:
		return (oldval > cmparg);

	case FUTEX_OP_CMP_GE:
		return (oldval >= cmparg);

	default:
		panic("invalid futex cmp operation");
	}
}

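/*
 * The val3 argument to FUTEX_WAKE_OP packs an operation, an
 * operation argument, a comparison, and a comparison argument into
 * one word via the FUTEX_OP_*_MASK fields decoded by
 * futex_compute_op/futex_compute_cmp above.  As a sketch only
 * (assuming NetBSD's __SHIFTIN macro; userland callers more likely
 * use a Linux-style FUTEX_OP() encoding macro), `set *uaddr2 = 1,
 * and wake uaddr2 if its old value was 0' could be encoded as:
 *
 *	val3 = __SHIFTIN(FUTEX_OP_SET, FUTEX_OP_OP_MASK) |
 *	    __SHIFTIN(1, FUTEX_OP_OPARG_MASK) |
 *	    __SHIFTIN(FUTEX_OP_CMP_EQ, FUTEX_OP_CMP_MASK) |
 *	    __SHIFTIN(0, FUTEX_OP_CMPARG_MASK);
 */
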
/*
 * futex_func_wake_op(shared, uaddr, val, uaddr2, val2, val3, retval)
 *
 *	Implement futex(FUTEX_WAKE_OP).
 */
static int
futex_func_wake_op(bool shared, int *uaddr, int val, int *uaddr2, int val2,
    int val3, register_t *retval)
{
	struct futex *f = NULL, *f2 = NULL;
	int oldval, newval, actual;
	unsigned nwoken = 0;
	int error;

	/* Reject negative numbers of wakeups. */
	if (val < 0 || val2 < 0) {
		error = EINVAL;
		goto out;
	}

	/* Reject invalid operations before we start doing things. */
	if ((error = futex_validate_op_cmp(val3)) != 0)
		goto out;

	/* Look up the first futex, if any. */
	error = futex_lookup(uaddr, shared, &f);
	if (error)
		goto out;

	/* Look up the second futex, if any. */
	error = futex_lookup(uaddr2, shared, &f2);
	if (error)
		goto out;

	/*
	 * Under the queue locks:
	 *
	 * 1. Read/modify/write: *uaddr2 op= oparg.
	 * 2. Unconditionally wake uaddr.
	 * 3. Conditionally wake uaddr2, if its old value matched the
	 *    comparison encoded in val3.
	 */
	futex_queue_lock2(f, f2);
	do {
		error = futex_load(uaddr2, &oldval);
		if (error)
			goto out_unlock;
		newval = futex_compute_op(oldval, val3);
		error = ucas_int(uaddr2, oldval, newval, &actual);
		if (error)
			goto out_unlock;
	} while (actual != oldval);
	nwoken = (f ? futex_wake(f, val, NULL, 0, FUTEX_BITSET_MATCH_ANY) : 0);
	if (f2 && futex_compute_cmp(oldval, val3))
		nwoken += futex_wake(f2, val2, NULL, 0,
		    FUTEX_BITSET_MATCH_ANY);

	/* Success! */
	error = 0;
out_unlock:
	futex_queue_unlock2(f, f2);

out:
	/* Return the number of waiters woken. */
	*retval = nwoken;

	/* Release the futexes, if we got them. */
	if (f2)
		futex_rele(f2);
	if (f)
		futex_rele(f);
	return error;
}

/*
 * do_futex(uaddr, op, val, timeout, uaddr2, val2, val3, retval)
 *
 *	Implement the futex system call with all the parameters
 *	parsed out.
 */
int
do_futex(int *uaddr, int op, int val, const struct timespec *timeout,
    int *uaddr2, int val2, int val3, register_t *retval)
{
	const bool shared = (op & FUTEX_PRIVATE_FLAG) ? false : true;
	const clockid_t clkid = (op & FUTEX_CLOCK_REALTIME) ? CLOCK_REALTIME
	    : CLOCK_MONOTONIC;

	op &= FUTEX_CMD_MASK;

	switch (op) {
	case FUTEX_WAIT:
		return futex_func_wait(shared, uaddr, val,
		    FUTEX_BITSET_MATCH_ANY, timeout, clkid, TIMER_RELTIME,
		    retval);

	case FUTEX_WAKE:
		val3 = FUTEX_BITSET_MATCH_ANY;
		/* FALLTHROUGH */
	case FUTEX_WAKE_BITSET:
		return futex_func_wake(shared, uaddr, val, val3, retval);

	case FUTEX_REQUEUE:
	case FUTEX_CMP_REQUEUE:
		return futex_func_requeue(shared, op, uaddr, val, uaddr2,
		    val2, val3, retval);

	case FUTEX_WAIT_BITSET:
		return futex_func_wait(shared, uaddr, val, val3, timeout,
		    clkid, TIMER_ABSTIME, retval);

	case FUTEX_WAKE_OP:
		return futex_func_wake_op(shared, uaddr, val, uaddr2, val2,
		    val3, retval);

	case FUTEX_FD:
	default:
		return ENOSYS;
	}
}

/*
 * sys___futex(l, uap, retval)
 *
 *	__futex(2) system call: generic futex operations.
 */
int
sys___futex(struct lwp *l, const struct sys___futex_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int *) uaddr;
		syscallarg(int) op;
		syscallarg(int) val;
		syscallarg(const struct timespec *) timeout;
		syscallarg(int *) uaddr2;
		syscallarg(int) val2;
		syscallarg(int) val3;
	} */
	struct timespec ts, *tsp;
	int error;

	/*
	 * Copy in the timeout argument, if specified.
	 */
	if (SCARG(uap, timeout)) {
		error = copyin(SCARG(uap, timeout), &ts, sizeof(ts));
		if (error)
			return error;
		tsp = &ts;
	} else {
		tsp = NULL;
	}

	return do_futex(SCARG(uap, uaddr), SCARG(uap, op), SCARG(uap, val),
	    tsp, SCARG(uap, uaddr2), SCARG(uap, val2), SCARG(uap, val3),
	    retval);
}

/*
 * sys___futex_set_robust_list(l, uap, retval)
 *
 *	__futex_set_robust_list(2) system call for robust futexes.
 */
int
sys___futex_set_robust_list(struct lwp *l,
    const struct sys___futex_set_robust_list_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) head;
		syscallarg(size_t) len;
	} */
	void *head = SCARG(uap, head);

	if (SCARG(uap, len) != _FUTEX_ROBUST_HEAD_SIZE)
		return EINVAL;
	if ((uintptr_t)head % sizeof(u_long))
		return EINVAL;

	l->l_robust_head = (uintptr_t)head;

	return 0;
}

/*
 * sys___futex_get_robust_list(l, uap, retval)
 *
 *	__futex_get_robust_list(2) system call for robust futexes.
 */
int
sys___futex_get_robust_list(struct lwp *l,
    const struct sys___futex_get_robust_list_args *uap, register_t *retval)
{
	/* {
		syscallarg(lwpid_t) lwpid;
		syscallarg(void **) headp;
		syscallarg(size_t *) lenp;
	} */
	void *head;
	const size_t len = _FUTEX_ROBUST_HEAD_SIZE;
	int error;

	error = futex_robust_head_lookup(l, SCARG(uap, lwpid), &head);
	if (error)
		return error;

	/* Copy out the head pointer and the head structure length. */
	error = copyout(&head, SCARG(uap, headp), sizeof(head));
	if (__predict_true(error == 0)) {
		error = copyout(&len, SCARG(uap, lenp), sizeof(len));
	}

	return error;
}

/*
 * release_futex(uptr, tid, is_pi, is_pending)
 *
 *	Try to release the robust futex at uptr in the current process
 *	on lwp exit.  If anything goes wrong, silently fail.  It is the
 *	userland program's obligation to arrange correct behaviour.
 */
static void
release_futex(uintptr_t const uptr, lwpid_t const tid, bool const is_pi,
    bool const is_pending)
{
	int *uaddr;
	struct futex *f;
	int oldval, newval, actual;
	int error;

	/* If it's misaligned, tough. */
	if (__predict_false(uptr & 3))
		return;
	uaddr = (int *)uptr;

	error = futex_load(uaddr, &oldval);
	if (__predict_false(error))
		return;

	/*
	 * There are two race conditions we need to handle here:
	 *
	 * 1. User space cleared the futex word but died before
	 *    being able to issue the wakeup.  No wakeups will
	 *    ever be issued, oops!
	 *
	 * 2. Awakened waiter died before being able to acquire
	 *    the futex in user space.  Any other waiters are
	 *    now stuck, oops!
	 *
	 * In both of these cases, the futex word will be 0 (because
	 * it's updated before the wake is issued).  The best we can
	 * do is detect this situation if it's the pending futex and
	 * issue a wake without modifying the futex word.
	 *
	 * XXX eventual PI handling?
	 */
	if (__predict_false(is_pending && (oldval & ~FUTEX_WAITERS) == 0)) {
		register_t retval;
		(void) futex_func_wake(/*shared*/true, uaddr, 1,
		    FUTEX_BITSET_MATCH_ANY, &retval);
		return;
	}

	/* Optimistically test whether we need to do anything at all. */
	if ((oldval & FUTEX_TID_MASK) != tid)
		return;

	/*
	 * We need to handle the case where this thread owned the futex,
	 * but it was uncontended.  In this case, there won't be any
	 * kernel state to look up.  All we can do is mark the futex
	 * as a zombie to be mopped up the next time another thread
	 * attempts to acquire it.
	 *
	 * N.B. It's important to set FUTEX_OWNER_DIED in this loop,
	 * even if waiters appear while we are doing so.  This is
	 * because FUTEX_WAITERS is set by user space before calling
	 * __futex() to wait, and the futex needs to be marked as a
	 * zombie when the new waiter gets into the kernel.
	 */
	if ((oldval & FUTEX_WAITERS) == 0) {
		do {
			error = futex_load(uaddr, &oldval);
			if (error)
				return;
			if ((oldval & FUTEX_TID_MASK) != tid)
				return;
			newval = oldval | FUTEX_OWNER_DIED;
			error = ucas_int(uaddr, oldval, newval, &actual);
			if (error)
				return;
		} while (actual != oldval);

		/*
		 * If there is still no indication of waiters, then
		 * there is no more work for us to do.
		 */
		if ((oldval & FUTEX_WAITERS) == 0)
			return;
	}

	/*
	 * Look for a shared futex since we have no positive indication
	 * it is private.  If we can't, tough.
	 */
	error = futex_lookup(uaddr, /*shared*/true, &f);
	if (error)
		return;

	/*
	 * If there's no kernel state for this futex, there's nothing to
	 * release.
	 */
	if (f == NULL)
		return;

	/* Work under the futex queue lock. */
	futex_queue_lock(f);

	/*
	 * Fetch the word: if the tid doesn't match ours, skip;
	 * otherwise, set the owner-died bit, atomically.
	 */
	do {
		error = futex_load(uaddr, &oldval);
		if (error)
			goto out;
		if ((oldval & FUTEX_TID_MASK) != tid)
			goto out;
		newval = oldval | FUTEX_OWNER_DIED;
		error = ucas_int(uaddr, oldval, newval, &actual);
		if (error)
			goto out;
	} while (actual != oldval);

	/*
	 * If there may be waiters, try to wake one.  If anything goes
	 * wrong, tough.
	 *
	 * XXX eventual PI handling?
	 */
	if (oldval & FUTEX_WAITERS)
		(void)futex_wake(f, 1, NULL, 0, FUTEX_BITSET_MATCH_ANY);

	/* Unlock the queue and release the futex. */
out:	futex_queue_unlock(f);
	futex_rele(f);
}

/*
 * futex_robust_head_lookup(l, lwpid, headp)
 *
 *	Helper function to look up a robust head by LWP ID.
 */
int
futex_robust_head_lookup(struct lwp *l, lwpid_t lwpid, void **headp)
{
	struct proc *p = l->l_proc;

	/* Find the other lwp, if requested; otherwise use our robust head. */
	if (lwpid) {
		mutex_enter(p->p_lock);
		l = lwp_find(p, lwpid);
		if (l == NULL) {
			mutex_exit(p->p_lock);
			return ESRCH;
		}
		*headp = (void *)l->l_robust_head;
		mutex_exit(p->p_lock);
	} else {
		*headp = (void *)l->l_robust_head;
	}
	return 0;
}

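/*
 * The robust list head registered via __futex_set_robust_list(2) is
 * fetched below as an array of _FUTEX_ROBUST_HEAD_NWORDS u_longs,
 * indexed by _FUTEX_ROBUST_HEAD_LIST, _FUTEX_ROBUST_HEAD_OFFSET,
 * and _FUTEX_ROBUST_HEAD_PENDING.  As a sketch only (the
 * authoritative layout lives in <sys/futex.h>), the equivalent
 * userland view is roughly:
 *
 *	struct futex_robust_head {
 *		uintptr_t list;		// circular list of entries,
 *					// terminated by a pointer
 *					// back to this head
 *		long offset;		// entry -> futex word offset
 *		uintptr_t pending;	// entry being (un)locked now
 *	};
 */
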
/*
 * futex_fetch_robust_head(uaddr, rhead)
 *
 *	Helper routine to fetch the futex robust list head that
 *	handles 32-bit binaries running on 64-bit kernels.
 */
static int
futex_fetch_robust_head(uintptr_t uaddr, u_long *rhead)
{
#ifdef _LP64
	if (curproc->p_flag & PK_32) {
		uint32_t rhead32[_FUTEX_ROBUST_HEAD_NWORDS];
		int error;

		error = copyin((void *)uaddr, rhead32, sizeof(rhead32));
		if (__predict_true(error == 0)) {
			for (int i = 0; i < _FUTEX_ROBUST_HEAD_NWORDS; i++) {
				if (i == _FUTEX_ROBUST_HEAD_OFFSET) {
					/*
					 * Make sure the offset is sign-
					 * extended.
					 */
					rhead[i] = (int32_t)rhead32[i];
				} else {
					rhead[i] = rhead32[i];
				}
			}
		}
		return error;
	}
#endif /* _LP64 */

	return copyin((void *)uaddr, rhead,
	    sizeof(*rhead) * _FUTEX_ROBUST_HEAD_NWORDS);
}

/*
 * futex_decode_robust_word(word, entry, is_pi)
 *
 *	Decode a robust futex list word into the entry and entry
 *	properties.
 */
static inline void
futex_decode_robust_word(uintptr_t const word, uintptr_t * const entry,
    bool * const is_pi)
{
	*is_pi = (word & _FUTEX_ROBUST_ENTRY_PI) ? true : false;
	*entry = word & ~_FUTEX_ROBUST_ENTRY_PI;
}

/*
 * futex_fetch_robust_entry(uaddr, valp, is_pi)
 *
 *	Helper routine to fetch and decode a robust futex entry
 *	that handles 32-bit binaries running on 64-bit kernels.
 */
static int
futex_fetch_robust_entry(uintptr_t const uaddr, uintptr_t * const valp,
    bool * const is_pi)
{
	uintptr_t val = 0;
	int error = 0;

#ifdef _LP64
	if (curproc->p_flag & PK_32) {
		uint32_t val32;

		error = ufetch_32((uint32_t *)uaddr, &val32);
		if (__predict_true(error == 0))
			val = val32;
	} else
#endif /* _LP64 */
	error = ufetch_long((u_long *)uaddr, (u_long *)&val);
	if (__predict_false(error))
		return error;

	futex_decode_robust_word(val, valp, is_pi);
	return 0;
}

/*
 * futex_release_all_lwp(l)
 *
 *	Release all l's robust futexes.  If anything looks funny in
 *	the process, give up -- it's userland's responsibility to dot
 *	the i's and cross the t's.
 */
void
futex_release_all_lwp(struct lwp * const l)
{
	u_long rhead[_FUTEX_ROBUST_HEAD_NWORDS];
	int limit = 1000000;
	int error;

	/* If there's no robust list there's nothing to do. */
	if (l->l_robust_head == 0)
		return;

	KASSERT((l->l_lid & FUTEX_TID_MASK) == l->l_lid);

	/* Read the final snapshot of the robust list head. */
	error = futex_fetch_robust_head(l->l_robust_head, rhead);
	if (error) {
		printf("WARNING: pid %jd (%s) lwp %jd:"
		    " unmapped robust futex list head\n",
		    (uintmax_t)l->l_proc->p_pid, l->l_proc->p_comm,
		    (uintmax_t)l->l_lid);
		return;
	}

	const long offset = (long)rhead[_FUTEX_ROBUST_HEAD_OFFSET];

	uintptr_t next, pending;
	bool is_pi, pending_is_pi;

	futex_decode_robust_word(rhead[_FUTEX_ROBUST_HEAD_LIST],
	    &next, &is_pi);
	futex_decode_robust_word(rhead[_FUTEX_ROBUST_HEAD_PENDING],
	    &pending, &pending_is_pi);

	/*
	 * Walk down the list of locked futexes and release them, up
	 * to one million of them before we give up.
	 */

	while (next != l->l_robust_head && limit-- > 0) {
		/* pending handled below. */
		if (next != pending)
			release_futex(next + offset, l->l_lid, is_pi, false);
		error = futex_fetch_robust_entry(next, &next, &is_pi);
		if (error)
			break;
		preempt_point();
	}
	if (limit <= 0) {
		printf("WARNING: pid %jd (%s) lwp %jd:"
		    " exhausted robust futex limit\n",
		    (uintmax_t)l->l_proc->p_pid, l->l_proc->p_comm,
		    (uintmax_t)l->l_lid);
	}

	/* If there's a pending futex, it may need to be released too. */
	if (pending != 0) {
		release_futex(pending + offset, l->l_lid, pending_is_pi, true);
	}
}