1 /* $NetBSD: sys_futex.c,v 1.23 2025/03/05 12:02:00 riastradh Exp $ */
2
3 /*-
4 * Copyright (c) 2018, 2019, 2020 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Taylor R. Campbell and Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: sys_futex.c,v 1.23 2025/03/05 12:02:00 riastradh Exp $");
34
35 /*
36 * Futexes
37 *
38 * The futex system call coordinates notifying threads waiting for
39 * changes on a 32-bit word of memory. The word can be managed by
40 * CPU atomic operations in userland, without system calls, as long
41 * as there is no contention.
42 *
43 * The simplest use case demonstrating the utility is:
44 *
45 * // 32-bit word of memory shared among threads or
46 * // processes in userland. lock & 1 means owned;
47 * // lock & 2 means there are waiters waiting.
48 * volatile int lock = 0;
49 *
50 * int v;
51 *
52 * // Acquire a lock.
53 * do {
54 * v = lock;
55 * if (v & 1) {
56 * // Lock is held. Set a bit to say that
57 * // there are waiters, and wait for lock
58 * // to change to anything other than v;
59 * // then retry.
60 * if (atomic_cas_uint(&lock, v, v | 2) != v)
61 * continue;
62 * futex(FUTEX_WAIT, &lock, v | 2, NULL, NULL, 0);
63 * continue;
64 * }
65 * } while (atomic_cas_uint(&lock, v, v | 1) != v);
66 * membar_acquire();
67 *
68 * ...
69 *
70 * // Release the lock. Optimistically assume there are
71 * // no waiters first until demonstrated otherwise.
72 * membar_release();
73 * if (atomic_cas_uint(&lock, 1, 0) != 1) {
74 * // There may be waiters.
75 * v = atomic_swap_uint(&lock, 0);
76 * // If there are still waiters, wake one.
77 * if (v & 2)
78 * futex(FUTEX_WAKE, &lock, 1, NULL, NULL, 0);
79 * }
80 *
81 * The goal is to avoid the futex system call unless there is
82 * contention; then if there is contention, to guarantee no missed
83 * wakeups.
84 *
85 * For a simple implementation, futex(FUTEX_WAIT) could queue
86 * itself to be woken, double-check the lock word, and then sleep;
87 * spurious wakeups are generally a fact of life, so any
88 * FUTEX_WAKE could just wake every FUTEX_WAIT in the system.
89 *
90 * If this were all there is to it, we could then increase
91 * parallelism by refining the approximation: partition the
92 * waiters into buckets by hashing the lock addresses to reduce
93 * the incidence of spurious wakeups. But this is not all.
94 *
95 * The futex(FUTEX_CMP_REQUEUE, &lock, n, &lock2, m, val)
96 * operation not only wakes n waiters on lock if lock == val, but
97 * also _transfers_ m additional waiters to lock2. Unless wakeups
98 * on lock2 also trigger wakeups on lock, we cannot move waiters
99 * to lock2 if they merely share the same hash as waiters on lock.
100 * Thus, we can't approximately distribute waiters into queues by
101 * a hash function; we must distinguish futex queues exactly by
102 * lock address.
103 *
104 * For now, we use a global red/black tree to index futexes. This
105 * should be replaced by a lockless radix tree with a thread to
106 * free entries no longer in use once all lookups on all CPUs have
107 * completed.
108 *
109 * Specifically, we maintain two maps:
110 *
111 * futex_tab.va[vmspace, va] for private futexes
112 * futex_tab.oa[uvm_voaddr] for shared futexes
113 *
114 * This implementation does not support priority inheritance.
115 */
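/*
 * As a further illustration, in the same userland pseudocode style
 * as the example above (a sketch only, not part of the kernel): the
 * FUTEX_CMP_REQUEUE operation described above is what makes an
 * efficient condition-variable broadcast possible.  Rather than
 * waking every waiter and letting them all stampede for the lock at
 * once, wake one waiter and transfer the rest directly onto the
 * lock word:
 *
 *	// cv is a 32-bit futex word, incremented under the lock by
 *	// every signal/broadcast; a waiter samples v = cv, releases
 *	// the lock, and sleeps with
 *	// futex(FUTEX_WAIT, &cv, v, NULL, NULL, 0).
 *
 *	// Broadcast: wake one waiter now, and requeue the rest onto
 *	// the lock word so they are woken one at a time as the lock
 *	// is released, provided cv still equals the value we read.
 *	// If cv changed concurrently, the call fails with EAGAIN and
 *	// the broadcast is simply retried.
 *	v = cv;
 *	futex(FUTEX_CMP_REQUEUE, &cv, 1, &lock, INT_MAX, v);
 */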
116
117 #include <sys/param.h>
118 #include <sys/types.h>
119 #include <sys/atomic.h>
120 #include <sys/condvar.h>
121 #include <sys/futex.h>
122 #include <sys/mutex.h>
123 #include <sys/rbtree.h>
124 #include <sys/queue.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/proc.h>
#include <sys/systm.h>
125
126 #include <sys/syscall.h>
127 #include <sys/syscallargs.h>
128 #include <sys/syscallvar.h>
129
130 #include <uvm/uvm_extern.h>
131
132 /*
133 * Lock order:
134 *
135 * futex_tab.lock
136 * futex::fx_qlock ordered by kva of struct futex
137 * -> futex_wait::fw_lock only one at a time
138 * futex_wait::fw_lock only one at a time
139 * -> futex::fx_abortlock only one at a time
140 */
141
142 /*
143 * union futex_key
144 *
145 * A futex is addressed either by a vmspace+va (private) or by
146 * a uvm_voaddr (shared).
147 */
148 union futex_key {
149 struct {
150 struct vmspace *vmspace;
151 vaddr_t va;
152 } fk_private;
153 struct uvm_voaddr fk_shared;
154 };
155
156 /*
157 * struct futex
158 *
159 * Kernel state for a futex located at a particular address in a
160 * particular virtual address space.
161 *
162 * N.B. fx_refcnt is an unsigned long because we need to be able
163 * to operate on it atomically on all systems while at the same
164 * time rendering practically impossible the chance of it reaching
165 * its max value. In practice, we're limited by the number of LWPs
166 * that can be present on the system at any given time, and the
167 * assumption is that limit will be good enough on a 32-bit platform.
168 * See futex_wake() for why overflow needs to be avoided.
169 */
170 struct futex {
171 union futex_key fx_key;
172 unsigned long fx_refcnt;
173 bool fx_shared;
174 bool fx_on_tree;
175 struct rb_node fx_node;
176
177 kmutex_t fx_qlock;
178 TAILQ_HEAD(, futex_wait) fx_queue;
179
180 kmutex_t fx_abortlock;
181 LIST_HEAD(, futex_wait) fx_abortlist;
182 kcondvar_t fx_abortcv;
183 };
184
185 /*
186 * struct futex_wait
187 *
188 * State for a thread to wait on a futex. Threads wait on fw_cv
189 * for fw_bitset to be set to zero. The thread may transition to
190 * a different futex queue at any time under the futex's lock.
191 */
192 struct futex_wait {
193 kmutex_t fw_lock;
194 kcondvar_t fw_cv;
195 struct futex *fw_futex;
196 TAILQ_ENTRY(futex_wait) fw_entry; /* queue lock */
197 LIST_ENTRY(futex_wait) fw_abort; /* queue abortlock */
198 int fw_bitset;
199 bool fw_aborting; /* fw_lock */
200 };
201
202 /*
203 * futex_tab
204 *
205 * Global trees of futexes by vmspace/va and VM object address.
206 *
207 * XXX This obviously doesn't scale in parallel. We could use a
208 * pserialize-safe data structure, but there may be a high cost to
209 * frequent deletion since we don't cache futexes after we're done
210 * with them. We could use hashed locks. But for now, just make
211 * sure userland can't DoS the serial performance, by using a
212 * balanced binary tree for lookup.
213 *
214 * XXX We could use a per-process tree for the table indexed by
215 * virtual address to reduce contention between processes.
216 */
217 static struct {
218 kmutex_t lock;
219 struct rb_tree va;
220 struct rb_tree oa;
221 } futex_tab __cacheline_aligned;
222
223 static int
224 compare_futex_key(void *cookie, const void *n, const void *k)
225 {
226 const struct futex *fa = n;
227 const union futex_key *fka = &fa->fx_key;
228 const union futex_key *fkb = k;
229
230 if ((uintptr_t)fka->fk_private.vmspace <
231 (uintptr_t)fkb->fk_private.vmspace)
232 return -1;
233 if ((uintptr_t)fka->fk_private.vmspace >
234 (uintptr_t)fkb->fk_private.vmspace)
235 return +1;
236 if (fka->fk_private.va < fkb->fk_private.va)
237 return -1;
238 if (fka->fk_private.va > fkb->fk_private.va)
239 return +1;
240 return 0;
241 }
242
243 static int
244 compare_futex(void *cookie, const void *na, const void *nb)
245 {
246 const struct futex *fa = na;
247 const struct futex *fb = nb;
248
249 return compare_futex_key(cookie, fa, &fb->fx_key);
250 }
251
252 static const rb_tree_ops_t futex_rb_ops = {
253 .rbto_compare_nodes = compare_futex,
254 .rbto_compare_key = compare_futex_key,
255 .rbto_node_offset = offsetof(struct futex, fx_node),
256 };
257
258 static int
259 compare_futex_shared_key(void *cookie, const void *n, const void *k)
260 {
261 const struct futex *fa = n;
262 const union futex_key *fka = &fa->fx_key;
263 const union futex_key *fkb = k;
264
265 return uvm_voaddr_compare(&fka->fk_shared, &fkb->fk_shared);
266 }
267
268 static int
269 compare_futex_shared(void *cookie, const void *na, const void *nb)
270 {
271 const struct futex *fa = na;
272 const struct futex *fb = nb;
273
274 return compare_futex_shared_key(cookie, fa, &fb->fx_key);
275 }
276
277 static const rb_tree_ops_t futex_shared_rb_ops = {
278 .rbto_compare_nodes = compare_futex_shared,
279 .rbto_compare_key = compare_futex_shared_key,
280 .rbto_node_offset = offsetof(struct futex, fx_node),
281 };
282
283 static void futex_wait_dequeue(struct futex_wait *, struct futex *);
284
285 /*
286 * futex_load(uaddr, kaddr)
287 *
288 * Perform a single atomic load to read *uaddr, and return the
289 * result in *kaddr. Return 0 on success, EFAULT if uaddr is not
290 * mapped.
291 */
292 static inline int
293 futex_load(int *uaddr, int *kaddr)
294 {
295 return ufetch_int((u_int *)uaddr, (u_int *)kaddr);
296 }
297
298 /*
299 * futex_test(uaddr, expected)
300 *
301 * True if *uaddr == expected. False if *uaddr != expected, or if
302 * uaddr is not mapped.
303 */
304 static bool
305 futex_test(int *uaddr, int expected)
306 {
307 int val;
308 int error;
309
310 error = futex_load(uaddr, &val);
311 if (error)
312 return false;
313 return val == expected;
314 }
315
316 /*
317 * futex_sys_init()
318 *
319 * Initialize the futex subsystem.
320 */
321 void
322 futex_sys_init(void)
323 {
324
325 mutex_init(&futex_tab.lock, MUTEX_DEFAULT, IPL_NONE);
326 rb_tree_init(&futex_tab.va, &futex_rb_ops);
327 rb_tree_init(&futex_tab.oa, &futex_shared_rb_ops);
328 }
329
330 /*
331 * futex_sys_fini()
332 *
333 * Finalize the futex subsystem.
334 */
335 void
336 futex_sys_fini(void)
337 {
338
339 KASSERT(RB_TREE_MIN(&futex_tab.oa) == NULL);
340 KASSERT(RB_TREE_MIN(&futex_tab.va) == NULL);
341 mutex_destroy(&futex_tab.lock);
342 }
343
344 /*
345 * futex_queue_init(f)
346 *
347 * Initialize the futex queue. Caller must call futex_queue_fini
348 * when done.
349 *
350 * Never sleeps.
351 */
352 static void
353 futex_queue_init(struct futex *f)
354 {
355
356 mutex_init(&f->fx_qlock, MUTEX_DEFAULT, IPL_NONE);
357 mutex_init(&f->fx_abortlock, MUTEX_DEFAULT, IPL_NONE);
358 cv_init(&f->fx_abortcv, "fqabort");
359 LIST_INIT(&f->fx_abortlist);
360 TAILQ_INIT(&f->fx_queue);
361 }
362
363 /*
364 * futex_queue_drain(f)
365 *
366 * Wait for any aborting waiters in f; then empty the queue of
367 * any stragglers and wake them. Caller must guarantee no new
368 * references to f.
369 *
370 * May sleep.
371 */
372 static void
373 futex_queue_drain(struct futex *f)
374 {
375 struct futex_wait *fw, *fw_next;
376
377 mutex_enter(&f->fx_abortlock);
378 while (!LIST_EMPTY(&f->fx_abortlist))
379 cv_wait(&f->fx_abortcv, &f->fx_abortlock);
380 mutex_exit(&f->fx_abortlock);
381
382 mutex_enter(&f->fx_qlock);
383 TAILQ_FOREACH_SAFE(fw, &f->fx_queue, fw_entry, fw_next) {
384 mutex_enter(&fw->fw_lock);
385 futex_wait_dequeue(fw, f);
386 cv_broadcast(&fw->fw_cv);
387 mutex_exit(&fw->fw_lock);
388 }
389 mutex_exit(&f->fx_qlock);
390 }
391
392 /*
393 * futex_queue_fini(f)
394 *
395 * Finalize the futex queue initialized by futex_queue_init. Queue
396 * must be empty. Caller must not use f again until a subsequent
397 * futex_queue_init.
398 */
399 static void
400 futex_queue_fini(struct futex *f)
401 {
402
403 KASSERT(TAILQ_EMPTY(&f->fx_queue));
404 KASSERT(LIST_EMPTY(&f->fx_abortlist));
405 mutex_destroy(&f->fx_qlock);
406 mutex_destroy(&f->fx_abortlock);
407 cv_destroy(&f->fx_abortcv);
408 }
409
410 /*
411 * futex_key_init(key, vm, va, shared)
412 *
413 * Initialize a futex key for lookup, etc.
414 */
415 static int
416 futex_key_init(union futex_key *fk, struct vmspace *vm, vaddr_t va, bool shared)
417 {
418 int error = 0;
419
420 if (__predict_false(shared)) {
421 if (!uvm_voaddr_acquire(&vm->vm_map, va, &fk->fk_shared))
422 error = EFAULT;
423 } else {
424 fk->fk_private.vmspace = vm;
425 fk->fk_private.va = va;
426 }
427
428 return error;
429 }
430
431 /*
432 * futex_key_fini(key, shared)
433 *
434 * Release a futex key.
435 */
436 static void
437 futex_key_fini(union futex_key *fk, bool shared)
438 {
439 if (__predict_false(shared))
440 uvm_voaddr_release(&fk->fk_shared);
441 memset(fk, 0, sizeof(*fk));
442 }
443
444 /*
445 * futex_create(fk, shared)
446 *
447 * Create a futex. Initial reference count is 1, representing the
448 * caller. Returns NULL on failure. Always takes ownership of the
449 * key, either transferring it to the newly-created futex, or releasing
450 * the key if creation fails.
451 *
452 * Never sleeps for memory, but may sleep to acquire a lock.
453 */
454 static struct futex *
455 futex_create(union futex_key *fk, bool shared)
456 {
457 struct futex *f;
458
459 f = kmem_alloc(sizeof(*f), KM_NOSLEEP);
460 if (f == NULL) {
461 futex_key_fini(fk, shared);
462 return NULL;
463 }
464 f->fx_key = *fk;
465 f->fx_refcnt = 1;
466 f->fx_shared = shared;
467 f->fx_on_tree = false;
468 futex_queue_init(f);
469
470 return f;
471 }
472
473 /*
474 * futex_destroy(f)
475 *
476 * Destroy a futex created with futex_create. Reference count
477 * must be zero.
478 *
479 * May sleep.
480 */
481 static void
482 futex_destroy(struct futex *f)
483 {
484
485 ASSERT_SLEEPABLE();
486
487 KASSERT(atomic_load_relaxed(&f->fx_refcnt) == 0);
488 KASSERT(!f->fx_on_tree);
489
490 /* Drain and destroy the private queue. */
491 futex_queue_drain(f);
492 futex_queue_fini(f);
493
494 futex_key_fini(&f->fx_key, f->fx_shared);
495
496 kmem_free(f, sizeof(*f));
497 }
498
499 /*
500 * futex_hold(f)
501 *
502 * Attempt to acquire a reference to f. Return 0 on success,
503 * ENFILE on too many references.
504 *
505 * Never sleeps.
506 */
507 static int
508 futex_hold(struct futex *f)
509 {
510 unsigned long refcnt;
511
512 do {
513 refcnt = atomic_load_relaxed(&f->fx_refcnt);
514 if (refcnt == ULONG_MAX)
515 return ENFILE;
516 } while (atomic_cas_ulong(&f->fx_refcnt, refcnt, refcnt + 1) != refcnt);
517
518 return 0;
519 }
520
521 /*
522 * futex_rele(f)
523 *
524 * Release a reference to f acquired with futex_create or
525 * futex_hold.
526 *
527 * May sleep to free f.
528 */
529 static void
530 futex_rele(struct futex *f)
531 {
532 unsigned long refcnt;
533
534 ASSERT_SLEEPABLE();
535
536 do {
537 refcnt = atomic_load_relaxed(&f->fx_refcnt);
538 if (refcnt == 1)
539 goto trylast;
540 membar_release();
541 } while (atomic_cas_ulong(&f->fx_refcnt, refcnt, refcnt - 1) != refcnt);
542 return;
543
544 trylast:
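	/*
	 * This is probably the last reference.  Take the global
	 * futex_tab.lock before the final decrement so that dropping
	 * the last reference and unhooking the futex from the tree
	 * happen atomically with respect to futex_lookup_by_key(),
	 * which takes new references only under that same lock.
	 */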
545 mutex_enter(&futex_tab.lock);
546 if (atomic_dec_ulong_nv(&f->fx_refcnt) == 0) {
547 membar_acquire();
548 if (f->fx_on_tree) {
549 if (__predict_false(f->fx_shared))
550 rb_tree_remove_node(&futex_tab.oa, f);
551 else
552 rb_tree_remove_node(&futex_tab.va, f);
553 f->fx_on_tree = false;
554 }
555 } else {
556 /* References remain -- don't destroy it. */
557 f = NULL;
558 }
559 mutex_exit(&futex_tab.lock);
560 if (f != NULL)
561 futex_destroy(f);
562 }
563
564 /*
565 * futex_rele_not_last(f)
566 *
567 * Release a reference to f acquired with futex_create or
568 * futex_hold.
569 *
570 * This version asserts that we are not dropping the last
571 * reference to f.
572 */
573 static void
574 futex_rele_not_last(struct futex *f)
575 {
576 unsigned long refcnt;
577
578 do {
579 refcnt = atomic_load_relaxed(&f->fx_refcnt);
580 KASSERT(refcnt > 1);
581 } while (atomic_cas_ulong(&f->fx_refcnt, refcnt, refcnt - 1) != refcnt);
582 }
583
584 /*
585 * futex_lookup_by_key(key, shared, &f)
586 *
587 * Try to find an existing futex va reference for the specified key.
588 * On success, return 0, set f to the found futex or to NULL if not
589 * found, and increment f's reference count if found.
590 *
591 * Return ENFILE if reference count too high.
592 *
593 * Internal lookup routine shared by futex_lookup() and
594 * futex_lookup_create().
595 */
596 static int
597 futex_lookup_by_key(union futex_key *fk, bool shared, struct futex **fp)
598 {
599 struct futex *f;
600 int error = 0;
601
602 mutex_enter(&futex_tab.lock);
603 if (__predict_false(shared)) {
604 f = rb_tree_find_node(&futex_tab.oa, fk);
605 } else {
606 f = rb_tree_find_node(&futex_tab.va, fk);
607 }
608 if (f) {
609 error = futex_hold(f);
610 if (error)
611 f = NULL;
612 }
613 *fp = f;
614 mutex_exit(&futex_tab.lock);
615
616 return error;
617 }
618
619 /*
620 * futex_insert(f, fp)
621 *
622 * Try to insert the futex f into the tree by va. If there
623 * already is a futex for its va, acquire a reference to it, and
624 * store it in *fp; otherwise store f in *fp.
625 *
626 * Return 0 on success, ENFILE if there already is a futex but its
627 * reference count is too high.
628 */
629 static int
630 futex_insert(struct futex *f, struct futex **fp)
631 {
632 struct futex *f0;
633 int error;
634
635 KASSERT(atomic_load_relaxed(&f->fx_refcnt) != 0);
636 KASSERT(!f->fx_on_tree);
637
638 mutex_enter(&futex_tab.lock);
639 if (__predict_false(f->fx_shared))
640 f0 = rb_tree_insert_node(&futex_tab.oa, f);
641 else
642 f0 = rb_tree_insert_node(&futex_tab.va, f);
643 if (f0 == f) {
644 f->fx_on_tree = true;
645 error = 0;
646 } else {
647 KASSERT(atomic_load_relaxed(&f0->fx_refcnt) != 0);
648 KASSERT(f0->fx_on_tree);
649 error = futex_hold(f0);
650 if (error)
651 goto out;
652 }
653 *fp = f0;
654 out: mutex_exit(&futex_tab.lock);
655
656 return error;
657 }
658
659 /*
660 * futex_lookup(uaddr, shared, &f)
661 *
662 * Find a futex at the userland pointer uaddr in the current
663 * process's VM space. On success, return the futex in f and
664 * increment its reference count.
665 *
666 * Caller must call futex_rele when done.
667 */
668 static int
669 futex_lookup(int *uaddr, bool shared, struct futex **fp)
670 {
671 union futex_key fk;
672 struct vmspace *vm = curproc->p_vmspace;
673 vaddr_t va = (vaddr_t)uaddr;
674 int error;
675
676 /*
677 * Reject unaligned user pointers so we don't cross page
678 * boundaries and so atomics will work.
679 */
680 if ((va & 3) != 0)
681 return EINVAL;
682
683 /* Look it up. */
684 error = futex_key_init(&fk, vm, va, shared);
685 if (error)
686 return error;
687
688 error = futex_lookup_by_key(&fk, shared, fp);
689 futex_key_fini(&fk, shared);
690 if (error)
691 return error;
692
693 KASSERT(*fp == NULL || (*fp)->fx_shared == shared);
694 KASSERT(*fp == NULL || atomic_load_relaxed(&(*fp)->fx_refcnt) != 0);
695
696 /*
697 * Success! (Caller must still check whether we found
698 * anything, but nothing went _wrong_ like trying to use
699 * unmapped memory.)
700 */
701 KASSERT(error == 0);
702
703 return error;
704 }
705
706 /*
707 * futex_lookup_create(uaddr, shared, &f)
708 *
709 * Find or create a futex at the userland pointer uaddr in the
710 * current process's VM space. On success, return the futex in f
711 * and increment its reference count.
712 *
713 * Caller must call futex_rele when done.
714 */
715 static int
716 futex_lookup_create(int *uaddr, bool shared, struct futex **fp)
717 {
718 union futex_key fk;
719 struct vmspace *vm = curproc->p_vmspace;
720 struct futex *f = NULL;
721 vaddr_t va = (vaddr_t)uaddr;
722 int error;
723
724 /*
725 * Reject unaligned user pointers so we don't cross page
726 * boundaries and so atomics will work.
727 */
728 if ((va & 3) != 0)
729 return EINVAL;
730
731 error = futex_key_init(&fk, vm, va, shared);
732 if (error)
733 return error;
734
735 /*
736 * Optimistically assume there already is one, and try to find
737 * it.
738 */
739 error = futex_lookup_by_key(&fk, shared, fp);
740 if (error || *fp != NULL) {
741 /*
742 * We either found one, or there was an error.
743 * In either case, we are done with the key.
744 */
745 futex_key_fini(&fk, shared);
746 goto out;
747 }
748
749 /*
750 * Create a futex record. This transfers ownership of the key
751 * in all cases.
752 */
753 f = futex_create(&fk, shared);
754 if (f == NULL) {
755 error = ENOMEM;
756 goto out;
757 }
758
759 /*
760 * Insert our new futex, or use existing if someone else beat
761 * us to it.
762 */
763 error = futex_insert(f, fp);
764 if (error)
765 goto out;
766 if (*fp == f)
767 f = NULL; /* don't release on exit */
768
769 /* Success! */
770 KASSERT(error == 0);
771
772 out: if (f != NULL)
773 futex_rele(f);
774 KASSERT(error || *fp != NULL);
775 KASSERT(error || atomic_load_relaxed(&(*fp)->fx_refcnt) != 0);
776 return error;
777 }
778
779 /*
780 * futex_wait_init(fw, bitset)
781 *
782 * Initialize a record for a thread to wait on a futex matching
783 * the specified bit set. Should be passed to futex_wait_enqueue
784 * before futex_wait, and should be passed to futex_wait_fini when
785 * done.
786 */
787 static void
788 futex_wait_init(struct futex_wait *fw, int bitset)
789 {
790
791 KASSERT(bitset);
792
793 mutex_init(&fw->fw_lock, MUTEX_DEFAULT, IPL_NONE);
794 cv_init(&fw->fw_cv, "futex");
795 fw->fw_futex = NULL;
796 fw->fw_bitset = bitset;
797 fw->fw_aborting = false;
798 }
799
800 /*
801 * futex_wait_fini(fw)
802 *
803 * Finalize a record for a futex waiter. Must not be on any
804 * futex's queue.
805 */
806 static void
807 futex_wait_fini(struct futex_wait *fw)
808 {
809
810 KASSERT(fw->fw_futex == NULL);
811
812 cv_destroy(&fw->fw_cv);
813 mutex_destroy(&fw->fw_lock);
814 }
815
816 /*
817 * futex_wait_enqueue(fw, f)
818 *
819 * Put fw on the futex queue. Must be done before futex_wait.
820 * Caller must hold fw's lock and f's lock, and fw must not be on
821 * any existing futex's waiter list.
822 */
823 static void
824 futex_wait_enqueue(struct futex_wait *fw, struct futex *f)
825 {
826
827 KASSERT(mutex_owned(&f->fx_qlock));
828 KASSERT(mutex_owned(&fw->fw_lock));
829 KASSERT(fw->fw_futex == NULL);
830 KASSERT(!fw->fw_aborting);
831
832 fw->fw_futex = f;
833 TAILQ_INSERT_TAIL(&f->fx_queue, fw, fw_entry);
834 }
835
836 /*
837 * futex_wait_dequeue(fw, f)
838 *
839 * Remove fw from the futex queue. Precludes subsequent
840 * futex_wait until a futex_wait_enqueue. Caller must hold fw's
841 * lock and f's lock, and fw must be on f.
842 */
843 static void
844 futex_wait_dequeue(struct futex_wait *fw, struct futex *f)
845 {
846
847 KASSERT(mutex_owned(&f->fx_qlock));
848 KASSERT(mutex_owned(&fw->fw_lock));
849 KASSERT(fw->fw_futex == f);
850
851 TAILQ_REMOVE(&f->fx_queue, fw, fw_entry);
852 fw->fw_futex = NULL;
853 }
854
855 /*
856 * futex_wait_abort(fw)
857 *
858 * Caller is no longer waiting for fw. Remove it from any queue
859 * if it was on one. Caller must hold fw->fw_lock.
860 */
861 static void
862 futex_wait_abort(struct futex_wait *fw)
863 {
864 struct futex *f;
865
866 KASSERT(mutex_owned(&fw->fw_lock));
867
868 /*
869 * Grab the futex queue. It can't go away as long as we hold
870 * fw_lock. However, we can't take the queue lock because
871 * that's a lock order reversal.
872 */
873 f = fw->fw_futex;
874
875 /* Put us on the abort list so that f won't go away. */
876 mutex_enter(&f->fx_abortlock);
877 LIST_INSERT_HEAD(&f->fx_abortlist, fw, fw_abort);
878 mutex_exit(&f->fx_abortlock);
879
880 /*
881 * Mark fw as aborting so it won't lose wakeups and won't be
882 * transferred to any other queue.
883 */
884 fw->fw_aborting = true;
885
886 /* f is now stable, so we can release fw_lock. */
887 mutex_exit(&fw->fw_lock);
888
889 /* Now we can remove fw under the queue lock. */
890 mutex_enter(&f->fx_qlock);
891 mutex_enter(&fw->fw_lock);
892 futex_wait_dequeue(fw, f);
893 mutex_exit(&fw->fw_lock);
894 mutex_exit(&f->fx_qlock);
895
896 /*
897 * Finally, remove us from the abort list and notify anyone
898 * waiting for the abort to complete if we were the last to go.
899 */
900 mutex_enter(&f->fx_abortlock);
901 LIST_REMOVE(fw, fw_abort);
902 if (LIST_EMPTY(&f->fx_abortlist))
903 cv_broadcast(&f->fx_abortcv);
904 mutex_exit(&f->fx_abortlock);
905
906 /*
907 * Release our reference to the futex now that we are not
908 * waiting for it.
909 */
910 futex_rele(f);
911
912 /*
913 * Reacquire the fw lock as caller expects. Verify that we're
914 * aborting and no longer associated with a futex.
915 */
916 mutex_enter(&fw->fw_lock);
917 KASSERT(fw->fw_aborting);
918 KASSERT(fw->fw_futex == NULL);
919 }
920
921 /*
922 * futex_wait(fw, deadline, clkid)
923 *
924 * fw must be a waiter on a futex's queue. Wait until deadline on
925 * the clock clkid, or forever if deadline is NULL, for a futex
926 * wakeup. Return 0 on explicit wakeup or destruction of futex,
927 * ETIMEDOUT on timeout, EINTR/ERESTART on signal. Either way, fw
928 * will no longer be on a futex queue on return.
929 */
930 static int
931 futex_wait(struct futex_wait *fw, const struct timespec *deadline,
932 clockid_t clkid)
933 {
934 int error = 0;
935
936 /* Test and wait under the wait lock. */
937 mutex_enter(&fw->fw_lock);
938
939 for (;;) {
940 /* If we're already done, stop and report success. */
941 if (fw->fw_bitset == 0 || fw->fw_futex == NULL) {
942 error = 0;
943 break;
944 }
945
946 /* If anything went wrong in the last iteration, stop. */
947 if (error)
948 break;
949
950 /* Not done yet. Wait. */
951 if (deadline) {
952 struct timespec ts;
953
954 /* Check our watch. */
955 error = clock_gettime1(clkid, &ts);
956 if (error)
957 break;
958
959 /* If we're past the deadline, ETIMEDOUT. */
960 if (timespeccmp(deadline, &ts, <=)) {
961 error = ETIMEDOUT;
962 break;
963 }
964
965 /* Count how much time is left. */
966 timespecsub(deadline, &ts, &ts);
967
968 /* Wait for that much time, allowing signals. */
969 error = cv_timedwait_sig(&fw->fw_cv, &fw->fw_lock,
970 tstohz(&ts));
971 } else {
972 /* Wait indefinitely, allowing signals. */
973 error = cv_wait_sig(&fw->fw_cv, &fw->fw_lock);
974 }
975 }
976
977 /*
978 * If we were woken up, the waker will have removed fw from the
979 * queue. But if anything went wrong, we must remove fw from
980 * the queue ourselves. While here, convert EWOULDBLOCK to
981 * ETIMEDOUT.
982 */
983 if (error) {
984 futex_wait_abort(fw);
985 if (error == EWOULDBLOCK)
986 error = ETIMEDOUT;
987 }
988
989 mutex_exit(&fw->fw_lock);
990
991 return error;
992 }
993
994 /*
995 * futex_wake(f, nwake, f2, nrequeue, bitset)
996 *
997 * Wake up to nwake waiters on f matching bitset; then, if f2 is
998 * provided, move up to nrequeue remaining waiters on f matching
999 * bitset to f2. Return the number of waiters actually woken or
1000 * requeued. Caller must hold the locks of f and f2, if provided.
1001 */
1002 static unsigned
1003 futex_wake(struct futex *f, unsigned nwake, struct futex *f2,
1004 unsigned nrequeue, int bitset)
1005 {
1006 struct futex_wait *fw, *fw_next;
1007 unsigned nwoken_or_requeued = 0;
1008 int hold_error __diagused;
1009
1010 KASSERT(mutex_owned(&f->fx_qlock));
1011 KASSERT(f2 == NULL || mutex_owned(&f2->fx_qlock));
1012
1013 /* Wake up to nwake waiters, and count the number woken. */
1014 TAILQ_FOREACH_SAFE(fw, &f->fx_queue, fw_entry, fw_next) {
1015 if ((fw->fw_bitset & bitset) == 0)
1016 continue;
1017 if (nwake > 0) {
1018 mutex_enter(&fw->fw_lock);
1019 if (__predict_false(fw->fw_aborting)) {
1020 mutex_exit(&fw->fw_lock);
1021 continue;
1022 }
1023 futex_wait_dequeue(fw, f);
1024 fw->fw_bitset = 0;
1025 cv_broadcast(&fw->fw_cv);
1026 mutex_exit(&fw->fw_lock);
1027 nwake--;
1028 nwoken_or_requeued++;
1029 /*
1030 * Drop the futex reference on behalf of the
1031 * waiter. We assert this is not the last
1032 * reference on the futex (our caller should
1033 * also have one).
1034 */
1035 futex_rele_not_last(f);
1036 } else {
1037 break;
1038 }
1039 }
1040
1041 if (f2) {
1042 /* Move up to nrequeue waiters from f's queue to f2's queue. */
1043 TAILQ_FOREACH_SAFE(fw, &f->fx_queue, fw_entry, fw_next) {
1044 if ((fw->fw_bitset & bitset) == 0)
1045 continue;
1046 if (nrequeue > 0) {
1047 mutex_enter(&fw->fw_lock);
1048 if (__predict_false(fw->fw_aborting)) {
1049 mutex_exit(&fw->fw_lock);
1050 continue;
1051 }
1052 futex_wait_dequeue(fw, f);
1053 futex_wait_enqueue(fw, f2);
1054 mutex_exit(&fw->fw_lock);
1055 nrequeue--;
1056 /*
1057 * PR kern/59004: Missing constant for upper
1058 * bound on systemwide number of lwps
1059 */
1060 KASSERT(nwoken_or_requeued <
1061 MIN(PID_MAX*MAXMAXLWP, FUTEX_TID_MASK));
1062 __CTASSERT(UINT_MAX >=
1063 MIN(PID_MAX*MAXMAXLWP, FUTEX_TID_MASK));
1064 if (++nwoken_or_requeued == 0) /* paranoia */
1065 nwoken_or_requeued = UINT_MAX;
1066 /*
1067 * Transfer the reference from f to f2.
1068 * As above, we assert that we are not
1069 * dropping the last reference to f here.
1070 *
1071 * XXX futex_hold() could theoretically
1072 * XXX fail here.
1073 */
1074 futex_rele_not_last(f);
1075 hold_error = futex_hold(f2);
1076 KASSERT(hold_error == 0);
1077 } else {
1078 break;
1079 }
1080 }
1081 } else {
1082 KASSERT(nrequeue == 0);
1083 }
1084
1085 /* Return the number of waiters woken or requeued. */
1086 return nwoken_or_requeued;
1087 }
1088
1089 /*
1090 * futex_queue_lock(f)
1091 *
1092 * Acquire the queue lock of f. Pair with futex_queue_unlock. Do
1093 * not use if caller needs to acquire two locks; use
1094 * futex_queue_lock2 instead.
1095 */
1096 static void
1097 futex_queue_lock(struct futex *f)
1098 {
1099 mutex_enter(&f->fx_qlock);
1100 }
1101
1102 /*
1103 * futex_queue_unlock(f)
1104 *
1105 * Release the queue lock of f.
1106 */
1107 static void
1108 futex_queue_unlock(struct futex *f)
1109 {
1110 mutex_exit(&f->fx_qlock);
1111 }
1112
1113 /*
1114 * futex_queue_lock2(f, f2)
1115 *
1116 * Acquire the queue locks of both f and f2, which may be null, or
1117 * which may have the same underlying queue. If they are
1118 * distinct, an arbitrary total order is chosen on the locks.
1119 *
1120 * Callers should only ever acquire multiple queue locks
1121 * simultaneously using futex_queue_lock2.
1122 */
1123 static void
1124 futex_queue_lock2(struct futex *f, struct futex *f2)
1125 {
1126
1127 /*
1128 * If both are null, do nothing; if one is null and the other
1129 * is not, lock the other and be done with it.
1130 */
1131 if (f == NULL && f2 == NULL) {
1132 return;
1133 } else if (f == NULL) {
1134 mutex_enter(&f2->fx_qlock);
1135 return;
1136 } else if (f2 == NULL) {
1137 mutex_enter(&f->fx_qlock);
1138 return;
1139 }
1140
1141 /* If both futexes are the same, acquire only one. */
1142 if (f == f2) {
1143 mutex_enter(&f->fx_qlock);
1144 return;
1145 }
1146
1147 /* Otherwise, use the ordering on the kva of the futex pointer. */
1148 if ((uintptr_t)f < (uintptr_t)f2) {
1149 mutex_enter(&f->fx_qlock);
1150 mutex_enter(&f2->fx_qlock);
1151 } else {
1152 mutex_enter(&f2->fx_qlock);
1153 mutex_enter(&f->fx_qlock);
1154 }
1155 }
1156
1157 /*
1158 * futex_queue_unlock2(f, f2)
1159 *
1160 * Release the queue locks of both f and f2, which may be null, or
1161 * which may have the same underlying queue.
1162 */
1163 static void
1164 futex_queue_unlock2(struct futex *f, struct futex *f2)
1165 {
1166
1167 /*
1168 * If both are null, do nothing; if one is null and the other
1169 * is not, unlock the other and be done with it.
1170 */
1171 if (f == NULL && f2 == NULL) {
1172 return;
1173 } else if (f == NULL) {
1174 mutex_exit(&f2->fx_qlock);
1175 return;
1176 } else if (f2 == NULL) {
1177 mutex_exit(&f->fx_qlock);
1178 return;
1179 }
1180
1181 /* If both futexes are the same, release only one. */
1182 if (f == f2) {
1183 mutex_exit(&f->fx_qlock);
1184 return;
1185 }
1186
1187 /* Otherwise, use the ordering on the kva of the futex pointer. */
1188 if ((uintptr_t)f < (uintptr_t)f2) {
1189 mutex_exit(&f2->fx_qlock);
1190 mutex_exit(&f->fx_qlock);
1191 } else {
1192 mutex_exit(&f->fx_qlock);
1193 mutex_exit(&f2->fx_qlock);
1194 }
1195 }
1196
1197 /*
1198 * futex_func_wait(uaddr, val, val3, timeout, clkid, clkflags, retval)
1199 *
1200 * Implement futex(FUTEX_WAIT).
1201 */
1202 static int
1203 futex_func_wait(bool shared, int *uaddr, int val, int val3,
1204 const struct timespec *timeout, clockid_t clkid, int clkflags,
1205 register_t *retval)
1206 {
1207 struct futex *f;
1208 struct futex_wait wait, *fw = &wait;
1209 struct timespec ts;
1210 const struct timespec *deadline;
1211 int error;
1212
1213 /*
1214 * If there's nothing to wait for, and nobody will ever wake
1215 * us, then don't set anything up to wait -- just stop here.
1216 */
1217 if (val3 == 0)
1218 return EINVAL;
1219
1220 /* Optimistically test before anything else. */
1221 if (!futex_test(uaddr, val))
1222 return EAGAIN;
1223
1224 /* Determine a deadline on the specified clock. */
1225 if (timeout == NULL || (clkflags & TIMER_ABSTIME) == TIMER_ABSTIME) {
1226 deadline = timeout;
1227 } else {
1228 error = clock_gettime1(clkid, &ts);
1229 if (error)
1230 return error;
1231 timespecadd(&ts, timeout, &ts);
1232 deadline = &ts;
1233 }
1234
1235 /* Get the futex, creating it if necessary. */
1236 error = futex_lookup_create(uaddr, shared, &f);
1237 if (error)
1238 return error;
1239 KASSERT(f);
1240
1241 /* Get ready to wait. */
1242 futex_wait_init(fw, val3);
1243
1244 /*
1245 * Under the queue lock, check the value again: if it has
1246 * already changed, EAGAIN; otherwise enqueue the waiter.
1247 * Since FUTEX_WAKE will use the same lock and be done after
1248 * modifying the value, the order in which we check and enqueue
1249 * is immaterial.
1250 */
1251 futex_queue_lock(f);
1252 if (!futex_test(uaddr, val)) {
1253 futex_queue_unlock(f);
1254 error = EAGAIN;
1255 goto out;
1256 }
1257 mutex_enter(&fw->fw_lock);
1258 futex_wait_enqueue(fw, f);
1259 mutex_exit(&fw->fw_lock);
1260 futex_queue_unlock(f);
1261
1262 /*
1263 * We cannot drop our reference to the futex here, because
1264 * we might be enqueued on a different one when we are awakened.
1265 * The references will be managed on our behalf in the requeue
1266 * and wake cases.
1267 */
1268 f = NULL;
1269
1270 /* Wait. */
1271 error = futex_wait(fw, deadline, clkid);
1272 if (error)
1273 goto out;
1274
1275 /* Return 0 on success, error on failure. */
1276 *retval = 0;
1277
1278 out: if (f != NULL)
1279 futex_rele(f);
1280 futex_wait_fini(fw);
1281 return error;
1282 }
1283
1284 /*
1285 * futex_func_wake(uaddr, val, val3, retval)
1286 *
1287 * Implement futex(FUTEX_WAKE) and futex(FUTEX_WAKE_BITSET).
1288 */
1289 static int
1290 futex_func_wake(bool shared, int *uaddr, int val, int val3, register_t *retval)
1291 {
1292 struct futex *f;
1293 unsigned int nwoken = 0;
1294 int error = 0;
1295
1296 /* Reject negative number of wakeups. */
1297 if (val < 0) {
1298 error = EINVAL;
1299 goto out;
1300 }
1301
1302 /* Look up the futex, if any. */
1303 error = futex_lookup(uaddr, shared, &f);
1304 if (error)
1305 goto out;
1306
1307 /* If there's no futex, there are no waiters to wake. */
1308 if (f == NULL)
1309 goto out;
1310
1311 /*
1312 * Under f's queue lock, wake the waiters and remember the
1313 * number woken.
1314 */
1315 futex_queue_lock(f);
1316 nwoken = futex_wake(f, /*nwake*/val, NULL, /*nrequeue*/0,
1317 /*bitset*/val3);
1318 futex_queue_unlock(f);
1319
1320 /* Release the futex. */
1321 futex_rele(f);
1322
1323 out:
1324 /* Return the number of waiters woken. */
1325 *retval = nwoken;
1326
1327 /* Success! */
1328 return error;
1329 }
1330
1331 /*
1332 * futex_func_requeue(op, uaddr, val, uaddr2, val2, val3, retval)
1333 *
1334 * Implement futex(FUTEX_REQUEUE) and futex(FUTEX_CMP_REQUEUE).
1335 */
1336 static int
1337 futex_func_requeue(bool shared, int op, int *uaddr, int val, int *uaddr2,
1338 int val2, int val3, register_t *retval)
1339 {
1340 struct futex *f = NULL, *f2 = NULL;
1341 unsigned nwoken_or_requeued = 0; /* default to zero on early return */
1342 int error;
1343
1344 /* Reject negative number of wakeups or requeues. */
1345 if (val < 0 || val2 < 0) {
1346 error = EINVAL;
1347 goto out;
1348 }
1349
1350 /*
1351 * Look up or create the source futex. For FUTEX_CMP_REQUEUE,
1352 * we always create it, rather than bail if it has no waiters,
1353 * because FUTEX_CMP_REQUEUE always tests the futex word in
1354 * order to report EAGAIN.
1355 */
1356 error = (op == FUTEX_CMP_REQUEUE
1357 ? futex_lookup_create(uaddr, shared, &f)
1358 : futex_lookup(uaddr, shared, &f));
1359 if (error)
1360 goto out;
1361
1362 /* If there is none for FUTEX_REQUEUE, nothing to do. */
1363 if (f == NULL) {
1364 KASSERT(op != FUTEX_CMP_REQUEUE);
1365 goto out;
1366 }
1367
1368 /*
1369 * We may need to create the destination futex because it's
1370 * entirely possible it does not currently have any waiters.
1371 */
1372 error = futex_lookup_create(uaddr2, shared, &f2);
1373 if (error)
1374 goto out;
1375
1376 /*
1377 * Under the futexes' queue locks, check the value; if
1378 * unchanged from val3, wake the waiters.
1379 */
1380 futex_queue_lock2(f, f2);
1381 if (op == FUTEX_CMP_REQUEUE && !futex_test(uaddr, val3)) {
1382 error = EAGAIN;
1383 } else {
1384 error = 0;
1385 nwoken_or_requeued = futex_wake(f, /*nwake*/val,
1386 f2, /*nrequeue*/val2,
1387 FUTEX_BITSET_MATCH_ANY);
1388 }
1389 futex_queue_unlock2(f, f2);
1390
1391 out:
1392 /* Return the number of waiters woken or requeued. */
1393 *retval = nwoken_or_requeued;
1394
1395 /* Release the futexes if we got them. */
1396 if (f2)
1397 futex_rele(f2);
1398 if (f)
1399 futex_rele(f);
1400 return error;
1401 }
1402
1403 /*
1404 * futex_opcmp_arg(arg)
1405 *
1406 * arg is either the oparg or cmparg field of a FUTEX_WAKE_OP
1407 * operation, a 12-bit string in either case. Map it to a numeric
1408 * argument value by sign-extending it in two's-complement
1409 * representation.
1410 */
1411 static int
1412 futex_opcmp_arg(int arg)
1413 {
1414
1415 KASSERT(arg == (arg & __BITS(11,0)));
1416 return arg - 0x1000*__SHIFTOUT(arg, __BIT(11));
1417 }
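/*
 * For example, with the sign extension above:
 * futex_opcmp_arg(0x000) == 0, futex_opcmp_arg(0x7ff) == 2047,
 * futex_opcmp_arg(0x800) == -2048, and futex_opcmp_arg(0xfff) == -1.
 */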
1418
1419 /*
1420 * futex_validate_op_cmp(val3)
1421 *
1422 * Validate an op/cmp argument for FUTEX_WAKE_OP.
1423 */
1424 static int
1425 futex_validate_op_cmp(int val3)
1426 {
1427 int op = __SHIFTOUT(val3, FUTEX_OP_OP_MASK);
1428 int cmp = __SHIFTOUT(val3, FUTEX_OP_CMP_MASK);
1429
1430 if (op & FUTEX_OP_OPARG_SHIFT) {
1431 int oparg =
1432 futex_opcmp_arg(__SHIFTOUT(val3, FUTEX_OP_OPARG_MASK));
1433 if (oparg < 0)
1434 return EINVAL;
1435 if (oparg >= 32)
1436 return EINVAL;
1437 op &= ~FUTEX_OP_OPARG_SHIFT;
1438 }
1439
1440 switch (op) {
1441 case FUTEX_OP_SET:
1442 case FUTEX_OP_ADD:
1443 case FUTEX_OP_OR:
1444 case FUTEX_OP_ANDN:
1445 case FUTEX_OP_XOR:
1446 break;
1447 default:
1448 return EINVAL;
1449 }
1450
1451 switch (cmp) {
1452 case FUTEX_OP_CMP_EQ:
1453 case FUTEX_OP_CMP_NE:
1454 case FUTEX_OP_CMP_LT:
1455 case FUTEX_OP_CMP_LE:
1456 case FUTEX_OP_CMP_GT:
1457 case FUTEX_OP_CMP_GE:
1458 break;
1459 default:
1460 return EINVAL;
1461 }
1462
1463 return 0;
1464 }
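/*
 * Illustrative encoding of val3 for FUTEX_WAKE_OP (assuming the
 * Linux-compatible field layout: op in bits 28-31, cmp in bits
 * 24-27, oparg in bits 12-23, cmparg in bits 0-11).  To set *uaddr2
 * to 1 and wake waiters on uaddr2 only if its old value was 0, a
 * caller would pass something like:
 *
 *	val3 = __SHIFTIN(FUTEX_OP_SET, FUTEX_OP_OP_MASK) |
 *	    __SHIFTIN(FUTEX_OP_CMP_EQ, FUTEX_OP_CMP_MASK) |
 *	    __SHIFTIN(1, FUTEX_OP_OPARG_MASK) |
 *	    __SHIFTIN(0, FUTEX_OP_CMPARG_MASK);
 *
 * which futex_compute_op() below turns into the new value 1 and
 * futex_compute_cmp() into the test (oldval == 0).
 */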
1465
1466 /*
1467 * futex_compute_op(oldval, val3)
1468 *
1469 * Apply a FUTEX_WAKE_OP operation to oldval.
1470 */
1471 static int
1472 futex_compute_op(int oldval, int val3)
1473 {
1474 int op = __SHIFTOUT(val3, FUTEX_OP_OP_MASK);
1475 int oparg = futex_opcmp_arg(__SHIFTOUT(val3, FUTEX_OP_OPARG_MASK));
1476
1477 if (op & FUTEX_OP_OPARG_SHIFT) {
1478 KASSERT(oparg >= 0);
1479 KASSERT(oparg < 32);
1480 oparg = 1u << oparg;
1481 op &= ~FUTEX_OP_OPARG_SHIFT;
1482 }
1483
1484 switch (op) {
1485 case FUTEX_OP_SET:
1486 return oparg;
1487
1488 case FUTEX_OP_ADD:
1489 /*
1490 * Avoid signed arithmetic overflow by doing
1491 * arithmetic unsigned and converting back to signed
1492 * at the end.
1493 */
1494 return (int)((unsigned)oldval + (unsigned)oparg);
1495
1496 case FUTEX_OP_OR:
1497 return oldval | oparg;
1498
1499 case FUTEX_OP_ANDN:
1500 return oldval & ~oparg;
1501
1502 case FUTEX_OP_XOR:
1503 return oldval ^ oparg;
1504
1505 default:
1506 panic("invalid futex op");
1507 }
1508 }
1509
1510 /*
1511 * futex_compute_cmp(oldval, val3)
1512 *
1513 * Apply a FUTEX_WAKE_OP comparison to oldval.
1514 */
1515 static bool
1516 futex_compute_cmp(int oldval, int val3)
1517 {
1518 int cmp = __SHIFTOUT(val3, FUTEX_OP_CMP_MASK);
1519 int cmparg = futex_opcmp_arg(__SHIFTOUT(val3, FUTEX_OP_CMPARG_MASK));
1520
1521 switch (cmp) {
1522 case FUTEX_OP_CMP_EQ:
1523 return (oldval == cmparg);
1524
1525 case FUTEX_OP_CMP_NE:
1526 return (oldval != cmparg);
1527
1528 case FUTEX_OP_CMP_LT:
1529 return (oldval < cmparg);
1530
1531 case FUTEX_OP_CMP_LE:
1532 return (oldval <= cmparg);
1533
1534 case FUTEX_OP_CMP_GT:
1535 return (oldval > cmparg);
1536
1537 case FUTEX_OP_CMP_GE:
1538 return (oldval >= cmparg);
1539
1540 default:
1541 panic("invalid futex cmp operation");
1542 }
1543 }
1544
1545 /*
1546 * futex_func_wake_op(uaddr, val, uaddr2, val2, val3, retval)
1547 *
1548 * Implement futex(FUTEX_WAKE_OP).
1549 */
1550 static int
1551 futex_func_wake_op(bool shared, int *uaddr, int val, int *uaddr2, int val2,
1552 int val3, register_t *retval)
1553 {
1554 struct futex *f = NULL, *f2 = NULL;
1555 int oldval, newval, actual;
1556 unsigned nwoken = 0;
1557 int error;
1558
1559 /* Reject negative number of wakeups. */
1560 if (val < 0 || val2 < 0) {
1561 error = EINVAL;
1562 goto out;
1563 }
1564
1565 /* Reject invalid operations before we start doing things. */
1566 if ((error = futex_validate_op_cmp(val3)) != 0)
1567 goto out;
1568
1569 /* Look up the first futex, if any. */
1570 error = futex_lookup(uaddr, shared, &f);
1571 if (error)
1572 goto out;
1573
1574 /* Look up the second futex, if any. */
1575 error = futex_lookup(uaddr2, shared, &f2);
1576 if (error)
1577 goto out;
1578
1579 /*
1580 * Under the queue locks:
1581 *
1582 * 1. Read/modify/write: *uaddr2 op= oparg.
1583 * 2. Unconditionally wake uaddr.
1584 * 3. Conditionally wake uaddr2, if its old value satisfies the cmp in val3.
1585 */
1586 futex_queue_lock2(f, f2);
1587 do {
1588 error = futex_load(uaddr2, &oldval);
1589 if (error)
1590 goto out_unlock;
1591 newval = futex_compute_op(oldval, val3);
1592 error = ucas_int(uaddr2, oldval, newval, &actual);
1593 if (error)
1594 goto out_unlock;
1595 } while (actual != oldval);
1596 if (f == NULL) {
1597 nwoken = 0;
1598 } else {
1599 nwoken = futex_wake(f, /*nwake*/val, NULL, /*nrequeue*/0,
1600 FUTEX_BITSET_MATCH_ANY);
1601 }
1602 if (f2 && futex_compute_cmp(oldval, val3)) {
1603 nwoken += futex_wake(f2, /*nwake*/val2, NULL, /*nrequeue*/0,
1604 FUTEX_BITSET_MATCH_ANY);
1605 }
1606
1607 /* Success! */
1608 error = 0;
1609 out_unlock:
1610 futex_queue_unlock2(f, f2);
1611
1612 out:
1613 /* Return the number of waiters woken. */
1614 *retval = nwoken;
1615
1616 /* Release the futexes, if we got them. */
1617 if (f2)
1618 futex_rele(f2);
1619 if (f)
1620 futex_rele(f);
1621 return error;
1622 }
1623
1624 /*
1625 * do_futex(uaddr, op, val, timeout, uaddr2, val2, val3)
1626 *
1627 * Implement the futex system call with all the parameters
1628 * parsed out.
1629 */
1630 int
1631 do_futex(int *uaddr, int op, int val, const struct timespec *timeout,
1632 int *uaddr2, int val2, int val3, register_t *retval)
1633 {
1634 const bool shared = (op & FUTEX_PRIVATE_FLAG) ? false : true;
1635 const clockid_t clkid = (op & FUTEX_CLOCK_REALTIME) ? CLOCK_REALTIME
1636 : CLOCK_MONOTONIC;
1637
1638 op &= FUTEX_CMD_MASK;
1639
1640 switch (op) {
1641 case FUTEX_WAIT:
1642 return futex_func_wait(shared, uaddr, val,
1643 FUTEX_BITSET_MATCH_ANY, timeout, clkid, TIMER_RELTIME,
1644 retval);
1645
1646 case FUTEX_WAKE:
1647 val3 = FUTEX_BITSET_MATCH_ANY;
1648 /* FALLTHROUGH */
1649 case FUTEX_WAKE_BITSET:
1650 return futex_func_wake(shared, uaddr, val, val3, retval);
1651
1652 case FUTEX_REQUEUE:
1653 case FUTEX_CMP_REQUEUE:
1654 return futex_func_requeue(shared, op, uaddr, val, uaddr2,
1655 val2, val3, retval);
1656
1657 case FUTEX_WAIT_BITSET:
1658 return futex_func_wait(shared, uaddr, val, val3, timeout,
1659 clkid, TIMER_ABSTIME, retval);
1660
1661 case FUTEX_WAKE_OP:
1662 return futex_func_wake_op(shared, uaddr, val, uaddr2, val2,
1663 val3, retval);
1664
1665 case FUTEX_FD:
1666 default:
1667 return ENOSYS;
1668 }
1669 }
1670
1671 /*
1672 * sys___futex(l, uap, retval)
1673 *
1674 * __futex(2) system call: generic futex operations.
1675 */
1676 int
1677 sys___futex(struct lwp *l, const struct sys___futex_args *uap,
1678 register_t *retval)
1679 {
1680 /* {
1681 syscallarg(int *) uaddr;
1682 syscallarg(int) op;
1683 syscallarg(int) val;
1684 syscallarg(const struct timespec *) timeout;
1685 syscallarg(int *) uaddr2;
1686 syscallarg(int) val2;
1687 syscallarg(int) val3;
1688 } */
1689 struct timespec ts, *tsp;
1690 int error;
1691
1692 /*
1693 * Copy in the timeout argument, if specified.
1694 */
1695 if (SCARG(uap, timeout)) {
1696 error = copyin(SCARG(uap, timeout), &ts, sizeof(ts));
1697 if (error)
1698 return error;
1699 tsp = &ts;
1700 } else {
1701 tsp = NULL;
1702 }
1703
1704 return do_futex(SCARG(uap, uaddr), SCARG(uap, op), SCARG(uap, val),
1705 tsp, SCARG(uap, uaddr2), SCARG(uap, val2), SCARG(uap, val3),
1706 retval);
1707 }
1708
1709 /*
1710 * sys___futex_set_robust_list(l, uap, retval)
1711 *
1712 * __futex_set_robust_list(2) system call for robust futexes.
1713 */
1714 int
1715 sys___futex_set_robust_list(struct lwp *l,
1716 const struct sys___futex_set_robust_list_args *uap, register_t *retval)
1717 {
1718 /* {
1719 syscallarg(void *) head;
1720 syscallarg(size_t) len;
1721 } */
1722 void *head = SCARG(uap, head);
1723
1724 if (SCARG(uap, len) != _FUTEX_ROBUST_HEAD_SIZE)
1725 return EINVAL;
1726 if ((uintptr_t)head % sizeof(u_long))
1727 return EINVAL;
1728
1729 l->l_robust_head = (uintptr_t)head;
1730
1731 return 0;
1732 }
1733
1734 /*
1735 * sys___futex_get_robust_list(l, uap, retval)
1736 *
1737 * __futex_get_robust_list(2) system call for robust futexes.
1738 */
1739 int
1740 sys___futex_get_robust_list(struct lwp *l,
1741 const struct sys___futex_get_robust_list_args *uap, register_t *retval)
1742 {
1743 /* {
1744 syscallarg(lwpid_t) lwpid;
1745 syscallarg(void **) headp;
1746 syscallarg(size_t *) lenp;
1747 } */
1748 void *head;
1749 const size_t len = _FUTEX_ROBUST_HEAD_SIZE;
1750 int error;
1751
1752 error = futex_robust_head_lookup(l, SCARG(uap, lwpid), &head);
1753 if (error)
1754 return error;
1755
1756 /* Copy out the head pointer and the head structure length. */
1757 error = copyout(&head, SCARG(uap, headp), sizeof(head));
1758 if (__predict_true(error == 0)) {
1759 error = copyout(&len, SCARG(uap, lenp), sizeof(len));
1760 }
1761
1762 return error;
1763 }
1764
1765 /*
1766 * release_futex(uptr, tid, is_pi, is_pending)
1767 *
1768 * Try to release the robust futex at uva in the current process
1769 * on lwp exit. If anything goes wrong, silently fail. It is the
1770 * userland program's obligation to arrange correct behaviour.
1771 */
1772 static void
1773 release_futex(uintptr_t const uptr, lwpid_t const tid, bool const is_pi,
1774 bool const is_pending)
1775 {
1776 int *uaddr;
1777 struct futex *f;
1778 int oldval, newval, actual;
1779 int error;
1780
1781 /* If it's misaligned, tough. */
1782 if (__predict_false(uptr & 3))
1783 return;
1784 uaddr = (int *)uptr;
1785
1786 error = futex_load(uaddr, &oldval);
1787 if (__predict_false(error))
1788 return;
1789
1790 /*
1791 * There are two race conditions we need to handle here:
1792 *
1793 * 1. User space cleared the futex word but died before
1794 * being able to issue the wakeup. No wakeups will
1795 * ever be issued, oops!
1796 *
1797 * 2. Awakened waiter died before being able to acquire
1798 * the futex in user space. Any other waiters are
1799 * now stuck, oops!
1800 *
1801 * In both of these cases, the futex word will be 0 (because
1802 * it's updated before the wake is issued). The best we can
1803 * do is detect this situation if it's the pending futex and
1804 * issue a wake without modifying the futex word.
1805 *
1806 * XXX eventual PI handling?
1807 */
1808 if (__predict_false(is_pending && (oldval & ~FUTEX_WAITERS) == 0)) {
1809 register_t retval;
1810 (void) futex_func_wake(/*shared*/true, uaddr, 1,
1811 FUTEX_BITSET_MATCH_ANY, &retval);
1812 return;
1813 }
1814
1815 /* Optimistically test whether we need to do anything at all. */
1816 if ((oldval & FUTEX_TID_MASK) != tid)
1817 return;
1818
1819 /*
1820 * We need to handle the case where this thread owned the futex,
1821 * but it was uncontended. In this case, there won't be any
1822 * kernel state to look up. All we can do is mark the futex
1823 * as a zombie to be mopped up the next time another thread
1824 * attempts to acquire it.
1825 *
1826 * N.B. It's important to ensure to set FUTEX_OWNER_DIED in
1827 * this loop, even if waiters appear while we are doing
1828 * so. This is because FUTEX_WAITERS is set by user space
1829 * before calling __futex() to wait, and the futex needs
1830 * to be marked as a zombie when the new waiter gets into
1831 * the kernel.
1832 */
1833 if ((oldval & FUTEX_WAITERS) == 0) {
1834 do {
1835 error = futex_load(uaddr, &oldval);
1836 if (error)
1837 return;
1838 if ((oldval & FUTEX_TID_MASK) != tid)
1839 return;
1840 newval = oldval | FUTEX_OWNER_DIED;
1841 error = ucas_int(uaddr, oldval, newval, &actual);
1842 if (error)
1843 return;
1844 } while (actual != oldval);
1845
1846 /*
1847 * If there is still no indication of waiters, then there is
1848 * no more work for us to do.
1849 */
1850 if ((oldval & FUTEX_WAITERS) == 0)
1851 return;
1852 }
1853
1854 /*
1855 * Look for a shared futex since we have no positive indication
1856 * it is private. If we can't, tough.
1857 */
1858 error = futex_lookup(uaddr, /*shared*/true, &f);
1859 if (error)
1860 return;
1861
1862 /*
1863 * If there's no kernel state for this futex, there's nothing to
1864 * release.
1865 */
1866 if (f == NULL)
1867 return;
1868
1869 /* Work under the futex queue lock. */
1870 futex_queue_lock(f);
1871
1872 /*
1873 * Fetch the word: if the tid doesn't match ours, skip;
1874 * otherwise, set the owner-died bit, atomically.
1875 */
1876 do {
1877 error = futex_load(uaddr, &oldval);
1878 if (error)
1879 goto out;
1880 if ((oldval & FUTEX_TID_MASK) != tid)
1881 goto out;
1882 newval = oldval | FUTEX_OWNER_DIED;
1883 error = ucas_int(uaddr, oldval, newval, &actual);
1884 if (error)
1885 goto out;
1886 } while (actual != oldval);
1887
1888 /*
1889 * If there may be waiters, try to wake one. If anything goes
1890 * wrong, tough.
1891 *
1892 * XXX eventual PI handling?
1893 */
1894 if (oldval & FUTEX_WAITERS) {
1895 (void)futex_wake(f, /*nwake*/1, NULL, /*nrequeue*/0,
1896 FUTEX_BITSET_MATCH_ANY);
1897 }
1898
1899 /* Unlock the queue and release the futex. */
1900 out: futex_queue_unlock(f);
1901 futex_rele(f);
1902 }
1903
1904 /*
1905 * futex_robust_head_lookup(l, lwpid)
1906 *
1907 * Helper function to look up a robust head by LWP ID.
1908 */
1909 int
1910 futex_robust_head_lookup(struct lwp *l, lwpid_t lwpid, void **headp)
1911 {
1912 struct proc *p = l->l_proc;
1913
1914 /* Find the other lwp, if requested; otherwise use our robust head. */
1915 if (lwpid) {
1916 mutex_enter(p->p_lock);
1917 l = lwp_find(p, lwpid);
1918 if (l == NULL) {
1919 mutex_exit(p->p_lock);
1920 return ESRCH;
1921 }
1922 *headp = (void *)l->l_robust_head;
1923 mutex_exit(p->p_lock);
1924 } else {
1925 *headp = (void *)l->l_robust_head;
1926 }
1927 return 0;
1928 }
1929
1930 /*
1931 * futex_fetch_robust_head(uaddr)
1932 *
1933 * Helper routine to fetch the futex robust list head that
1934 * handles 32-bit binaries running on 64-bit kernels.
1935 */
1936 static int
1937 futex_fetch_robust_head(uintptr_t uaddr, u_long *rhead)
1938 {
1939 #ifdef _LP64
1940 if (curproc->p_flag & PK_32) {
1941 uint32_t rhead32[_FUTEX_ROBUST_HEAD_NWORDS];
1942 int error;
1943
1944 error = copyin((void *)uaddr, rhead32, sizeof(rhead32));
1945 if (__predict_true(error == 0)) {
1946 for (int i = 0; i < _FUTEX_ROBUST_HEAD_NWORDS; i++) {
1947 if (i == _FUTEX_ROBUST_HEAD_OFFSET) {
1948 /*
1949 * Make sure the offset is sign-
1950 * extended.
1951 */
1952 rhead[i] = (int32_t)rhead32[i];
1953 } else {
1954 rhead[i] = rhead32[i];
1955 }
1956 }
1957 }
1958 return error;
1959 }
1960 #endif /* _LP64 */
1961
1962 return copyin((void *)uaddr, rhead,
1963 sizeof(*rhead) * _FUTEX_ROBUST_HEAD_NWORDS);
1964 }
1965
1966 /*
1967 * futex_decode_robust_word(word)
1968 *
1969 * Decode a robust futex list word into the entry and entry
1970 * properties.
1971 */
1972 static inline void
1973 futex_decode_robust_word(uintptr_t const word, uintptr_t * const entry,
1974 bool * const is_pi)
1975 {
1976 *is_pi = (word & _FUTEX_ROBUST_ENTRY_PI) ? true : false;
1977 *entry = word & ~_FUTEX_ROBUST_ENTRY_PI;
1978 }
1979
1980 /*
1981 * futex_fetch_robust_entry(uaddr)
1982 *
1983 * Helper routine to fetch and decode a robust futex entry
1984 * that handles 32-bit binaries running on 64-bit kernels.
1985 */
1986 static int
1987 futex_fetch_robust_entry(uintptr_t const uaddr, uintptr_t * const valp,
1988 bool * const is_pi)
1989 {
1990 uintptr_t val = 0;
1991 int error = 0;
1992
1993 #ifdef _LP64
1994 if (curproc->p_flag & PK_32) {
1995 uint32_t val32;
1996
1997 error = ufetch_32((uint32_t *)uaddr, &val32);
1998 if (__predict_true(error == 0))
1999 val = val32;
2000 } else
2001 #endif /* _LP64 */
2002 error = ufetch_long((u_long *)uaddr, (u_long *)&val);
2003 if (__predict_false(error))
2004 return error;
2005
2006 futex_decode_robust_word(val, valp, is_pi);
2007 return 0;
2008 }
2009
2010 /*
2011 * futex_release_all_lwp(l, tid)
2012 *
2013 * Release all l's robust futexes. If anything looks funny in
2014 * the process, give up -- it's userland's responsibility to dot
2015 * the i's and cross the t's.
2016 */
2017 void
2018 futex_release_all_lwp(struct lwp * const l)
2019 {
2020 u_long rhead[_FUTEX_ROBUST_HEAD_NWORDS];
2021 int limit = 1000000;
2022 int error;
2023
2024 /* If there's no robust list there's nothing to do. */
2025 if (l->l_robust_head == 0)
2026 return;
2027
2028 KASSERT((l->l_lid & FUTEX_TID_MASK) == l->l_lid);
2029
2030 /* Read the final snapshot of the robust list head. */
2031 error = futex_fetch_robust_head(l->l_robust_head, rhead);
2032 if (error) {
2033 printf("WARNING: pid %jd (%s) lwp %jd:"
2034 " unmapped robust futex list head\n",
2035 (uintmax_t)l->l_proc->p_pid, l->l_proc->p_comm,
2036 (uintmax_t)l->l_lid);
2037 return;
2038 }
2039
2040 const long offset = (long)rhead[_FUTEX_ROBUST_HEAD_OFFSET];
2041
2042 uintptr_t next, pending;
2043 bool is_pi, pending_is_pi;
2044
2045 futex_decode_robust_word(rhead[_FUTEX_ROBUST_HEAD_LIST],
2046 &next, &is_pi);
2047 futex_decode_robust_word(rhead[_FUTEX_ROBUST_HEAD_PENDING],
2048 &pending, &pending_is_pi);
2049
2050 /*
2051 * Walk down the list of locked futexes and release them, up
2052 * to one million of them before we give up.
2053 */
2054
2055 while (next != l->l_robust_head && limit-- > 0) {
2056 /* pending handled below. */
2057 if (next != pending)
2058 release_futex(next + offset, l->l_lid, is_pi, false);
2059 error = futex_fetch_robust_entry(next, &next, &is_pi);
2060 if (error)
2061 break;
2062 preempt_point();
2063 }
2064 if (limit <= 0) {
2065 printf("WARNING: pid %jd (%s) lwp %jd:"
2066 " exhausted robust futex limit\n",
2067 (uintmax_t)l->l_proc->p_pid, l->l_proc->p_comm,
2068 (uintmax_t)l->l_lid);
2069 }
2070
2071 /* If there's a pending futex, it may need to be released too. */
2072 if (pending != 0) {
2073 release_futex(pending + offset, l->l_lid, pending_is_pi, true);
2074 }
2075 }
2076