/*	$NetBSD: subr_copy.c,v 1.21 2026/01/04 02:11:26 riastradh Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 1999, 2002, 2007, 2008, 2019
 *	The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.4 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_copy.c,v 1.21 2026/01/04 02:11:26 riastradh Exp $");

#define	__UFETCHSTORE_PRIVATE
#define	__UCAS_PRIVATE

#include <sys/param.h>
#include <sys/types.h>

#include <sys/fcntl.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/systm.h>

#include <uvm/uvm_extern.h>

void
uio_setup_sysspace(struct uio *uio)
{

	uio->uio_vmspace = vmspace_kernel();
}

int
uiomove(void *buf, size_t n, struct uio *uio)
{
	struct vmspace *vm = uio->uio_vmspace;
	struct iovec *iov;
	size_t cnt;
	int error = 0;
	char *cp = buf;

	ASSERT_SLEEPABLE();

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE);
	while (n > 0 && uio->uio_resid) {
		KASSERT(uio->uio_iovcnt > 0);
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			KASSERT(uio->uio_iovcnt > 1);
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;
		if (!VMSPACE_IS_KERNEL_P(vm)) {
			preempt_point();
		}

		if (uio->uio_rw == UIO_READ) {
			error = copyout_vmspace(vm, cp, iov->iov_base,
			    cnt);
		} else {
			error = copyin_vmspace(vm, iov->iov_base, cp,
			    cnt);
		}
		if (error) {
			break;
		}
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		KDASSERT(cnt <= n);
		n -= cnt;
	}

	return (error);
}

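/*
 * Example (illustrative sketch only, not part of this file): a trivial
 * character-device read routine for a hypothetical driver "foo" that
 * exposes a contiguous kernel buffer to userland via uiomove().  The
 * caller only bounds the copy; uiomove() maintains uio_offset and
 * uio_resid itself.
 *
 *	static char foo_buf[1024];		(hypothetical device buffer)
 *
 *	static int
 *	foo_read(dev_t dev, struct uio *uio, int flags)
 *	{
 *		size_t off, len;
 *
 *		if (uio->uio_offset < 0)
 *			return SET_ERROR(EINVAL);
 *		if ((size_t)uio->uio_offset >= sizeof(foo_buf))
 *			return 0;
 *		off = (size_t)uio->uio_offset;
 *		len = MIN(sizeof(foo_buf) - off, uio->uio_resid);
 *		return uiomove(foo_buf + off, len, uio);
 *	}
 */
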
/*
 * Wrapper for uiomove() that validates the arguments against a known-good
 * kernel buffer.
 */
int
uiomove_frombuf(void *buf, size_t buflen, struct uio *uio)
{
	size_t offset;

	if (uio->uio_offset < 0 || /* uio->uio_resid < 0 || */
	    (offset = uio->uio_offset) != uio->uio_offset)
		return SET_ERROR(EINVAL);
	if (offset >= buflen)
		return 0;
	return (uiomove((char *)buf + offset, buflen - offset, uio));
}

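/*
 * Example (illustrative sketch only, not part of this file): the
 * hypothetical foo_read() above collapses to a single call when the
 * source is one contiguous kernel buffer, because uiomove_frombuf()
 * performs the offset and bounds checks itself and returns 0 (EOF)
 * for offsets at or past the end.
 *
 *	static int
 *	foo_read(dev_t dev, struct uio *uio, int flags)
 *	{
 *
 *		return uiomove_frombuf(foo_buf, sizeof(foo_buf), uio);
 *	}
 */

/*
 * uiopeek() transfers up to n bytes like uiomove(), but leaves the uio
 * untouched; uioskip() below then consumes n bytes without copying.
 * Together they let a caller examine data before committing to it (see
 * the sketch after uioskip()).
 */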
int
uiopeek(void *buf, size_t n, struct uio *uio)
{
	struct vmspace *vm = uio->uio_vmspace;
	struct iovec *iov;
	size_t cnt;
	int error = 0;
	char *cp = buf;
	size_t resid = uio->uio_resid;
	int iovcnt = uio->uio_iovcnt;
	char *base;
	size_t len;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE);

	if (n == 0 || resid == 0)
		return 0;
	iov = uio->uio_iov;
	base = iov->iov_base;
	len = iov->iov_len;

	while (n > 0 && resid > 0) {
		KASSERT(iovcnt > 0);
		cnt = len;
		if (cnt == 0) {
			KASSERT(iovcnt > 1);
			iov++;
			iovcnt--;
			base = iov->iov_base;
			len = iov->iov_len;
			continue;
		}
		if (cnt > n)
			cnt = n;
		if (!VMSPACE_IS_KERNEL_P(vm)) {
			preempt_point();
		}

		if (uio->uio_rw == UIO_READ) {
			error = copyout_vmspace(vm, cp, base, cnt);
		} else {
			error = copyin_vmspace(vm, base, cp, cnt);
		}
		if (error) {
			break;
		}
		base += cnt;
		len -= cnt;
		resid -= cnt;
		cp += cnt;
		KDASSERT(cnt <= n);
		n -= cnt;
	}

	return error;
}

void
uioskip(size_t n, struct uio *uio)
{
	struct iovec *iov;
	size_t cnt;

	KASSERTMSG(n <= uio->uio_resid, "n=%zu resid=%zu", n, uio->uio_resid);

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE);
	while (n > 0 && uio->uio_resid) {
		KASSERT(uio->uio_iovcnt > 0);
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			KASSERT(uio->uio_iovcnt > 1);
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		KDASSERT(cnt <= n);
		n -= cnt;
	}
}

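/*
 * Example (illustrative sketch only): a receive path that peeks at a
 * fixed-size header, validates it, and only then consumes it from the
 * uio, so a malformed request leaves the transfer untouched.
 *
 *	struct foo_hdr hdr;			(hypothetical header type)
 *	int error;
 *
 *	if (uio->uio_resid < sizeof(hdr))
 *		return SET_ERROR(EINVAL);
 *	if ((error = uiopeek(&hdr, sizeof(hdr), uio)) != 0)
 *		return error;
 *	if (hdr.fh_magic != FOO_MAGIC)		(hypothetical field/constant)
 *		return SET_ERROR(EINVAL);
 *	uioskip(sizeof(hdr), uio);
 */
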
/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
	struct iovec *iov;

	if (uio->uio_resid <= 0)
		panic("ureadc: non-positive resid");
again:
	if (uio->uio_iovcnt <= 0)
		panic("ureadc: non-positive iovcnt");
	iov = uio->uio_iov;
	if (iov->iov_len <= 0) {
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	if (!VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
		int error;
		if ((error = ustore_char(iov->iov_base, c)) != 0)
			return (error);
	} else {
		*(char *)iov->iov_base = c;
	}
	iov->iov_base = (char *)iov->iov_base + 1;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}

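/*
 * Example (illustrative sketch only): delivering buffered characters one
 * at a time, stopping when the caller's buffer is full or the producer
 * runs dry, much as a line-discipline style read loop would.
 *
 *	int ch, error = 0;
 *
 *	while (uio->uio_resid > 0 && (ch = foo_getc(sc)) != -1) {
 *		if ((error = ureadc(ch, uio)) != 0)
 *			break;
 *	}
 *	return error;
 *
 * (foo_getc() and sc are hypothetical.)
 */
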
/*
 * Like copyin(), but operates on an arbitrary vmspace.
 */
int
copyin_vmspace(struct vmspace *vm, const void *uaddr, void *kaddr, size_t len)
{
	struct iovec iov;
	struct uio uio;
	int error;

	if (len == 0)
		return (0);

	if (VMSPACE_IS_KERNEL_P(vm)) {
		return kcopy(uaddr, kaddr, len);
	}
	if (__predict_true(vm == curproc->p_vmspace)) {
		return copyin(uaddr, kaddr, len);
	}

	iov.iov_base = kaddr;
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)(uintptr_t)uaddr;
	uio.uio_resid = len;
	uio.uio_rw = UIO_READ;
	UIO_SETUP_SYSSPACE(&uio);
	error = uvm_io(&vm->vm_map, &uio, 0);

	return (error);
}

/*
 * Like copyout(), but operates on an arbitrary vmspace.
 */
int
copyout_vmspace(struct vmspace *vm, const void *kaddr, void *uaddr, size_t len)
{
	struct iovec iov;
	struct uio uio;
	int error;

	if (len == 0)
		return (0);

	if (VMSPACE_IS_KERNEL_P(vm)) {
		return kcopy(kaddr, uaddr, len);
	}
	if (__predict_true(vm == curproc->p_vmspace)) {
		return copyout(kaddr, uaddr, len);
	}

	iov.iov_base = __UNCONST(kaddr); /* XXXUNCONST cast away const */
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)(uintptr_t)uaddr;
	uio.uio_resid = len;
	uio.uio_rw = UIO_WRITE;
	UIO_SETUP_SYSSPACE(&uio);
	error = uvm_io(&vm->vm_map, &uio, 0);

	return (error);
}

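/*
 * Example (illustrative sketch only): these helpers pick the cheapest
 * path themselves -- kcopy() for the kernel vmspace, copyin()/copyout()
 * for the current process, and uvm_io() for a foreign vmspace -- so a
 * caller holding a struct uio can simply forward its uio_vmspace, as
 * uiomove() above does:
 *
 *	error = copyout_vmspace(uio->uio_vmspace, kbuf, ubase, cnt);
 *
 * (kbuf, ubase and cnt stand in for the caller's own variables.)
 */
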
/*
 * Like copyin(), but operates on an arbitrary process.
 */
int
copyin_proc(struct proc *p, const void *uaddr, void *kaddr, size_t len)
{
	struct vmspace *vm;
	int error;

	error = proc_vmspace_getref(p, &vm);
	if (error) {
		return error;
	}
	error = copyin_vmspace(vm, uaddr, kaddr, len);
	uvmspace_free(vm);

	return error;
}

/*
 * Like copyout(), but operates on an arbitrary process.
 */
int
copyout_proc(struct proc *p, const void *kaddr, void *uaddr, size_t len)
{
	struct vmspace *vm;
	int error;

	error = proc_vmspace_getref(p, &vm);
	if (error) {
		return error;
	}
	error = copyout_vmspace(vm, kaddr, uaddr, len);
	uvmspace_free(vm);

	return error;
}

/*
 * Like copyin(), but operates on an arbitrary pid.
 */
int
copyin_pid(pid_t pid, const void *uaddr, void *kaddr, size_t len)
{
	struct proc *p;
	struct vmspace *vm;
	int error;

	mutex_enter(&proc_lock);
	p = proc_find(pid);
	if (p == NULL) {
		mutex_exit(&proc_lock);
		return SET_ERROR(ESRCH);
	}
	mutex_enter(p->p_lock);
	error = proc_vmspace_getref(p, &vm);
	mutex_exit(p->p_lock);
	mutex_exit(&proc_lock);

	if (error == 0) {
		error = copyin_vmspace(vm, uaddr, kaddr, len);
		uvmspace_free(vm);
	}
	return error;
}

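/*
 * Example (illustrative sketch only): a ptrace/kvm-style "peek" that
 * reads a word from another process's address space by pid.
 * copyin_pid() does the proc_find() and vmspace-reference dance
 * internally, so the caller never touches the target's locks.
 *
 *	static int
 *	foo_peek(pid_t pid, vaddr_t va, long *valp)
 *	{
 *
 *		return copyin_pid(pid, (const void *)va, valp,
 *		    sizeof(*valp));
 *	}
 */
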
/*
 * Like copyin(), except it operates on kernel addresses when the FKIOCTL
 * flag is passed in `ioctlflags' from the ioctl call.
 */
int
ioctl_copyin(int ioctlflags, const void *src, void *dst, size_t len)
{
	if (ioctlflags & FKIOCTL)
		return kcopy(src, dst, len);
	return copyin(src, dst, len);
}

/*
 * Like copyout(), except it operates on kernel addresses when the FKIOCTL
 * flag is passed in `ioctlflags' from the ioctl call.
 */
int
ioctl_copyout(int ioctlflags, const void *src, void *dst, size_t len)
{
	if (ioctlflags & FKIOCTL)
		return kcopy(src, dst, len);
	return copyout(src, dst, len);
}

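/*
 * Example (illustrative sketch only): an ioctl handler whose command
 * carries an indirect buffer pointer.  Passing the flag word through to
 * ioctl_copyin() lets the same handler serve both user ioctls and
 * in-kernel (FKIOCTL) callers, whose embedded pointer is a kernel
 * address.
 *
 *	static int
 *	foo_ioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
 *	{
 *		struct foo_req *req = data;	(hypothetical: { buf, len })
 *		char kbuf[128];
 *		int error;
 *
 *		switch (cmd) {
 *		case FOOIOC_WRITE:		(hypothetical command)
 *			if (req->len > sizeof(kbuf))
 *				return SET_ERROR(EINVAL);
 *			error = ioctl_copyin(flag, req->buf, kbuf, req->len);
 *			if (error)
 *				return error;
 *			... consume kbuf ...
 *			return 0;
 *		default:
 *			return EPASSTHROUGH;
 *		}
 *	}
 */
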
    449  * User-space CAS / fetch / store
    450  */
    451 
    452 #ifdef __NO_STRICT_ALIGNMENT
    453 #define	CHECK_ALIGNMENT(x)	__nothing
    454 #else /* ! __NO_STRICT_ALIGNMENT */
    455 static bool
    456 ufetchstore_aligned(uintptr_t uaddr, size_t size)
    457 {
    458 	return (uaddr & (size - 1)) == 0;
    459 }
    460 
    461 #define	CHECK_ALIGNMENT()						\
    462 do {									\
    463 	if (!ufetchstore_aligned((uintptr_t)uaddr, sizeof(*uaddr)))	\
    464 		return SET_ERROR(EFAULT);				\
    465 } while (/*CONSTCOND*/0)
    466 #endif /* __NO_STRICT_ALIGNMENT */
    467 
/*
 * __HAVE_UCAS_FULL platforms provide _ucas_32() and _ucas_64() themselves.
 * _RUMPKERNEL also provides its own _ucas_32() and _ucas_64().
 *
 * In all other cases, we provide generic implementations that work on
 * all platforms.
 */

#if !defined(__HAVE_UCAS_FULL) && !defined(_RUMPKERNEL)
#if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/once.h>
#include <sys/mutex.h>
#include <sys/ipi.h>

static int ucas_critical_splcookie;
static volatile u_int ucas_critical_pausing_cpus;
static u_int ucas_critical_ipi;
static ONCE_DECL(ucas_critical_init_once)

static void
ucas_critical_cpu_gate(void *arg __unused)
{
	int count = SPINLOCK_BACKOFF_MIN;

	KASSERT(atomic_load_relaxed(&ucas_critical_pausing_cpus) > 0);

	/*
	 * Notify ucas_critical_wait that we have stopped.  Using
	 * store-release ensures all our memory operations up to the
	 * IPI happen before the ucas -- no buffered stores on our end
	 * can clobber it later on, for instance.
	 *
	 * Matches atomic_load_acquire in ucas_critical_wait -- turns
	 * the following atomic_dec_uint into a store-release.
	 */
	membar_release();
	atomic_dec_uint(&ucas_critical_pausing_cpus);

	/*
	 * Wait for ucas_critical_exit to reopen the gate and let us
	 * proceed.  Using a load-acquire ensures the ucas happens
	 * before any of our memory operations when we return from the
	 * IPI and proceed -- we won't observe any stale cached value
	 * that the ucas overwrote, for instance.
	 *
	 * Matches atomic_store_release in ucas_critical_exit.
	 */
	while (atomic_load_acquire(&ucas_critical_pausing_cpus) != (u_int)-1) {
		SPINLOCK_BACKOFF(count);
	}
}

static int
ucas_critical_init(void)
{

	ucas_critical_ipi = ipi_register(ucas_critical_cpu_gate, NULL);
	return 0;
}

static void
ucas_critical_wait(void)
{
	int count = SPINLOCK_BACKOFF_MIN;

	/*
	 * Wait for all CPUs to stop at the gate.  Using a load-acquire
	 * ensures all memory operations before they stop at the gate
	 * happen before the ucas -- no buffered stores in other CPUs
	 * can clobber it later on, for instance.
	 *
	 * Matches membar_release/atomic_dec_uint (store-release) in
	 * ucas_critical_cpu_gate.
	 */
	while (atomic_load_acquire(&ucas_critical_pausing_cpus) > 0) {
		SPINLOCK_BACKOFF(count);
	}
}
#endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */

static inline void
ucas_critical_enter(lwp_t * const l)
{

#if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
	if (ncpu > 1) {
		RUN_ONCE(&ucas_critical_init_once, ucas_critical_init);

		/*
		 * Acquire the mutex first, then go to splhigh() and
		 * broadcast the IPI to lock all of the other CPUs
		 * behind the gate.
		 *
		 * N.B. Going to splhigh() implicitly disables preemption,
		 * so there's no need to do it explicitly.
		 */
		mutex_enter(&cpu_lock);
		ucas_critical_splcookie = splhigh();
		ucas_critical_pausing_cpus = ncpu - 1;
		ipi_trigger_broadcast(ucas_critical_ipi, true);
		ucas_critical_wait();
		return;
	}
#endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */

	KPREEMPT_DISABLE(l);
}

static inline void
ucas_critical_exit(lwp_t * const l)
{

#if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
	if (ncpu > 1) {
		/*
		 * Open the gate and notify all CPUs in
		 * ucas_critical_cpu_gate that they can now proceed.
		 * Using a store-release ensures the ucas happens
		 * before any memory operations they issue after the
		 * IPI -- they won't observe any stale cache of the
		 * target word, for instance.
		 *
		 * Matches atomic_load_acquire in ucas_critical_cpu_gate.
		 */
		atomic_store_release(&ucas_critical_pausing_cpus, (u_int)-1);
		splx(ucas_critical_splcookie);
		mutex_exit(&cpu_lock);
		return;
	}
#endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */

	KPREEMPT_ENABLE(l);
}

int
_ucas_32(volatile uint32_t *uaddr, uint32_t old, uint32_t new, uint32_t *ret)
{
	lwp_t * const l = curlwp;
	uint32_t *uva = ((void *)(uintptr_t)uaddr);
	int error;

	/*
	 * Wire the user address down to avoid taking a page fault during
	 * the critical section.
	 */
	error = uvm_vslock(l->l_proc->p_vmspace, uva, sizeof(*uaddr),
			   VM_PROT_READ | VM_PROT_WRITE);
	if (error)
		return error;

	ucas_critical_enter(l);
	error = _ufetch_32(uva, ret);
	if (error == 0 && *ret == old) {
		error = _ustore_32(uva, new);
	}
	ucas_critical_exit(l);

	uvm_vsunlock(l->l_proc->p_vmspace, uva, sizeof(*uaddr));

	return error;
}

#ifdef _LP64
int
_ucas_64(volatile uint64_t *uaddr, uint64_t old, uint64_t new, uint64_t *ret)
{
	lwp_t * const l = curlwp;
	uint64_t *uva = ((void *)(uintptr_t)uaddr);
	int error;

	/*
	 * Wire the user address down to avoid taking a page fault during
	 * the critical section.
	 */
	error = uvm_vslock(l->l_proc->p_vmspace, uva, sizeof(*uaddr),
			   VM_PROT_READ | VM_PROT_WRITE);
	if (error)
		return error;

	ucas_critical_enter(l);
	error = _ufetch_64(uva, ret);
	if (error == 0 && *ret == old) {
		error = _ustore_64(uva, new);
	}
	ucas_critical_exit(l);

	uvm_vsunlock(l->l_proc->p_vmspace, uva, sizeof(*uaddr));

	return error;
}
#endif /* _LP64 */
#endif /* ! __HAVE_UCAS_FULL && ! _RUMPKERNEL */

int
ucas_32(volatile uint32_t *uaddr, uint32_t old, uint32_t new, uint32_t *ret)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
#if (defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)) && \
    !defined(_RUMPKERNEL)
	if (ncpu > 1) {
		return _ucas_32_mp(uaddr, old, new, ret);
	}
#endif /* __HAVE_UCAS_MP && MULTIPROCESSOR */
	return _ucas_32(uaddr, old, new, ret);
}

#ifdef _LP64
int
ucas_64(volatile uint64_t *uaddr, uint64_t old, uint64_t new, uint64_t *ret)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
#if (defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)) && \
    !defined(_RUMPKERNEL)
	if (ncpu > 1) {
		return _ucas_64_mp(uaddr, old, new, ret);
	}
#endif /* __HAVE_UCAS_MP && MULTIPROCESSOR */
	return _ucas_64(uaddr, old, new, ret);
}
#endif /* _LP64 */

__strong_alias(ucas_int,ucas_32);
#ifdef _LP64
__strong_alias(ucas_ptr,ucas_64);
#else
__strong_alias(ucas_ptr,ucas_32);
#endif /* _LP64 */

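/*
 * Example (illustrative sketch only): atomically incrementing a 32-bit
 * counter in user memory with the usual compare-and-swap retry loop.
 * ucas_32() reports the word's observed value through the last argument,
 * so the loop retries until no other thread raced with the update.
 *
 *	static int
 *	foo_uinc(volatile uint32_t *uva)
 *	{
 *		uint32_t expected, actual;
 *		int error;
 *
 *		if ((error = ufetch_32(__UNVOLATILE(uva), &actual)) != 0)
 *			return error;
 *		do {
 *			expected = actual;
 *			error = ucas_32(uva, expected, expected + 1,
 *			    &actual);
 *		} while (error == 0 && actual != expected);
 *		return error;
 *	}
 */
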
int
ufetch_8(const uint8_t *uaddr, uint8_t *valp)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ufetch_8(uaddr, valp);
}

int
ufetch_16(const uint16_t *uaddr, uint16_t *valp)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ufetch_16(uaddr, valp);
}

int
ufetch_32(const uint32_t *uaddr, uint32_t *valp)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ufetch_32(uaddr, valp);
}

#ifdef _LP64
int
ufetch_64(const uint64_t *uaddr, uint64_t *valp)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ufetch_64(uaddr, valp);
}
#endif /* _LP64 */

__strong_alias(ufetch_char,ufetch_8);
__strong_alias(ufetch_short,ufetch_16);
__strong_alias(ufetch_int,ufetch_32);
#ifdef _LP64
__strong_alias(ufetch_long,ufetch_64);
__strong_alias(ufetch_ptr,ufetch_64);
#else
__strong_alias(ufetch_long,ufetch_32);
__strong_alias(ufetch_ptr,ufetch_32);
#endif /* _LP64 */

int
ustore_8(uint8_t *uaddr, uint8_t val)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ustore_8(uaddr, val);
}

int
ustore_16(uint16_t *uaddr, uint16_t val)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ustore_16(uaddr, val);
}

int
ustore_32(uint32_t *uaddr, uint32_t val)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ustore_32(uaddr, val);
}

#ifdef _LP64
int
ustore_64(uint64_t *uaddr, uint64_t val)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ustore_64(uaddr, val);
}
#endif /* _LP64 */

__strong_alias(ustore_char,ustore_8);
__strong_alias(ustore_short,ustore_16);
__strong_alias(ustore_int,ustore_32);
#ifdef _LP64
__strong_alias(ustore_long,ustore_64);
__strong_alias(ustore_ptr,ustore_64);
#else
__strong_alias(ustore_long,ustore_32);
__strong_alias(ustore_ptr,ustore_32);
#endif /* _LP64 */
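
/*
 * Example (illustrative sketch only): reading and updating a single word
 * in the current process's address space without building a uio, e.g.
 * for a compat shim that patches a flag word it handed to userland.
 *
 *	uint32_t flags;
 *	int error;
 *
 *	if ((error = ufetch_32(uflagsp, &flags)) != 0)
 *		return error;
 *	flags |= FOO_DONE;		(hypothetical flag, uflagsp is the
 *					 caller's user pointer)
 *	return ustore_32(uflagsp, flags);
 *
 * Note that fetch-then-store is not atomic with respect to other threads
 * of the target process; use ucas_32() above when that matters.
 */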