Home | History | Annotate | Line # | Download | only in kern
subr_copy.c revision 1.14
      1  1.14        ad /*	$NetBSD: subr_copy.c,v 1.14 2020/05/23 23:42:43 ad Exp $	*/
      2   1.1     pooka 
      3   1.1     pooka /*-
      4   1.9   thorpej  * Copyright (c) 1997, 1998, 1999, 2002, 2007, 2008, 2019
      5   1.9   thorpej  *	The NetBSD Foundation, Inc.
      6   1.1     pooka  * All rights reserved.
      7   1.1     pooka  *
      8   1.1     pooka  * This code is derived from software contributed to The NetBSD Foundation
      9   1.1     pooka  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
     10   1.1     pooka  * NASA Ames Research Center.
     11   1.1     pooka  *
     12   1.1     pooka  * Redistribution and use in source and binary forms, with or without
     13   1.1     pooka  * modification, are permitted provided that the following conditions
     14   1.1     pooka  * are met:
     15   1.1     pooka  * 1. Redistributions of source code must retain the above copyright
     16   1.1     pooka  *    notice, this list of conditions and the following disclaimer.
     17   1.1     pooka  * 2. Redistributions in binary form must reproduce the above copyright
     18   1.1     pooka  *    notice, this list of conditions and the following disclaimer in the
     19   1.1     pooka  *    documentation and/or other materials provided with the distribution.
     20   1.1     pooka  *
     21   1.1     pooka  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     22   1.1     pooka  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     23   1.1     pooka  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     24   1.1     pooka  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     25   1.1     pooka  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     26   1.1     pooka  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     27   1.1     pooka  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     28   1.1     pooka  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     29   1.1     pooka  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     30   1.1     pooka  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     31   1.1     pooka  * POSSIBILITY OF SUCH DAMAGE.
     32   1.1     pooka  */
     33   1.1     pooka 
     34   1.1     pooka /*
     35   1.1     pooka  * Copyright (c) 1982, 1986, 1991, 1993
     36   1.1     pooka  *	The Regents of the University of California.  All rights reserved.
     37   1.1     pooka  * (c) UNIX System Laboratories, Inc.
     38   1.1     pooka  * All or some portions of this file are derived from material licensed
     39   1.1     pooka  * to the University of California by American Telephone and Telegraph
     40   1.1     pooka  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     41   1.1     pooka  * the permission of UNIX System Laboratories, Inc.
     42   1.1     pooka  *
     43   1.1     pooka  * Copyright (c) 1992, 1993
     44   1.1     pooka  *	The Regents of the University of California.  All rights reserved.
     45   1.1     pooka  *
     46   1.1     pooka  * This software was developed by the Computer Systems Engineering group
     47   1.1     pooka  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
     48   1.1     pooka  * contributed to Berkeley.
     49   1.1     pooka  *
     50   1.1     pooka  * All advertising materials mentioning features or use of this software
     51   1.1     pooka  * must display the following acknowledgement:
     52   1.1     pooka  *	This product includes software developed by the University of
     53   1.1     pooka  *	California, Lawrence Berkeley Laboratory.
     54   1.1     pooka  *
     55   1.1     pooka  * Redistribution and use in source and binary forms, with or without
     56   1.1     pooka  * modification, are permitted provided that the following conditions
     57   1.1     pooka  * are met:
     58   1.1     pooka  * 1. Redistributions of source code must retain the above copyright
     59   1.1     pooka  *    notice, this list of conditions and the following disclaimer.
     60   1.1     pooka  * 2. Redistributions in binary form must reproduce the above copyright
     61   1.1     pooka  *    notice, this list of conditions and the following disclaimer in the
     62   1.1     pooka  *    documentation and/or other materials provided with the distribution.
     63   1.1     pooka  * 3. Neither the name of the University nor the names of its contributors
     64   1.1     pooka  *    may be used to endorse or promote products derived from this software
     65   1.1     pooka  *    without specific prior written permission.
     66   1.1     pooka  *
     67   1.1     pooka  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     68   1.1     pooka  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     69   1.1     pooka  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     70   1.1     pooka  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     71   1.1     pooka  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     72   1.1     pooka  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     73   1.1     pooka  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     74   1.1     pooka  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     75   1.1     pooka  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     76   1.1     pooka  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     77   1.1     pooka  * SUCH DAMAGE.
     78   1.1     pooka  *
     79   1.1     pooka  *	@(#)kern_subr.c	8.4 (Berkeley) 2/14/95
     80   1.1     pooka  */
     81   1.1     pooka 
     82   1.1     pooka #include <sys/cdefs.h>
     83  1.14        ad __KERNEL_RCSID(0, "$NetBSD: subr_copy.c,v 1.14 2020/05/23 23:42:43 ad Exp $");
     84   1.9   thorpej 
     85   1.9   thorpej #define	__UFETCHSTORE_PRIVATE
     86   1.9   thorpej #define	__UCAS_PRIVATE
     87   1.1     pooka 
     88   1.1     pooka #include <sys/param.h>
     89   1.1     pooka #include <sys/fcntl.h>
     90   1.1     pooka #include <sys/proc.h>
     91   1.1     pooka #include <sys/systm.h>
     92   1.1     pooka 
     93   1.1     pooka #include <uvm/uvm_extern.h>
     94   1.1     pooka 
/*
 * uio_setup_sysspace:
 *
 *	Mark the uio as operating on kernel virtual addresses by
 *	pointing it at the kernel's vmspace.
 */
void
uio_setup_sysspace(struct uio *uio)
{

	uio->uio_vmspace = vmspace_kernel();
}
    101   1.1     pooka 
/*
 * uiomove:
 *
 *	Transfer up to n bytes between the kernel buffer `buf' and the
 *	I/O vector described by `uio', advancing the uio bookkeeping
 *	(iov pointer/len, resid, offset) as data moves.  Direction is
 *	taken from uio->uio_rw: UIO_READ copies kernel -> uio target,
 *	UIO_WRITE copies uio source -> kernel.  Returns 0 on success
 *	or the error from the underlying vmspace copy.
 */
int
uiomove(void *buf, size_t n, struct uio *uio)
{
	struct vmspace *vm = uio->uio_vmspace;
	struct iovec *iov;
	size_t cnt;
	int error = 0;
	char *cp = buf;

	/* Copies into a user vmspace may fault and sleep. */
	ASSERT_SLEEPABLE();

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE);
	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			/* Skip exhausted iovec entries. */
			KASSERT(uio->uio_iovcnt > 0);
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;
		if (!VMSPACE_IS_KERNEL_P(vm)) {
			/* Give preemption a chance between user copies. */
			preempt_point();
		}

		if (uio->uio_rw == UIO_READ) {
			error = copyout_vmspace(vm, cp, iov->iov_base,
			    cnt);
		} else {
			error = copyin_vmspace(vm, iov->iov_base, cp,
			    cnt);
		}
		if (error) {
			break;
		}
		/* Advance the current iovec and the uio as a whole. */
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		KDASSERT(cnt <= n);
		n -= cnt;
	}

	return (error);
}
    150   1.1     pooka 
    151   1.1     pooka /*
    152   1.1     pooka  * Wrapper for uiomove() that validates the arguments against a known-good
    153   1.1     pooka  * kernel buffer.
    154   1.1     pooka  */
    155   1.1     pooka int
    156   1.1     pooka uiomove_frombuf(void *buf, size_t buflen, struct uio *uio)
    157   1.1     pooka {
    158   1.1     pooka 	size_t offset;
    159   1.1     pooka 
    160   1.1     pooka 	if (uio->uio_offset < 0 || /* uio->uio_resid < 0 || */
    161   1.1     pooka 	    (offset = uio->uio_offset) != uio->uio_offset)
    162   1.1     pooka 		return (EINVAL);
    163   1.1     pooka 	if (offset >= buflen)
    164   1.1     pooka 		return (0);
    165   1.1     pooka 	return (uiomove((char *)buf + offset, buflen - offset, uio));
    166   1.1     pooka }
    167   1.1     pooka 
    168   1.1     pooka /*
    169   1.1     pooka  * Give next character to user as result of read.
    170   1.1     pooka  */
    171   1.1     pooka int
    172   1.1     pooka ureadc(int c, struct uio *uio)
    173   1.1     pooka {
    174   1.1     pooka 	struct iovec *iov;
    175   1.1     pooka 
    176   1.1     pooka 	if (uio->uio_resid <= 0)
    177   1.1     pooka 		panic("ureadc: non-positive resid");
    178   1.1     pooka again:
    179   1.1     pooka 	if (uio->uio_iovcnt <= 0)
    180   1.1     pooka 		panic("ureadc: non-positive iovcnt");
    181   1.1     pooka 	iov = uio->uio_iov;
    182   1.1     pooka 	if (iov->iov_len <= 0) {
    183   1.1     pooka 		uio->uio_iovcnt--;
    184   1.1     pooka 		uio->uio_iov++;
    185   1.1     pooka 		goto again;
    186   1.1     pooka 	}
    187   1.1     pooka 	if (!VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
    188   1.9   thorpej 		int error;
    189   1.9   thorpej 		if ((error = ustore_char(iov->iov_base, c)) != 0)
    190   1.9   thorpej 			return (error);
    191   1.1     pooka 	} else {
    192   1.1     pooka 		*(char *)iov->iov_base = c;
    193   1.1     pooka 	}
    194   1.1     pooka 	iov->iov_base = (char *)iov->iov_base + 1;
    195   1.1     pooka 	iov->iov_len--;
    196   1.1     pooka 	uio->uio_resid--;
    197   1.1     pooka 	uio->uio_offset++;
    198   1.1     pooka 	return (0);
    199   1.1     pooka }
    200   1.1     pooka 
    201   1.1     pooka /*
    202   1.1     pooka  * Like copyin(), but operates on an arbitrary vmspace.
    203   1.1     pooka  */
    204   1.1     pooka int
    205   1.1     pooka copyin_vmspace(struct vmspace *vm, const void *uaddr, void *kaddr, size_t len)
    206   1.1     pooka {
    207   1.1     pooka 	struct iovec iov;
    208   1.1     pooka 	struct uio uio;
    209   1.1     pooka 	int error;
    210   1.1     pooka 
    211   1.1     pooka 	if (len == 0)
    212   1.1     pooka 		return (0);
    213   1.1     pooka 
    214   1.3  riastrad 	if (VMSPACE_IS_KERNEL_P(vm)) {
    215   1.3  riastrad 		return kcopy(uaddr, kaddr, len);
    216   1.3  riastrad 	}
    217   1.2  riastrad 	if (__predict_true(vm == curproc->p_vmspace)) {
    218   1.2  riastrad 		return copyin(uaddr, kaddr, len);
    219   1.2  riastrad 	}
    220   1.1     pooka 
    221   1.1     pooka 	iov.iov_base = kaddr;
    222   1.1     pooka 	iov.iov_len = len;
    223   1.1     pooka 	uio.uio_iov = &iov;
    224   1.1     pooka 	uio.uio_iovcnt = 1;
    225   1.1     pooka 	uio.uio_offset = (off_t)(uintptr_t)uaddr;
    226   1.1     pooka 	uio.uio_resid = len;
    227   1.1     pooka 	uio.uio_rw = UIO_READ;
    228   1.1     pooka 	UIO_SETUP_SYSSPACE(&uio);
    229   1.7  christos 	error = uvm_io(&vm->vm_map, &uio, 0);
    230   1.1     pooka 
    231   1.1     pooka 	return (error);
    232   1.1     pooka }
    233   1.1     pooka 
    234   1.1     pooka /*
    235   1.1     pooka  * Like copyout(), but operates on an arbitrary vmspace.
    236   1.1     pooka  */
    237   1.1     pooka int
    238   1.1     pooka copyout_vmspace(struct vmspace *vm, const void *kaddr, void *uaddr, size_t len)
    239   1.1     pooka {
    240   1.1     pooka 	struct iovec iov;
    241   1.1     pooka 	struct uio uio;
    242   1.1     pooka 	int error;
    243   1.1     pooka 
    244   1.1     pooka 	if (len == 0)
    245   1.1     pooka 		return (0);
    246   1.1     pooka 
    247   1.3  riastrad 	if (VMSPACE_IS_KERNEL_P(vm)) {
    248   1.3  riastrad 		return kcopy(kaddr, uaddr, len);
    249   1.3  riastrad 	}
    250   1.2  riastrad 	if (__predict_true(vm == curproc->p_vmspace)) {
    251   1.2  riastrad 		return copyout(kaddr, uaddr, len);
    252   1.2  riastrad 	}
    253   1.1     pooka 
    254   1.1     pooka 	iov.iov_base = __UNCONST(kaddr); /* XXXUNCONST cast away const */
    255   1.1     pooka 	iov.iov_len = len;
    256   1.1     pooka 	uio.uio_iov = &iov;
    257   1.1     pooka 	uio.uio_iovcnt = 1;
    258   1.1     pooka 	uio.uio_offset = (off_t)(uintptr_t)uaddr;
    259   1.1     pooka 	uio.uio_resid = len;
    260   1.1     pooka 	uio.uio_rw = UIO_WRITE;
    261   1.1     pooka 	UIO_SETUP_SYSSPACE(&uio);
    262   1.7  christos 	error = uvm_io(&vm->vm_map, &uio, 0);
    263   1.1     pooka 
    264   1.1     pooka 	return (error);
    265   1.1     pooka }
    266   1.1     pooka 
    267   1.1     pooka /*
    268   1.1     pooka  * Like copyin(), but operates on an arbitrary process.
    269   1.1     pooka  */
    270   1.1     pooka int
    271   1.1     pooka copyin_proc(struct proc *p, const void *uaddr, void *kaddr, size_t len)
    272   1.1     pooka {
    273   1.1     pooka 	struct vmspace *vm;
    274   1.1     pooka 	int error;
    275   1.1     pooka 
    276   1.1     pooka 	error = proc_vmspace_getref(p, &vm);
    277   1.1     pooka 	if (error) {
    278   1.1     pooka 		return error;
    279   1.1     pooka 	}
    280   1.1     pooka 	error = copyin_vmspace(vm, uaddr, kaddr, len);
    281   1.1     pooka 	uvmspace_free(vm);
    282   1.1     pooka 
    283   1.1     pooka 	return error;
    284   1.1     pooka }
    285   1.1     pooka 
    286   1.1     pooka /*
    287   1.1     pooka  * Like copyout(), but operates on an arbitrary process.
    288   1.1     pooka  */
    289   1.1     pooka int
    290   1.1     pooka copyout_proc(struct proc *p, const void *kaddr, void *uaddr, size_t len)
    291   1.1     pooka {
    292   1.1     pooka 	struct vmspace *vm;
    293   1.1     pooka 	int error;
    294   1.1     pooka 
    295   1.1     pooka 	error = proc_vmspace_getref(p, &vm);
    296   1.1     pooka 	if (error) {
    297   1.1     pooka 		return error;
    298   1.1     pooka 	}
    299   1.1     pooka 	error = copyout_vmspace(vm, kaddr, uaddr, len);
    300   1.1     pooka 	uvmspace_free(vm);
    301   1.1     pooka 
    302   1.1     pooka 	return error;
    303   1.1     pooka }
    304   1.1     pooka 
/*
 * Like copyin(), but operates on an arbitrary pid.
 */
int
copyin_pid(pid_t pid, const void *uaddr, void *kaddr, size_t len)
{
	struct proc *p;
	struct vmspace *vm;
	int error;

	/*
	 * Look the process up under proc_lock, then take a reference
	 * on its vmspace under p_lock before dropping both locks, so
	 * the vmspace cannot disappear while we perform the copy.
	 */
	mutex_enter(&proc_lock);
	p = proc_find(pid);
	if (p == NULL) {
		mutex_exit(&proc_lock);
		return ESRCH;
	}
	mutex_enter(p->p_lock);
	error = proc_vmspace_getref(p, &vm);
	mutex_exit(p->p_lock);
	mutex_exit(&proc_lock);

	/* Copy with both locks dropped; release our vmspace reference. */
	if (error == 0) {
		error = copyin_vmspace(vm, uaddr, kaddr, len);
		uvmspace_free(vm);
	}
	return error;
}
    332   1.8       chs 
    333   1.8       chs /*
    334   1.1     pooka  * Like copyin(), except it operates on kernel addresses when the FKIOCTL
    335   1.1     pooka  * flag is passed in `ioctlflags' from the ioctl call.
    336   1.1     pooka  */
    337   1.1     pooka int
    338   1.1     pooka ioctl_copyin(int ioctlflags, const void *src, void *dst, size_t len)
    339   1.1     pooka {
    340   1.1     pooka 	if (ioctlflags & FKIOCTL)
    341   1.1     pooka 		return kcopy(src, dst, len);
    342   1.1     pooka 	return copyin(src, dst, len);
    343   1.1     pooka }
    344   1.1     pooka 
    345   1.1     pooka /*
    346   1.1     pooka  * Like copyout(), except it operates on kernel addresses when the FKIOCTL
    347   1.1     pooka  * flag is passed in `ioctlflags' from the ioctl call.
    348   1.1     pooka  */
    349   1.1     pooka int
    350   1.1     pooka ioctl_copyout(int ioctlflags, const void *src, void *dst, size_t len)
    351   1.1     pooka {
    352   1.1     pooka 	if (ioctlflags & FKIOCTL)
    353   1.1     pooka 		return kcopy(src, dst, len);
    354   1.1     pooka 	return copyout(src, dst, len);
    355   1.1     pooka }
    356   1.9   thorpej 
    357   1.9   thorpej /*
    358   1.9   thorpej  * User-space CAS / fetch / store
    359   1.9   thorpej  */
    360   1.9   thorpej 
    361   1.9   thorpej #ifdef __NO_STRICT_ALIGNMENT
    362   1.9   thorpej #define	CHECK_ALIGNMENT(x)	__nothing
    363   1.9   thorpej #else /* ! __NO_STRICT_ALIGNMENT */
/*
 * Return true iff `uaddr' is aligned to `size' bytes.  Callers pass
 * the power-of-two size of the access, so a mask test suffices.
 */
static bool
ufetchstore_aligned(uintptr_t uaddr, size_t size)
{
	return !(uaddr & (size - 1));
}
    369   1.9   thorpej 
    370   1.9   thorpej #define	CHECK_ALIGNMENT()						\
    371   1.9   thorpej do {									\
    372   1.9   thorpej 	if (!ufetchstore_aligned((uintptr_t)uaddr, sizeof(*uaddr)))	\
    373   1.9   thorpej 		return EFAULT;						\
    374   1.9   thorpej } while (/*CONSTCOND*/0)
    375   1.9   thorpej #endif /* __NO_STRICT_ALIGNMENT */
    376   1.9   thorpej 
    377  1.10   thorpej /*
    378  1.10   thorpej  * __HAVE_UCAS_FULL platforms provide _ucas_32() and _ucas_64() themselves.
    379  1.10   thorpej  * _RUMPKERNEL also provides it's own _ucas_32() and _ucas_64().
    380  1.10   thorpej  *
    381  1.10   thorpej  * In all other cases, we provide generic implementations that work on
    382  1.10   thorpej  * all platforms.
    383  1.10   thorpej  */
    384  1.10   thorpej 
    385  1.10   thorpej #if !defined(__HAVE_UCAS_FULL) && !defined(_RUMPKERNEL)
    386   1.9   thorpej #if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
    387   1.9   thorpej #include <sys/atomic.h>
    388   1.9   thorpej #include <sys/cpu.h>
    389   1.9   thorpej #include <sys/once.h>
    390   1.9   thorpej #include <sys/mutex.h>
    391   1.9   thorpej #include <sys/ipi.h>
    392   1.9   thorpej 
/* SPL cookie saved across the ucas critical section. */
static int ucas_critical_splcookie;
/* CPUs still spinning toward the gate; set to (u_int)-1 to open it. */
static volatile u_int ucas_critical_pausing_cpus;
/* IPI handle used to herd the other CPUs into ucas_critical_cpu_gate(). */
static u_int ucas_critical_ipi;
static ONCE_DECL(ucas_critical_init_once)
    397   1.9   thorpej 
/*
 * IPI handler: park this CPU at the gate until the initiating CPU's
 * ucas critical section is over (signalled by setting
 * ucas_critical_pausing_cpus to (u_int)-1).
 */
static void
ucas_critical_cpu_gate(void *arg __unused)
{
	int count = SPINLOCK_BACKOFF_MIN;

	KASSERT(ucas_critical_pausing_cpus > 0);
	/* Signal arrival at the gate, then spin until it opens. */
	atomic_dec_uint(&ucas_critical_pausing_cpus);
	while (ucas_critical_pausing_cpus != (u_int)-1) {
		SPINLOCK_BACKOFF(count);
	}
	/*
	 * NOTE(review): no explicit memory barriers around the spin --
	 * presumably the atomic op and IPI entry/exit order things
	 * sufficiently; confirm against later ucas(9) membar audits.
	 */
}
    409   1.9   thorpej 
/*
 * Once-only setup: register the IPI that gates the other CPUs.
 * Always returns 0 (RUN_ONCE protocol).
 */
static int
ucas_critical_init(void)
{
	ucas_critical_ipi = ipi_register(ucas_critical_cpu_gate, NULL);
	return 0;
}
    416   1.9   thorpej 
/*
 * Spin until every other CPU has checked in at the gate
 * (ucas_critical_pausing_cpus has dropped to zero).
 */
static void
ucas_critical_wait(void)
{
	int count = SPINLOCK_BACKOFF_MIN;

	while (ucas_critical_pausing_cpus > 0) {
		SPINLOCK_BACKOFF(count);
	}
}
    426   1.9   thorpej #endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */
    427   1.9   thorpej 
/*
 * Enter the ucas critical section.  On MP platforms lacking a native
 * MP-safe ucas, herd every other CPU behind an IPI gate so that the
 * fetch/compare/store sequence is effectively atomic system-wide;
 * otherwise just disable kernel preemption on this LWP.
 */
static inline void
ucas_critical_enter(lwp_t * const l)
{

#if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
	if (ncpu > 1) {
		RUN_ONCE(&ucas_critical_init_once, ucas_critical_init);

		/*
		 * Acquire the mutex first, then go to splhigh() and
		 * broadcast the IPI to lock all of the other CPUs
		 * behind the gate.
		 *
		 * N.B. Going to splhigh() implicitly disables preemption,
		 * so there's no need to do it explicitly.
		 */
		mutex_enter(&cpu_lock);
		ucas_critical_splcookie = splhigh();
		ucas_critical_pausing_cpus = ncpu - 1;
		membar_enter();

		ipi_trigger_broadcast(ucas_critical_ipi, true);
		ucas_critical_wait();
		return;
	}
#endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */

	KPREEMPT_DISABLE(l);
}
    457   1.9   thorpej 
/*
 * Leave the ucas critical section: open the gate for the spinning
 * CPUs (or, in the uniprocessor/native case, re-enable preemption),
 * undoing exactly what ucas_critical_enter() did.
 */
static inline void
ucas_critical_exit(lwp_t * const l)
{

#if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
	if (ncpu > 1) {
		membar_exit();
		/* (u_int)-1 releases the CPUs spinning in the gate. */
		ucas_critical_pausing_cpus = (u_int)-1;
		splx(ucas_critical_splcookie);
		mutex_exit(&cpu_lock);
		return;
	}
#endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */

	KPREEMPT_ENABLE(l);
}
    474   1.9   thorpej 
/*
 * Generic 32-bit user-space compare-and-swap: fetch the current value
 * at `uaddr' into *ret and, if it equals `old', store `new'.  The
 * critical section makes the sequence atomic with respect to other
 * CPUs/LWPs.  Returns 0 on success or an error from uvm_vslock() or
 * the fetch/store primitives.
 */
int
_ucas_32(volatile uint32_t *uaddr, uint32_t old, uint32_t new, uint32_t *ret)
{
	lwp_t * const l = curlwp;
	uint32_t *uva = ((void *)(uintptr_t)uaddr);
	int error;

	/*
	 * Wire the user address down to avoid taking a page fault during
	 * the critical section.
	 */
	error = uvm_vslock(l->l_proc->p_vmspace, uva, sizeof(*uaddr),
			   VM_PROT_READ | VM_PROT_WRITE);
	if (error)
		return error;

	ucas_critical_enter(l);
	error = _ufetch_32(uva, ret);
	if (error == 0 && *ret == old) {
		error = _ustore_32(uva, new);
	}
	ucas_critical_exit(l);

	uvm_vsunlock(l->l_proc->p_vmspace, uva, sizeof(*uaddr));

	return error;
}
    502   1.9   thorpej 
#ifdef _LP64
/*
 * Generic 64-bit user-space compare-and-swap; see _ucas_32() above.
 * Only provided on LP64 platforms.
 */
int
_ucas_64(volatile uint64_t *uaddr, uint64_t old, uint64_t new, uint64_t *ret)
{
	lwp_t * const l = curlwp;
	uint64_t *uva = ((void *)(uintptr_t)uaddr);
	int error;

	/*
	 * Wire the user address down to avoid taking a page fault during
	 * the critical section.
	 */
	error = uvm_vslock(l->l_proc->p_vmspace, uva, sizeof(*uaddr),
			   VM_PROT_READ | VM_PROT_WRITE);
	if (error)
		return error;

	ucas_critical_enter(l);
	error = _ufetch_64(uva, ret);
	if (error == 0 && *ret == old) {
		error = _ustore_64(uva, new);
	}
	ucas_critical_exit(l);

	uvm_vsunlock(l->l_proc->p_vmspace, uva, sizeof(*uaddr));

	return error;
}
#endif /* _LP64 */
    532  1.10   thorpej #endif /* ! __HAVE_UCAS_FULL && ! _RUMPKERNEL */
    533   1.9   thorpej 
/*
 * ucas_32: exported 32-bit user-space compare-and-swap.  Validates
 * alignment, then dispatches to the machine-dependent MP variant when
 * one exists and more than one CPU is running, else to the generic
 * (or machine-provided) _ucas_32().
 */
int
ucas_32(volatile uint32_t *uaddr, uint32_t old, uint32_t new, uint32_t *ret)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
#if (defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)) && \
    !defined(_RUMPKERNEL)
	if (ncpu > 1) {
		return _ucas_32_mp(uaddr, old, new, ret);
	}
#endif /* __HAVE_UCAS_MP && MULTIPROCESSOR */
	return _ucas_32(uaddr, old, new, ret);
}
    548   1.9   thorpej 
#ifdef _LP64
/*
 * ucas_64: exported 64-bit user-space compare-and-swap; see ucas_32()
 * above.  Only provided on LP64 platforms.
 */
int
ucas_64(volatile uint64_t *uaddr, uint64_t old, uint64_t new, uint64_t *ret)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
#if (defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)) && \
    !defined(_RUMPKERNEL)
	if (ncpu > 1) {
		return _ucas_64_mp(uaddr, old, new, ret);
	}
#endif /* __HAVE_UCAS_MP && MULTIPROCESSOR */
	return _ucas_64(uaddr, old, new, ret);
}
#endif /* _LP64 */
    565   1.9   thorpej 
/* C-type-named aliases for the fixed-width ucas entry points. */
__strong_alias(ucas_int,ucas_32);
#ifdef _LP64
__strong_alias(ucas_ptr,ucas_64);
#else
__strong_alias(ucas_ptr,ucas_32);
#endif /* _LP64 */
    572   1.9   thorpej 
/*
 * Fetch an 8-bit value from user space into *valp.  May sleep;
 * enforces natural alignment on strict-alignment platforms.
 */
int
ufetch_8(const uint8_t *uaddr, uint8_t *valp)
{
	int error;

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	error = _ufetch_8(uaddr, valp);
	return error;
}
    581   1.9   thorpej 
/*
 * Fetch a 16-bit value from user space into *valp.  May sleep;
 * enforces natural alignment on strict-alignment platforms.
 */
int
ufetch_16(const uint16_t *uaddr, uint16_t *valp)
{
	int error;

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	error = _ufetch_16(uaddr, valp);
	return error;
}
    590   1.9   thorpej 
/*
 * Fetch a 32-bit value from user space into *valp.  May sleep;
 * enforces natural alignment on strict-alignment platforms.
 */
int
ufetch_32(const uint32_t *uaddr, uint32_t *valp)
{
	int error;

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	error = _ufetch_32(uaddr, valp);
	return error;
}
    599   1.9   thorpej 
#ifdef _LP64
/*
 * Fetch a 64-bit value from user space into *valp (LP64 only).  May
 * sleep; enforces natural alignment on strict-alignment platforms.
 */
int
ufetch_64(const uint64_t *uaddr, uint64_t *valp)
{
	int error;

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	error = _ufetch_64(uaddr, valp);
	return error;
}
#endif /* _LP64 */
    610   1.9   thorpej 
/* C-type-named aliases for the fixed-width ufetch entry points. */
__strong_alias(ufetch_char,ufetch_8);
__strong_alias(ufetch_short,ufetch_16);
__strong_alias(ufetch_int,ufetch_32);
#ifdef _LP64
__strong_alias(ufetch_long,ufetch_64);
__strong_alias(ufetch_ptr,ufetch_64);
#else
__strong_alias(ufetch_long,ufetch_32);
__strong_alias(ufetch_ptr,ufetch_32);
#endif /* _LP64 */
    621   1.9   thorpej 
/*
 * Store an 8-bit value to user space.  May sleep; enforces natural
 * alignment on strict-alignment platforms.
 */
int
ustore_8(uint8_t *uaddr, uint8_t val)
{
	int error;

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	error = _ustore_8(uaddr, val);
	return error;
}
    630   1.9   thorpej 
/*
 * Store a 16-bit value to user space.  May sleep; enforces natural
 * alignment on strict-alignment platforms.
 */
int
ustore_16(uint16_t *uaddr, uint16_t val)
{
	int error;

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	error = _ustore_16(uaddr, val);
	return error;
}
    639   1.9   thorpej 
/*
 * Store a 32-bit value to user space.  May sleep; enforces natural
 * alignment on strict-alignment platforms.
 */
int
ustore_32(uint32_t *uaddr, uint32_t val)
{
	int error;

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	error = _ustore_32(uaddr, val);
	return error;
}
    648   1.9   thorpej 
#ifdef _LP64
/*
 * Store a 64-bit value to user space (LP64 only).  May sleep;
 * enforces natural alignment on strict-alignment platforms.
 */
int
ustore_64(uint64_t *uaddr, uint64_t val)
{
	int error;

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	error = _ustore_64(uaddr, val);
	return error;
}
#endif /* _LP64 */
    659   1.9   thorpej 
/* C-type-named aliases for the fixed-width ustore entry points. */
__strong_alias(ustore_char,ustore_8);
__strong_alias(ustore_short,ustore_16);
__strong_alias(ustore_int,ustore_32);
#ifdef _LP64
__strong_alias(ustore_long,ustore_64);
__strong_alias(ustore_ptr,ustore_64);
#else
__strong_alias(ustore_long,ustore_32);
__strong_alias(ustore_ptr,ustore_32);
#endif /* _LP64 */
    670