Home | History | Annotate | Line # | Download | only in kern
subr_copy.c revision 1.10
      1  1.10   thorpej /*	$NetBSD: subr_copy.c,v 1.10 2019/04/06 15:52:35 thorpej Exp $	*/
      2   1.1     pooka 
      3   1.1     pooka /*-
      4   1.9   thorpej  * Copyright (c) 1997, 1998, 1999, 2002, 2007, 2008, 2019
      5   1.9   thorpej  *	The NetBSD Foundation, Inc.
      6   1.1     pooka  * All rights reserved.
      7   1.1     pooka  *
      8   1.1     pooka  * This code is derived from software contributed to The NetBSD Foundation
      9   1.1     pooka  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
     10   1.1     pooka  * NASA Ames Research Center.
     11   1.1     pooka  *
     12   1.1     pooka  * Redistribution and use in source and binary forms, with or without
     13   1.1     pooka  * modification, are permitted provided that the following conditions
     14   1.1     pooka  * are met:
     15   1.1     pooka  * 1. Redistributions of source code must retain the above copyright
     16   1.1     pooka  *    notice, this list of conditions and the following disclaimer.
     17   1.1     pooka  * 2. Redistributions in binary form must reproduce the above copyright
     18   1.1     pooka  *    notice, this list of conditions and the following disclaimer in the
     19   1.1     pooka  *    documentation and/or other materials provided with the distribution.
     20   1.1     pooka  *
     21   1.1     pooka  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     22   1.1     pooka  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     23   1.1     pooka  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     24   1.1     pooka  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     25   1.1     pooka  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     26   1.1     pooka  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     27   1.1     pooka  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     28   1.1     pooka  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     29   1.1     pooka  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     30   1.1     pooka  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     31   1.1     pooka  * POSSIBILITY OF SUCH DAMAGE.
     32   1.1     pooka  */
     33   1.1     pooka 
     34   1.1     pooka /*
     35   1.1     pooka  * Copyright (c) 1982, 1986, 1991, 1993
     36   1.1     pooka  *	The Regents of the University of California.  All rights reserved.
     37   1.1     pooka  * (c) UNIX System Laboratories, Inc.
     38   1.1     pooka  * All or some portions of this file are derived from material licensed
     39   1.1     pooka  * to the University of California by American Telephone and Telegraph
     40   1.1     pooka  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     41   1.1     pooka  * the permission of UNIX System Laboratories, Inc.
     42   1.1     pooka  *
     43   1.1     pooka  * Copyright (c) 1992, 1993
     44   1.1     pooka  *	The Regents of the University of California.  All rights reserved.
     45   1.1     pooka  *
     46   1.1     pooka  * This software was developed by the Computer Systems Engineering group
     47   1.1     pooka  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
     48   1.1     pooka  * contributed to Berkeley.
     49   1.1     pooka  *
     50   1.1     pooka  * All advertising materials mentioning features or use of this software
     51   1.1     pooka  * must display the following acknowledgement:
     52   1.1     pooka  *	This product includes software developed by the University of
     53   1.1     pooka  *	California, Lawrence Berkeley Laboratory.
     54   1.1     pooka  *
     55   1.1     pooka  * Redistribution and use in source and binary forms, with or without
     56   1.1     pooka  * modification, are permitted provided that the following conditions
     57   1.1     pooka  * are met:
     58   1.1     pooka  * 1. Redistributions of source code must retain the above copyright
     59   1.1     pooka  *    notice, this list of conditions and the following disclaimer.
     60   1.1     pooka  * 2. Redistributions in binary form must reproduce the above copyright
     61   1.1     pooka  *    notice, this list of conditions and the following disclaimer in the
     62   1.1     pooka  *    documentation and/or other materials provided with the distribution.
     63   1.1     pooka  * 3. Neither the name of the University nor the names of its contributors
     64   1.1     pooka  *    may be used to endorse or promote products derived from this software
     65   1.1     pooka  *    without specific prior written permission.
     66   1.1     pooka  *
     67   1.1     pooka  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     68   1.1     pooka  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     69   1.1     pooka  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     70   1.1     pooka  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     71   1.1     pooka  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     72   1.1     pooka  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     73   1.1     pooka  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     74   1.1     pooka  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     75   1.1     pooka  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     76   1.1     pooka  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     77   1.1     pooka  * SUCH DAMAGE.
     78   1.1     pooka  *
     79   1.1     pooka  *	@(#)kern_subr.c	8.4 (Berkeley) 2/14/95
     80   1.1     pooka  */
     81   1.1     pooka 
     82   1.1     pooka #include <sys/cdefs.h>
     83  1.10   thorpej __KERNEL_RCSID(0, "$NetBSD: subr_copy.c,v 1.10 2019/04/06 15:52:35 thorpej Exp $");
     84   1.9   thorpej 
     85   1.9   thorpej #define	__UFETCHSTORE_PRIVATE
     86   1.9   thorpej #define	__UCAS_PRIVATE
     87   1.1     pooka 
     88   1.1     pooka #include <sys/param.h>
     89   1.1     pooka #include <sys/fcntl.h>
     90   1.1     pooka #include <sys/proc.h>
     91   1.1     pooka #include <sys/systm.h>
     92   1.1     pooka 
     93   1.1     pooka #include <uvm/uvm_extern.h>
     94   1.1     pooka 
/*
 * uio_setup_sysspace:
 *
 *	Mark a uio as addressing kernel (system) space by pointing its
 *	vmspace at the kernel vmspace.
 */
void
uio_setup_sysspace(struct uio *uio)
{

	uio->uio_vmspace = vmspace_kernel();
}
    101   1.1     pooka 
/*
 * uiomove:
 *
 *	Transfer up to `n' bytes between the kernel buffer `buf' and the
 *	I/O vector described by `uio', advancing the uio offset/residual
 *	and iovec cursor as data moves.  UIO_READ copies kernel -> uio
 *	target; UIO_WRITE copies uio target -> kernel.
 *
 *	Returns 0 or an errno from the underlying copy routine; on error
 *	the uio still reflects whatever was transferred before the fault.
 */
int
uiomove(void *buf, size_t n, struct uio *uio)
{
	struct vmspace *vm = uio->uio_vmspace;
	struct iovec *iov;
	size_t cnt;
	int error = 0;
	char *cp = buf;

	/* Copies to/from user space may fault and sleep. */
	ASSERT_SLEEPABLE();

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE);
	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			/* Current iovec exhausted; step to the next one. */
			KASSERT(uio->uio_iovcnt > 0);
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;
		if (!VMSPACE_IS_KERNEL_P(vm)) {
			/*
			 * Long user-space copies: yield the CPU if the
			 * scheduler has flagged this CPU for preemption.
			 */
			if (curcpu()->ci_schedstate.spc_flags &
			    SPCF_SHOULDYIELD)
				preempt();
		}

		if (uio->uio_rw == UIO_READ) {
			error = copyout_vmspace(vm, cp, iov->iov_base,
			    cnt);
		} else {
			error = copyin_vmspace(vm, iov->iov_base, cp,
			    cnt);
		}
		if (error) {
			break;
		}
		/* Advance both the iovec and the aggregate uio state. */
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		KDASSERT(cnt <= n);
		n -= cnt;
	}

	return (error);
}
    152   1.1     pooka 
    153   1.1     pooka /*
    154   1.1     pooka  * Wrapper for uiomove() that validates the arguments against a known-good
    155   1.1     pooka  * kernel buffer.
    156   1.1     pooka  */
    157   1.1     pooka int
    158   1.1     pooka uiomove_frombuf(void *buf, size_t buflen, struct uio *uio)
    159   1.1     pooka {
    160   1.1     pooka 	size_t offset;
    161   1.1     pooka 
    162   1.1     pooka 	if (uio->uio_offset < 0 || /* uio->uio_resid < 0 || */
    163   1.1     pooka 	    (offset = uio->uio_offset) != uio->uio_offset)
    164   1.1     pooka 		return (EINVAL);
    165   1.1     pooka 	if (offset >= buflen)
    166   1.1     pooka 		return (0);
    167   1.1     pooka 	return (uiomove((char *)buf + offset, buflen - offset, uio));
    168   1.1     pooka }
    169   1.1     pooka 
    170   1.1     pooka /*
    171   1.1     pooka  * Give next character to user as result of read.
    172   1.1     pooka  */
    173   1.1     pooka int
    174   1.1     pooka ureadc(int c, struct uio *uio)
    175   1.1     pooka {
    176   1.1     pooka 	struct iovec *iov;
    177   1.1     pooka 
    178   1.1     pooka 	if (uio->uio_resid <= 0)
    179   1.1     pooka 		panic("ureadc: non-positive resid");
    180   1.1     pooka again:
    181   1.1     pooka 	if (uio->uio_iovcnt <= 0)
    182   1.1     pooka 		panic("ureadc: non-positive iovcnt");
    183   1.1     pooka 	iov = uio->uio_iov;
    184   1.1     pooka 	if (iov->iov_len <= 0) {
    185   1.1     pooka 		uio->uio_iovcnt--;
    186   1.1     pooka 		uio->uio_iov++;
    187   1.1     pooka 		goto again;
    188   1.1     pooka 	}
    189   1.1     pooka 	if (!VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
    190   1.9   thorpej 		int error;
    191   1.9   thorpej 		if ((error = ustore_char(iov->iov_base, c)) != 0)
    192   1.9   thorpej 			return (error);
    193   1.1     pooka 	} else {
    194   1.1     pooka 		*(char *)iov->iov_base = c;
    195   1.1     pooka 	}
    196   1.1     pooka 	iov->iov_base = (char *)iov->iov_base + 1;
    197   1.1     pooka 	iov->iov_len--;
    198   1.1     pooka 	uio->uio_resid--;
    199   1.1     pooka 	uio->uio_offset++;
    200   1.1     pooka 	return (0);
    201   1.1     pooka }
    202   1.1     pooka 
    203   1.1     pooka /*
    204   1.1     pooka  * Like copyin(), but operates on an arbitrary vmspace.
    205   1.1     pooka  */
    206   1.1     pooka int
    207   1.1     pooka copyin_vmspace(struct vmspace *vm, const void *uaddr, void *kaddr, size_t len)
    208   1.1     pooka {
    209   1.1     pooka 	struct iovec iov;
    210   1.1     pooka 	struct uio uio;
    211   1.1     pooka 	int error;
    212   1.1     pooka 
    213   1.1     pooka 	if (len == 0)
    214   1.1     pooka 		return (0);
    215   1.1     pooka 
    216   1.3  riastrad 	if (VMSPACE_IS_KERNEL_P(vm)) {
    217   1.3  riastrad 		return kcopy(uaddr, kaddr, len);
    218   1.3  riastrad 	}
    219   1.2  riastrad 	if (__predict_true(vm == curproc->p_vmspace)) {
    220   1.2  riastrad 		return copyin(uaddr, kaddr, len);
    221   1.2  riastrad 	}
    222   1.1     pooka 
    223   1.1     pooka 	iov.iov_base = kaddr;
    224   1.1     pooka 	iov.iov_len = len;
    225   1.1     pooka 	uio.uio_iov = &iov;
    226   1.1     pooka 	uio.uio_iovcnt = 1;
    227   1.1     pooka 	uio.uio_offset = (off_t)(uintptr_t)uaddr;
    228   1.1     pooka 	uio.uio_resid = len;
    229   1.1     pooka 	uio.uio_rw = UIO_READ;
    230   1.1     pooka 	UIO_SETUP_SYSSPACE(&uio);
    231   1.7  christos 	error = uvm_io(&vm->vm_map, &uio, 0);
    232   1.1     pooka 
    233   1.1     pooka 	return (error);
    234   1.1     pooka }
    235   1.1     pooka 
    236   1.1     pooka /*
    237   1.1     pooka  * Like copyout(), but operates on an arbitrary vmspace.
    238   1.1     pooka  */
    239   1.1     pooka int
    240   1.1     pooka copyout_vmspace(struct vmspace *vm, const void *kaddr, void *uaddr, size_t len)
    241   1.1     pooka {
    242   1.1     pooka 	struct iovec iov;
    243   1.1     pooka 	struct uio uio;
    244   1.1     pooka 	int error;
    245   1.1     pooka 
    246   1.1     pooka 	if (len == 0)
    247   1.1     pooka 		return (0);
    248   1.1     pooka 
    249   1.3  riastrad 	if (VMSPACE_IS_KERNEL_P(vm)) {
    250   1.3  riastrad 		return kcopy(kaddr, uaddr, len);
    251   1.3  riastrad 	}
    252   1.2  riastrad 	if (__predict_true(vm == curproc->p_vmspace)) {
    253   1.2  riastrad 		return copyout(kaddr, uaddr, len);
    254   1.2  riastrad 	}
    255   1.1     pooka 
    256   1.1     pooka 	iov.iov_base = __UNCONST(kaddr); /* XXXUNCONST cast away const */
    257   1.1     pooka 	iov.iov_len = len;
    258   1.1     pooka 	uio.uio_iov = &iov;
    259   1.1     pooka 	uio.uio_iovcnt = 1;
    260   1.1     pooka 	uio.uio_offset = (off_t)(uintptr_t)uaddr;
    261   1.1     pooka 	uio.uio_resid = len;
    262   1.1     pooka 	uio.uio_rw = UIO_WRITE;
    263   1.1     pooka 	UIO_SETUP_SYSSPACE(&uio);
    264   1.7  christos 	error = uvm_io(&vm->vm_map, &uio, 0);
    265   1.1     pooka 
    266   1.1     pooka 	return (error);
    267   1.1     pooka }
    268   1.1     pooka 
    269   1.1     pooka /*
    270   1.1     pooka  * Like copyin(), but operates on an arbitrary process.
    271   1.1     pooka  */
    272   1.1     pooka int
    273   1.1     pooka copyin_proc(struct proc *p, const void *uaddr, void *kaddr, size_t len)
    274   1.1     pooka {
    275   1.1     pooka 	struct vmspace *vm;
    276   1.1     pooka 	int error;
    277   1.1     pooka 
    278   1.1     pooka 	error = proc_vmspace_getref(p, &vm);
    279   1.1     pooka 	if (error) {
    280   1.1     pooka 		return error;
    281   1.1     pooka 	}
    282   1.1     pooka 	error = copyin_vmspace(vm, uaddr, kaddr, len);
    283   1.1     pooka 	uvmspace_free(vm);
    284   1.1     pooka 
    285   1.1     pooka 	return error;
    286   1.1     pooka }
    287   1.1     pooka 
    288   1.1     pooka /*
    289   1.1     pooka  * Like copyout(), but operates on an arbitrary process.
    290   1.1     pooka  */
    291   1.1     pooka int
    292   1.1     pooka copyout_proc(struct proc *p, const void *kaddr, void *uaddr, size_t len)
    293   1.1     pooka {
    294   1.1     pooka 	struct vmspace *vm;
    295   1.1     pooka 	int error;
    296   1.1     pooka 
    297   1.1     pooka 	error = proc_vmspace_getref(p, &vm);
    298   1.1     pooka 	if (error) {
    299   1.1     pooka 		return error;
    300   1.1     pooka 	}
    301   1.1     pooka 	error = copyout_vmspace(vm, kaddr, uaddr, len);
    302   1.1     pooka 	uvmspace_free(vm);
    303   1.1     pooka 
    304   1.1     pooka 	return error;
    305   1.1     pooka }
    306   1.1     pooka 
    307   1.1     pooka /*
    308   1.8       chs  * Like copyin(), but operates on an arbitrary pid.
    309   1.8       chs  */
    310   1.8       chs int
    311   1.8       chs copyin_pid(pid_t pid, const void *uaddr, void *kaddr, size_t len)
    312   1.8       chs {
    313   1.8       chs 	struct proc *p;
    314   1.8       chs 	struct vmspace *vm;
    315   1.8       chs 	int error;
    316   1.8       chs 
    317   1.8       chs 	mutex_enter(proc_lock);
    318   1.8       chs 	p = proc_find(pid);
    319   1.8       chs 	if (p == NULL) {
    320   1.8       chs 		mutex_exit(proc_lock);
    321   1.8       chs 		return ESRCH;
    322   1.8       chs 	}
    323   1.8       chs 	mutex_enter(p->p_lock);
    324   1.8       chs 	proc_vmspace_getref(p, &vm);
    325   1.8       chs 	mutex_exit(p->p_lock);
    326   1.8       chs 	mutex_exit(proc_lock);
    327   1.8       chs 
    328   1.8       chs 	error = copyin_vmspace(vm, uaddr, kaddr, len);
    329   1.8       chs 
    330   1.8       chs 	uvmspace_free(vm);
    331   1.8       chs 	return error;
    332   1.8       chs }
    333   1.8       chs 
    334   1.8       chs /*
    335   1.1     pooka  * Like copyin(), except it operates on kernel addresses when the FKIOCTL
    336   1.1     pooka  * flag is passed in `ioctlflags' from the ioctl call.
    337   1.1     pooka  */
    338   1.1     pooka int
    339   1.1     pooka ioctl_copyin(int ioctlflags, const void *src, void *dst, size_t len)
    340   1.1     pooka {
    341   1.1     pooka 	if (ioctlflags & FKIOCTL)
    342   1.1     pooka 		return kcopy(src, dst, len);
    343   1.1     pooka 	return copyin(src, dst, len);
    344   1.1     pooka }
    345   1.1     pooka 
    346   1.1     pooka /*
    347   1.1     pooka  * Like copyout(), except it operates on kernel addresses when the FKIOCTL
    348   1.1     pooka  * flag is passed in `ioctlflags' from the ioctl call.
    349   1.1     pooka  */
    350   1.1     pooka int
    351   1.1     pooka ioctl_copyout(int ioctlflags, const void *src, void *dst, size_t len)
    352   1.1     pooka {
    353   1.1     pooka 	if (ioctlflags & FKIOCTL)
    354   1.1     pooka 		return kcopy(src, dst, len);
    355   1.1     pooka 	return copyout(src, dst, len);
    356   1.1     pooka }
    357   1.9   thorpej 
    358   1.9   thorpej /*
    359   1.9   thorpej  * User-space CAS / fetch / store
    360   1.9   thorpej  */
    361   1.9   thorpej 
    362   1.9   thorpej #ifdef __NO_STRICT_ALIGNMENT
    363   1.9   thorpej #define	CHECK_ALIGNMENT(x)	__nothing
    364   1.9   thorpej #else /* ! __NO_STRICT_ALIGNMENT */
/*
 * ufetchstore_aligned:
 *
 *	Return true iff `uaddr' is naturally aligned for an access of
 *	the given power-of-two `size'.
 */
static bool
ufetchstore_aligned(uintptr_t uaddr, size_t size)
{
	const uintptr_t mask = (uintptr_t)size - 1;

	return (uaddr & mask) == 0;
}
    370   1.9   thorpej 
    371   1.9   thorpej #define	CHECK_ALIGNMENT()						\
    372   1.9   thorpej do {									\
    373   1.9   thorpej 	if (!ufetchstore_aligned((uintptr_t)uaddr, sizeof(*uaddr)))	\
    374   1.9   thorpej 		return EFAULT;						\
    375   1.9   thorpej } while (/*CONSTCOND*/0)
    376   1.9   thorpej #endif /* __NO_STRICT_ALIGNMENT */
    377   1.9   thorpej 
    378  1.10   thorpej /*
    379  1.10   thorpej  * __HAVE_UCAS_FULL platforms provide _ucas_32() and _ucas_64() themselves.
 * _RUMPKERNEL also provides its own _ucas_32() and _ucas_64().
    381  1.10   thorpej  *
    382  1.10   thorpej  * In all other cases, we provide generic implementations that work on
    383  1.10   thorpej  * all platforms.
    384  1.10   thorpej  */
    385  1.10   thorpej 
    386  1.10   thorpej #if !defined(__HAVE_UCAS_FULL) && !defined(_RUMPKERNEL)
    387   1.9   thorpej #if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
    388   1.9   thorpej #include <sys/atomic.h>
    389   1.9   thorpej #include <sys/cpu.h>
    390   1.9   thorpej #include <sys/once.h>
    391   1.9   thorpej #include <sys/mutex.h>
    392   1.9   thorpej #include <sys/ipi.h>
    393   1.9   thorpej 
    394   1.9   thorpej static int ucas_critical_splcookie;
    395   1.9   thorpej static volatile u_int ucas_critical_pausing_cpus;
    396   1.9   thorpej static u_int ucas_critical_ipi;
    397   1.9   thorpej static ONCE_DECL(ucas_critical_init_once)
    398   1.9   thorpej 
/*
 * ucas_critical_cpu_gate:
 *
 *	IPI handler run on every other CPU while a generic ucas
 *	operation is in progress.  Each CPU checks in by decrementing
 *	ucas_critical_pausing_cpus, then spins until the initiating
 *	CPU opens the gate by setting it to (u_int)-1 in
 *	ucas_critical_exit().
 */
static void
ucas_critical_cpu_gate(void *arg __unused)
{
	int count = SPINLOCK_BACKOFF_MIN;

	KASSERT(ucas_critical_pausing_cpus > 0);
	atomic_dec_uint(&ucas_critical_pausing_cpus);
	while (ucas_critical_pausing_cpus != (u_int)-1) {
		SPINLOCK_BACKOFF(count);
	}
}
    410   1.9   thorpej 
/*
 * ucas_critical_init:
 *
 *	Once-only setup (via RUN_ONCE): register the IPI used to park
 *	the other CPUs behind ucas_critical_cpu_gate().
 */
static int
ucas_critical_init(void)
{
	ucas_critical_ipi = ipi_register(ucas_critical_cpu_gate, NULL);
	return 0;
}
    417   1.9   thorpej 
    418   1.9   thorpej static void
    419   1.9   thorpej ucas_critical_wait(void)
    420   1.9   thorpej {
    421   1.9   thorpej 	int count = SPINLOCK_BACKOFF_MIN;
    422   1.9   thorpej 
    423   1.9   thorpej 	while (ucas_critical_pausing_cpus > 0) {
    424   1.9   thorpej 		SPINLOCK_BACKOFF(count);
    425   1.9   thorpej 	}
    426   1.9   thorpej }
    427   1.9   thorpej #endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */
    428   1.9   thorpej 
/*
 * ucas_critical_enter:
 *
 *	Begin the critical section for a generic (non-MP-safe-MD)
 *	user-space CAS.  On a MULTIPROCESSOR kernel with more than one
 *	CPU, stop all other CPUs behind an IPI gate so the fetch/store
 *	pair in _ucas_32()/_ucas_64() cannot interleave with another
 *	CPU's access; otherwise just disable kernel preemption.
 */
static inline void
ucas_critical_enter(lwp_t * const l)
{

#if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
	if (ncpu > 1) {
		RUN_ONCE(&ucas_critical_init_once, ucas_critical_init);

		/*
		 * Acquire the mutex first, then go to splhigh() and
		 * broadcast the IPI to lock all of the other CPUs
		 * behind the gate.
		 *
		 * N.B. Going to splhigh() implicitly disables preemption,
		 * so there's no need to do it explicitly.
		 */
		mutex_enter(&cpu_lock);
		ucas_critical_splcookie = splhigh();
		ucas_critical_pausing_cpus = ncpu - 1;
		membar_enter();

		ipi_trigger_broadcast(ucas_critical_ipi, true);
		ucas_critical_wait();	/* all other CPUs now parked */
		return;
	}
#endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */

	KPREEMPT_DISABLE(l);
}
    458   1.9   thorpej 
/*
 * ucas_critical_exit:
 *
 *	Leave the ucas critical section: open the gate (setting
 *	ucas_critical_pausing_cpus to (u_int)-1 releases the CPUs
 *	spinning in ucas_critical_cpu_gate()), restore the previous
 *	IPL, and drop cpu_lock.  In the uniprocessor / MD-MP-safe
 *	case, simply re-enable preemption.
 */
static inline void
ucas_critical_exit(lwp_t * const l)
{

#if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
	if (ncpu > 1) {
		membar_exit();	/* order our stores before the release */
		ucas_critical_pausing_cpus = (u_int)-1;
		splx(ucas_critical_splcookie);
		mutex_exit(&cpu_lock);
		return;
	}
#endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */

	KPREEMPT_ENABLE(l);
}
    475   1.9   thorpej 
/*
 * _ucas_32:
 *
 *	Generic 32-bit user-space compare-and-swap for platforms that
 *	lack a native MP-safe implementation: performed as an unlocked
 *	fetch + conditional store while all other CPUs (or preemption)
 *	are held off by the ucas critical section.  *ret receives the
 *	value found at uaddr; the new value was stored iff the result
 *	is 0 and *ret == old.
 */
int
_ucas_32(volatile uint32_t *uaddr, uint32_t old, uint32_t new, uint32_t *ret)
{
	lwp_t * const l = curlwp;
	uint32_t *uva = ((void *)(uintptr_t)uaddr);
	int error;

	/*
	 * Wire the user address down to avoid taking a page fault during
	 * the critical section.
	 */
	error = uvm_vslock(l->l_proc->p_vmspace, uva, sizeof(*uaddr),
			   VM_PROT_READ | VM_PROT_WRITE);
	if (error)
		return error;

	ucas_critical_enter(l);
	error = _ufetch_32(uva, ret);
	if (error == 0 && *ret == old) {
		error = _ustore_32(uva, new);
	}
	ucas_critical_exit(l);

	uvm_vsunlock(l->l_proc->p_vmspace, uva, sizeof(*uaddr));

	return error;
}
    503   1.9   thorpej 
    504   1.9   thorpej #ifdef _LP64
/*
 * _ucas_64:
 *
 *	64-bit flavor of _ucas_32(); see the comments there.  Only
 *	built on _LP64 platforms.
 */
int
_ucas_64(volatile uint64_t *uaddr, uint64_t old, uint64_t new, uint64_t *ret)
{
	lwp_t * const l = curlwp;
	uint64_t *uva = ((void *)(uintptr_t)uaddr);
	int error;

	/*
	 * Wire the user address down to avoid taking a page fault during
	 * the critical section.
	 */
	error = uvm_vslock(l->l_proc->p_vmspace, uva, sizeof(*uaddr),
			   VM_PROT_READ | VM_PROT_WRITE);
	if (error)
		return error;

	ucas_critical_enter(l);
	error = _ufetch_64(uva, ret);
	if (error == 0 && *ret == old) {
		error = _ustore_64(uva, new);
	}
	ucas_critical_exit(l);

	uvm_vsunlock(l->l_proc->p_vmspace, uva, sizeof(*uaddr));

	return error;
}
    532   1.9   thorpej #endif /* _LP64 */
    533  1.10   thorpej #endif /* ! __HAVE_UCAS_FULL && ! _RUMPKERNEL */
    534   1.9   thorpej 
/*
 * ucas_32:
 *
 *	Public 32-bit user-space compare-and-swap entry point.
 *	Dispatches to the MD MP-safe implementation when available and
 *	needed, otherwise to the generic _ucas_32().
 */
int
ucas_32(volatile uint32_t *uaddr, uint32_t old, uint32_t new, uint32_t *ret)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();	/* may return EFAULT on misaligned uaddr */
#if defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
	if (ncpu > 1) {
		return _ucas_32_mp(uaddr, old, new, ret);
	}
#endif /* __HAVE_UCAS_MP && MULTIPROCESSOR */
	return _ucas_32(uaddr, old, new, ret);
}
    548   1.9   thorpej 
    549   1.9   thorpej #ifdef _LP64
/*
 * ucas_64:
 *
 *	Public 64-bit user-space compare-and-swap entry point
 *	(_LP64 only); see ucas_32().
 */
int
ucas_64(volatile uint64_t *uaddr, uint64_t old, uint64_t new, uint64_t *ret)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();	/* may return EFAULT on misaligned uaddr */
#if defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
	if (ncpu > 1) {
		return _ucas_64_mp(uaddr, old, new, ret);
	}
#endif /* __HAVE_UCAS_MP && MULTIPROCESSOR */
	return _ucas_64(uaddr, old, new, ret);
}
    563   1.9   thorpej #endif /* _LP64 */
    564   1.9   thorpej 
/*
 * C-type-named ucas aliases: ucas_int is always the 32-bit flavor;
 * ucas_ptr follows the pointer width selected by _LP64.
 */
__strong_alias(ucas_int,ucas_32);
#ifdef _LP64
__strong_alias(ucas_ptr,ucas_64);
#else
__strong_alias(ucas_ptr,ucas_32);
#endif /* _LP64 */
    571   1.9   thorpej 
/*
 * ufetch_8:
 *
 *	Fetch one byte from user space via the MD _ufetch_8() back-end.
 *	Returns 0 or an error (e.g. EFAULT).
 */
int
ufetch_8(const uint8_t *uaddr, uint8_t *valp)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();	/* trivially satisfied for size 1 */
	return _ufetch_8(uaddr, valp);
}
    580   1.9   thorpej 
/*
 * ufetch_16:
 *
 *	Fetch a 16-bit word from user space via the MD _ufetch_16()
 *	back-end.  On strict-alignment platforms, a misaligned uaddr
 *	returns EFAULT (see CHECK_ALIGNMENT).
 */
int
ufetch_16(const uint16_t *uaddr, uint16_t *valp)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ufetch_16(uaddr, valp);
}
    589   1.9   thorpej 
/*
 * ufetch_32:
 *
 *	Fetch a 32-bit word from user space via the MD _ufetch_32()
 *	back-end.  On strict-alignment platforms, a misaligned uaddr
 *	returns EFAULT (see CHECK_ALIGNMENT).
 */
int
ufetch_32(const uint32_t *uaddr, uint32_t *valp)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ufetch_32(uaddr, valp);
}
    598   1.9   thorpej 
    599   1.9   thorpej #ifdef _LP64
/*
 * ufetch_64:
 *
 *	Fetch a 64-bit word from user space (_LP64 only) via the MD
 *	_ufetch_64() back-end.  On strict-alignment platforms, a
 *	misaligned uaddr returns EFAULT (see CHECK_ALIGNMENT).
 */
int
ufetch_64(const uint64_t *uaddr, uint64_t *valp)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ufetch_64(uaddr, valp);
}
    608   1.9   thorpej #endif /* _LP64 */
    609   1.9   thorpej 
/*
 * C-type-named ufetch aliases; long and pointer widths follow _LP64.
 */
__strong_alias(ufetch_char,ufetch_8);
__strong_alias(ufetch_short,ufetch_16);
__strong_alias(ufetch_int,ufetch_32);
#ifdef _LP64
__strong_alias(ufetch_long,ufetch_64);
__strong_alias(ufetch_ptr,ufetch_64);
#else
__strong_alias(ufetch_long,ufetch_32);
__strong_alias(ufetch_ptr,ufetch_32);
#endif /* _LP64 */
    620   1.9   thorpej 
/*
 * ustore_8:
 *
 *	Store one byte to user space via the MD _ustore_8() back-end.
 *	Returns 0 or an error (e.g. EFAULT).
 */
int
ustore_8(uint8_t *uaddr, uint8_t val)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();	/* trivially satisfied for size 1 */
	return _ustore_8(uaddr, val);
}
    629   1.9   thorpej 
/*
 * ustore_16:
 *
 *	Store a 16-bit word to user space via the MD _ustore_16()
 *	back-end.  On strict-alignment platforms, a misaligned uaddr
 *	returns EFAULT (see CHECK_ALIGNMENT).
 */
int
ustore_16(uint16_t *uaddr, uint16_t val)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ustore_16(uaddr, val);
}
    638   1.9   thorpej 
/*
 * ustore_32:
 *
 *	Store a 32-bit word to user space via the MD _ustore_32()
 *	back-end.  On strict-alignment platforms, a misaligned uaddr
 *	returns EFAULT (see CHECK_ALIGNMENT).
 */
int
ustore_32(uint32_t *uaddr, uint32_t val)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ustore_32(uaddr, val);
}
    647   1.9   thorpej 
    648   1.9   thorpej #ifdef _LP64
/*
 * ustore_64:
 *
 *	Store a 64-bit word to user space (_LP64 only) via the MD
 *	_ustore_64() back-end.  On strict-alignment platforms, a
 *	misaligned uaddr returns EFAULT (see CHECK_ALIGNMENT).
 */
int
ustore_64(uint64_t *uaddr, uint64_t val)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ustore_64(uaddr, val);
}
    657   1.9   thorpej #endif /* _LP64 */
    658   1.9   thorpej 
/*
 * C-type-named ustore aliases; long and pointer widths follow _LP64.
 */
__strong_alias(ustore_char,ustore_8);
__strong_alias(ustore_short,ustore_16);
__strong_alias(ustore_int,ustore_32);
#ifdef _LP64
__strong_alias(ustore_long,ustore_64);
__strong_alias(ustore_ptr,ustore_64);
#else
__strong_alias(ustore_long,ustore_32);
__strong_alias(ustore_ptr,ustore_32);
#endif /* _LP64 */
    669