/*	$NetBSD: subr_copy.c,v 1.8.2.2 2020/04/08 14:08:52 martin Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 1999, 2002, 2007, 2008, 2019
 *	The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.4 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_copy.c,v 1.8.2.2 2020/04/08 14:08:52 martin Exp $");

#define	__UFETCHSTORE_PRIVATE
#define	__UCAS_PRIVATE

#include <sys/param.h>
#include <sys/fcntl.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <uvm/uvm_extern.h>

void
uio_setup_sysspace(struct uio *uio)
{

	uio->uio_vmspace = vmspace_kernel();
}

int
uiomove(void *buf, size_t n, struct uio *uio)
{
	struct vmspace *vm = uio->uio_vmspace;
	struct iovec *iov;
	size_t cnt;
	int error = 0;
	char *cp = buf;

	ASSERT_SLEEPABLE();

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE);
	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			KASSERT(uio->uio_iovcnt > 0);
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;
		if (!VMSPACE_IS_KERNEL_P(vm)) {
			preempt_point();
		}

		if (uio->uio_rw == UIO_READ) {
			error = copyout_vmspace(vm, cp, iov->iov_base,
			    cnt);
		} else {
			error = copyin_vmspace(vm, iov->iov_base, cp,
			    cnt);
		}
		if (error) {
			break;
		}
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		KDASSERT(cnt <= n);
		n -= cnt;
	}

	return (error);
}
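
/*
 * Illustrative sketch (guarded out) of the usual uiomove() calling pattern:
 * a read-side routine clamps the transfer to what remains in a kernel
 * buffer past uio_offset and lets uiomove() walk the iovecs.  The names
 * example_softc, sc_buf, sc_buflen and example_read() are hypothetical and
 * exist only to show the pattern.
 */
#if 0	/* illustrative sketch only */
struct example_softc {
	char	*sc_buf;	/* hypothetical driver data buffer */
	size_t	 sc_buflen;	/* valid bytes in sc_buf */
};

static int
example_read(struct example_softc *sc, struct uio *uio)
{
	size_t off, cnt;

	if (uio->uio_offset < 0)
		return EINVAL;
	off = uio->uio_offset;
	if (off >= sc->sc_buflen)
		return 0;		/* nothing left: report EOF */
	cnt = MIN(sc->sc_buflen - off, uio->uio_resid);

	/* uiomove() advances uio_offset/uio_resid and the iovecs itself. */
	return uiomove(sc->sc_buf + off, cnt, uio);
}
#endif	/* illustrative sketch only */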

/*
 * Wrapper for uiomove() that validates the arguments against a known-good
 * kernel buffer.
 */
int
uiomove_frombuf(void *buf, size_t buflen, struct uio *uio)
{
	size_t offset;

	if (uio->uio_offset < 0 || /* uio->uio_resid < 0 || */
	    (offset = uio->uio_offset) != uio->uio_offset)
		return (EINVAL);
	if (offset >= buflen)
		return (0);
	return (uiomove((char *)buf + offset, buflen - offset, uio));
}
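
/*
 * Illustrative sketch (guarded out): uiomove_frombuf() performs the offset
 * validation and clamping shown in the previous sketch, so a read routine
 * backed by a fixed kernel buffer reduces to a single call.  example_softc
 * is the same hypothetical driver structure as above.
 */
#if 0	/* illustrative sketch only */
static int
example_read_frombuf(struct example_softc *sc, struct uio *uio)
{

	return uiomove_frombuf(sc->sc_buf, sc->sc_buflen, uio);
}
#endif	/* illustrative sketch only */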

/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
	struct iovec *iov;

	if (uio->uio_resid <= 0)
		panic("ureadc: non-positive resid");
again:
	if (uio->uio_iovcnt <= 0)
		panic("ureadc: non-positive iovcnt");
	iov = uio->uio_iov;
	if (iov->iov_len <= 0) {
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	if (!VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
		int error;
		if ((error = ustore_char(iov->iov_base, c)) != 0)
			return (error);
	} else {
		*(char *)iov->iov_base = c;
	}
	iov->iov_base = (char *)iov->iov_base + 1;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}
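
/*
 * Illustrative sketch (guarded out): ureadc() suits read routines that
 * produce output one byte at a time, e.g. draining a small ring buffer.
 * example_getc() is a hypothetical helper returning the next byte or -1
 * when the buffer is empty.
 */
#if 0	/* illustrative sketch only */
extern int example_getc(void);		/* hypothetical byte producer */

static int
example_read_bytewise(struct uio *uio)
{
	int c, error = 0;

	while (uio->uio_resid > 0 && (c = example_getc()) != -1) {
		if ((error = ureadc(c, uio)) != 0)
			break;
	}
	return error;
}
#endif	/* illustrative sketch only */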

/*
 * Like copyin(), but operates on an arbitrary vmspace.
 */
int
copyin_vmspace(struct vmspace *vm, const void *uaddr, void *kaddr, size_t len)
{
	struct iovec iov;
	struct uio uio;
	int error;

	if (len == 0)
		return (0);

	if (VMSPACE_IS_KERNEL_P(vm)) {
		return kcopy(uaddr, kaddr, len);
	}
	if (__predict_true(vm == curproc->p_vmspace)) {
		return copyin(uaddr, kaddr, len);
	}

	iov.iov_base = kaddr;
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)(uintptr_t)uaddr;
	uio.uio_resid = len;
	uio.uio_rw = UIO_READ;
	UIO_SETUP_SYSSPACE(&uio);
	error = uvm_io(&vm->vm_map, &uio, 0);

	return (error);
}

/*
 * Like copyout(), but operates on an arbitrary vmspace.
 */
int
copyout_vmspace(struct vmspace *vm, const void *kaddr, void *uaddr, size_t len)
{
	struct iovec iov;
	struct uio uio;
	int error;

	if (len == 0)
		return (0);

	if (VMSPACE_IS_KERNEL_P(vm)) {
		return kcopy(kaddr, uaddr, len);
	}
	if (__predict_true(vm == curproc->p_vmspace)) {
		return copyout(kaddr, uaddr, len);
	}

	iov.iov_base = __UNCONST(kaddr); /* XXXUNCONST cast away const */
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)(uintptr_t)uaddr;
	uio.uio_resid = len;
	uio.uio_rw = UIO_WRITE;
	UIO_SETUP_SYSSPACE(&uio);
	error = uvm_io(&vm->vm_map, &uio, 0);

	return (error);
}
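
/*
 * Illustrative sketch (guarded out): the _vmspace variants matter when the
 * target address space is not necessarily curproc's, for instance when a
 * tracer swaps a word in a tracee whose vmspace reference the caller
 * already holds (copyin_proc()/copyout_proc() below obtain that reference
 * for you).  example_swap_word() is hypothetical.
 */
#if 0	/* illustrative sketch only */
static int
example_swap_word(struct vmspace *vm, void *uaddr, long newval, long *oldvalp)
{
	int error;

	error = copyin_vmspace(vm, uaddr, oldvalp, sizeof(*oldvalp));
	if (error == 0)
		error = copyout_vmspace(vm, &newval, uaddr, sizeof(newval));
	return error;
}
#endif	/* illustrative sketch only */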

/*
 * Like copyin(), but operates on an arbitrary process.
 */
int
copyin_proc(struct proc *p, const void *uaddr, void *kaddr, size_t len)
{
	struct vmspace *vm;
	int error;

	error = proc_vmspace_getref(p, &vm);
	if (error) {
		return error;
	}
	error = copyin_vmspace(vm, uaddr, kaddr, len);
	uvmspace_free(vm);

	return error;
}

/*
 * Like copyout(), but operates on an arbitrary process.
 */
int
copyout_proc(struct proc *p, const void *kaddr, void *uaddr, size_t len)
{
	struct vmspace *vm;
	int error;

	error = proc_vmspace_getref(p, &vm);
	if (error) {
		return error;
	}
	error = copyout_vmspace(vm, kaddr, uaddr, len);
	uvmspace_free(vm);

	return error;
}

/*
 * Like copyin(), but operates on an arbitrary pid.
 */
int
copyin_pid(pid_t pid, const void *uaddr, void *kaddr, size_t len)
{
	struct proc *p;
	struct vmspace *vm;
	int error;

	mutex_enter(proc_lock);
	p = proc_find(pid);
	if (p == NULL) {
		mutex_exit(proc_lock);
		return ESRCH;
	}
	mutex_enter(p->p_lock);
	error = proc_vmspace_getref(p, &vm);
	mutex_exit(p->p_lock);
	mutex_exit(proc_lock);

	if (error == 0) {
		error = copyin_vmspace(vm, uaddr, kaddr, len);
		uvmspace_free(vm);
	}
	return error;
}

/*
 * Like copyin(), except it operates on kernel addresses when the FKIOCTL
 * flag is passed in `ioctlflags' from the ioctl call.
 */
int
ioctl_copyin(int ioctlflags, const void *src, void *dst, size_t len)
{
	if (ioctlflags & FKIOCTL)
		return kcopy(src, dst, len);
	return copyin(src, dst, len);
}

/*
 * Like copyout(), except it operates on kernel addresses when the FKIOCTL
 * flag is passed in `ioctlflags' from the ioctl call.
 */
int
ioctl_copyout(int ioctlflags, const void *src, void *dst, size_t len)
{
	if (ioctlflags & FKIOCTL)
		return kcopy(src, dst, len);
	return copyout(src, dst, len);
}
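
/*
 * Illustrative sketch (guarded out): these helpers are for ioctl argument
 * structures that embed a pointer to a further buffer.  The handler copies
 * through that pointer with ioctl_copyin()/ioctl_copyout(), forwarding the
 * ioctl flags so that in-kernel callers passing FKIOCTL (and therefore
 * kernel addresses) keep working.  struct example_list and
 * example_fill_list() are hypothetical.
 */
#if 0	/* illustrative sketch only */
struct example_list {
	void	*el_buf;	/* caller-supplied buffer */
	size_t	 el_len;	/* size of that buffer */
};

static int
example_fill_list(int ioctlflags, struct example_list *el,
    const void *kdata, size_t kdatalen)
{
	size_t len = MIN(el->el_len, kdatalen);

	/* Copy kernel-generated data out through the embedded pointer. */
	return ioctl_copyout(ioctlflags, kdata, el->el_buf, len);
}
#endif	/* illustrative sketch only */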

/*
 * User-space CAS / fetch / store
 */

#ifdef __NO_STRICT_ALIGNMENT
#define	CHECK_ALIGNMENT(x)	__nothing
#else /* ! __NO_STRICT_ALIGNMENT */
static bool
ufetchstore_aligned(uintptr_t uaddr, size_t size)
{
	return (uaddr & (size - 1)) == 0;
}

#define	CHECK_ALIGNMENT()						\
do {									\
	if (!ufetchstore_aligned((uintptr_t)uaddr, sizeof(*uaddr)))	\
		return EFAULT;						\
} while (/*CONSTCOND*/0)
#endif /* __NO_STRICT_ALIGNMENT */

/*
 * __HAVE_UCAS_FULL platforms provide _ucas_32() and _ucas_64() themselves.
 * _RUMPKERNEL also provides its own _ucas_32() and _ucas_64().
 *
 * In all other cases, we provide generic implementations that work on
 * all platforms.
 */

#if !defined(__HAVE_UCAS_FULL) && !defined(_RUMPKERNEL)
#if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/once.h>
#include <sys/mutex.h>
#include <sys/ipi.h>

static int ucas_critical_splcookie;
static volatile u_int ucas_critical_pausing_cpus;
static u_int ucas_critical_ipi;
static ONCE_DECL(ucas_critical_init_once)

static void
ucas_critical_cpu_gate(void *arg __unused)
{
	int count = SPINLOCK_BACKOFF_MIN;

	KASSERT(ucas_critical_pausing_cpus > 0);
	atomic_dec_uint(&ucas_critical_pausing_cpus);
	while (ucas_critical_pausing_cpus != (u_int)-1) {
		SPINLOCK_BACKOFF(count);
	}
}

static int
ucas_critical_init(void)
{
	ucas_critical_ipi = ipi_register(ucas_critical_cpu_gate, NULL);
	return 0;
}

static void
ucas_critical_wait(void)
{
	int count = SPINLOCK_BACKOFF_MIN;

	while (ucas_critical_pausing_cpus > 0) {
		SPINLOCK_BACKOFF(count);
	}
}
#endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */

static inline void
ucas_critical_enter(lwp_t * const l)
{

#if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
	if (ncpu > 1) {
		RUN_ONCE(&ucas_critical_init_once, ucas_critical_init);

		/*
		 * Acquire the mutex first, then go to splhigh() and
		 * broadcast the IPI to lock all of the other CPUs
		 * behind the gate.
		 *
		 * N.B. Going to splhigh() implicitly disables preemption,
		 * so there's no need to do it explicitly.
		 */
		mutex_enter(&cpu_lock);
		ucas_critical_splcookie = splhigh();
		ucas_critical_pausing_cpus = ncpu - 1;
		membar_enter();

		ipi_trigger_broadcast(ucas_critical_ipi, true);
		ucas_critical_wait();
		return;
	}
#endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */

	KPREEMPT_DISABLE(l);
}

static inline void
ucas_critical_exit(lwp_t * const l)
{

#if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
	if (ncpu > 1) {
		membar_exit();
		ucas_critical_pausing_cpus = (u_int)-1;
		splx(ucas_critical_splcookie);
		mutex_exit(&cpu_lock);
		return;
	}
#endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */

	KPREEMPT_ENABLE(l);
}

int
_ucas_32(volatile uint32_t *uaddr, uint32_t old, uint32_t new, uint32_t *ret)
{
	lwp_t * const l = curlwp;
	uint32_t *uva = ((void *)(uintptr_t)uaddr);
	int error;

	/*
	 * Wire the user address down to avoid taking a page fault during
	 * the critical section.
	 */
	error = uvm_vslock(l->l_proc->p_vmspace, uva, sizeof(*uaddr),
			   VM_PROT_READ | VM_PROT_WRITE);
	if (error)
		return error;

	ucas_critical_enter(l);
	error = _ufetch_32(uva, ret);
	if (error == 0 && *ret == old) {
		error = _ustore_32(uva, new);
	}
	ucas_critical_exit(l);

	uvm_vsunlock(l->l_proc->p_vmspace, uva, sizeof(*uaddr));

	return error;
}

#ifdef _LP64
int
_ucas_64(volatile uint64_t *uaddr, uint64_t old, uint64_t new, uint64_t *ret)
{
	lwp_t * const l = curlwp;
	uint64_t *uva = ((void *)(uintptr_t)uaddr);
	int error;

	/*
	 * Wire the user address down to avoid taking a page fault during
	 * the critical section.
	 */
	error = uvm_vslock(l->l_proc->p_vmspace, uva, sizeof(*uaddr),
			   VM_PROT_READ | VM_PROT_WRITE);
	if (error)
		return error;

	ucas_critical_enter(l);
	error = _ufetch_64(uva, ret);
	if (error == 0 && *ret == old) {
		error = _ustore_64(uva, new);
	}
	ucas_critical_exit(l);

	uvm_vsunlock(l->l_proc->p_vmspace, uva, sizeof(*uaddr));

	return error;
}
#endif /* _LP64 */
#endif /* ! __HAVE_UCAS_FULL && ! _RUMPKERNEL */

int
ucas_32(volatile uint32_t *uaddr, uint32_t old, uint32_t new, uint32_t *ret)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
#if (defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)) && \
    !defined(_RUMPKERNEL)
	if (ncpu > 1) {
		return _ucas_32_mp(uaddr, old, new, ret);
	}
#endif /* __HAVE_UCAS_MP && MULTIPROCESSOR */
	return _ucas_32(uaddr, old, new, ret);
}

#ifdef _LP64
int
ucas_64(volatile uint64_t *uaddr, uint64_t old, uint64_t new, uint64_t *ret)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
#if (defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)) && \
    !defined(_RUMPKERNEL)
	if (ncpu > 1) {
		return _ucas_64_mp(uaddr, old, new, ret);
	}
#endif /* __HAVE_UCAS_MP && MULTIPROCESSOR */
	return _ucas_64(uaddr, old, new, ret);
}
#endif /* _LP64 */

__strong_alias(ucas_int,ucas_32);
#ifdef _LP64
__strong_alias(ucas_ptr,ucas_64);
#else
__strong_alias(ucas_ptr,ucas_32);
#endif /* _LP64 */
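
/*
 * Illustrative sketch (guarded out): ucas_32()/ucas_int() give the kernel
 * an atomic compare-and-swap on a word in the current process's user
 * space, the building block for futex-style operations.  Here a user
 * counter is incremented atomically with respect to userland, retrying if
 * another thread changed the word between attempts.  example_uinc() is
 * hypothetical.
 */
#if 0	/* illustrative sketch only */
static int
example_uinc(volatile uint32_t *uaddr)
{
	uint32_t expected = 0, actual;
	int error;

	for (;;) {
		error = ucas_32(uaddr, expected, expected + 1, &actual);
		if (error != 0 || actual == expected)
			return error;	/* done, or e.g. EFAULT */
		expected = actual;	/* lost the race; retry */
	}
}
#endif	/* illustrative sketch only */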

int
ufetch_8(const uint8_t *uaddr, uint8_t *valp)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ufetch_8(uaddr, valp);
}

int
ufetch_16(const uint16_t *uaddr, uint16_t *valp)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ufetch_16(uaddr, valp);
}

int
ufetch_32(const uint32_t *uaddr, uint32_t *valp)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ufetch_32(uaddr, valp);
}

#ifdef _LP64
int
ufetch_64(const uint64_t *uaddr, uint64_t *valp)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ufetch_64(uaddr, valp);
}
#endif /* _LP64 */

__strong_alias(ufetch_char,ufetch_8);
__strong_alias(ufetch_short,ufetch_16);
__strong_alias(ufetch_int,ufetch_32);
#ifdef _LP64
__strong_alias(ufetch_long,ufetch_64);
__strong_alias(ufetch_ptr,ufetch_64);
#else
__strong_alias(ufetch_long,ufetch_32);
__strong_alias(ufetch_ptr,ufetch_32);
#endif /* _LP64 */

int
ustore_8(uint8_t *uaddr, uint8_t val)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ustore_8(uaddr, val);
}

int
ustore_16(uint16_t *uaddr, uint16_t val)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ustore_16(uaddr, val);
}

int
ustore_32(uint32_t *uaddr, uint32_t val)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ustore_32(uaddr, val);
}

#ifdef _LP64
int
ustore_64(uint64_t *uaddr, uint64_t val)
{

	ASSERT_SLEEPABLE();
	CHECK_ALIGNMENT();
	return _ustore_64(uaddr, val);
}
#endif /* _LP64 */

__strong_alias(ustore_char,ustore_8);
__strong_alias(ustore_short,ustore_16);
__strong_alias(ustore_int,ustore_32);
#ifdef _LP64
__strong_alias(ustore_long,ustore_64);
__strong_alias(ustore_ptr,ustore_64);
#else
__strong_alias(ustore_long,ustore_32);
__strong_alias(ustore_ptr,ustore_32);
#endif /* _LP64 */
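
/*
 * Illustrative sketch (guarded out): the ufetch/ustore routines transfer a
 * single naturally aligned integer, which is cheaper than a full copyin()
 * or copyout() when only one word needs to move, e.g. posting a completion
 * value into a user-supplied status word.  On strict-alignment platforms a
 * misaligned address yields EFAULT (see CHECK_ALIGNMENT above).
 * example_post_status() is hypothetical.
 */
#if 0	/* illustrative sketch only */
static int
example_post_status(uint32_t *ustatus, uint32_t value)
{
	uint32_t old;
	int error;

	if ((error = ufetch_32(ustatus, &old)) != 0)
		return error;
	/* Only overwrite a slot the application has marked free (0). */
	if (old != 0)
		return EBUSY;
	return ustore_32(ustatus, value);
}
#endif	/* illustrative sketch only */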