/*	$NetBSD: uaccess.h,v 1.7 2021/12/19 01:55:14 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _ASM_UACCESS_H_
#define _ASM_UACCESS_H_

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>

/* XXX This is a cop-out.  */
#define	VERIFY_READ	0
#define	VERIFY_WRITE	1
static inline bool
access_ok(int verify_op __unused, const void *uaddr __unused,
    size_t nbytes __unused)
{
	return true;
}
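
/*
 * Always returning true should be tolerable here: on NetBSD,
 * copyin(9)/copyout(9) themselves validate the user address range and
 * fail with EFAULT on a bad address, so there is no separate access_ok
 * pre-check to perform.
 */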

#define	__copy_from_user	copy_from_user
#define	__copy_to_user		copy_to_user

static inline int
copy_from_user(void *kernel_addr, const void *user_addr, size_t len)
{
	/* XXX errno NetBSD->Linux */
	return -copyin(user_addr, kernel_addr, len);
}

static inline int
copy_to_user(void *user_addr, const void *kernel_addr, size_t len)
{
	/* XXX errno NetBSD->Linux */
	return -copyout(kernel_addr, user_addr, len);
}
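
/*
 * Usage sketch (hypothetical caller, names are illustrative and not
 * part of this file):
 *
 *	struct foo_args args;
 *
 *	if (copy_from_user(&args, user_ptr, sizeof(args)))
 *		return -EFAULT;
 *	...
 *	if (copy_to_user(user_ptr, &args, sizeof(args)))
 *		return -EFAULT;
 *
 * copyin(9)/copyout(9) return 0 on success or an errno such as EFAULT,
 * so these wrappers return 0 or a negative errno.  (Linux's versions
 * return the number of bytes left uncopied instead; callers that only
 * test for nonzero, as in the sketch, see no difference.)
 */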

#define	get_user(KERNEL_LVAL, USER_PTR)					      \
	copy_from_user(&(KERNEL_LVAL), (USER_PTR), sizeof(*(USER_PTR)) +      \
	    0*sizeof(&(KERNEL_LVAL) - (USER_PTR)))
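
/*
 * The 0*sizeof(&(KERNEL_LVAL) - (USER_PTR)) term adds nothing to the
 * length; the pointer subtraction inside it only compiles when
 * &(KERNEL_LVAL) and (USER_PTR) point to compatible types, so it acts
 * as a cheap compile-time type check.
 */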

#define	put_user(KERNEL_RVAL, USER_PTR)	({				      \
	const typeof(*(USER_PTR)) __put_user_tmp = (KERNEL_RVAL);	      \
	copy_to_user((USER_PTR), &__put_user_tmp, sizeof(__put_user_tmp));    \
})

#define	__get_user	get_user
#define	__put_user	put_user
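
/*
 * Usage sketch (hypothetical caller, not part of this file):
 *
 *	uint32_t v;
 *
 *	if (get_user(v, (const uint32_t *)user_ptr))
 *		return -EFAULT;
 *	if (put_user(v + 1, (uint32_t *)user_ptr))
 *		return -EFAULT;
 */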

#define	user_access_begin()	__nothing
#define	user_access_end()	__nothing

#define	unsafe_put_user(KERNEL_RVAL, USER_PTR, LABEL)	do {		      \
	if (__put_user(KERNEL_RVAL, USER_PTR))				      \
		goto LABEL;						      \
} while (0)
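
/*
 * Usage sketch (hypothetical caller, not part of this file):
 *
 *	user_access_begin();
 *	unsafe_put_user(value, user_ptr, efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */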
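/*
 * Like the Linux clear_user, this returns the number of bytes that
 * could not be cleared (0 on success), not a negative errno.
 */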
static inline size_t
clear_user(void __user *user_ptr, size_t size)
{
	char __user *p = user_ptr;
	size_t n = size;

	/*
	 * This loop, which sets up a fault handler on every iteration,
	 * is not going to win any speed records, but it'll do to clear
	 * out an int.
	 */
	while (n --> 0) {
		if (ustore_char(p++, 0) != 0)
			return ++n;
	}

	return 0;
}

#if 0
/*
 * XXX These `inatomic' versions are a cop-out, but they should do for
 * now -- they are used only in fast paths which can't fault but which
 * can fall back to slower paths that arrange things so faulting is OK.
 */

static inline int
__copy_from_user_inatomic(void *kernel_addr __unused,
    const void *user_addr __unused, size_t len __unused)
{
	return -EFAULT;
}

static inline int
__copy_to_user_inatomic(void *user_addr __unused,
    const void *kernel_addr __unused, size_t len __unused)
{
	return -EFAULT;
}
#endif	/* 0 */

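/*
 * Unconditionally failing here just pushes callers onto their slower,
 * faultable fallback path, in the same spirit as the disabled
 * `inatomic' stubs above.
 */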
static inline int
__copy_from_user_inatomic_nocache(void *kernel_addr __unused,
    const void *user_addr __unused, size_t len __unused)
{
	return -EFAULT;
}

#endif  /* _ASM_UACCESS_H_ */