Home | History | Annotate | Line # | Download | only in asm
uaccess.h revision 1.4
      1 /*	$NetBSD: uaccess.h,v 1.4 2021/12/19 00:50:11 riastradh Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2013 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Taylor R. Campbell.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #ifndef _ASM_UACCESS_H_
     33 #define _ASM_UACCESS_H_
     34 
     35 #include <sys/types.h>
     36 #include <sys/errno.h>
     37 #include <sys/systm.h>
     38 
     39 /* XXX This is a cop-out.  */
#define	VERIFY_READ	0
#define	VERIFY_WRITE	1

/*
 * access_ok: Linux-style check that a user address range may be
 * accessed.  This shim performs no validation and always reports
 * success -- the real checking is deferred to copyin/copyout at
 * copy time.
 */
static inline bool
access_ok(int verify_op, const void *uaddr, size_t nbytes)
{
	(void)verify_op;
	(void)uaddr;
	(void)nbytes;

	return true;
}
     48 
/*
 * Linux distinguishes the unchecked __copy_{from,to}_user (caller has
 * already done access_ok) from the checked copy_{from,to}_user.  Since
 * access_ok above always succeeds here, the two forms are identical.
 */
#define	__copy_from_user	copy_from_user
#define	__copy_to_user		copy_to_user
     51 
/*
 * copy_from_user: copy len bytes from user space into kernel space.
 * Returns 0 on success or a negative error code on fault, per the
 * Linux convention; NetBSD's copyin returns a positive errno, so the
 * result is negated.
 */
static inline int
copy_from_user(void *kernel_addr, const void *user_addr, size_t len)
{
	const int error = copyin(user_addr, kernel_addr, len);

	/* XXX errno NetBSD->Linux */
	return -error;
}
     58 
/*
 * copy_to_user: copy len bytes from kernel space out to user space.
 * Returns 0 on success or a negative error code on fault, per the
 * Linux convention; NetBSD's copyout returns a positive errno, so the
 * result is negated.
 */
static inline int
copy_to_user(void *user_addr, const void *kernel_addr, size_t len)
{
	const int error = copyout(kernel_addr, user_addr, len);

	/* XXX errno NetBSD->Linux */
	return -error;
}
     65 
/*
 * get_user(kernel_lval, user_ptr): fetch *user_ptr from user space into
 * the kernel lvalue.  The `0*sizeof(&(KERNEL_LVAL) - (USER_PTR))' term
 * contributes nothing at run time; the pointer subtraction only
 * compiles when &KERNEL_LVAL and USER_PTR point to compatible types,
 * giving a free compile-time type check.  Evaluates to copy_from_user's
 * return value: 0 or a negative error code.
 */
#define	get_user(KERNEL_LVAL, USER_PTR)					      \
	copy_from_user(&(KERNEL_LVAL), (USER_PTR), sizeof(*(USER_PTR)) +      \
	    0*sizeof(&(KERNEL_LVAL) - (USER_PTR)))

/*
 * put_user(kernel_rval, user_ptr): store the kernel rvalue at *user_ptr
 * in user space.  The typeof temporary both lets KERNEL_RVAL be an
 * arbitrary expression (its address is never taken directly) and
 * converts/type-checks it against *USER_PTR.  GNU statement expression:
 * evaluates to copy_to_user's return value, 0 or a negative error code.
 */
#define	put_user(KERNEL_RVAL, USER_PTR)	({				      \
	const typeof(*(USER_PTR)) __put_user_tmp = (KERNEL_RVAL);	      \
	copy_to_user((USER_PTR), &__put_user_tmp, sizeof(__put_user_tmp));    \
})
     74 
#if 0
/*
 * XXX These `inatomic' versions are a cop out, but they should do for
 * now -- they are used only in fast paths which can't fault but which
 * can fall back to slower paths that arrange things so faulting is OK.
 */
/*
 * NOTE(review): this whole section is compiled out (#if 0); only the
 * _nocache variant below is live.
 */

static inline int
__copy_from_user_inatomic(void *kernel_addr __unused,
    const void *user_addr __unused, size_t len __unused)
{
	return -EFAULT;
}

static inline int
__copy_to_user_inatomic(void *user_addr __unused,
    const void *kernel_addr __unused, size_t len __unused)
{
	return -EFAULT;
}
#endif	/* 0 */
     96 
/*
 * __copy_from_user_inatomic_nocache: Linux fast-path user copy that
 * must not fault.  This shim never attempts the copy; it always fails
 * with -EFAULT, forcing callers onto their slower, fault-tolerant
 * fallback paths.
 */
static inline int
__copy_from_user_inatomic_nocache(void *kernel_addr, const void *user_addr,
    size_t len)
{
	(void)kernel_addr;
	(void)user_addr;
	(void)len;

	return -EFAULT;
}
    103 
    104 #endif  /* _ASM_UACCESS_H_ */
    105