/*	$NetBSD: uaccess.h,v 1.9 2021/12/19 11:24:14 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _ASM_UACCESS_H_
#define _ASM_UACCESS_H_

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>

#include <linux/compiler.h>

/* XXX This is a cop-out. */
#define	VERIFY_READ	0
#define	VERIFY_WRITE	1

/*
 * access_ok(uaddr, nbytes)
 *
 *	Linux hook to pre-validate a user-space address range before a
 *	copy.  NetBSD's copy(9)-style routines fault-check at access
 *	time, so this stub unconditionally approves the range.
 */
static inline bool
access_ok(const void *uaddr, size_t nbytes)
{
	(void)uaddr;
	(void)nbytes;

	return true;
}
49
/*
 * In Linux the "__" variants skip the access_ok() check.  Since our
 * access_ok() always succeeds, both spellings share one implementation.
 */
#define __copy_from_user copy_from_user
#define __copy_to_user copy_to_user
52
/*
 * copy_from_user(kernel_addr, user_addr, len)
 *
 *	Copy len bytes from user space into kernel space.  Returns 0 on
 *	success or a negated error code on fault.
 */
static inline int
copy_from_user(void *kernel_addr, const void *user_addr, size_t len)
{
	/* XXX errno NetBSD->Linux */
	const int error = copyin(user_addr, kernel_addr, len);

	return -error;
}
59
/*
 * copy_to_user(user_addr, kernel_addr, len)
 *
 *	Copy len bytes from kernel space out to user space.  Returns 0
 *	on success or a negated error code on fault.
 */
static inline int
copy_to_user(void *user_addr, const void *kernel_addr, size_t len)
{
	/* XXX errno NetBSD->Linux */
	const int error = copyout(kernel_addr, user_addr, len);

	return -error;
}
66
/*
 * get_user(lval, uptr)
 *
 *	Fetch *uptr from user space into the kernel lvalue lval.
 *	Evaluates to 0 on success or a negated error code on fault.
 *
 *	The `0*sizeof(&lval - uptr)' term adds nothing to the size, but
 *	the pointer subtraction forces a compile-time error if lval and
 *	*uptr have incompatible types.
 */
#define get_user(KERNEL_LVAL, USER_PTR) \
	copy_from_user(&(KERNEL_LVAL), (USER_PTR), sizeof(*(USER_PTR)) + \
	    0*sizeof(&(KERNEL_LVAL) - (USER_PTR)))
70
/*
 * put_user(rval, uptr)
 *
 *	Store the value rval at user-space address uptr.  Evaluates to
 *	0 on success or a negated error code on fault.
 *
 *	rval is captured once into a temporary of type typeof(*uptr),
 *	so the argument is evaluated exactly once and is converted to
 *	the user object's type before the copy.
 */
#define put_user(KERNEL_RVAL, USER_PTR) ({				      \
	const typeof(*(USER_PTR)) __put_user_tmp = (KERNEL_RVAL);	      \
	copy_to_user((USER_PTR), &__put_user_tmp, sizeof(__put_user_tmp));    \
})
75
/* As with __copy_*_user above: no access_ok() to skip, so plain aliases. */
#define __get_user get_user
#define __put_user put_user

/* Linux fences unchecked user accesses with these; no-ops here. */
#define user_access_begin() __nothing
#define user_access_end() __nothing
81
/*
 * unsafe_put_user(rval, uptr, label)
 *
 *	Store rval at user address uptr; on fault, branch to label.
 *	Simply wraps the checked __put_user -- the "unsafe" speedup is
 *	not implemented here.
 */
#define unsafe_put_user(KERNEL_RVAL, USER_PTR, LABEL) do {		      \
	if (__put_user(KERNEL_RVAL, USER_PTR))				      \
		goto LABEL;						      \
} while (0)
86
87 static inline size_t
88 clear_user(void __user *user_ptr, size_t size)
89 {
90 char __user *p = user_ptr;
91 size_t n = size;
92
93 /*
94 * This loop which sets up a fault handler on every iteration
95 * is not going to win any speed records, but it'll do to copy
96 * out an int.
97 */
98 while (n --> 0) {
99 if (ustore_char(p, 0) != 0)
100 return ++n;
101 }
102
103 return 0;
104 }
105
#if 0
/*
 * XXX These `inatomic' versions are a cop out, but they should do for
 * now -- they are used only in fast paths which can't fault but which
 * can fall back to slower paths that arrange things so faulting is OK.
 */

/* Disabled stub: always reports EFAULT so callers take the slow path. */
static inline int
__copy_from_user_inatomic(void *kernel_addr __unused,
    const void *user_addr __unused, size_t len __unused)
{
	return -EFAULT;
}

/* Disabled stub: always reports EFAULT so callers take the slow path. */
static inline int
__copy_to_user_inatomic(void *user_addr __unused,
    const void *kernel_addr __unused, size_t len __unused)
{
	return -EFAULT;
}
#endif	/* 0 */
127
/*
 * __copy_from_user_inatomic_nocache(kernel_addr, user_addr, len)
 *
 *	Atomic-context, cache-bypassing user copy.  Faulting is not an
 *	option for callers in this path, so this stub simply fails with
 *	EFAULT; per the note above, callers are expected to retry via a
 *	slower path where faulting is permitted.
 */
static inline int
__copy_from_user_inatomic_nocache(void *kernel_addr, const void *user_addr,
    size_t len)
{
	(void)kernel_addr;
	(void)user_addr;
	(void)len;

	return -EFAULT;
}
134
135 #endif /* _ASM_UACCESS_H_ */
136