/*	$NetBSD: rumpcopy.c,v 1.22.18.2 2020/04/08 14:09:01 martin Exp $	*/
2
3 /*
4 * Copyright (c) 2009 Antti Kantee. All Rights Reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
16 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: rumpcopy.c,v 1.22.18.2 2020/04/08 14:09:01 martin Exp $");
30
31 #define __UFETCHSTORE_PRIVATE
32 #define __UCAS_PRIVATE
33
34 #include <sys/param.h>
35 #include <sys/lwp.h>
36 #include <sys/systm.h>
37 #include <sys/uio.h>
38
39 #include <rump-sys/kern.h>
40
41 #include <rump/rumpuser.h>
42
43 int
44 copyin(const void *uaddr, void *kaddr, size_t len)
45 {
46 int error = 0;
47
48 if (len == 0)
49 return 0;
50
51 if (__predict_false(uaddr == NULL && len)) {
52 return EFAULT;
53 }
54
55 if (RUMP_LOCALPROC_P(curproc)) {
56 memcpy(kaddr, uaddr, len);
57 } else if (len) {
58 error = rump_sysproxy_copyin(RUMP_SPVM2CTL(curproc->p_vmspace),
59 uaddr, kaddr, len);
60 }
61
62 return error;
63 }
64
65 int
66 copyout(const void *kaddr, void *uaddr, size_t len)
67 {
68 int error = 0;
69
70 if (len == 0)
71 return 0;
72
73 if (__predict_false(uaddr == NULL && len)) {
74 return EFAULT;
75 }
76
77 if (RUMP_LOCALPROC_P(curproc)) {
78 memcpy(uaddr, kaddr, len);
79 } else if (len) {
80 error = rump_sysproxy_copyout(RUMP_SPVM2CTL(curproc->p_vmspace),
81 kaddr, uaddr, len);
82 }
83 return error;
84 }
85
/*
 * copystr(9): copy a NUL-terminated string between two kernel
 * addresses, copying at most len bytes (including the terminator).
 * On success *done (if non-NULL) is set to the number of bytes
 * copied, terminator included.
 *
 * Returns 0 on success or ENAMETOOLONG if the string (plus its
 * terminator) does not fit in len bytes; *done is left untouched
 * in that case.
 *
 * Fix vs. the previous version: with len == 0 the old code read
 * *(to-1), i.e. one byte *before* the destination buffer (the
 * post-decremented length wrapped, making "len+1 == 0" true).
 * A zero-sized buffer cannot hold even the terminator, so simply
 * return ENAMETOOLONG up front.
 */
int
copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done)
{
	const uint8_t *from = kfaddr;
	uint8_t *to = kdaddr;
	size_t i;

	/* no room for even the '\0' */
	if (len == 0)
		return ENAMETOOLONG;

	for (i = 0; i < len; i++) {
		if ((to[i] = from[i]) == '\0') {
			if (done)
				*done = i+1; /* + '\0' */
			return 0;
		}
	}

	/* buffer exhausted before the terminator was seen */
	return ENAMETOOLONG;
}
103
104 int
105 copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
106 {
107 uint8_t *to;
108 int rv;
109
110 if (len == 0)
111 return 0;
112
113 if (__predict_false(uaddr == NULL)) {
114 return EFAULT;
115 }
116
117 if (RUMP_LOCALPROC_P(curproc))
118 return copystr(uaddr, kaddr, len, done);
119
120 if ((rv = rump_sysproxy_copyinstr(RUMP_SPVM2CTL(curproc->p_vmspace),
121 uaddr, kaddr, &len)) != 0)
122 return rv;
123
124 /* figure out if we got a terminated string or not */
125 to = (uint8_t *)kaddr + (len-1);
126 while (to >= (uint8_t *)kaddr) {
127 if (*to == 0)
128 goto found;
129 to--;
130 }
131 return ENAMETOOLONG;
132
133 found:
134 if (done)
135 *done = strlen(kaddr)+1; /* includes termination */
136
137 return 0;
138 }
139
140 int
141 copyoutstr(const void *kaddr, void *uaddr, size_t len, size_t *done)
142 {
143 size_t slen;
144 int error;
145
146 if (len == 0)
147 return 0;
148
149 if (__predict_false(uaddr == NULL && len)) {
150 return EFAULT;
151 }
152
153 if (RUMP_LOCALPROC_P(curproc))
154 return copystr(kaddr, uaddr, len, done);
155
156 slen = strlen(kaddr)+1;
157 if (slen > len)
158 return ENAMETOOLONG;
159
160 error = rump_sysproxy_copyoutstr(RUMP_SPVM2CTL(curproc->p_vmspace),
161 kaddr, uaddr, &slen);
162 if (done)
163 *done = slen;
164
165 return error;
166 }
167
/*
 * kcopy(9): copy len bytes between two kernel addresses.  Both ends
 * live in our own address space, so this is a plain memcpy() and
 * always succeeds.
 */
int
kcopy(const void *src, void *dst, size_t len)
{

	if (len != 0)
		memcpy(dst, src, len);
	return 0;
}
178
179 /*
180 * Low-level I/O routine. This is used only when "all else fails",
181 * i.e. the current thread does not have an appropriate vm context.
182 */
183 int
184 uvm_io(struct vm_map *vm, struct uio *uio, int flag)
185 {
186 int error = 0;
187
188 /* loop over iovecs one-by-one and copyout */
189 for (; uio->uio_resid && uio->uio_iovcnt;
190 uio->uio_iovcnt--, uio->uio_iov++) {
191 struct iovec *iov = uio->uio_iov;
192 size_t curlen = MIN(uio->uio_resid, iov->iov_len);
193
194 if (__predict_false(curlen == 0))
195 continue;
196
197 if (uio->uio_rw == UIO_READ) {
198 error = rump_sysproxy_copyin(RUMP_SPVM2CTL(vm),
199 (void *)(vaddr_t)uio->uio_offset, iov->iov_base,
200 curlen);
201 } else {
202 error = rump_sysproxy_copyout(RUMP_SPVM2CTL(vm),
203 iov->iov_base, (void *)(vaddr_t)uio->uio_offset,
204 curlen);
205 }
206 if (error)
207 break;
208
209 iov->iov_base = (uint8_t *)iov->iov_base + curlen;
210 iov->iov_len -= curlen;
211
212 uio->uio_resid -= curlen;
213 uio->uio_offset += curlen;
214 }
215
216 return error;
217 }
218
/*
 * Compare-and-swap a 32-bit word in client memory: fetch the current
 * value into *ret and, only if it equals "old", store "new" in its
 * place.  Preemption is disabled around the fetch/store pair.
 * Returns 0 on success or the error from the fetch/store primitive.
 */
int
_ucas_32(volatile uint32_t *uaddr, uint32_t old, uint32_t new, uint32_t *ret)
{
	uint32_t *uva = (void *)(uintptr_t)uaddr;
	int rv;

	/* XXXXJRT do we need a MP CPU gate? */

	kpreempt_disable();
	rv = _ufetch_32(uva, ret);
	if (rv == 0 && *ret == old)
		rv = _ustore_32(uva, new);
	kpreempt_enable();

	return rv;
}
236
#ifdef _LP64
/*
 * 64-bit variant of _ucas_32(): compare-and-swap a 64-bit word in
 * client memory with preemption disabled around the fetch/store pair.
 * Returns 0 on success or the error from the fetch/store primitive.
 */
int
_ucas_64(volatile uint64_t *uaddr, uint64_t old, uint64_t new, uint64_t *ret)
{
	uint64_t *uva = (void *)(uintptr_t)uaddr;
	int rv;

	/* XXXXJRT do we need a MP CPU gate? */

	kpreempt_disable();
	rv = _ufetch_64(uva, ret);
	if (rv == 0 && *ret == old)
		rv = _ustore_64(uva, new);
	kpreempt_enable();

	return rv;
}
#endif /* _LP64 */
256
/*
 * UFETCH(sz) generates _ufetch_<sz>(): fetch one <sz>-bit value from
 * client ("user") memory into *valp.  A local client's memory is read
 * directly; a remote client's is pulled over the sysproxy channel.
 * Returns 0 on success or an error from the proxy transport.
 * (Comments must stay outside the macro: "//" would swallow the
 * continuation backslashes.)
 */
#define UFETCH(sz) \
int \
_ufetch_ ## sz(const uint ## sz ##_t *uaddr, uint ## sz ## _t *valp) \
{ \
	int error = 0; \
 \
	if (RUMP_LOCALPROC_P(curproc)) { \
		*valp = *uaddr; \
	} else { \
		error = rump_sysproxy_copyin( \
		    RUMP_SPVM2CTL(curproc->p_vmspace), \
		    uaddr, valp, sizeof(*valp)); \
	} \
	return error; \
}

/* Instantiate the fetch primitives for each supported width. */
UFETCH(8)
UFETCH(16)
UFETCH(32)
#ifdef _LP64
/* 64-bit primitive only exists on LP64 platforms. */
UFETCH(64)
#endif

#undef UFETCH
281
/*
 * USTORE(sz) generates _ustore_<sz>(): store one <sz>-bit value val
 * into client ("user") memory at uaddr.  A local client's memory is
 * written directly; a remote client's is pushed over the sysproxy
 * channel.  Returns 0 on success or an error from the proxy
 * transport.  (Comments must stay outside the macro body; see
 * UFETCH above.)
 */
#define USTORE(sz) \
int \
_ustore_ ## sz(uint ## sz ## _t *uaddr, uint ## sz ## _t val) \
{ \
	int error = 0; \
 \
	if (RUMP_LOCALPROC_P(curproc)) { \
		*uaddr = val; \
	} else { \
		error = rump_sysproxy_copyout( \
		    RUMP_SPVM2CTL(curproc->p_vmspace), \
		    &val, uaddr, sizeof(val)); \
	} \
	return error; \
}

/* Instantiate the store primitives for each supported width. */
USTORE(8)
USTORE(16)
USTORE(32)
#ifdef _LP64
/* 64-bit primitive only exists on LP64 platforms. */
USTORE(64)
#endif

#undef USTORE
306