rumpcopy.c revision 1.23 1 /* $NetBSD: rumpcopy.c,v 1.23 2019/04/06 03:06:28 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 2009 Antti Kantee. All Rights Reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
16 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: rumpcopy.c,v 1.23 2019/04/06 03:06:28 thorpej Exp $");
30
31 #define __UFETCHSTORE_PRIVATE
32 #define __UCAS_PRIVATE
33
34 #include <sys/param.h>
35 #include <sys/lwp.h>
36 #include <sys/systm.h>
37 #include <sys/uio.h>
38
39 #include <rump-sys/kern.h>
40
41 #include <rump/rumpuser.h>
42
43 int
44 copyin(const void *uaddr, void *kaddr, size_t len)
45 {
46 int error = 0;
47
48 if (__predict_false(uaddr == NULL && len)) {
49 return EFAULT;
50 }
51
52 if (RUMP_LOCALPROC_P(curproc)) {
53 memcpy(kaddr, uaddr, len);
54 } else if (len) {
55 error = rump_sysproxy_copyin(RUMP_SPVM2CTL(curproc->p_vmspace),
56 uaddr, kaddr, len);
57 }
58
59 return error;
60 }
61
62 int
63 copyout(const void *kaddr, void *uaddr, size_t len)
64 {
65 int error = 0;
66
67 if (__predict_false(uaddr == NULL && len)) {
68 return EFAULT;
69 }
70
71 if (RUMP_LOCALPROC_P(curproc)) {
72 memcpy(uaddr, kaddr, len);
73 } else if (len) {
74 error = rump_sysproxy_copyout(RUMP_SPVM2CTL(curproc->p_vmspace),
75 kaddr, uaddr, len);
76 }
77 return error;
78 }
79
/*
 * copystr: kernel-to-kernel NUL-terminated string copy into a buffer
 * of at most len bytes.  On success returns 0 and, if done is
 * non-NULL, stores the number of bytes copied including the
 * terminating NUL.  Returns ENAMETOOLONG if the terminator does not
 * fit (done is left untouched in that case, as before).
 *
 * Fix: the previous implementation dereferenced to[-1] when len == 0
 * (the size_t wrapped in the "len+1 == 0" check fired with `to' never
 * advanced), reading out of bounds and potentially returning success
 * for a copy that never happened.  A zero-sized buffer cannot hold
 * even the NUL, so report ENAMETOOLONG immediately.
 */
int
copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done)
{
	const uint8_t *from = kfaddr;
	uint8_t *to = kdaddr;
	size_t i;

	if (len == 0)
		return ENAMETOOLONG;

	for (i = 0; i < len; i++) {
		if ((to[i] = from[i]) == '\0') {
			if (done)
				*done = i + 1; /* includes the NUL */
			return 0;
		}
	}

	/* Buffer exhausted before the terminator was found. */
	return ENAMETOOLONG;
}
97
98 int
99 copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
100 {
101 uint8_t *to;
102 int rv;
103
104 if (len == 0)
105 return 0;
106
107 if (__predict_false(uaddr == NULL)) {
108 return EFAULT;
109 }
110
111 if (RUMP_LOCALPROC_P(curproc))
112 return copystr(uaddr, kaddr, len, done);
113
114 if ((rv = rump_sysproxy_copyinstr(RUMP_SPVM2CTL(curproc->p_vmspace),
115 uaddr, kaddr, &len)) != 0)
116 return rv;
117
118 /* figure out if we got a terminated string or not */
119 to = (uint8_t *)kaddr + (len-1);
120 while (to >= (uint8_t *)kaddr) {
121 if (*to == 0)
122 goto found;
123 to--;
124 }
125 return ENAMETOOLONG;
126
127 found:
128 if (done)
129 *done = strlen(kaddr)+1; /* includes termination */
130
131 return 0;
132 }
133
134 int
135 copyoutstr(const void *kaddr, void *uaddr, size_t len, size_t *done)
136 {
137 size_t slen;
138 int error;
139
140 if (__predict_false(uaddr == NULL && len)) {
141 return EFAULT;
142 }
143
144 if (RUMP_LOCALPROC_P(curproc))
145 return copystr(kaddr, uaddr, len, done);
146
147 slen = strlen(kaddr)+1;
148 if (slen > len)
149 return ENAMETOOLONG;
150
151 error = rump_sysproxy_copyoutstr(RUMP_SPVM2CTL(curproc->p_vmspace),
152 kaddr, uaddr, &slen);
153 if (done)
154 *done = slen;
155
156 return error;
157 }
158
/*
 * kcopy: kernel-to-kernel copy.  In a rump kernel both source and
 * destination are always directly addressable, so this cannot fault
 * and always succeeds.
 */
int
kcopy(const void *src, void *dst, size_t len)
{

	memcpy(dst, src, len);
	return 0;
}
166
167 /*
168 * Low-level I/O routine. This is used only when "all else fails",
169 * i.e. the current thread does not have an appropriate vm context.
170 */
171 int
172 uvm_io(struct vm_map *vm, struct uio *uio, int flag)
173 {
174 int error = 0;
175
176 /* loop over iovecs one-by-one and copyout */
177 for (; uio->uio_resid && uio->uio_iovcnt;
178 uio->uio_iovcnt--, uio->uio_iov++) {
179 struct iovec *iov = uio->uio_iov;
180 size_t curlen = MIN(uio->uio_resid, iov->iov_len);
181
182 if (__predict_false(curlen == 0))
183 continue;
184
185 if (uio->uio_rw == UIO_READ) {
186 error = rump_sysproxy_copyin(RUMP_SPVM2CTL(vm),
187 (void *)(vaddr_t)uio->uio_offset, iov->iov_base,
188 curlen);
189 } else {
190 error = rump_sysproxy_copyout(RUMP_SPVM2CTL(vm),
191 iov->iov_base, (void *)(vaddr_t)uio->uio_offset,
192 curlen);
193 }
194 if (error)
195 break;
196
197 iov->iov_base = (uint8_t *)iov->iov_base + curlen;
198 iov->iov_len -= curlen;
199
200 uio->uio_resid -= curlen;
201 uio->uio_offset += curlen;
202 }
203
204 return error;
205 }
206
/*
 * _ucas_32: compare-and-swap a 32-bit word in client space.  The old
 * value is always reported via *ret; the store happens only when it
 * matched `old'.  Atomicity against other threads on this host is
 * approximated by disabling kernel preemption around the sequence.
 */
int
_ucas_32(volatile uint32_t *uaddr, uint32_t old, uint32_t new, uint32_t *ret)
{
	uint32_t *p = (void *)(uintptr_t)uaddr;	/* strip volatile */
	int error;

	/* XXXXJRT do we need a MP CPU gate? */

	kpreempt_disable();
	error = _ufetch_32(p, ret);
	if (error == 0 && *ret == old)
		error = _ustore_32(p, new);
	kpreempt_enable();

	return error;
}
224
#ifdef _LP64
/*
 * _ucas_64: 64-bit flavor of _ucas_32 above; only on LP64 platforms.
 */
int
_ucas_64(volatile uint64_t *uaddr, uint64_t old, uint64_t new, uint64_t *ret)
{
	uint64_t *p = (void *)(uintptr_t)uaddr;	/* strip volatile */
	int error;

	/* XXXXJRT do we need a MP CPU gate? */

	kpreempt_disable();
	error = _ufetch_64(p, ret);
	if (error == 0 && *ret == old)
		error = _ustore_64(p, new);
	kpreempt_enable();

	return error;
}
#endif /* _LP64 */
244
245 #define UFETCH(sz) \
246 int \
247 _ufetch_ ## sz(const uint ## sz ##_t *uaddr, uint ## sz ## _t *valp) \
248 { \
249 int error = 0; \
250 \
251 if (RUMP_LOCALPROC_P(curproc)) { \
252 *valp = *uaddr; \
253 } else { \
254 error = rump_sysproxy_copyin( \
255 RUMP_SPVM2CTL(curproc->p_vmspace), \
256 uaddr, valp, sizeof(*valp)); \
257 } \
258 return error; \
259 }
260
261 UFETCH(8)
262 UFETCH(16)
263 UFETCH(32)
264 #ifdef _LP64
265 UFETCH(64)
266 #endif
267
268 #undef UFETCH
269
270 #define USTORE(sz) \
271 int \
272 _ustore_ ## sz(uint ## sz ## _t *uaddr, uint ## sz ## _t val) \
273 { \
274 int error = 0; \
275 \
276 if (RUMP_LOCALPROC_P(curproc)) { \
277 *uaddr = val; \
278 } else { \
279 error = rump_sysproxy_copyout( \
280 RUMP_SPVM2CTL(curproc->p_vmspace), \
281 &val, uaddr, sizeof(val)); \
282 } \
283 return error; \
284 }
285
286 USTORE(8)
287 USTORE(16)
288 USTORE(32)
289 #ifdef _LP64
290 USTORE(64)
291 #endif
292
293 #undef USTORE
294