/*	$NetBSD: libnvmm.c,v 1.7 2019/03/21 20:21:40 maxv Exp $	*/

/*
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/queue.h>

#include "nvmm.h"

/* I/O and memory assist callbacks, shared with the arch-specific code. */
struct nvmm_callbacks __callbacks;

/* One GPA->HVA mapping registered on a machine via nvmm_gpa_map(). */
typedef struct __area {
	LIST_ENTRY(__area) list;
	gpaddr_t gpa;
	uintptr_t hva;
	size_t size;
} area_t;

typedef LIST_HEAD(, __area) area_list_t;

/* File descriptor on /dev/nvmm, opened lazily by nvmm_init(). */
static int nvmm_fd = -1;

/* -------------------------------------------------------------------------- */

/*
 * Return true if [gpa, gpa+size) does not collide with any area already
 * registered on the machine.
 */
static bool
__area_isvalid(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa,
    size_t size)
{
	area_list_t *areas = mach->areas;
	area_t *ent;

	LIST_FOREACH(ent, areas, list) {
		/* Collision on GPA: start falls inside the area. */
		if (gpa >= ent->gpa && gpa < ent->gpa + ent->size) {
			return false;
		}
		/* Collision on GPA: end falls inside the area. */
		if (gpa + size > ent->gpa &&
		    gpa + size <= ent->gpa + ent->size) {
			return false;
		}
		/* Collision on GPA: the new range engulfs the area. */
		if (gpa <= ent->gpa && gpa + size >= ent->gpa + ent->size) {
			return false;
		}
	}

	return true;
}
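
/*
 * Worked example (editor's sketch, not part of the original source): with
 * an existing area covering GPA [0x2000, 0x5000), the three tests above
 * reject each class of overlap:
 *
 *	gpa=0x3000 size=0x1000	start falls inside the area   (first test)
 *	gpa=0x1000 size=0x2000	end falls inside the area     (second test)
 *	gpa=0x1000 size=0x5000	new range engulfs the area    (third test)
 *
 * An adjacent range such as gpa=0x5000 size=0x1000 passes all three tests
 * and is accepted.  Note that HVA collisions are not checked here: the hva
 * argument is unused.
 */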

static int
__area_add(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa, size_t size)
{
	area_list_t *areas = mach->areas;
	area_t *area;

	if (!__area_isvalid(mach, hva, gpa, size)) {
		errno = EINVAL;
		return -1;
	}

	area = malloc(sizeof(*area));
	if (area == NULL)
		return -1;
	area->gpa = gpa;
	area->hva = hva;
	area->size = size;

	LIST_INSERT_HEAD(areas, area, list);

	return 0;
}

static int
__area_delete(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa,
    size_t size)
{
	area_list_t *areas = mach->areas;
	area_t *ent, *nxt;

	LIST_FOREACH_SAFE(ent, areas, list, nxt) {
		if (hva == ent->hva && gpa == ent->gpa && size == ent->size) {
			LIST_REMOVE(ent, list);
			free(ent);
			return 0;
		}
	}

	/* No exact match: report the lookup failure to the caller. */
	errno = ENOENT;
	return -1;
}

static void
__area_remove_all(struct nvmm_machine *mach)
{
	area_list_t *areas = mach->areas;
	area_t *ent;

	while ((ent = LIST_FIRST(areas)) != NULL) {
		LIST_REMOVE(ent, list);
		free(ent);
	}

	free(areas);
}

/* -------------------------------------------------------------------------- */

/* Open /dev/nvmm once; every public entry point goes through this first. */
static int
nvmm_init(void)
{
	if (nvmm_fd != -1)
		return 0;
	nvmm_fd = open("/dev/nvmm", O_RDWR);
	if (nvmm_fd == -1)
		return -1;
	return 0;
}

int
nvmm_capability(struct nvmm_capability *cap)
{
	struct nvmm_ioc_capability args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	ret = ioctl(nvmm_fd, NVMM_IOC_CAPABILITY, &args);
	if (ret == -1)
		return -1;

	memcpy(cap, &args.cap, sizeof(args.cap));

	return 0;
}
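
/*
 * Usage sketch (editor's illustration; assumes the field names of struct
 * nvmm_capability from nvmm.h, here the version field):
 *
 *	struct nvmm_capability cap;
 *	if (nvmm_capability(&cap) == -1)
 *		err(EXIT_FAILURE, "nvmm_capability");
 *	printf("NVMM API version %" PRIu64 "\n", cap.version);
 */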

int
nvmm_machine_create(struct nvmm_machine *mach)
{
	struct nvmm_ioc_machine_create args;
	area_list_t *areas;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	areas = calloc(1, sizeof(*areas));
	if (areas == NULL)
		return -1;

	ret = ioctl(nvmm_fd, NVMM_IOC_MACHINE_CREATE, &args);
	if (ret == -1) {
		free(areas);
		return -1;
	}

	memset(mach, 0, sizeof(*mach));
	LIST_INIT(areas);
	mach->areas = areas;
	mach->machid = args.machid;

	return 0;
}
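
/*
 * Usage sketch (editor's illustration): each machine created here owns a
 * fresh area list, and must eventually be passed to nvmm_machine_destroy()
 * to release both the kernel object and that list:
 *
 *	struct nvmm_machine mach;
 *	if (nvmm_machine_create(&mach) == -1)
 *		err(EXIT_FAILURE, "nvmm_machine_create");
 *	... set up memory and vCPUs, run the guest ...
 *	nvmm_machine_destroy(&mach);
 */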

int
nvmm_machine_destroy(struct nvmm_machine *mach)
{
	struct nvmm_ioc_machine_destroy args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;

	ret = ioctl(nvmm_fd, NVMM_IOC_MACHINE_DESTROY, &args);
	if (ret == -1)
		return -1;

	__area_remove_all(mach);

	return 0;
}

int
nvmm_machine_configure(struct nvmm_machine *mach, uint64_t op, void *conf)
{
	struct nvmm_ioc_machine_configure args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.op = op;
	args.conf = conf;

	ret = ioctl(nvmm_fd, NVMM_IOC_MACHINE_CONFIGURE, &args);
	if (ret == -1)
		return -1;

	return 0;
}

int
nvmm_vcpu_create(struct nvmm_machine *mach, nvmm_cpuid_t cpuid)
{
	struct nvmm_ioc_vcpu_create args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.cpuid = cpuid;

	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_CREATE, &args);
	if (ret == -1)
		return -1;

	return 0;
}

int
nvmm_vcpu_destroy(struct nvmm_machine *mach, nvmm_cpuid_t cpuid)
{
	struct nvmm_ioc_vcpu_destroy args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.cpuid = cpuid;

	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_DESTROY, &args);
	if (ret == -1)
		return -1;

	return 0;
}

int
nvmm_vcpu_setstate(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
    void *state, uint64_t flags)
{
	struct nvmm_ioc_vcpu_setstate args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.cpuid = cpuid;
	args.state = state;
	args.flags = flags;

	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_SETSTATE, &args);
	if (ret == -1)
		return -1;

	return 0;
}

int
nvmm_vcpu_getstate(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
    void *state, uint64_t flags)
{
	struct nvmm_ioc_vcpu_getstate args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.cpuid = cpuid;
	args.state = state;
	args.flags = flags;

	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_GETSTATE, &args);
	if (ret == -1)
		return -1;

	return 0;
}
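
/*
 * Usage sketch (editor's illustration): the state buffer and flags are
 * architecture-specific; the names below assume the x86 definitions
 * (struct nvmm_x64_state, NVMM_X64_STATE_GPRS, NVMM_X64_GPR_RAX) from the
 * NVMM headers.  A get-modify-set round trip looks like:
 *
 *	struct nvmm_x64_state state;
 *	if (nvmm_vcpu_getstate(&mach, 0, &state, NVMM_X64_STATE_GPRS) == -1)
 *		err(EXIT_FAILURE, "nvmm_vcpu_getstate");
 *	state.gprs[NVMM_X64_GPR_RAX] = 0;
 *	if (nvmm_vcpu_setstate(&mach, 0, &state, NVMM_X64_STATE_GPRS) == -1)
 *		err(EXIT_FAILURE, "nvmm_vcpu_setstate");
 */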

int
nvmm_vcpu_inject(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
    struct nvmm_event *event)
{
	struct nvmm_ioc_vcpu_inject args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.cpuid = cpuid;
	memcpy(&args.event, event, sizeof(args.event));

	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_INJECT, &args);
	if (ret == -1)
		return -1;

	return 0;
}

int
nvmm_vcpu_run(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
    struct nvmm_exit *exit)
{
	struct nvmm_ioc_vcpu_run args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.cpuid = cpuid;
	memset(&args.exit, 0, sizeof(args.exit));

	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_RUN, &args);
	if (ret == -1)
		return -1;

	memcpy(exit, &args.exit, sizeof(args.exit));

	return 0;
}
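
/*
 * Usage sketch (editor's illustration; assumes the NVMM_EXIT_* reason
 * values from nvmm.h, and leaves the "running" condition and the full
 * exit handling to the caller): an emulator typically runs the vCPU in a
 * loop and dispatches on the exit reason:
 *
 *	struct nvmm_exit exit;
 *	while (running) {
 *		if (nvmm_vcpu_run(&mach, 0, &exit) == -1)
 *			err(EXIT_FAILURE, "nvmm_vcpu_run");
 *		switch (exit.reason) {
 *		case NVMM_EXIT_NONE:
 *			break;
 *		case NVMM_EXIT_IO:
 *			nvmm_assist_io(&mach, 0, &exit);
 *			break;
 *		...
 *		}
 *	}
 */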

int
nvmm_gpa_map(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa,
    size_t size, int prot)
{
	struct nvmm_ioc_gpa_map args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	ret = __area_add(mach, hva, gpa, size);
	if (ret == -1)
		return -1;

	args.machid = mach->machid;
	args.hva = hva;
	args.gpa = gpa;
	args.size = size;
	args.prot = prot;

	ret = ioctl(nvmm_fd, NVMM_IOC_GPA_MAP, &args);
	if (ret == -1) {
		/*
		 * Can't recover: the area was already registered locally,
		 * so the library and the kernel would now disagree.
		 */
		abort();
	}

	return 0;
}
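
/*
 * Usage sketch (editor's illustration): guest RAM is typically a host
 * buffer registered first with nvmm_hva_map(), then linked to a guest
 * physical range here.  hva, gpa and size are page-aligned values chosen
 * by the caller:
 *
 *	if (nvmm_hva_map(&mach, hva, size) == -1)
 *		err(EXIT_FAILURE, "nvmm_hva_map");
 *	if (nvmm_gpa_map(&mach, hva, gpa, size, PROT_READ|PROT_WRITE) == -1)
 *		err(EXIT_FAILURE, "nvmm_gpa_map");
 */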

int
nvmm_gpa_unmap(struct nvmm_machine *mach, uintptr_t hva, gpaddr_t gpa,
    size_t size)
{
	struct nvmm_ioc_gpa_unmap args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	ret = __area_delete(mach, hva, gpa, size);
	if (ret == -1)
		return -1;

	args.machid = mach->machid;
	args.gpa = gpa;
	args.size = size;

	ret = ioctl(nvmm_fd, NVMM_IOC_GPA_UNMAP, &args);
	if (ret == -1) {
		/*
		 * Can't recover: the area was already deleted locally,
		 * so the library and the kernel would now disagree.
		 */
		abort();
	}

	return 0;
}

int
nvmm_hva_map(struct nvmm_machine *mach, uintptr_t hva, size_t size)
{
	struct nvmm_ioc_hva_map args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.hva = hva;
	args.size = size;

	ret = ioctl(nvmm_fd, NVMM_IOC_HVA_MAP, &args);
	if (ret == -1)
		return -1;

	return 0;
}

int
nvmm_hva_unmap(struct nvmm_machine *mach, uintptr_t hva, size_t size)
{
	struct nvmm_ioc_hva_unmap args;
	int ret;

	if (nvmm_init() == -1) {
		return -1;
	}

	args.machid = mach->machid;
	args.hva = hva;
	args.size = size;

	ret = ioctl(nvmm_fd, NVMM_IOC_HVA_UNMAP, &args);
	if (ret == -1)
		return -1;

	return 0;
}

/*
 * nvmm_gva_to_gpa(): architecture-specific.
 */

int
nvmm_gpa_to_hva(struct nvmm_machine *mach, gpaddr_t gpa, uintptr_t *hva)
{
	area_list_t *areas = mach->areas;
	area_t *ent;

	LIST_FOREACH(ent, areas, list) {
		if (gpa >= ent->gpa && gpa < ent->gpa + ent->size) {
			*hva = ent->hva + (gpa - ent->gpa);
			return 0;
		}
	}

	errno = ENOENT;
	return -1;
}
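
/*
 * Worked example (editor's sketch): with a single area {gpa=0x1000,
 * hva=0x7f7ff7000000, size=0x2000}, translating gpa 0x1800 finds that
 * area and returns hva 0x7f7ff7000000 + (0x1800 - 0x1000) = 0x7f7ff7000800:
 *
 *	uintptr_t hva;
 *	if (nvmm_gpa_to_hva(&mach, 0x1800, &hva) == -1)
 *		err(EXIT_FAILURE, "GPA not mapped");
 */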

/*
 * nvmm_assist_io(): architecture-specific.
 */

/*
 * nvmm_assist_mem(): architecture-specific.
 */

void
nvmm_callbacks_register(const struct nvmm_callbacks *cbs)
{
	memcpy(&__callbacks, cbs, sizeof(__callbacks));
}
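
/*
 * Usage sketch (editor's illustration; assumes the io/mem function-pointer
 * members of struct nvmm_callbacks from nvmm.h, and my_io_callback and
 * my_mem_callback are hypothetical emulator functions): the callbacks feed
 * the arch-specific nvmm_assist_io()/nvmm_assist_mem() helpers, and are
 * meant to be registered once, before running vCPUs:
 *
 *	static const struct nvmm_callbacks cbs = {
 *		.io = my_io_callback,
 *		.mem = my_mem_callback,
 *	};
 *	nvmm_callbacks_register(&cbs);
 */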