/*	$NetBSD: nvmm.c,v 1.9 2019/03/07 15:22:21 maxv Exp $	*/

/*
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvmm.c,v 1.9 2019/03/07 15:22:21 maxv Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <sys/cpu.h>
#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/proc.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>

#include "ioconf.h"

#include <dev/nvmm/nvmm.h>
#include <dev/nvmm/nvmm_internal.h>
#include <dev/nvmm/nvmm_ioctl.h>

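/*
 * All machines live in a static table.  Each slot has its own rwlock, and
 * a slot is in use when its 'present' flag is set.  nvmm_impl points to
 * the backend (AMD SVM or Intel VMX) selected once, in nvmm_init().
 */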
static struct nvmm_machine machines[NVMM_MAX_MACHINES];

static const struct nvmm_impl *nvmm_impl_list[] = {
	&nvmm_x86_svm,	/* x86 AMD SVM */
	&nvmm_x86_vmx	/* x86 Intel VMX */
};

static const struct nvmm_impl *nvmm_impl = NULL;

/* -------------------------------------------------------------------------- */

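/*
 * Grab the first free slot in the machine table.  On success the machine
 * is returned write-locked and marked present; the caller releases it with
 * nvmm_machine_put().  ENOBUFS when all slots are taken.
 */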
static int
nvmm_machine_alloc(struct nvmm_machine **ret)
{
	struct nvmm_machine *mach;
	size_t i;

	for (i = 0; i < NVMM_MAX_MACHINES; i++) {
		mach = &machines[i];

		rw_enter(&mach->lock, RW_WRITER);
		if (mach->present) {
			rw_exit(&mach->lock);
			continue;
		}

		mach->present = true;
		*ret = mach;
		return 0;
	}

	return ENOBUFS;
}

static void
nvmm_machine_free(struct nvmm_machine *mach)
{
	KASSERT(rw_write_held(&mach->lock));
	KASSERT(mach->present);
	mach->present = false;
}

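/*
 * Look up a machine by ID and lock it, as reader or writer depending on
 * the caller's needs.  Only the owner process may touch a machine; a
 * mismatched PID gets EPERM.
 */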
static int
nvmm_machine_get(nvmm_machid_t machid, struct nvmm_machine **ret, bool writer)
{
	struct nvmm_machine *mach;
	krw_t op = writer ? RW_WRITER : RW_READER;

	if (machid >= NVMM_MAX_MACHINES) {
		return EINVAL;
	}
	mach = &machines[machid];

	rw_enter(&mach->lock, op);
	if (!mach->present) {
		rw_exit(&mach->lock);
		return ENOENT;
	}
	if (mach->procid != curproc->p_pid) {
		rw_exit(&mach->lock);
		return EPERM;
	}
	*ret = mach;

	return 0;
}

static void
nvmm_machine_put(struct nvmm_machine *mach)
{
	rw_exit(&mach->lock);
}

/* -------------------------------------------------------------------------- */

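/*
 * Grab the first free VCPU slot of the machine.  On success the VCPU is
 * returned with its mutex held, marked present, and with a zeroed state
 * area of the backend's size; release with nvmm_vcpu_put().
 */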
static int
nvmm_vcpu_alloc(struct nvmm_machine *mach, struct nvmm_cpu **ret)
{
	struct nvmm_cpu *vcpu;
	size_t i;

	for (i = 0; i < NVMM_MAX_VCPUS; i++) {
		vcpu = &mach->cpus[i];

		mutex_enter(&vcpu->lock);
		if (vcpu->present) {
			mutex_exit(&vcpu->lock);
			continue;
		}

		vcpu->present = true;
		vcpu->cpuid = i;
		vcpu->state = kmem_zalloc(nvmm_impl->state_size, KM_SLEEP);
		*ret = vcpu;
		return 0;
	}

	return ENOBUFS;
}

static void
nvmm_vcpu_free(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
{
	KASSERT(mutex_owned(&vcpu->lock));
	vcpu->present = false;
	kmem_free(vcpu->state, nvmm_impl->state_size);
	vcpu->hcpu_last = -1;
}

int
nvmm_vcpu_get(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
    struct nvmm_cpu **ret)
{
	struct nvmm_cpu *vcpu;

	if (cpuid >= NVMM_MAX_VCPUS) {
		return EINVAL;
	}
	vcpu = &mach->cpus[cpuid];

	mutex_enter(&vcpu->lock);
	if (!vcpu->present) {
		mutex_exit(&vcpu->lock);
		return ENOENT;
	}
	*ret = vcpu;

	return 0;
}

void
nvmm_vcpu_put(struct nvmm_cpu *vcpu)
{
	mutex_exit(&vcpu->lock);
}

/* -------------------------------------------------------------------------- */

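/*
 * Tear down every machine owned by the given process: destroy and free the
 * VCPUs, free the guest vmspace, and drop the kernel references on the
 * anonymous UVM objects backing the host mappings.  Called when the process
 * closes the device.
 */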
static void
nvmm_kill_machines(pid_t pid)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	size_t i, j;
	int error;

	for (i = 0; i < NVMM_MAX_MACHINES; i++) {
		mach = &machines[i];

		rw_enter(&mach->lock, RW_WRITER);
		if (!mach->present || mach->procid != pid) {
			rw_exit(&mach->lock);
			continue;
		}

		/* Kill it. */
		for (j = 0; j < NVMM_MAX_VCPUS; j++) {
			error = nvmm_vcpu_get(mach, j, &vcpu);
			if (error)
				continue;
			(*nvmm_impl->vcpu_destroy)(mach, vcpu);
			nvmm_vcpu_free(mach, vcpu);
			nvmm_vcpu_put(vcpu);
		}
		uvmspace_free(mach->vm);

		/* Drop the kernel UOBJ refs. */
		for (j = 0; j < NVMM_MAX_HMAPPINGS; j++) {
			if (!mach->hmap[j].present)
				continue;
			uao_detach(mach->hmap[j].uobj);
		}

		nvmm_machine_free(mach);

		rw_exit(&mach->lock);
	}
}

/* -------------------------------------------------------------------------- */

static int
nvmm_capability(struct nvmm_ioc_capability *args)
{
	args->cap.version = NVMM_CAPABILITY_VERSION;
	args->cap.state_size = nvmm_impl->state_size;
	args->cap.max_machines = NVMM_MAX_MACHINES;
	args->cap.max_vcpus = NVMM_MAX_VCPUS;
	args->cap.max_ram = NVMM_MAX_RAM;

	(*nvmm_impl->capability)(&args->cap);

	return 0;
}

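/*
 * Create a machine: take ownership for the calling process, clear the host
 * mappings, and allocate a fresh guest vmspace spanning [0, NVMM_MAX_RAM).
 */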
static int
nvmm_machine_create(struct nvmm_ioc_machine_create *args)
{
	struct nvmm_machine *mach;
	int error;

	error = nvmm_machine_alloc(&mach);
	if (error)
		return error;

	/* Curproc owns the machine. */
	mach->procid = curproc->p_pid;

	/* Zero out the host mappings. */
	memset(&mach->hmap, 0, sizeof(mach->hmap));

	/* Create the machine vmspace. */
	mach->gpa_begin = 0;
	mach->gpa_end = NVMM_MAX_RAM;
	mach->vm = uvmspace_alloc(0, mach->gpa_end - mach->gpa_begin, false);

	(*nvmm_impl->machine_create)(mach);

	args->machid = mach->machid;
	nvmm_machine_put(mach);

	return 0;
}

static int
nvmm_machine_destroy(struct nvmm_ioc_machine_destroy *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;
	size_t i;

	error = nvmm_machine_get(args->machid, &mach, true);
	if (error)
		return error;

	for (i = 0; i < NVMM_MAX_VCPUS; i++) {
		error = nvmm_vcpu_get(mach, i, &vcpu);
		if (error)
			continue;

		(*nvmm_impl->vcpu_destroy)(mach, vcpu);
		nvmm_vcpu_free(mach, vcpu);
		nvmm_vcpu_put(vcpu);
	}

	(*nvmm_impl->machine_destroy)(mach);

	/* Free the machine vmspace. */
	uvmspace_free(mach->vm);

	/* Drop the kernel UOBJ refs. */
	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		if (!mach->hmap[i].present)
			continue;
		uao_detach(mach->hmap[i].uobj);
	}

	nvmm_machine_free(mach);
	nvmm_machine_put(mach);

	return 0;
}

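/*
 * Apply a backend-specific configuration operation.  The payload size is
 * dictated by the backend's conf_sizes[] table, and the payload itself is
 * copied in from userland before being handed to the backend.
 */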
static int
nvmm_machine_configure(struct nvmm_ioc_machine_configure *args)
{
	struct nvmm_machine *mach;
	size_t allocsz;
	void *data;
	int error;

	if (__predict_false(args->op >= nvmm_impl->conf_max)) {
		return EINVAL;
	}

	allocsz = nvmm_impl->conf_sizes[args->op];
	data = kmem_alloc(allocsz, KM_SLEEP);

	error = nvmm_machine_get(args->machid, &mach, true);
	if (error) {
		kmem_free(data, allocsz);
		return error;
	}

	error = copyin(args->conf, data, allocsz);
	if (error) {
		goto out;
	}

	error = (*nvmm_impl->machine_configure)(mach, args->op, data);

out:
	nvmm_machine_put(mach);
	kmem_free(data, allocsz);
	return error;
}

static int
nvmm_vcpu_create(struct nvmm_ioc_vcpu_create *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_alloc(mach, &vcpu);
	if (error)
		goto out;

	error = (*nvmm_impl->vcpu_create)(mach, vcpu);
	if (error) {
		nvmm_vcpu_free(mach, vcpu);
		nvmm_vcpu_put(vcpu);
		goto out;
	}

	nvmm_vcpu_put(vcpu);

out:
	nvmm_machine_put(mach);
	return error;
}

static int
nvmm_vcpu_destroy(struct nvmm_ioc_vcpu_destroy *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	(*nvmm_impl->vcpu_destroy)(mach, vcpu);
	nvmm_vcpu_free(mach, vcpu);
	nvmm_vcpu_put(vcpu);

out:
	nvmm_machine_put(mach);
	return error;
}

static int
nvmm_vcpu_setstate(struct nvmm_ioc_vcpu_setstate *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	error = copyin(args->state, vcpu->state, nvmm_impl->state_size);
	if (error) {
		nvmm_vcpu_put(vcpu);
		goto out;
	}

	(*nvmm_impl->vcpu_setstate)(vcpu, vcpu->state, args->flags);
	nvmm_vcpu_put(vcpu);

out:
	nvmm_machine_put(mach);
	return error;
}

static int
nvmm_vcpu_getstate(struct nvmm_ioc_vcpu_getstate *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	(*nvmm_impl->vcpu_getstate)(vcpu, vcpu->state, args->flags);
	nvmm_vcpu_put(vcpu);
	error = copyout(vcpu->state, args->state, nvmm_impl->state_size);

out:
	nvmm_machine_put(mach);
	return error;
}

static int
nvmm_vcpu_inject(struct nvmm_ioc_vcpu_inject *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	error = (*nvmm_impl->vcpu_inject)(mach, vcpu, &args->event);
	nvmm_vcpu_put(vcpu);

out:
	nvmm_machine_put(mach);
	return error;
}

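/*
 * The VCPU run loop.  Re-enter the guest as long as the backend reports a
 * memory exit that UVM can satisfy: such exits are page faults on the
 * guest vmspace, resolved in-kernel with uvm_fault().  Any other exit
 * reason, or an unresolvable fault, bounces the exit back to userland.
 */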
static void
nvmm_do_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct vmspace *vm = mach->vm;

	while (1) {
		(*nvmm_impl->vcpu_run)(mach, vcpu, exit);

		if (__predict_true(exit->reason != NVMM_EXIT_MEMORY)) {
			break;
		}
		if (uvm_fault(&vm->vm_map, exit->u.mem.gpa, VM_PROT_ALL)) {
			break;
		}
	}
}

static int
nvmm_vcpu_run(struct nvmm_ioc_vcpu_run *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	nvmm_do_vcpu_run(mach, vcpu, &args->exit);
	nvmm_vcpu_put(vcpu);

out:
	nvmm_machine_put(mach);
	return error;
}

/* -------------------------------------------------------------------------- */

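/*
 * Translate a host virtual range into the anonymous UVM object that backs
 * it, along with the offset of the range within that object.  Returns NULL
 * if the range is not entirely covered by a single registered host mapping.
 */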
static struct uvm_object *
nvmm_hmapping_getuobj(struct nvmm_machine *mach, uintptr_t hva, size_t size,
    size_t *off)
{
	struct nvmm_hmapping *hmapping;
	size_t i;

	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		hmapping = &mach->hmap[i];
		if (!hmapping->present) {
			continue;
		}
		if (hva >= hmapping->hva &&
		    hva + size <= hmapping->hva + hmapping->size) {
			*off = hva - hmapping->hva;
			return hmapping->uobj;
		}
	}

	return NULL;
}

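/*
 * Sanity-check a host mapping request: the range must be page-aligned and
 * non-NULL, and must not collide with an existing mapping.  A range fully
 * contained in an existing mapping passes the check.
 */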
static int
nvmm_hmapping_validate(struct nvmm_machine *mach, uintptr_t hva, size_t size)
{
	struct nvmm_hmapping *hmapping;
	size_t i;

	if ((hva % PAGE_SIZE) != 0 || (size % PAGE_SIZE) != 0) {
		return EINVAL;
	}
	if (hva == 0) {
		return EINVAL;
	}

	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		hmapping = &mach->hmap[i];
		if (!hmapping->present) {
			continue;
		}

		if (hva >= hmapping->hva &&
		    hva + size <= hmapping->hva + hmapping->size) {
			break;
		}

		if (hva >= hmapping->hva &&
		    hva < hmapping->hva + hmapping->size) {
			return EEXIST;
		}
		if (hva + size > hmapping->hva &&
		    hva + size <= hmapping->hva + hmapping->size) {
			return EEXIST;
		}
		if (hva <= hmapping->hva &&
		    hva + size >= hmapping->hva + hmapping->size) {
			return EEXIST;
		}
	}

	return 0;
}

static struct nvmm_hmapping *
nvmm_hmapping_alloc(struct nvmm_machine *mach)
{
	struct nvmm_hmapping *hmapping;
	size_t i;

	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		hmapping = &mach->hmap[i];
		if (!hmapping->present) {
			hmapping->present = true;
			return hmapping;
		}
	}

	return NULL;
}

static int
nvmm_hmapping_free(struct nvmm_machine *mach, uintptr_t hva, size_t size)
{
	struct vmspace *vmspace = curproc->p_vmspace;
	struct nvmm_hmapping *hmapping;
	size_t i;

	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		hmapping = &mach->hmap[i];
		if (!hmapping->present || hmapping->hva != hva ||
		    hmapping->size != size) {
			continue;
		}

		uvm_unmap(&vmspace->vm_map, hmapping->hva,
		    hmapping->hva + hmapping->size);
		uao_detach(hmapping->uobj);

		hmapping->uobj = NULL;
		hmapping->present = false;

		return 0;
	}

	return ENOENT;
}

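/*
 * Register a host mapping: allocate an anonymous UVM object to back the
 * range, and map it pageable into the calling process at the given HVA.
 * The object ends up with two references, one for the kernel and one for
 * the user mapping, so its pages stay shareable with the guest vmspace.
 */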
static int
nvmm_hva_map(struct nvmm_ioc_hva_map *args)
{
	struct vmspace *vmspace = curproc->p_vmspace;
	struct nvmm_machine *mach;
	struct nvmm_hmapping *hmapping;
	vaddr_t uva;
	int error;

	error = nvmm_machine_get(args->machid, &mach, true);
	if (error)
		return error;

	error = nvmm_hmapping_validate(mach, args->hva, args->size);
	if (error)
		goto out;

	hmapping = nvmm_hmapping_alloc(mach);
	if (hmapping == NULL) {
		error = ENOBUFS;
		goto out;
	}

	hmapping->hva = args->hva;
	hmapping->size = args->size;
	hmapping->uobj = uao_create(hmapping->size, 0);
	uva = hmapping->hva;

	/* Take a reference for the user. */
	uao_reference(hmapping->uobj);

	/* Map the uobj into the user address space, as pageable. */
	error = uvm_map(&vmspace->vm_map, &uva, hmapping->size, hmapping->uobj,
	    0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_SHARE,
	    UVM_ADV_RANDOM, UVM_FLAG_FIXED|UVM_FLAG_UNMAP));
	if (error) {
		uao_detach(hmapping->uobj);
	}

out:
	nvmm_machine_put(mach);
	return error;
}

static int
nvmm_hva_unmap(struct nvmm_ioc_hva_unmap *args)
{
	struct nvmm_machine *mach;
	int error;

	error = nvmm_machine_get(args->machid, &mach, true);
	if (error)
		return error;

	error = nvmm_hmapping_free(mach, args->hva, args->size);

	nvmm_machine_put(mach);
	return error;
}

/* -------------------------------------------------------------------------- */

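/*
 * Map a previously registered host range into the guest physical address
 * space.  The GPA range must be page-aligned, must lie within
 * [gpa_begin, gpa_end), and must not wrap around; the HVA range must be
 * backed by a single host mapping, whose uobj is then mapped into the
 * guest vmspace at the requested GPA.
 */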
static int
nvmm_gpa_map(struct nvmm_ioc_gpa_map *args)
{
	struct nvmm_machine *mach;
	struct uvm_object *uobj;
	gpaddr_t gpa;
	size_t off;
	int error;

	error = nvmm_machine_get(args->machid, &mach, false);
	if (error)
		return error;

	if ((args->gpa % PAGE_SIZE) != 0 || (args->size % PAGE_SIZE) != 0 ||
	    (args->hva % PAGE_SIZE) != 0) {
		error = EINVAL;
		goto out;
	}
	if (args->hva == 0) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa < mach->gpa_begin || args->gpa >= mach->gpa_end) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa + args->size <= args->gpa) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa + args->size > mach->gpa_end) {
		error = EINVAL;
		goto out;
	}
	gpa = args->gpa;

	uobj = nvmm_hmapping_getuobj(mach, args->hva, args->size, &off);
	if (uobj == NULL) {
		error = EINVAL;
		goto out;
	}

	/* Take a reference for the machine. */
	uao_reference(uobj);

	/* Map the uobj into the machine address space, as pageable. */
	error = uvm_map(&mach->vm->vm_map, &gpa, args->size, uobj, off, 0,
	    UVM_MAPFLAG(UVM_PROT_RWX, UVM_PROT_RWX, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_FIXED|UVM_FLAG_UNMAP));
	if (error) {
		uao_detach(uobj);
		goto out;
	}
	if (gpa != args->gpa) {
		uao_detach(uobj);
		printf("[!] uvm_map problem\n");
		error = EINVAL;
		goto out;
	}

out:
	nvmm_machine_put(mach);
	return error;
}

static int
nvmm_gpa_unmap(struct nvmm_ioc_gpa_unmap *args)
{
	struct nvmm_machine *mach;
	gpaddr_t gpa;
	int error;

	error = nvmm_machine_get(args->machid, &mach, false);
	if (error)
		return error;

	if ((args->gpa % PAGE_SIZE) != 0 || (args->size % PAGE_SIZE) != 0) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa < mach->gpa_begin || args->gpa >= mach->gpa_end) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa + args->size <= args->gpa) {
		error = EINVAL;
		goto out;
	}
	/* Use '>' to match nvmm_gpa_map(); '>=' would wrongly reject a
	 * mapping that ends exactly at gpa_end. */
	if (args->gpa + args->size > mach->gpa_end) {
		error = EINVAL;
		goto out;
	}
	gpa = args->gpa;

	/* Unmap the memory from the machine. */
	uvm_unmap(&mach->vm->vm_map, gpa, gpa + args->size);

out:
	nvmm_machine_put(mach);
	return error;
}

/* -------------------------------------------------------------------------- */

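/*
 * Probe the backend list in order and settle on the first implementation
 * whose ident() callback accepts the host CPU, then initialize the machine
 * table and the per-VCPU mutexes.
 */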
static int
nvmm_init(void)
{
	size_t i, n;

	for (i = 0; i < __arraycount(nvmm_impl_list); i++) {
		if (!(*nvmm_impl_list[i]->ident)()) {
			continue;
		}
		nvmm_impl = nvmm_impl_list[i];
		break;
	}
	if (nvmm_impl == NULL) {
		printf("[!] No implementation found\n");
		return ENOTSUP;
	}

	for (i = 0; i < NVMM_MAX_MACHINES; i++) {
		machines[i].machid = i;
		rw_init(&machines[i].lock);
		for (n = 0; n < NVMM_MAX_VCPUS; n++) {
			mutex_init(&machines[i].cpus[n].lock, MUTEX_DEFAULT,
			    IPL_NONE);
			machines[i].cpus[n].hcpu_last = -1;
		}
	}

	(*nvmm_impl->init)();

	return 0;
}

static void
nvmm_fini(void)
{
	size_t i, n;

	for (i = 0; i < NVMM_MAX_MACHINES; i++) {
		rw_destroy(&machines[i].lock);
		for (n = 0; n < NVMM_MAX_VCPUS; n++) {
			mutex_destroy(&machines[i].cpus[n].lock);
		}
		/* TODO need to free stuff, etc */
	}

	(*nvmm_impl->fini)();
}

/* -------------------------------------------------------------------------- */

static int
nvmm_open(dev_t dev, int flags, int type, struct lwp *l)
{
	if (minor(dev) != 0) {
		return EXDEV;
	}

	return 0;
}

static int
nvmm_close(dev_t dev, int flags, int type, struct lwp *l)
{
	KASSERT(minor(dev) == 0);

	nvmm_kill_machines(l->l_proc->p_pid);

	return 0;
}

static int
nvmm_ioctl(dev_t dev, u_long cmd, void *data, int flags, struct lwp *l)
{
	KASSERT(minor(dev) == 0);

	switch (cmd) {
	case NVMM_IOC_CAPABILITY:
		return nvmm_capability(data);
	case NVMM_IOC_MACHINE_CREATE:
		return nvmm_machine_create(data);
	case NVMM_IOC_MACHINE_DESTROY:
		return nvmm_machine_destroy(data);
	case NVMM_IOC_MACHINE_CONFIGURE:
		return nvmm_machine_configure(data);
	case NVMM_IOC_VCPU_CREATE:
		return nvmm_vcpu_create(data);
	case NVMM_IOC_VCPU_DESTROY:
		return nvmm_vcpu_destroy(data);
	case NVMM_IOC_VCPU_SETSTATE:
		return nvmm_vcpu_setstate(data);
	case NVMM_IOC_VCPU_GETSTATE:
		return nvmm_vcpu_getstate(data);
	case NVMM_IOC_VCPU_INJECT:
		return nvmm_vcpu_inject(data);
	case NVMM_IOC_VCPU_RUN:
		return nvmm_vcpu_run(data);
	case NVMM_IOC_GPA_MAP:
		return nvmm_gpa_map(data);
	case NVMM_IOC_GPA_UNMAP:
		return nvmm_gpa_unmap(data);
	case NVMM_IOC_HVA_MAP:
		return nvmm_hva_map(data);
	case NVMM_IOC_HVA_UNMAP:
		return nvmm_hva_unmap(data);
	default:
		return EINVAL;
	}
}

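/*
 * Example userland sequence against the ioctl interface above (a sketch
 * only; in practice consumers go through libnvmm).  Struct field names
 * other than 'machid' are assumptions about <dev/nvmm/nvmm_ioctl.h>:
 *
 *	int fd = open("/dev/nvmm", O_RDWR);
 *	struct nvmm_ioc_machine_create mc;
 *	ioctl(fd, NVMM_IOC_MACHINE_CREATE, &mc);  -> sets mc.machid
 *	struct nvmm_ioc_vcpu_create vc = { .machid = mc.machid };
 *	ioctl(fd, NVMM_IOC_VCPU_CREATE, &vc);
 *	then NVMM_IOC_HVA_MAP + NVMM_IOC_GPA_MAP to give the guest RAM,
 *	NVMM_IOC_VCPU_SETSTATE, and NVMM_IOC_VCPU_RUN in a loop.
 *
 * The cdevsw below wires these handlers to the nvmm character device.
 */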
const struct cdevsw nvmm_cdevsw = {
	.d_open = nvmm_open,
	.d_close = nvmm_close,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = nvmm_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

void
nvmmattach(int nunits)
{
	/* nothing */
}

MODULE(MODULE_CLASS_DRIVER, nvmm, NULL);

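/*
 * Module glue: probe the backend on load and, when built as a module,
 * attach the character device at the reserved major number.
 */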
static int
nvmm_modcmd(modcmd_t cmd, void *arg)
{
	int error;

	switch (cmd) {
	case MODULE_CMD_INIT:
		error = nvmm_init();
		if (error)
			return error;

#if defined(_MODULE)
		{
			devmajor_t bmajor = NODEVMAJOR;
			devmajor_t cmajor = 345;

			/* mknod /dev/nvmm c 345 0 */
			error = devsw_attach("nvmm", NULL, &bmajor,
			    &nvmm_cdevsw, &cmajor);
			if (error) {
				nvmm_fini();
				return error;
			}
		}
#endif
		return 0;

	case MODULE_CMD_FINI:
#if defined(_MODULE)
		{
			error = devsw_detach(NULL, &nvmm_cdevsw);
			if (error) {
				return error;
			}
		}
#endif
		nvmm_fini();
		return 0;

	default:
		return ENOTTY;
	}
}