/*	$NetBSD: nvmm.c,v 1.29 2020/05/21 07:43:23 maxv Exp $	*/

/*
 * Copyright (c) 2018-2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvmm.c,v 1.29 2020/05/21 07:43:23 maxv Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <sys/cpu.h>
#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/mman.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kauth.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>

#include "ioconf.h"

#include <dev/nvmm/nvmm.h>
#include <dev/nvmm/nvmm_internal.h>
#include <dev/nvmm/nvmm_ioctl.h>

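/*
 * Global machine table. Each slot has its own rwlock, and "present" marks
 * a slot as in use. nmachines counts live machines and gates module unload
 * (see nvmm_modcmd). One backend from nvmm_impl_list is selected at init
 * time; root_owner stands in for privileged (write-only) opens and is
 * allowed to reach machines it does not own.
 */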
static struct nvmm_machine machines[NVMM_MAX_MACHINES];
static volatile unsigned int nmachines __cacheline_aligned;

static const struct nvmm_impl *nvmm_impl_list[] = {
	&nvmm_x86_svm,	/* x86 AMD SVM */
	&nvmm_x86_vmx	/* x86 Intel VMX */
};

static const struct nvmm_impl *nvmm_impl = NULL;

static struct nvmm_owner root_owner;

/* -------------------------------------------------------------------------- */

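/*
 * Machine lifecycle helpers. nvmm_machine_alloc() and nvmm_machine_get()
 * both return with the machine lock held (write for alloc, read or write
 * for get, as requested); the caller releases it with nvmm_machine_put().
 * nvmm_machine_free() expects the write lock to be held already.
 */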
static int
nvmm_machine_alloc(struct nvmm_machine **ret)
{
	struct nvmm_machine *mach;
	size_t i;

	for (i = 0; i < NVMM_MAX_MACHINES; i++) {
		mach = &machines[i];

		rw_enter(&mach->lock, RW_WRITER);
		if (mach->present) {
			rw_exit(&mach->lock);
			continue;
		}

		mach->present = true;
		mach->time = time_second;
		*ret = mach;
		atomic_inc_uint(&nmachines);
		return 0;
	}

	return ENOBUFS;
}

static void
nvmm_machine_free(struct nvmm_machine *mach)
{
	KASSERT(rw_write_held(&mach->lock));
	KASSERT(mach->present);
	mach->present = false;
	atomic_dec_uint(&nmachines);
}

static int
nvmm_machine_get(struct nvmm_owner *owner, nvmm_machid_t machid,
    struct nvmm_machine **ret, bool writer)
{
	struct nvmm_machine *mach;
	krw_t op = writer ? RW_WRITER : RW_READER;

	if (machid >= NVMM_MAX_MACHINES) {
		return EINVAL;
	}
	mach = &machines[machid];

	rw_enter(&mach->lock, op);
	if (!mach->present) {
		rw_exit(&mach->lock);
		return ENOENT;
	}
	if (owner != &root_owner && mach->owner != owner) {
		rw_exit(&mach->lock);
		return EPERM;
	}
	*ret = mach;

	return 0;
}

static void
nvmm_machine_put(struct nvmm_machine *mach)
{
	rw_exit(&mach->lock);
}

/* -------------------------------------------------------------------------- */

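/*
 * VCPU lifecycle helpers, symmetric with the machine helpers above:
 * nvmm_vcpu_alloc() and nvmm_vcpu_get() return with the VCPU mutex held,
 * released by nvmm_vcpu_put(). nvmm_vcpu_free() also tears down the
 * kernel mapping of the VCPU's comm page, if one was established.
 */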
static int
nvmm_vcpu_alloc(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
    struct nvmm_cpu **ret)
{
	struct nvmm_cpu *vcpu;

	if (cpuid >= NVMM_MAX_VCPUS) {
		return EINVAL;
	}
	vcpu = &mach->cpus[cpuid];

	mutex_enter(&vcpu->lock);
	if (vcpu->present) {
		mutex_exit(&vcpu->lock);
		return EBUSY;
	}

	vcpu->present = true;
	vcpu->comm = NULL;
	vcpu->hcpu_last = -1;
	*ret = vcpu;
	return 0;
}

static void
nvmm_vcpu_free(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
{
	KASSERT(mutex_owned(&vcpu->lock));
	vcpu->present = false;
	if (vcpu->comm != NULL) {
		uvm_deallocate(kernel_map, (vaddr_t)vcpu->comm, PAGE_SIZE);
	}
}

static int
nvmm_vcpu_get(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
    struct nvmm_cpu **ret)
{
	struct nvmm_cpu *vcpu;

	if (cpuid >= NVMM_MAX_VCPUS) {
		return EINVAL;
	}
	vcpu = &mach->cpus[cpuid];

	mutex_enter(&vcpu->lock);
	if (!vcpu->present) {
		mutex_exit(&vcpu->lock);
		return ENOENT;
	}
	*ret = vcpu;

	return 0;
}

static void
nvmm_vcpu_put(struct nvmm_cpu *vcpu)
{
	mutex_exit(&vcpu->lock);
}

/* -------------------------------------------------------------------------- */

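/*
 * Destroy every machine belonging to the given owner. Called on the last
 * close of an nvmm file descriptor, so that a crashed or exiting emulator
 * cannot leak machines.
 */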
static void
nvmm_kill_machines(struct nvmm_owner *owner)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	size_t i, j;
	int error;

	for (i = 0; i < NVMM_MAX_MACHINES; i++) {
		mach = &machines[i];

		rw_enter(&mach->lock, RW_WRITER);
		if (!mach->present || mach->owner != owner) {
			rw_exit(&mach->lock);
			continue;
		}

		/* Kill it. */
		for (j = 0; j < NVMM_MAX_VCPUS; j++) {
			error = nvmm_vcpu_get(mach, j, &vcpu);
			if (error)
				continue;
			(*nvmm_impl->vcpu_destroy)(mach, vcpu);
			nvmm_vcpu_free(mach, vcpu);
			nvmm_vcpu_put(vcpu);
		}
		(*nvmm_impl->machine_destroy)(mach);
		uvmspace_free(mach->vm);

		/* Drop the kernel UOBJ refs. */
		for (j = 0; j < NVMM_MAX_HMAPPINGS; j++) {
			if (!mach->hmap[j].present)
				continue;
			uao_detach(mach->hmap[j].uobj);
		}

		nvmm_machine_free(mach);

		rw_exit(&mach->lock);
	}
}

/* -------------------------------------------------------------------------- */

static int
nvmm_capability(struct nvmm_owner *owner, struct nvmm_ioc_capability *args)
{
	args->cap.version = NVMM_KERN_VERSION;
	args->cap.state_size = nvmm_impl->state_size;
	args->cap.max_machines = NVMM_MAX_MACHINES;
	args->cap.max_vcpus = NVMM_MAX_VCPUS;
	args->cap.max_ram = NVMM_MAX_RAM;

	(*nvmm_impl->capability)(&args->cap);

	return 0;
}

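/*
 * Create a machine: reserve a slot, record the caller as owner, and give
 * the guest its own vmspace covering [0, NVMM_MAX_RAM), plus one anonymous
 * UVM object large enough to hold a comm page per VCPU.
 */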
static int
nvmm_machine_create(struct nvmm_owner *owner,
    struct nvmm_ioc_machine_create *args)
{
	struct nvmm_machine *mach;
	int error;

	error = nvmm_machine_alloc(&mach);
	if (error)
		return error;

	/* Curproc owns the machine. */
	mach->owner = owner;

	/* Zero out the host mappings. */
	memset(&mach->hmap, 0, sizeof(mach->hmap));

	/* Create the machine vmspace. */
	mach->gpa_begin = 0;
	mach->gpa_end = NVMM_MAX_RAM;
	mach->vm = uvmspace_alloc(0, mach->gpa_end - mach->gpa_begin, false);

	/* Create the comm uobj. */
	mach->commuobj = uao_create(NVMM_MAX_VCPUS * PAGE_SIZE, 0);

	(*nvmm_impl->machine_create)(mach);

	args->machid = mach->machid;
	nvmm_machine_put(mach);

	return 0;
}

static int
nvmm_machine_destroy(struct nvmm_owner *owner,
    struct nvmm_ioc_machine_destroy *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;
	size_t i;

	error = nvmm_machine_get(owner, args->machid, &mach, true);
	if (error)
		return error;

	for (i = 0; i < NVMM_MAX_VCPUS; i++) {
		error = nvmm_vcpu_get(mach, i, &vcpu);
		if (error)
			continue;

		(*nvmm_impl->vcpu_destroy)(mach, vcpu);
		nvmm_vcpu_free(mach, vcpu);
		nvmm_vcpu_put(vcpu);
	}

	(*nvmm_impl->machine_destroy)(mach);

	/* Free the machine vmspace. */
	uvmspace_free(mach->vm);

	/* Drop the kernel UOBJ refs. */
	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		if (!mach->hmap[i].present)
			continue;
		uao_detach(mach->hmap[i].uobj);
	}

	nvmm_machine_free(mach);
	nvmm_machine_put(mach);

	return 0;
}

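/*
 * Machine-dependent configuration: the op index selects one of the
 * backend's configuration areas, whose size the backend advertises in
 * mach_conf_sizes[]. The payload is copied in from userland and handed
 * to the backend.
 */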
static int
nvmm_machine_configure(struct nvmm_owner *owner,
    struct nvmm_ioc_machine_configure *args)
{
	struct nvmm_machine *mach;
	size_t allocsz;
	uint64_t op;
	void *data;
	int error;

	op = NVMM_MACH_CONF_MD(args->op);
	if (__predict_false(op >= nvmm_impl->mach_conf_max)) {
		return EINVAL;
	}

	allocsz = nvmm_impl->mach_conf_sizes[op];
	data = kmem_alloc(allocsz, KM_SLEEP);

	error = nvmm_machine_get(owner, args->machid, &mach, true);
	if (error) {
		kmem_free(data, allocsz);
		return error;
	}

	error = copyin(args->conf, data, allocsz);
	if (error) {
		goto out;
	}

	error = (*nvmm_impl->machine_configure)(mach, op, data);

out:
	nvmm_machine_put(mach);
	kmem_free(data, allocsz);
	return error;
}

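/*
 * Create a VCPU. The comm page is one page of the machine's comm uobj,
 * mapped and wired into the kernel here; userland later maps the same
 * page through nvmm_mmap(), giving both sides a shared channel.
 */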
static int
nvmm_vcpu_create(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_create *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_alloc(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	/* Allocate the comm page. */
	uao_reference(mach->commuobj);
	error = uvm_map(kernel_map, (vaddr_t *)&vcpu->comm, PAGE_SIZE,
	    mach->commuobj, args->cpuid * PAGE_SIZE, 0, UVM_MAPFLAG(UVM_PROT_RW,
	    UVM_PROT_RW, UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
	if (error) {
		uao_detach(mach->commuobj);
		nvmm_vcpu_free(mach, vcpu);
		nvmm_vcpu_put(vcpu);
		goto out;
	}
	error = uvm_map_pageable(kernel_map, (vaddr_t)vcpu->comm,
	    (vaddr_t)vcpu->comm + PAGE_SIZE, false, 0);
	if (error) {
		nvmm_vcpu_free(mach, vcpu);
		nvmm_vcpu_put(vcpu);
		goto out;
	}
	memset(vcpu->comm, 0, PAGE_SIZE);

	error = (*nvmm_impl->vcpu_create)(mach, vcpu);
	if (error) {
		nvmm_vcpu_free(mach, vcpu);
		nvmm_vcpu_put(vcpu);
		goto out;
	}

	nvmm_vcpu_put(vcpu);

	atomic_inc_uint(&mach->ncpus);

out:
	nvmm_machine_put(mach);
	return error;
}

static int
nvmm_vcpu_destroy(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_destroy *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	(*nvmm_impl->vcpu_destroy)(mach, vcpu);
	nvmm_vcpu_free(mach, vcpu);
	nvmm_vcpu_put(vcpu);

	atomic_dec_uint(&mach->ncpus);

out:
	nvmm_machine_put(mach);
	return error;
}

static int
nvmm_vcpu_configure(struct nvmm_owner *owner,
    struct nvmm_ioc_vcpu_configure *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	size_t allocsz;
	uint64_t op;
	void *data;
	int error;

	op = NVMM_VCPU_CONF_MD(args->op);
	if (__predict_false(op >= nvmm_impl->vcpu_conf_max))
		return EINVAL;

	allocsz = nvmm_impl->vcpu_conf_sizes[op];
	data = kmem_alloc(allocsz, KM_SLEEP);

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error) {
		kmem_free(data, allocsz);
		return error;
	}

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error) {
		nvmm_machine_put(mach);
		kmem_free(data, allocsz);
		return error;
	}

	error = copyin(args->conf, data, allocsz);
	if (error) {
		goto out;
	}

	error = (*nvmm_impl->vcpu_configure)(vcpu, op, data);

out:
	nvmm_vcpu_put(vcpu);
	nvmm_machine_put(mach);
	kmem_free(data, allocsz);
	return error;
}

static int
nvmm_vcpu_setstate(struct nvmm_owner *owner,
    struct nvmm_ioc_vcpu_setstate *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	(*nvmm_impl->vcpu_setstate)(vcpu);
	nvmm_vcpu_put(vcpu);

out:
	nvmm_machine_put(mach);
	return error;
}

static int
nvmm_vcpu_getstate(struct nvmm_owner *owner,
    struct nvmm_ioc_vcpu_getstate *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	(*nvmm_impl->vcpu_getstate)(vcpu);
	nvmm_vcpu_put(vcpu);

out:
	nvmm_machine_put(mach);
	return error;
}

static int
nvmm_vcpu_inject(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_inject *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	error = (*nvmm_impl->vcpu_inject)(vcpu);
	nvmm_vcpu_put(vcpu);

out:
	nvmm_machine_put(mach);
	return error;
}

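/*
 * Run loop. Exits the backend cannot handle are returned to userland,
 * with one exception: memory exits inside the guest address range are
 * resolved in the kernel by faulting the page in through UVM, and the
 * VCPU is resumed directly.
 */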
static int
nvmm_do_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct vmspace *vm = mach->vm;
	int ret;

	while (1) {
		ret = (*nvmm_impl->vcpu_run)(mach, vcpu, exit);
		if (__predict_false(ret != 0)) {
			return ret;
		}

		if (__predict_true(exit->reason != NVMM_VCPU_EXIT_MEMORY)) {
			break;
		}
		if (exit->u.mem.gpa >= mach->gpa_end) {
			break;
		}
		if (uvm_fault(&vm->vm_map, exit->u.mem.gpa, exit->u.mem.prot)) {
			break;
		}
	}

	return 0;
}

static int
nvmm_vcpu_run(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_run *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	error = nvmm_do_vcpu_run(mach, vcpu, &args->exit);
	nvmm_vcpu_put(vcpu);

out:
	nvmm_machine_put(mach);
	return error;
}

/* -------------------------------------------------------------------------- */

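/*
 * Host mappings (hmappings). Guest RAM is backed by anonymous UVM objects
 * registered per HVA range; the same uobj is mapped both into the
 * emulator's address space (nvmm_hva_map) and, later, into the guest's
 * (nvmm_gpa_map). nvmm_hmapping_validate() rejects ranges that partially
 * overlap an existing registration.
 */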
static struct uvm_object *
nvmm_hmapping_getuobj(struct nvmm_machine *mach, uintptr_t hva, size_t size,
    size_t *off)
{
	struct nvmm_hmapping *hmapping;
	size_t i;

	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		hmapping = &mach->hmap[i];
		if (!hmapping->present) {
			continue;
		}
		if (hva >= hmapping->hva &&
		    hva + size <= hmapping->hva + hmapping->size) {
			*off = hva - hmapping->hva;
			return hmapping->uobj;
		}
	}

	return NULL;
}

static int
nvmm_hmapping_validate(struct nvmm_machine *mach, uintptr_t hva, size_t size)
{
	struct nvmm_hmapping *hmapping;
	size_t i;

	if ((hva % PAGE_SIZE) != 0 || (size % PAGE_SIZE) != 0) {
		return EINVAL;
	}
	if (hva == 0) {
		return EINVAL;
	}

	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		hmapping = &mach->hmap[i];
		if (!hmapping->present) {
			continue;
		}

		if (hva >= hmapping->hva &&
		    hva + size <= hmapping->hva + hmapping->size) {
			break;
		}

		if (hva >= hmapping->hva &&
		    hva < hmapping->hva + hmapping->size) {
			return EEXIST;
		}
		if (hva + size > hmapping->hva &&
		    hva + size <= hmapping->hva + hmapping->size) {
			return EEXIST;
		}
		if (hva <= hmapping->hva &&
		    hva + size >= hmapping->hva + hmapping->size) {
			return EEXIST;
		}
	}

	return 0;
}

static struct nvmm_hmapping *
nvmm_hmapping_alloc(struct nvmm_machine *mach)
{
	struct nvmm_hmapping *hmapping;
	size_t i;

	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		hmapping = &mach->hmap[i];
		if (!hmapping->present) {
			hmapping->present = true;
			return hmapping;
		}
	}

	return NULL;
}

static int
nvmm_hmapping_free(struct nvmm_machine *mach, uintptr_t hva, size_t size)
{
	struct vmspace *vmspace = curproc->p_vmspace;
	struct nvmm_hmapping *hmapping;
	size_t i;

	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		hmapping = &mach->hmap[i];
		if (!hmapping->present || hmapping->hva != hva ||
		    hmapping->size != size) {
			continue;
		}

		uvm_unmap(&vmspace->vm_map, hmapping->hva,
		    hmapping->hva + hmapping->size);
		uao_detach(hmapping->uobj);

		hmapping->uobj = NULL;
		hmapping->present = false;

		return 0;
	}

	return ENOENT;
}

static int
nvmm_hva_map(struct nvmm_owner *owner, struct nvmm_ioc_hva_map *args)
{
	struct vmspace *vmspace = curproc->p_vmspace;
	struct nvmm_machine *mach;
	struct nvmm_hmapping *hmapping;
	vaddr_t uva;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, true);
	if (error)
		return error;

	error = nvmm_hmapping_validate(mach, args->hva, args->size);
	if (error)
		goto out;

	hmapping = nvmm_hmapping_alloc(mach);
	if (hmapping == NULL) {
		error = ENOBUFS;
		goto out;
	}

	hmapping->hva = args->hva;
	hmapping->size = args->size;
	hmapping->uobj = uao_create(hmapping->size, 0);
	uva = hmapping->hva;

	/* Take a reference for the user. */
	uao_reference(hmapping->uobj);

	/* Map the uobj into the user address space, as pageable. */
	error = uvm_map(&vmspace->vm_map, &uva, hmapping->size, hmapping->uobj,
	    0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_SHARE,
	    UVM_ADV_RANDOM, UVM_FLAG_FIXED|UVM_FLAG_UNMAP));
	if (error) {
		uao_detach(hmapping->uobj);
	}

out:
	nvmm_machine_put(mach);
	return error;
}

static int
nvmm_hva_unmap(struct nvmm_owner *owner, struct nvmm_ioc_hva_unmap *args)
{
	struct nvmm_machine *mach;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, true);
	if (error)
		return error;

	error = nvmm_hmapping_free(mach, args->hva, args->size);

	nvmm_machine_put(mach);
	return error;
}

/* -------------------------------------------------------------------------- */

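/*
 * Map guest physical memory: resolve the HVA range to its backing uobj,
 * then map that uobj at a fixed GPA inside the guest vmspace, pageable.
 * The sanity checks reject unaligned, out-of-range and overflowing
 * requests.
 */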
static int
nvmm_gpa_map(struct nvmm_owner *owner, struct nvmm_ioc_gpa_map *args)
{
	struct nvmm_machine *mach;
	struct uvm_object *uobj;
	gpaddr_t gpa;
	size_t off;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	if ((args->prot & ~(PROT_READ|PROT_WRITE|PROT_EXEC)) != 0) {
		error = EINVAL;
		goto out;
	}

	if ((args->gpa % PAGE_SIZE) != 0 || (args->size % PAGE_SIZE) != 0 ||
	    (args->hva % PAGE_SIZE) != 0) {
		error = EINVAL;
		goto out;
	}
	if (args->hva == 0) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa < mach->gpa_begin || args->gpa >= mach->gpa_end) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa + args->size <= args->gpa) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa + args->size > mach->gpa_end) {
		error = EINVAL;
		goto out;
	}
	gpa = args->gpa;

	uobj = nvmm_hmapping_getuobj(mach, args->hva, args->size, &off);
	if (uobj == NULL) {
		error = EINVAL;
		goto out;
	}

	/* Take a reference for the machine. */
	uao_reference(uobj);

	/* Map the uobj into the machine address space, as pageable. */
	error = uvm_map(&mach->vm->vm_map, &gpa, args->size, uobj, off, 0,
	    UVM_MAPFLAG(args->prot, UVM_PROT_RWX, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_FIXED|UVM_FLAG_UNMAP));
	if (error) {
		uao_detach(uobj);
		goto out;
	}
	if (gpa != args->gpa) {
		uao_detach(uobj);
		printf("[!] uvm_map problem\n");
		error = EINVAL;
		goto out;
	}

out:
	nvmm_machine_put(mach);
	return error;
}

static int
nvmm_gpa_unmap(struct nvmm_owner *owner, struct nvmm_ioc_gpa_unmap *args)
{
	struct nvmm_machine *mach;
	gpaddr_t gpa;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	if ((args->gpa % PAGE_SIZE) != 0 || (args->size % PAGE_SIZE) != 0) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa < mach->gpa_begin || args->gpa >= mach->gpa_end) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa + args->size <= args->gpa) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa + args->size >= mach->gpa_end) {
		error = EINVAL;
		goto out;
	}
	gpa = args->gpa;

	/* Unmap the memory from the machine. */
	uvm_unmap(&mach->vm->vm_map, gpa, gpa + args->size);

out:
	nvmm_machine_put(mach);
	return error;
}

/* -------------------------------------------------------------------------- */

static int
nvmm_ctl_mach_info(struct nvmm_owner *owner, struct nvmm_ioc_ctl *args)
{
	struct nvmm_ctl_mach_info ctl;
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;
	size_t i;

	if (args->size != sizeof(ctl))
		return EINVAL;
	error = copyin(args->data, &ctl, sizeof(ctl));
	if (error)
		return error;

	error = nvmm_machine_get(owner, ctl.machid, &mach, true);
	if (error)
		return error;

	ctl.nvcpus = 0;
	for (i = 0; i < NVMM_MAX_VCPUS; i++) {
		error = nvmm_vcpu_get(mach, i, &vcpu);
		if (error)
			continue;
		ctl.nvcpus++;
		nvmm_vcpu_put(vcpu);
	}

	ctl.nram = 0;
	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		if (!mach->hmap[i].present)
			continue;
		ctl.nram += mach->hmap[i].size;
	}

	ctl.pid = mach->owner->pid;
	ctl.time = mach->time;

	nvmm_machine_put(mach);

	error = copyout(&ctl, args->data, sizeof(ctl));
	if (error)
		return error;

	return 0;
}

static int
nvmm_ctl(struct nvmm_owner *owner, struct nvmm_ioc_ctl *args)
{
	switch (args->op) {
	case NVMM_CTL_MACH_INFO:
		return nvmm_ctl_mach_info(owner, args);
	default:
		return EINVAL;
	}
}

/* -------------------------------------------------------------------------- */

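/*
 * Backend probing and global initialization. The first implementation
 * whose ident() callback accepts the host CPU is selected; the list is
 * ordered AMD SVM first, then Intel VMX.
 */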
static int
nvmm_init(void)
{
	size_t i, n;

	for (i = 0; i < __arraycount(nvmm_impl_list); i++) {
		if (!(*nvmm_impl_list[i]->ident)()) {
			continue;
		}
		nvmm_impl = nvmm_impl_list[i];
		break;
	}
	if (nvmm_impl == NULL) {
		printf("NVMM: CPU not supported\n");
		return ENOTSUP;
	}

	for (i = 0; i < NVMM_MAX_MACHINES; i++) {
		machines[i].machid = i;
		rw_init(&machines[i].lock);
		for (n = 0; n < NVMM_MAX_VCPUS; n++) {
			machines[i].cpus[n].present = false;
			machines[i].cpus[n].cpuid = n;
			mutex_init(&machines[i].cpus[n].lock, MUTEX_DEFAULT,
			    IPL_NONE);
		}
	}

	(*nvmm_impl->init)();

	return 0;
}

static void
nvmm_fini(void)
{
	size_t i, n;

	for (i = 0; i < NVMM_MAX_MACHINES; i++) {
		rw_destroy(&machines[i].lock);
		for (n = 0; n < NVMM_MAX_VCPUS; n++) {
			mutex_destroy(&machines[i].cpus[n].lock);
		}
	}

	(*nvmm_impl->fini)();
	nvmm_impl = NULL;
}

/* -------------------------------------------------------------------------- */

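/*
 * Device plumbing. The cdevsw only implements open; every other operation
 * goes through the fileops of the per-open cloned file, whose f_data holds
 * the nvmm_owner. A write-only open yields root_owner, which
 * nvmm_machine_get() allows to reach machines owned by others; all other
 * opens allocate a per-process owner keyed by PID.
 */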
static dev_type_open(nvmm_open);

const struct cdevsw nvmm_cdevsw = {
	.d_open = nvmm_open,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

static int nvmm_ioctl(file_t *, u_long, void *);
static int nvmm_close(file_t *);
static int nvmm_mmap(file_t *, off_t *, size_t, int, int *, int *,
    struct uvm_object **, int *);

const struct fileops nvmm_fileops = {
	.fo_read = fbadop_read,
	.fo_write = fbadop_write,
	.fo_ioctl = nvmm_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = fnullop_poll,
	.fo_stat = fbadop_stat,
	.fo_close = nvmm_close,
	.fo_kqfilter = fnullop_kqfilter,
	.fo_restart = fnullop_restart,
	.fo_mmap = nvmm_mmap,
};

static int
nvmm_open(dev_t dev, int flags, int type, struct lwp *l)
{
	struct nvmm_owner *owner;
	struct file *fp;
	int error, fd;

	if (__predict_false(nvmm_impl == NULL))
		return ENXIO;
	if (minor(dev) != 0)
		return EXDEV;
	if (!(flags & O_CLOEXEC))
		return EINVAL;
	error = fd_allocfile(&fp, &fd);
	if (error)
		return error;

	if (OFLAGS(flags) & O_WRONLY) {
		owner = &root_owner;
	} else {
		owner = kmem_alloc(sizeof(*owner), KM_SLEEP);
		owner->pid = l->l_proc->p_pid;
	}

	return fd_clone(fp, fd, flags, &nvmm_fileops, owner);
}

static int
nvmm_close(file_t *fp)
{
	struct nvmm_owner *owner = fp->f_data;

	KASSERT(owner != NULL);
	nvmm_kill_machines(owner);
	if (owner != &root_owner) {
		kmem_free(owner, sizeof(*owner));
	}
	fp->f_data = NULL;

	return 0;
}

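/*
 * mmap of the comm pages: the file offset encodes the machine and VCPU
 * IDs, and the comm uobj is handed back to UVM with the offset rewritten
 * to the VCPU's page. Exec permission is refused.
 */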
static int
nvmm_mmap(file_t *fp, off_t *offp, size_t size, int prot, int *flagsp,
    int *advicep, struct uvm_object **uobjp, int *maxprotp)
{
	struct nvmm_owner *owner = fp->f_data;
	struct nvmm_machine *mach;
	nvmm_machid_t machid;
	nvmm_cpuid_t cpuid;
	int error;

	if (prot & PROT_EXEC)
		return EACCES;
	if (size != PAGE_SIZE)
		return EINVAL;

	cpuid = NVMM_COMM_CPUID(*offp);
	if (__predict_false(cpuid >= NVMM_MAX_VCPUS))
		return EINVAL;

	machid = NVMM_COMM_MACHID(*offp);
	error = nvmm_machine_get(owner, machid, &mach, false);
	if (error)
		return error;

	uao_reference(mach->commuobj);
	*uobjp = mach->commuobj;
	*offp = cpuid * PAGE_SIZE;
	*maxprotp = prot;
	*advicep = UVM_ADV_RANDOM;

	nvmm_machine_put(mach);
	return 0;
}

static int
nvmm_ioctl(file_t *fp, u_long cmd, void *data)
{
	struct nvmm_owner *owner = fp->f_data;

	KASSERT(owner != NULL);

	switch (cmd) {
	case NVMM_IOC_CAPABILITY:
		return nvmm_capability(owner, data);
	case NVMM_IOC_MACHINE_CREATE:
		return nvmm_machine_create(owner, data);
	case NVMM_IOC_MACHINE_DESTROY:
		return nvmm_machine_destroy(owner, data);
	case NVMM_IOC_MACHINE_CONFIGURE:
		return nvmm_machine_configure(owner, data);
	case NVMM_IOC_VCPU_CREATE:
		return nvmm_vcpu_create(owner, data);
	case NVMM_IOC_VCPU_DESTROY:
		return nvmm_vcpu_destroy(owner, data);
	case NVMM_IOC_VCPU_CONFIGURE:
		return nvmm_vcpu_configure(owner, data);
	case NVMM_IOC_VCPU_SETSTATE:
		return nvmm_vcpu_setstate(owner, data);
	case NVMM_IOC_VCPU_GETSTATE:
		return nvmm_vcpu_getstate(owner, data);
	case NVMM_IOC_VCPU_INJECT:
		return nvmm_vcpu_inject(owner, data);
	case NVMM_IOC_VCPU_RUN:
		return nvmm_vcpu_run(owner, data);
	case NVMM_IOC_GPA_MAP:
		return nvmm_gpa_map(owner, data);
	case NVMM_IOC_GPA_UNMAP:
		return nvmm_gpa_unmap(owner, data);
	case NVMM_IOC_HVA_MAP:
		return nvmm_hva_map(owner, data);
	case NVMM_IOC_HVA_UNMAP:
		return nvmm_hva_unmap(owner, data);
	case NVMM_IOC_CTL:
		return nvmm_ctl(owner, data);
	default:
		return EINVAL;
	}
}

/* -------------------------------------------------------------------------- */

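/*
 * Autoconf and module glue. When built as a module, the device attaches
 * at character major 345, matching "mknod /dev/nvmm c 345 0". Unload is
 * refused while any machine exists.
 */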
void
nvmmattach(int nunits)
{
	/* nothing */
}

MODULE(MODULE_CLASS_MISC, nvmm, NULL);

static int
nvmm_modcmd(modcmd_t cmd, void *arg)
{
	int error;

	switch (cmd) {
	case MODULE_CMD_INIT:
		error = nvmm_init();
		if (error)
			return error;

#if defined(_MODULE)
		{
			devmajor_t bmajor = NODEVMAJOR;
			devmajor_t cmajor = 345;

			/* mknod /dev/nvmm c 345 0 */
			error = devsw_attach("nvmm", NULL, &bmajor,
			    &nvmm_cdevsw, &cmajor);
			if (error) {
				nvmm_fini();
				return error;
			}
		}
#endif
		return 0;

	case MODULE_CMD_FINI:
		if (nmachines > 0) {
			return EBUSY;
		}
#if defined(_MODULE)
		{
			error = devsw_detach(NULL, &nvmm_cdevsw);
			if (error) {
				return error;
			}
		}
#endif
		nvmm_fini();
		return 0;

	case MODULE_CMD_AUTOUNLOAD:
		return EBUSY;

	default:
		return ENOTTY;
	}
}
