/*	$NetBSD: nvmm.c,v 1.27 2020/04/30 16:50:17 maxv Exp $	*/

/*
 * Copyright (c) 2018-2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvmm.c,v 1.27 2020/04/30 16:50:17 maxv Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <sys/cpu.h>
#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/mman.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kauth.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>

#include "ioconf.h"

#include <dev/nvmm/nvmm.h>
#include <dev/nvmm/nvmm_internal.h>
#include <dev/nvmm/nvmm_ioctl.h>

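/*
 * Global, statically-allocated machine table. A slot is claimed by setting
 * mach->present under the machine lock; nmachines counts the claimed slots
 * and keeps the module from being unloaded while guests still exist.
 */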
static struct nvmm_machine machines[NVMM_MAX_MACHINES];
static volatile unsigned int nmachines __cacheline_aligned;

static const struct nvmm_impl *nvmm_impl_list[] = {
	&nvmm_x86_svm,	/* x86 AMD SVM */
	&nvmm_x86_vmx	/* x86 Intel VMX */
};

static const struct nvmm_impl *nvmm_impl = NULL;

static struct nvmm_owner root_owner;

/* -------------------------------------------------------------------------- */

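/*
 * Claim the first free slot in the machine table. On success the machine is
 * returned with its lock write-held; the caller releases it with
 * nvmm_machine_put(). ENOBUFS is returned when all NVMM_MAX_MACHINES slots
 * are in use.
 */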
static int
nvmm_machine_alloc(struct nvmm_machine **ret)
{
	struct nvmm_machine *mach;
	size_t i;

	for (i = 0; i < NVMM_MAX_MACHINES; i++) {
		mach = &machines[i];

		rw_enter(&mach->lock, RW_WRITER);
		if (mach->present) {
			rw_exit(&mach->lock);
			continue;
		}

		mach->present = true;
		mach->time = time_second;
		*ret = mach;
		atomic_inc_uint(&nmachines);
		return 0;
	}

	return ENOBUFS;
}

static void
nvmm_machine_free(struct nvmm_machine *mach)
{
	KASSERT(rw_write_held(&mach->lock));
	KASSERT(mach->present);
	mach->present = false;
	atomic_dec_uint(&nmachines);
}

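/*
 * Look up a machine by ID and lock it (read or write, as requested). The
 * machine must be present and belong to the caller; the privileged
 * root_owner bypasses the ownership check. Balanced by nvmm_machine_put().
 */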
static int
nvmm_machine_get(struct nvmm_owner *owner, nvmm_machid_t machid,
    struct nvmm_machine **ret, bool writer)
{
	struct nvmm_machine *mach;
	krw_t op = writer ? RW_WRITER : RW_READER;

	if (machid >= NVMM_MAX_MACHINES) {
		return EINVAL;
	}
	mach = &machines[machid];

	rw_enter(&mach->lock, op);
	if (!mach->present) {
		rw_exit(&mach->lock);
		return ENOENT;
	}
	if (owner != &root_owner && mach->owner != owner) {
		rw_exit(&mach->lock);
		return EPERM;
	}
	*ret = mach;

	return 0;
}

static void
nvmm_machine_put(struct nvmm_machine *mach)
{
	rw_exit(&mach->lock);
}

/* -------------------------------------------------------------------------- */

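/*
 * VCPU slots are embedded in the machine and indexed directly by cpuid.
 * nvmm_vcpu_alloc() returns the slot locked; the comm page and the backend
 * state are attached afterwards by nvmm_vcpu_create().
 */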
140 1.1 maxv static int
141 1.18 maxv nvmm_vcpu_alloc(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
142 1.18 maxv struct nvmm_cpu **ret)
143 1.1 maxv {
144 1.1 maxv struct nvmm_cpu *vcpu;
145 1.1 maxv
146 1.18 maxv if (cpuid >= NVMM_MAX_VCPUS) {
147 1.18 maxv return EINVAL;
148 1.18 maxv }
149 1.18 maxv vcpu = &mach->cpus[cpuid];
150 1.1 maxv
151 1.18 maxv mutex_enter(&vcpu->lock);
152 1.18 maxv if (vcpu->present) {
153 1.18 maxv mutex_exit(&vcpu->lock);
154 1.18 maxv return EBUSY;
155 1.1 maxv }
156 1.1 maxv
157 1.18 maxv vcpu->present = true;
158 1.19 maxv vcpu->comm = NULL;
159 1.18 maxv vcpu->hcpu_last = -1;
160 1.18 maxv *ret = vcpu;
161 1.18 maxv return 0;
162 1.1 maxv }
163 1.1 maxv
164 1.1 maxv static void
165 1.1 maxv nvmm_vcpu_free(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
166 1.1 maxv {
167 1.1 maxv KASSERT(mutex_owned(&vcpu->lock));
168 1.1 maxv vcpu->present = false;
169 1.19 maxv if (vcpu->comm != NULL) {
170 1.19 maxv uvm_deallocate(kernel_map, (vaddr_t)vcpu->comm, PAGE_SIZE);
171 1.19 maxv }
172 1.1 maxv }
173 1.1 maxv
174 1.22 maxv static int
175 1.1 maxv nvmm_vcpu_get(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
176 1.1 maxv struct nvmm_cpu **ret)
177 1.1 maxv {
178 1.1 maxv struct nvmm_cpu *vcpu;
179 1.1 maxv
180 1.1 maxv if (cpuid >= NVMM_MAX_VCPUS) {
181 1.1 maxv return EINVAL;
182 1.1 maxv }
183 1.1 maxv vcpu = &mach->cpus[cpuid];
184 1.1 maxv
185 1.1 maxv mutex_enter(&vcpu->lock);
186 1.1 maxv if (!vcpu->present) {
187 1.1 maxv mutex_exit(&vcpu->lock);
188 1.1 maxv return ENOENT;
189 1.1 maxv }
190 1.1 maxv *ret = vcpu;
191 1.1 maxv
192 1.1 maxv return 0;
193 1.1 maxv }
194 1.1 maxv
195 1.22 maxv static void
196 1.1 maxv nvmm_vcpu_put(struct nvmm_cpu *vcpu)
197 1.1 maxv {
198 1.1 maxv mutex_exit(&vcpu->lock);
199 1.1 maxv }
200 1.1 maxv
201 1.1 maxv /* -------------------------------------------------------------------------- */
202 1.1 maxv
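/*
 * Tear down every machine belonging to this owner. Called when the owner's
 * file descriptor is closed, so that nothing is leaked if the emulator
 * exits without issuing NVMM_IOC_MACHINE_DESTROY.
 */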
static void
nvmm_kill_machines(struct nvmm_owner *owner)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	size_t i, j;
	int error;

	for (i = 0; i < NVMM_MAX_MACHINES; i++) {
		mach = &machines[i];

		rw_enter(&mach->lock, RW_WRITER);
		if (!mach->present || mach->owner != owner) {
			rw_exit(&mach->lock);
			continue;
		}

		/* Kill it. */
		for (j = 0; j < NVMM_MAX_VCPUS; j++) {
			error = nvmm_vcpu_get(mach, j, &vcpu);
			if (error)
				continue;
			(*nvmm_impl->vcpu_destroy)(mach, vcpu);
			nvmm_vcpu_free(mach, vcpu);
			nvmm_vcpu_put(vcpu);
		}
		(*nvmm_impl->machine_destroy)(mach);
		uvmspace_free(mach->vm);

		/* Drop the kernel UOBJ refs. */
		for (j = 0; j < NVMM_MAX_HMAPPINGS; j++) {
			if (!mach->hmap[j].present)
				continue;
			uao_detach(mach->hmap[j].uobj);
		}

		nvmm_machine_free(mach);

		rw_exit(&mach->lock);
	}
}

/* -------------------------------------------------------------------------- */

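/*
 * Ioctl handlers. Each handler resolves the owner's machine (and VCPU where
 * relevant), performs the operation, and drops the locks before returning.
 */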
static int
nvmm_capability(struct nvmm_owner *owner, struct nvmm_ioc_capability *args)
{
	args->cap.version = NVMM_KERN_VERSION;
	args->cap.state_size = nvmm_impl->state_size;
	args->cap.max_machines = NVMM_MAX_MACHINES;
	args->cap.max_vcpus = NVMM_MAX_VCPUS;
	args->cap.max_ram = NVMM_MAX_RAM;

	(*nvmm_impl->capability)(&args->cap);

	return 0;
}

static int
nvmm_machine_create(struct nvmm_owner *owner,
    struct nvmm_ioc_machine_create *args)
{
	struct nvmm_machine *mach;
	int error;

	error = nvmm_machine_alloc(&mach);
	if (error)
		return error;

	/* Curproc owns the machine. */
	mach->owner = owner;

	/* Zero out the host mappings. */
	memset(&mach->hmap, 0, sizeof(mach->hmap));

	/* Create the machine vmspace. */
	mach->gpa_begin = 0;
	mach->gpa_end = NVMM_MAX_RAM;
	mach->vm = uvmspace_alloc(0, mach->gpa_end - mach->gpa_begin, false);

	/* Create the comm uobj. */
	mach->commuobj = uao_create(NVMM_MAX_VCPUS * PAGE_SIZE, 0);

	(*nvmm_impl->machine_create)(mach);

	args->machid = mach->machid;
	nvmm_machine_put(mach);

	return 0;
}

static int
nvmm_machine_destroy(struct nvmm_owner *owner,
    struct nvmm_ioc_machine_destroy *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;
	size_t i;

	error = nvmm_machine_get(owner, args->machid, &mach, true);
	if (error)
		return error;

	for (i = 0; i < NVMM_MAX_VCPUS; i++) {
		error = nvmm_vcpu_get(mach, i, &vcpu);
		if (error)
			continue;

		(*nvmm_impl->vcpu_destroy)(mach, vcpu);
		nvmm_vcpu_free(mach, vcpu);
		nvmm_vcpu_put(vcpu);
	}

	(*nvmm_impl->machine_destroy)(mach);

	/* Free the machine vmspace. */
	uvmspace_free(mach->vm);

	/* Drop the kernel UOBJ refs. */
	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		if (!mach->hmap[i].present)
			continue;
		uao_detach(mach->hmap[i].uobj);
	}

	nvmm_machine_free(mach);
	nvmm_machine_put(mach);

	return 0;
}

static int
nvmm_machine_configure(struct nvmm_owner *owner,
    struct nvmm_ioc_machine_configure *args)
{
	struct nvmm_machine *mach;
	size_t allocsz;
	uint64_t op;
	void *data;
	int error;

	op = NVMM_MACH_CONF_MD(args->op);
	if (__predict_false(op >= nvmm_impl->mach_conf_max)) {
		return EINVAL;
	}

	allocsz = nvmm_impl->mach_conf_sizes[op];
	data = kmem_alloc(allocsz, KM_SLEEP);

	error = nvmm_machine_get(owner, args->machid, &mach, true);
	if (error) {
		kmem_free(data, allocsz);
		return error;
	}

	error = copyin(args->conf, data, allocsz);
	if (error) {
		goto out;
	}

	error = (*nvmm_impl->machine_configure)(mach, op, data);

out:
	nvmm_machine_put(mach);
	kmem_free(data, allocsz);
	return error;
}

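/*
 * Create a VCPU: claim the slot, map and wire the per-VCPU page of the
 * machine's comm uobj into the kernel map (the same page userland can mmap
 * through nvmm_mmap()), then let the backend allocate its own state.
 */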
static int
nvmm_vcpu_create(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_create *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_alloc(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	/* Allocate the comm page. */
	uao_reference(mach->commuobj);
	error = uvm_map(kernel_map, (vaddr_t *)&vcpu->comm, PAGE_SIZE,
	    mach->commuobj, args->cpuid * PAGE_SIZE, 0, UVM_MAPFLAG(UVM_PROT_RW,
	    UVM_PROT_RW, UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
	if (error) {
		uao_detach(mach->commuobj);
		nvmm_vcpu_free(mach, vcpu);
		nvmm_vcpu_put(vcpu);
		goto out;
	}
	error = uvm_map_pageable(kernel_map, (vaddr_t)vcpu->comm,
	    (vaddr_t)vcpu->comm + PAGE_SIZE, false, 0);
	if (error) {
		nvmm_vcpu_free(mach, vcpu);
		nvmm_vcpu_put(vcpu);
		goto out;
	}
	memset(vcpu->comm, 0, PAGE_SIZE);

	error = (*nvmm_impl->vcpu_create)(mach, vcpu);
	if (error) {
		nvmm_vcpu_free(mach, vcpu);
		nvmm_vcpu_put(vcpu);
		goto out;
	}

	nvmm_vcpu_put(vcpu);

out:
	nvmm_machine_put(mach);
	return error;
}

static int
nvmm_vcpu_destroy(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_destroy *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	(*nvmm_impl->vcpu_destroy)(mach, vcpu);
	nvmm_vcpu_free(mach, vcpu);
	nvmm_vcpu_put(vcpu);

out:
	nvmm_machine_put(mach);
	return error;
}

static int
nvmm_vcpu_configure(struct nvmm_owner *owner,
    struct nvmm_ioc_vcpu_configure *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	size_t allocsz;
	uint64_t op;
	void *data;
	int error;

	op = NVMM_VCPU_CONF_MD(args->op);
	if (__predict_false(op >= nvmm_impl->vcpu_conf_max))
		return EINVAL;

	allocsz = nvmm_impl->vcpu_conf_sizes[op];
	data = kmem_alloc(allocsz, KM_SLEEP);

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error) {
		kmem_free(data, allocsz);
		return error;
	}

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error) {
		nvmm_machine_put(mach);
		kmem_free(data, allocsz);
		return error;
	}

	error = copyin(args->conf, data, allocsz);
	if (error) {
		goto out;
	}

	error = (*nvmm_impl->vcpu_configure)(vcpu, op, data);

out:
	nvmm_vcpu_put(vcpu);
	nvmm_machine_put(mach);
	kmem_free(data, allocsz);
	return error;
}

static int
nvmm_vcpu_setstate(struct nvmm_owner *owner,
    struct nvmm_ioc_vcpu_setstate *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	(*nvmm_impl->vcpu_setstate)(vcpu);
	nvmm_vcpu_put(vcpu);

out:
	nvmm_machine_put(mach);
	return error;
}

static int
nvmm_vcpu_getstate(struct nvmm_owner *owner,
    struct nvmm_ioc_vcpu_getstate *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	(*nvmm_impl->vcpu_getstate)(vcpu);
	nvmm_vcpu_put(vcpu);

out:
	nvmm_machine_put(mach);
	return error;
}

static int
nvmm_vcpu_inject(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_inject *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	error = (*nvmm_impl->vcpu_inject)(vcpu);
	nvmm_vcpu_put(vcpu);

out:
	nvmm_machine_put(mach);
	return error;
}

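/*
 * Run loop. On an NVMM_VCPU_EXIT_MEMORY exit whose GPA falls inside the
 * guest address space, fault the page in through UVM and re-enter the guest
 * directly; every other exit reason is reported back to userland.
 */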
static int
nvmm_do_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct vmspace *vm = mach->vm;
	int ret;

	while (1) {
		ret = (*nvmm_impl->vcpu_run)(mach, vcpu, exit);
		if (__predict_false(ret != 0)) {
			return ret;
		}

		if (__predict_true(exit->reason != NVMM_VCPU_EXIT_MEMORY)) {
			break;
		}
		if (exit->u.mem.gpa >= mach->gpa_end) {
			break;
		}
		if (uvm_fault(&vm->vm_map, exit->u.mem.gpa, exit->u.mem.prot)) {
			break;
		}
	}

	return 0;
}

static int
nvmm_vcpu_run(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_run *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	error = nvmm_do_vcpu_run(mach, vcpu, &args->exit);
	nvmm_vcpu_put(vcpu);

out:
	nvmm_machine_put(mach);
	return error;
}

/* -------------------------------------------------------------------------- */

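/*
 * Host mappings ("hmappings"): ranges of the emulator's virtual address
 * space registered via NVMM_IOC_HVA_MAP. Each range is backed by an
 * anonymous UVM object, which nvmm_gpa_map() later maps a second time into
 * the machine's guest-physical vmspace.
 */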
static struct uvm_object *
nvmm_hmapping_getuobj(struct nvmm_machine *mach, uintptr_t hva, size_t size,
    size_t *off)
{
	struct nvmm_hmapping *hmapping;
	size_t i;

	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		hmapping = &mach->hmap[i];
		if (!hmapping->present) {
			continue;
		}
		if (hva >= hmapping->hva &&
		    hva + size <= hmapping->hva + hmapping->size) {
			*off = hva - hmapping->hva;
			return hmapping->uobj;
		}
	}

	return NULL;
}

static int
nvmm_hmapping_validate(struct nvmm_machine *mach, uintptr_t hva, size_t size)
{
	struct nvmm_hmapping *hmapping;
	size_t i;

	if ((hva % PAGE_SIZE) != 0 || (size % PAGE_SIZE) != 0) {
		return EINVAL;
	}
	if (hva == 0) {
		return EINVAL;
	}

	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		hmapping = &mach->hmap[i];
		if (!hmapping->present) {
			continue;
		}

		if (hva >= hmapping->hva &&
		    hva + size <= hmapping->hva + hmapping->size) {
			break;
		}

		if (hva >= hmapping->hva &&
		    hva < hmapping->hva + hmapping->size) {
			return EEXIST;
		}
		if (hva + size > hmapping->hva &&
		    hva + size <= hmapping->hva + hmapping->size) {
			return EEXIST;
		}
		if (hva <= hmapping->hva &&
		    hva + size >= hmapping->hva + hmapping->size) {
			return EEXIST;
		}
	}

	return 0;
}

static struct nvmm_hmapping *
nvmm_hmapping_alloc(struct nvmm_machine *mach)
{
	struct nvmm_hmapping *hmapping;
	size_t i;

	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		hmapping = &mach->hmap[i];
		if (!hmapping->present) {
			hmapping->present = true;
			return hmapping;
		}
	}

	return NULL;
}

static int
nvmm_hmapping_free(struct nvmm_machine *mach, uintptr_t hva, size_t size)
{
	struct vmspace *vmspace = curproc->p_vmspace;
	struct nvmm_hmapping *hmapping;
	size_t i;

	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		hmapping = &mach->hmap[i];
		if (!hmapping->present || hmapping->hva != hva ||
		    hmapping->size != size) {
			continue;
		}

		uvm_unmap(&vmspace->vm_map, hmapping->hva,
		    hmapping->hva + hmapping->size);
		uao_detach(hmapping->uobj);

		hmapping->uobj = NULL;
		hmapping->present = false;

		return 0;
	}

	return ENOENT;
}

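/*
 * Register a host mapping: create the backing uobj and map it at the fixed
 * user VA, replacing whatever was mapped there (UVM_FLAG_FIXED|UVM_FLAG_UNMAP).
 * One uobj reference is kept in the hmapping, one goes to the user mapping.
 */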
static int
nvmm_hva_map(struct nvmm_owner *owner, struct nvmm_ioc_hva_map *args)
{
	struct vmspace *vmspace = curproc->p_vmspace;
	struct nvmm_machine *mach;
	struct nvmm_hmapping *hmapping;
	vaddr_t uva;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, true);
	if (error)
		return error;

	error = nvmm_hmapping_validate(mach, args->hva, args->size);
	if (error)
		goto out;

	hmapping = nvmm_hmapping_alloc(mach);
	if (hmapping == NULL) {
		error = ENOBUFS;
		goto out;
	}

	hmapping->hva = args->hva;
	hmapping->size = args->size;
	hmapping->uobj = uao_create(hmapping->size, 0);
	uva = hmapping->hva;

	/* Take a reference for the user. */
	uao_reference(hmapping->uobj);

	/* Map the uobj into the user address space, as pageable. */
	error = uvm_map(&vmspace->vm_map, &uva, hmapping->size, hmapping->uobj,
	    0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_SHARE,
	    UVM_ADV_RANDOM, UVM_FLAG_FIXED|UVM_FLAG_UNMAP));
	if (error) {
		uao_detach(hmapping->uobj);
	}

out:
	nvmm_machine_put(mach);
	return error;
}

static int
nvmm_hva_unmap(struct nvmm_owner *owner, struct nvmm_ioc_hva_unmap *args)
{
	struct nvmm_machine *mach;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, true);
	if (error)
		return error;

	error = nvmm_hmapping_free(mach, args->hva, args->size);

	nvmm_machine_put(mach);
	return error;
}

/* -------------------------------------------------------------------------- */

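/*
 * Map guest-physical memory: the HVA range must lie entirely within a
 * registered host mapping. The backing uobj is mapped at the fixed GPA in
 * the machine vmspace with the requested protection, so guest and emulator
 * share the same pages.
 */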
static int
nvmm_gpa_map(struct nvmm_owner *owner, struct nvmm_ioc_gpa_map *args)
{
	struct nvmm_machine *mach;
	struct uvm_object *uobj;
	gpaddr_t gpa;
	size_t off;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	if ((args->prot & ~(PROT_READ|PROT_WRITE|PROT_EXEC)) != 0) {
		error = EINVAL;
		goto out;
	}

	if ((args->gpa % PAGE_SIZE) != 0 || (args->size % PAGE_SIZE) != 0 ||
	    (args->hva % PAGE_SIZE) != 0) {
		error = EINVAL;
		goto out;
	}
	if (args->hva == 0) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa < mach->gpa_begin || args->gpa >= mach->gpa_end) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa + args->size <= args->gpa) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa + args->size > mach->gpa_end) {
		error = EINVAL;
		goto out;
	}
	gpa = args->gpa;

	uobj = nvmm_hmapping_getuobj(mach, args->hva, args->size, &off);
	if (uobj == NULL) {
		error = EINVAL;
		goto out;
	}

	/* Take a reference for the machine. */
	uao_reference(uobj);

	/* Map the uobj into the machine address space, as pageable. */
	error = uvm_map(&mach->vm->vm_map, &gpa, args->size, uobj, off, 0,
	    UVM_MAPFLAG(args->prot, UVM_PROT_RWX, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_FIXED|UVM_FLAG_UNMAP));
	if (error) {
		uao_detach(uobj);
		goto out;
	}
	if (gpa != args->gpa) {
		uao_detach(uobj);
		printf("[!] uvm_map problem\n");
		error = EINVAL;
		goto out;
	}

out:
	nvmm_machine_put(mach);
	return error;
}

static int
nvmm_gpa_unmap(struct nvmm_owner *owner, struct nvmm_ioc_gpa_unmap *args)
{
	struct nvmm_machine *mach;
	gpaddr_t gpa;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	if ((args->gpa % PAGE_SIZE) != 0 || (args->size % PAGE_SIZE) != 0) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa < mach->gpa_begin || args->gpa >= mach->gpa_end) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa + args->size <= args->gpa) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa + args->size >= mach->gpa_end) {
		error = EINVAL;
		goto out;
	}
	gpa = args->gpa;

	/* Unmap the memory from the machine. */
	uvm_unmap(&mach->vm->vm_map, gpa, gpa + args->size);

out:
	nvmm_machine_put(mach);
	return error;
}

/* -------------------------------------------------------------------------- */

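/*
 * NVMM_CTL_MACH_INFO: copy a struct nvmm_ctl_mach_info in and out, filling
 * in the VCPU count, the total size of the registered host mappings, and
 * the owning PID and creation time of the machine.
 */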
static int
nvmm_ctl_mach_info(struct nvmm_owner *owner, struct nvmm_ioc_ctl *args)
{
	struct nvmm_ctl_mach_info ctl;
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;
	size_t i;

	if (args->size != sizeof(ctl))
		return EINVAL;
	error = copyin(args->data, &ctl, sizeof(ctl));
	if (error)
		return error;

	error = nvmm_machine_get(owner, ctl.machid, &mach, true);
	if (error)
		return error;

	ctl.nvcpus = 0;
	for (i = 0; i < NVMM_MAX_VCPUS; i++) {
		error = nvmm_vcpu_get(mach, i, &vcpu);
		if (error)
			continue;
		ctl.nvcpus++;
		nvmm_vcpu_put(vcpu);
	}

	ctl.nram = 0;
	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		if (!mach->hmap[i].present)
			continue;
		ctl.nram += mach->hmap[i].size;
	}

	ctl.pid = mach->owner->pid;
	ctl.time = mach->time;

	nvmm_machine_put(mach);

	error = copyout(&ctl, args->data, sizeof(ctl));
	if (error)
		return error;

	return 0;
}

static int
nvmm_ctl(struct nvmm_owner *owner, struct nvmm_ioc_ctl *args)
{
	switch (args->op) {
	case NVMM_CTL_MACH_INFO:
		return nvmm_ctl_mach_info(owner, args);
	default:
		return EINVAL;
	}
}

/* -------------------------------------------------------------------------- */

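/*
 * Module initialization: pick the first backend whose ident() hook accepts
 * the host CPU (AMD SVM, then Intel VMX), initialize the machine and VCPU
 * locks, and call the backend's init() hook.
 */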
static int
nvmm_init(void)
{
	size_t i, n;

	for (i = 0; i < __arraycount(nvmm_impl_list); i++) {
		if (!(*nvmm_impl_list[i]->ident)()) {
			continue;
		}
		nvmm_impl = nvmm_impl_list[i];
		break;
	}
	if (nvmm_impl == NULL) {
		printf("NVMM: CPU not supported\n");
		return ENOTSUP;
	}

	for (i = 0; i < NVMM_MAX_MACHINES; i++) {
		machines[i].machid = i;
		rw_init(&machines[i].lock);
		for (n = 0; n < NVMM_MAX_VCPUS; n++) {
			machines[i].cpus[n].present = false;
			machines[i].cpus[n].cpuid = n;
			mutex_init(&machines[i].cpus[n].lock, MUTEX_DEFAULT,
			    IPL_NONE);
		}
	}

	(*nvmm_impl->init)();

	return 0;
}

static void
nvmm_fini(void)
{
	size_t i, n;

	for (i = 0; i < NVMM_MAX_MACHINES; i++) {
		rw_destroy(&machines[i].lock);
		for (n = 0; n < NVMM_MAX_VCPUS; n++) {
			mutex_destroy(&machines[i].cpus[n].lock);
		}
	}

	(*nvmm_impl->fini)();
}

/* -------------------------------------------------------------------------- */

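/*
 * The device node only implements open(). Everything else goes through the
 * file installed by fd_clone(), so each open of /dev/nvmm gets its own
 * struct nvmm_owner and its machines are reclaimed on close.
 */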
static dev_type_open(nvmm_open);

const struct cdevsw nvmm_cdevsw = {
	.d_open = nvmm_open,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

static int nvmm_ioctl(file_t *, u_long, void *);
static int nvmm_close(file_t *);
static int nvmm_mmap(file_t *, off_t *, size_t, int, int *, int *,
    struct uvm_object **, int *);

const struct fileops nvmm_fileops = {
	.fo_read = fbadop_read,
	.fo_write = fbadop_write,
	.fo_ioctl = nvmm_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = fnullop_poll,
	.fo_stat = fbadop_stat,
	.fo_close = nvmm_close,
	.fo_kqfilter = fnullop_kqfilter,
	.fo_restart = fnullop_restart,
	.fo_mmap = nvmm_mmap,
};

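/*
 * open() requires O_CLOEXEC so the descriptor cannot leak across exec. A
 * write-mode open selects the static root_owner, which bypasses the
 * per-owner checks in nvmm_machine_get(); regular opens allocate an owner
 * tied to the calling PID.
 */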
static int
nvmm_open(dev_t dev, int flags, int type, struct lwp *l)
{
	struct nvmm_owner *owner;
	struct file *fp;
	int error, fd;

	if (__predict_false(nvmm_impl == NULL))
		return ENXIO;
	if (minor(dev) != 0)
		return EXDEV;
	if (!(flags & O_CLOEXEC))
		return EINVAL;
	error = fd_allocfile(&fp, &fd);
	if (error)
		return error;

	if (OFLAGS(flags) & O_WRONLY) {
		owner = &root_owner;
	} else {
		owner = kmem_alloc(sizeof(*owner), KM_SLEEP);
		owner->pid = l->l_proc->p_pid;
	}

	return fd_clone(fp, fd, flags, &nvmm_fileops, owner);
}

static int
nvmm_close(file_t *fp)
{
	struct nvmm_owner *owner = fp->f_data;

	KASSERT(owner != NULL);
	nvmm_kill_machines(owner);
	if (owner != &root_owner) {
		kmem_free(owner, sizeof(*owner));
	}
	fp->f_data = NULL;

	return 0;
}

static int
nvmm_mmap(file_t *fp, off_t *offp, size_t size, int prot, int *flagsp,
    int *advicep, struct uvm_object **uobjp, int *maxprotp)
{
	struct nvmm_owner *owner = fp->f_data;
	struct nvmm_machine *mach;
	nvmm_machid_t machid;
	nvmm_cpuid_t cpuid;
	int error;

	if (prot & PROT_EXEC)
		return EACCES;
	if (size != PAGE_SIZE)
		return EINVAL;

	cpuid = NVMM_COMM_CPUID(*offp);
	if (__predict_false(cpuid >= NVMM_MAX_VCPUS))
		return EINVAL;

	machid = NVMM_COMM_MACHID(*offp);
	error = nvmm_machine_get(owner, machid, &mach, false);
	if (error)
		return error;

	uao_reference(mach->commuobj);
	*uobjp = mach->commuobj;
	*offp = cpuid * PAGE_SIZE;
	*maxprotp = prot;
	*advicep = UVM_ADV_RANDOM;

	nvmm_machine_put(mach);
	return 0;
}

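/*
 * Illustrative only -- a rough sketch (not the actual libnvmm code) of how a
 * userland emulator might drive this ioctl interface, using only the request
 * names and argument fields handled below; error handling omitted:
 *
 *	int fd = open("/dev/nvmm", O_RDONLY | O_CLOEXEC);
 *
 *	struct nvmm_ioc_machine_create mc = { 0 };
 *	ioctl(fd, NVMM_IOC_MACHINE_CREATE, &mc);
 *
 *	struct nvmm_ioc_hva_map hm = { .machid = mc.machid,
 *	    .hva = (uintptr_t)guest_ram, .size = ramsize };
 *	ioctl(fd, NVMM_IOC_HVA_MAP, &hm);
 *
 *	struct nvmm_ioc_gpa_map gm = { .machid = mc.machid, .hva = hm.hva,
 *	    .gpa = 0, .size = ramsize,
 *	    .prot = PROT_READ|PROT_WRITE|PROT_EXEC };
 *	ioctl(fd, NVMM_IOC_GPA_MAP, &gm);
 *
 *	struct nvmm_ioc_vcpu_create vc = { .machid = mc.machid, .cpuid = 0 };
 *	ioctl(fd, NVMM_IOC_VCPU_CREATE, &vc);
 *
 *	struct nvmm_ioc_vcpu_run vr = { .machid = mc.machid, .cpuid = 0 };
 *	for (;;) {
 *		ioctl(fd, NVMM_IOC_VCPU_RUN, &vr);
 *		... dispatch on vr.exit.reason ...
 *	}
 *
 * "guest_ram" and "ramsize" are hypothetical; the struct layouts live in
 * nvmm_ioctl.h and may contain more fields than shown here.
 */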
static int
nvmm_ioctl(file_t *fp, u_long cmd, void *data)
{
	struct nvmm_owner *owner = fp->f_data;

	KASSERT(owner != NULL);

	switch (cmd) {
	case NVMM_IOC_CAPABILITY:
		return nvmm_capability(owner, data);
	case NVMM_IOC_MACHINE_CREATE:
		return nvmm_machine_create(owner, data);
	case NVMM_IOC_MACHINE_DESTROY:
		return nvmm_machine_destroy(owner, data);
	case NVMM_IOC_MACHINE_CONFIGURE:
		return nvmm_machine_configure(owner, data);
	case NVMM_IOC_VCPU_CREATE:
		return nvmm_vcpu_create(owner, data);
	case NVMM_IOC_VCPU_DESTROY:
		return nvmm_vcpu_destroy(owner, data);
	case NVMM_IOC_VCPU_CONFIGURE:
		return nvmm_vcpu_configure(owner, data);
	case NVMM_IOC_VCPU_SETSTATE:
		return nvmm_vcpu_setstate(owner, data);
	case NVMM_IOC_VCPU_GETSTATE:
		return nvmm_vcpu_getstate(owner, data);
	case NVMM_IOC_VCPU_INJECT:
		return nvmm_vcpu_inject(owner, data);
	case NVMM_IOC_VCPU_RUN:
		return nvmm_vcpu_run(owner, data);
	case NVMM_IOC_GPA_MAP:
		return nvmm_gpa_map(owner, data);
	case NVMM_IOC_GPA_UNMAP:
		return nvmm_gpa_unmap(owner, data);
	case NVMM_IOC_HVA_MAP:
		return nvmm_hva_map(owner, data);
	case NVMM_IOC_HVA_UNMAP:
		return nvmm_hva_unmap(owner, data);
	case NVMM_IOC_CTL:
		return nvmm_ctl(owner, data);
	default:
		return EINVAL;
	}
}

/* -------------------------------------------------------------------------- */

void
nvmmattach(int nunits)
{
	/* nothing */
}

MODULE(MODULE_CLASS_MISC, nvmm, NULL);

static int
nvmm_modcmd(modcmd_t cmd, void *arg)
{
	int error;

	switch (cmd) {
	case MODULE_CMD_INIT:
		error = nvmm_init();
		if (error)
			return error;

#if defined(_MODULE)
		{
			devmajor_t bmajor = NODEVMAJOR;
			devmajor_t cmajor = 345;

			/* mknod /dev/nvmm c 345 0 */
			error = devsw_attach("nvmm", NULL, &bmajor,
			    &nvmm_cdevsw, &cmajor);
			if (error) {
				nvmm_fini();
				return error;
			}
		}
#endif
		return 0;

	case MODULE_CMD_FINI:
		if (nmachines > 0) {
			return EBUSY;
		}
#if defined(_MODULE)
		{
			error = devsw_detach(NULL, &nvmm_cdevsw);
			if (error) {
				return error;
			}
		}
#endif
		nvmm_fini();
		return 0;

	case MODULE_CMD_AUTOUNLOAD:
		return EBUSY;

	default:
		return ENOTTY;
	}
}