/*	$NetBSD: nvmm.c,v 1.33 2020/08/01 08:18:36 maxv Exp $	*/

/*
 * Copyright (c) 2018-2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvmm.c,v 1.33 2020/08/01 08:18:36 maxv Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <sys/cpu.h>
#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/mman.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/device.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>

#include "ioconf.h"

#include <dev/nvmm/nvmm.h>
#include <dev/nvmm/nvmm_internal.h>
#include <dev/nvmm/nvmm_ioctl.h>

static struct nvmm_machine machines[NVMM_MAX_MACHINES];
static volatile unsigned int nmachines __cacheline_aligned;

static const struct nvmm_impl *nvmm_impl_list[] = {
#if defined(__x86_64__)
	&nvmm_x86_svm,	/* x86 AMD SVM */
	&nvmm_x86_vmx	/* x86 Intel VMX */
#endif
};

static const struct nvmm_impl *nvmm_impl = NULL;

static struct nvmm_owner root_owner;

/* -------------------------------------------------------------------------- */

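/*
 * Find the first free machine slot: take each machine lock as writer, skip
 * the slots already present, and claim the first free one. Returns 0 with
 * the machine lock still held and the global machine count bumped, or
 * ENOBUFS if every slot is in use.
 */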
static int
nvmm_machine_alloc(struct nvmm_machine **ret)
{
	struct nvmm_machine *mach;
	size_t i;

	for (i = 0; i < NVMM_MAX_MACHINES; i++) {
		mach = &machines[i];

		rw_enter(&mach->lock, RW_WRITER);
		if (mach->present) {
			rw_exit(&mach->lock);
			continue;
		}

		mach->present = true;
		mach->time = time_second;
		*ret = mach;
		atomic_inc_uint(&nmachines);
		return 0;
	}

	return ENOBUFS;
}

static void
nvmm_machine_free(struct nvmm_machine *mach)
{
	KASSERT(rw_write_held(&mach->lock));
	KASSERT(mach->present);
	mach->present = false;
	atomic_dec_uint(&nmachines);
}

static int
nvmm_machine_get(struct nvmm_owner *owner, nvmm_machid_t machid,
    struct nvmm_machine **ret, bool writer)
{
	struct nvmm_machine *mach;
	krw_t op = writer ? RW_WRITER : RW_READER;

	if (machid >= NVMM_MAX_MACHINES) {
		return EINVAL;
	}
	mach = &machines[machid];

	rw_enter(&mach->lock, op);
	if (!mach->present) {
		rw_exit(&mach->lock);
		return ENOENT;
	}
	if (owner != &root_owner && mach->owner != owner) {
		rw_exit(&mach->lock);
		return EPERM;
	}
	*ret = mach;

	return 0;
}

static void
nvmm_machine_put(struct nvmm_machine *mach)
{
	rw_exit(&mach->lock);
}

/* -------------------------------------------------------------------------- */

static int
nvmm_vcpu_alloc(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
    struct nvmm_cpu **ret)
{
	struct nvmm_cpu *vcpu;

	if (cpuid >= NVMM_MAX_VCPUS) {
		return EINVAL;
	}
	vcpu = &mach->cpus[cpuid];

	mutex_enter(&vcpu->lock);
	if (vcpu->present) {
		mutex_exit(&vcpu->lock);
		return EBUSY;
	}

	vcpu->present = true;
	vcpu->comm = NULL;
	vcpu->hcpu_last = -1;
	*ret = vcpu;
	return 0;
}

static void
nvmm_vcpu_free(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
{
	KASSERT(mutex_owned(&vcpu->lock));
	vcpu->present = false;
	if (vcpu->comm != NULL) {
		uvm_deallocate(kernel_map, (vaddr_t)vcpu->comm, PAGE_SIZE);
	}
}

static int
nvmm_vcpu_get(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
    struct nvmm_cpu **ret)
{
	struct nvmm_cpu *vcpu;

	if (cpuid >= NVMM_MAX_VCPUS) {
		return EINVAL;
	}
	vcpu = &mach->cpus[cpuid];

	mutex_enter(&vcpu->lock);
	if (!vcpu->present) {
		mutex_exit(&vcpu->lock);
		return ENOENT;
	}
	*ret = vcpu;

	return 0;
}

static void
nvmm_vcpu_put(struct nvmm_cpu *vcpu)
{
	mutex_exit(&vcpu->lock);
}

/* -------------------------------------------------------------------------- */

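/*
 * Tear down every machine belonging to the given owner: destroy its VCPUs,
 * call the backend's machine_destroy, free the guest vmspace and drop the
 * host-mapping UOBJ references. Called when the owner's file is closed.
 */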
static void
nvmm_kill_machines(struct nvmm_owner *owner)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	size_t i, j;
	int error;

	for (i = 0; i < NVMM_MAX_MACHINES; i++) {
		mach = &machines[i];

		rw_enter(&mach->lock, RW_WRITER);
		if (!mach->present || mach->owner != owner) {
			rw_exit(&mach->lock);
			continue;
		}

		/* Kill it. */
		for (j = 0; j < NVMM_MAX_VCPUS; j++) {
			error = nvmm_vcpu_get(mach, j, &vcpu);
			if (error)
				continue;
			(*nvmm_impl->vcpu_destroy)(mach, vcpu);
			nvmm_vcpu_free(mach, vcpu);
			nvmm_vcpu_put(vcpu);
		}
		(*nvmm_impl->machine_destroy)(mach);
		uvmspace_free(mach->vm);

		/* Drop the kernel UOBJ refs. */
		for (j = 0; j < NVMM_MAX_HMAPPINGS; j++) {
			if (!mach->hmap[j].present)
				continue;
			uao_detach(mach->hmap[j].uobj);
		}

		nvmm_machine_free(mach);

		rw_exit(&mach->lock);
	}
}

/* -------------------------------------------------------------------------- */

static int
nvmm_capability(struct nvmm_owner *owner, struct nvmm_ioc_capability *args)
{
	args->cap.version = NVMM_KERN_VERSION;
	args->cap.state_size = nvmm_impl->state_size;
	args->cap.max_machines = NVMM_MAX_MACHINES;
	args->cap.max_vcpus = NVMM_MAX_VCPUS;
	args->cap.max_ram = NVMM_MAX_RAM;

	(*nvmm_impl->capability)(&args->cap);

	return 0;
}

static int
nvmm_machine_create(struct nvmm_owner *owner,
    struct nvmm_ioc_machine_create *args)
{
	struct nvmm_machine *mach;
	int error;

	error = nvmm_machine_alloc(&mach);
	if (error)
		return error;

	/* Curproc owns the machine. */
	mach->owner = owner;

	/* Zero out the host mappings. */
	memset(&mach->hmap, 0, sizeof(mach->hmap));

	/* Create the machine vmspace. */
	mach->gpa_begin = 0;
	mach->gpa_end = NVMM_MAX_RAM;
	mach->vm = uvmspace_alloc(0, mach->gpa_end - mach->gpa_begin, false);

	/* Create the comm uobj. */
	mach->commuobj = uao_create(NVMM_MAX_VCPUS * PAGE_SIZE, 0);

	(*nvmm_impl->machine_create)(mach);

	args->machid = mach->machid;
	nvmm_machine_put(mach);

	return 0;
}

static int
nvmm_machine_destroy(struct nvmm_owner *owner,
    struct nvmm_ioc_machine_destroy *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;
	size_t i;

	error = nvmm_machine_get(owner, args->machid, &mach, true);
	if (error)
		return error;

	for (i = 0; i < NVMM_MAX_VCPUS; i++) {
		error = nvmm_vcpu_get(mach, i, &vcpu);
		if (error)
			continue;

		(*nvmm_impl->vcpu_destroy)(mach, vcpu);
		nvmm_vcpu_free(mach, vcpu);
		nvmm_vcpu_put(vcpu);
	}

	(*nvmm_impl->machine_destroy)(mach);

	/* Free the machine vmspace. */
	uvmspace_free(mach->vm);

	/* Drop the kernel UOBJ refs. */
	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		if (!mach->hmap[i].present)
			continue;
		uao_detach(mach->hmap[i].uobj);
	}

	nvmm_machine_free(mach);
	nvmm_machine_put(mach);

	return 0;
}

static int
nvmm_machine_configure(struct nvmm_owner *owner,
    struct nvmm_ioc_machine_configure *args)
{
	struct nvmm_machine *mach;
	size_t allocsz;
	uint64_t op;
	void *data;
	int error;

	op = NVMM_MACH_CONF_MD(args->op);
	if (__predict_false(op >= nvmm_impl->mach_conf_max)) {
		return EINVAL;
	}

	allocsz = nvmm_impl->mach_conf_sizes[op];
	data = kmem_alloc(allocsz, KM_SLEEP);

	error = nvmm_machine_get(owner, args->machid, &mach, true);
	if (error) {
		kmem_free(data, allocsz);
		return error;
	}

	error = copyin(args->conf, data, allocsz);
	if (error) {
		goto out;
	}

	error = (*nvmm_impl->machine_configure)(mach, op, data);

out:
	nvmm_machine_put(mach);
	kmem_free(data, allocsz);
	return error;
}

static int
nvmm_vcpu_create(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_create *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_alloc(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	/* Allocate the comm page. */
	uao_reference(mach->commuobj);
	error = uvm_map(kernel_map, (vaddr_t *)&vcpu->comm, PAGE_SIZE,
	    mach->commuobj, args->cpuid * PAGE_SIZE, 0, UVM_MAPFLAG(UVM_PROT_RW,
	    UVM_PROT_RW, UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
	if (error) {
		uao_detach(mach->commuobj);
		nvmm_vcpu_free(mach, vcpu);
		nvmm_vcpu_put(vcpu);
		goto out;
	}
	error = uvm_map_pageable(kernel_map, (vaddr_t)vcpu->comm,
	    (vaddr_t)vcpu->comm + PAGE_SIZE, false, 0);
	if (error) {
		nvmm_vcpu_free(mach, vcpu);
		nvmm_vcpu_put(vcpu);
		goto out;
	}
	memset(vcpu->comm, 0, PAGE_SIZE);

	error = (*nvmm_impl->vcpu_create)(mach, vcpu);
	if (error) {
		nvmm_vcpu_free(mach, vcpu);
		nvmm_vcpu_put(vcpu);
		goto out;
	}

	nvmm_vcpu_put(vcpu);

	atomic_inc_uint(&mach->ncpus);

out:
	nvmm_machine_put(mach);
	return error;
}

static int
nvmm_vcpu_destroy(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_destroy *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	(*nvmm_impl->vcpu_destroy)(mach, vcpu);
	nvmm_vcpu_free(mach, vcpu);
	nvmm_vcpu_put(vcpu);

	atomic_dec_uint(&mach->ncpus);

out:
	nvmm_machine_put(mach);
	return error;
}

static int
nvmm_vcpu_configure(struct nvmm_owner *owner,
    struct nvmm_ioc_vcpu_configure *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	size_t allocsz;
	uint64_t op;
	void *data;
	int error;

	op = NVMM_VCPU_CONF_MD(args->op);
	if (__predict_false(op >= nvmm_impl->vcpu_conf_max))
		return EINVAL;

	allocsz = nvmm_impl->vcpu_conf_sizes[op];
	data = kmem_alloc(allocsz, KM_SLEEP);

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error) {
		kmem_free(data, allocsz);
		return error;
	}

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error) {
		nvmm_machine_put(mach);
		kmem_free(data, allocsz);
		return error;
	}

	error = copyin(args->conf, data, allocsz);
	if (error) {
		goto out;
	}

	error = (*nvmm_impl->vcpu_configure)(vcpu, op, data);

out:
	nvmm_vcpu_put(vcpu);
	nvmm_machine_put(mach);
	kmem_free(data, allocsz);
	return error;
}

static int
nvmm_vcpu_setstate(struct nvmm_owner *owner,
    struct nvmm_ioc_vcpu_setstate *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	(*nvmm_impl->vcpu_setstate)(vcpu);
	nvmm_vcpu_put(vcpu);

out:
	nvmm_machine_put(mach);
	return error;
}

static int
nvmm_vcpu_getstate(struct nvmm_owner *owner,
    struct nvmm_ioc_vcpu_getstate *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	(*nvmm_impl->vcpu_getstate)(vcpu);
	nvmm_vcpu_put(vcpu);

out:
	nvmm_machine_put(mach);
	return error;
}

static int
nvmm_vcpu_inject(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_inject *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	error = (*nvmm_impl->vcpu_inject)(vcpu);
	nvmm_vcpu_put(vcpu);

out:
	nvmm_machine_put(mach);
	return error;
}

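/*
 * Run the VCPU in a loop. Returns to userland when a signal or a reschedule
 * is pending, or when the backend reports an exit the kernel cannot handle.
 * Nested page faults on guest RAM are resolved in-kernel via uvm_fault()
 * and the VCPU is resumed transparently.
 */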
static int
nvmm_do_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct vmspace *vm = mach->vm;
	int ret;

	while (1) {
		/* Got a signal? Or pending resched? Leave. */
		if (__predict_false(nvmm_return_needed())) {
			exit->reason = NVMM_VCPU_EXIT_NONE;
			return 0;
		}

		/* Run the VCPU. */
		ret = (*nvmm_impl->vcpu_run)(mach, vcpu, exit);
		if (__predict_false(ret != 0)) {
			return ret;
		}

		/* Process nested page faults. */
		if (__predict_true(exit->reason != NVMM_VCPU_EXIT_MEMORY)) {
			break;
		}
		if (exit->u.mem.gpa >= mach->gpa_end) {
			break;
		}
		if (uvm_fault(&vm->vm_map, exit->u.mem.gpa, exit->u.mem.prot)) {
			break;
		}
	}

	return 0;
}

static int
nvmm_vcpu_run(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_run *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	error = nvmm_do_vcpu_run(mach, vcpu, &args->exit);
	nvmm_vcpu_put(vcpu);

out:
	nvmm_machine_put(mach);
	return error;
}

/* -------------------------------------------------------------------------- */

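/*
 * Host mappings: each nvmm_hmapping associates a range of the owner's
 * address space (HVA) with an anonymous UVM object. nvmm_gpa_map() later
 * maps a window of the same uobj into the guest vmspace, so host and
 * guest share the underlying pages.
 */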
static struct uvm_object *
nvmm_hmapping_getuobj(struct nvmm_machine *mach, uintptr_t hva, size_t size,
    size_t *off)
{
	struct nvmm_hmapping *hmapping;
	size_t i;

	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		hmapping = &mach->hmap[i];
		if (!hmapping->present) {
			continue;
		}
		if (hva >= hmapping->hva &&
		    hva + size <= hmapping->hva + hmapping->size) {
			*off = hva - hmapping->hva;
			return hmapping->uobj;
		}
	}

	return NULL;
}

static int
nvmm_hmapping_validate(struct nvmm_machine *mach, uintptr_t hva, size_t size)
{
	struct nvmm_hmapping *hmapping;
	size_t i;

	if ((hva % PAGE_SIZE) != 0 || (size % PAGE_SIZE) != 0) {
		return EINVAL;
	}
	if (hva == 0) {
		return EINVAL;
	}

	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		hmapping = &mach->hmap[i];
		if (!hmapping->present) {
			continue;
		}

		if (hva >= hmapping->hva &&
		    hva + size <= hmapping->hva + hmapping->size) {
			break;
		}

		if (hva >= hmapping->hva &&
		    hva < hmapping->hva + hmapping->size) {
			return EEXIST;
		}
		if (hva + size > hmapping->hva &&
		    hva + size <= hmapping->hva + hmapping->size) {
			return EEXIST;
		}
		if (hva <= hmapping->hva &&
		    hva + size >= hmapping->hva + hmapping->size) {
			return EEXIST;
		}
	}

	return 0;
}

static struct nvmm_hmapping *
nvmm_hmapping_alloc(struct nvmm_machine *mach)
{
	struct nvmm_hmapping *hmapping;
	size_t i;

	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		hmapping = &mach->hmap[i];
		if (!hmapping->present) {
			hmapping->present = true;
			return hmapping;
		}
	}

	return NULL;
}

static int
nvmm_hmapping_free(struct nvmm_machine *mach, uintptr_t hva, size_t size)
{
	struct vmspace *vmspace = curproc->p_vmspace;
	struct nvmm_hmapping *hmapping;
	size_t i;

	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		hmapping = &mach->hmap[i];
		if (!hmapping->present || hmapping->hva != hva ||
		    hmapping->size != size) {
			continue;
		}

		uvm_unmap(&vmspace->vm_map, hmapping->hva,
		    hmapping->hva + hmapping->size);
		uao_detach(hmapping->uobj);

		hmapping->uobj = NULL;
		hmapping->present = false;

		return 0;
	}

	return ENOENT;
}

static int
nvmm_hva_map(struct nvmm_owner *owner, struct nvmm_ioc_hva_map *args)
{
	struct vmspace *vmspace = curproc->p_vmspace;
	struct nvmm_machine *mach;
	struct nvmm_hmapping *hmapping;
	vaddr_t uva;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, true);
	if (error)
		return error;

	error = nvmm_hmapping_validate(mach, args->hva, args->size);
	if (error)
		goto out;

	hmapping = nvmm_hmapping_alloc(mach);
	if (hmapping == NULL) {
		error = ENOBUFS;
		goto out;
	}

	hmapping->hva = args->hva;
	hmapping->size = args->size;
	hmapping->uobj = uao_create(hmapping->size, 0);
	uva = hmapping->hva;

	/* Take a reference for the user. */
	uao_reference(hmapping->uobj);

	/* Map the uobj into the user address space, as pageable. */
	error = uvm_map(&vmspace->vm_map, &uva, hmapping->size, hmapping->uobj,
	    0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_SHARE,
	    UVM_ADV_RANDOM, UVM_FLAG_FIXED|UVM_FLAG_UNMAP));
	if (error) {
		uao_detach(hmapping->uobj);
	}

out:
	nvmm_machine_put(mach);
	return error;
}

static int
nvmm_hva_unmap(struct nvmm_owner *owner, struct nvmm_ioc_hva_unmap *args)
{
	struct nvmm_machine *mach;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, true);
	if (error)
		return error;

	error = nvmm_hmapping_free(mach, args->hva, args->size);

	nvmm_machine_put(mach);
	return error;
}

/* -------------------------------------------------------------------------- */

static int
nvmm_gpa_map(struct nvmm_owner *owner, struct nvmm_ioc_gpa_map *args)
{
	struct nvmm_machine *mach;
	struct uvm_object *uobj;
	gpaddr_t gpa;
	size_t off;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	if ((args->prot & ~(PROT_READ|PROT_WRITE|PROT_EXEC)) != 0) {
		error = EINVAL;
		goto out;
	}

	if ((args->gpa % PAGE_SIZE) != 0 || (args->size % PAGE_SIZE) != 0 ||
	    (args->hva % PAGE_SIZE) != 0) {
		error = EINVAL;
		goto out;
	}
	if (args->hva == 0) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa < mach->gpa_begin || args->gpa >= mach->gpa_end) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa + args->size <= args->gpa) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa + args->size > mach->gpa_end) {
		error = EINVAL;
		goto out;
	}
	gpa = args->gpa;

	uobj = nvmm_hmapping_getuobj(mach, args->hva, args->size, &off);
	if (uobj == NULL) {
		error = EINVAL;
		goto out;
	}

	/* Take a reference for the machine. */
	uao_reference(uobj);

	/* Map the uobj into the machine address space, as pageable. */
	error = uvm_map(&mach->vm->vm_map, &gpa, args->size, uobj, off, 0,
	    UVM_MAPFLAG(args->prot, UVM_PROT_RWX, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_FIXED|UVM_FLAG_UNMAP));
	if (error) {
		uao_detach(uobj);
		goto out;
	}
	if (gpa != args->gpa) {
		uao_detach(uobj);
		printf("[!] uvm_map problem\n");
		error = EINVAL;
		goto out;
	}

out:
	nvmm_machine_put(mach);
	return error;
}

static int
nvmm_gpa_unmap(struct nvmm_owner *owner, struct nvmm_ioc_gpa_unmap *args)
{
	struct nvmm_machine *mach;
	gpaddr_t gpa;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	if ((args->gpa % PAGE_SIZE) != 0 || (args->size % PAGE_SIZE) != 0) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa < mach->gpa_begin || args->gpa >= mach->gpa_end) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa + args->size <= args->gpa) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa + args->size >= mach->gpa_end) {
		error = EINVAL;
		goto out;
	}
	gpa = args->gpa;

	/* Unmap the memory from the machine. */
	uvm_unmap(&mach->vm->vm_map, gpa, gpa + args->size);

out:
	nvmm_machine_put(mach);
	return error;
}

/* -------------------------------------------------------------------------- */

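/*
 * NVMM_CTL_MACH_INFO: report the number of VCPUs, the total size of the
 * host mappings (nram), the owner PID and the creation time of the machine
 * identified in the request.
 */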
static int
nvmm_ctl_mach_info(struct nvmm_owner *owner, struct nvmm_ioc_ctl *args)
{
	struct nvmm_ctl_mach_info ctl;
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;
	size_t i;

	if (args->size != sizeof(ctl))
		return EINVAL;
	error = copyin(args->data, &ctl, sizeof(ctl));
	if (error)
		return error;

	error = nvmm_machine_get(owner, ctl.machid, &mach, true);
	if (error)
		return error;

	ctl.nvcpus = 0;
	for (i = 0; i < NVMM_MAX_VCPUS; i++) {
		error = nvmm_vcpu_get(mach, i, &vcpu);
		if (error)
			continue;
		ctl.nvcpus++;
		nvmm_vcpu_put(vcpu);
	}

	ctl.nram = 0;
	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		if (!mach->hmap[i].present)
			continue;
		ctl.nram += mach->hmap[i].size;
	}

	ctl.pid = mach->owner->pid;
	ctl.time = mach->time;

	nvmm_machine_put(mach);

	error = copyout(&ctl, args->data, sizeof(ctl));
	if (error)
		return error;

	return 0;
}

static int
nvmm_ctl(struct nvmm_owner *owner, struct nvmm_ioc_ctl *args)
{
	switch (args->op) {
	case NVMM_CTL_MACH_INFO:
		return nvmm_ctl_mach_info(owner, args);
	default:
		return EINVAL;
	}
}

/* -------------------------------------------------------------------------- */

static const struct nvmm_impl *
nvmm_ident(void)
{
	size_t i;

	for (i = 0; i < __arraycount(nvmm_impl_list); i++) {
		if ((*nvmm_impl_list[i]->ident)())
			return nvmm_impl_list[i];
	}

	return NULL;
}

static int
nvmm_init(void)
{
	size_t i, n;

	nvmm_impl = nvmm_ident();
	if (nvmm_impl == NULL)
		return ENOTSUP;

	for (i = 0; i < NVMM_MAX_MACHINES; i++) {
		machines[i].machid = i;
		rw_init(&machines[i].lock);
		for (n = 0; n < NVMM_MAX_VCPUS; n++) {
			machines[i].cpus[n].present = false;
			machines[i].cpus[n].cpuid = n;
			mutex_init(&machines[i].cpus[n].lock, MUTEX_DEFAULT,
			    IPL_NONE);
		}
	}

	(*nvmm_impl->init)();

	return 0;
}

static void
nvmm_fini(void)
{
	size_t i, n;

	for (i = 0; i < NVMM_MAX_MACHINES; i++) {
		rw_destroy(&machines[i].lock);
		for (n = 0; n < NVMM_MAX_VCPUS; n++) {
			mutex_destroy(&machines[i].cpus[n].lock);
		}
	}

	(*nvmm_impl->fini)();
	nvmm_impl = NULL;
}

/* -------------------------------------------------------------------------- */

static dev_type_open(nvmm_open);

const struct cdevsw nvmm_cdevsw = {
	.d_open = nvmm_open,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

static int nvmm_ioctl(file_t *, u_long, void *);
static int nvmm_close(file_t *);
static int nvmm_mmap(file_t *, off_t *, size_t, int, int *, int *,
    struct uvm_object **, int *);

const struct fileops nvmm_fileops = {
	.fo_read = fbadop_read,
	.fo_write = fbadop_write,
	.fo_ioctl = nvmm_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = fnullop_poll,
	.fo_stat = fbadop_stat,
	.fo_close = nvmm_close,
	.fo_kqfilter = fnullop_kqfilter,
	.fo_restart = fnullop_restart,
	.fo_mmap = nvmm_mmap,
};

static int
nvmm_open(dev_t dev, int flags, int type, struct lwp *l)
{
	struct nvmm_owner *owner;
	struct file *fp;
	int error, fd;

	if (__predict_false(nvmm_impl == NULL))
		return ENXIO;
	if (minor(dev) != 0)
		return EXDEV;
	if (!(flags & O_CLOEXEC))
		return EINVAL;
	error = fd_allocfile(&fp, &fd);
	if (error)
		return error;

	if (OFLAGS(flags) & O_WRONLY) {
		owner = &root_owner;
	} else {
		owner = kmem_alloc(sizeof(*owner), KM_SLEEP);
		owner->pid = l->l_proc->p_pid;
	}

	return fd_clone(fp, fd, flags, &nvmm_fileops, owner);
}

static int
nvmm_close(file_t *fp)
{
	struct nvmm_owner *owner = fp->f_data;

	KASSERT(owner != NULL);
	nvmm_kill_machines(owner);
	if (owner != &root_owner) {
		kmem_free(owner, sizeof(*owner));
	}
	fp->f_data = NULL;

	return 0;
}

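/*
 * mmap() on an nvmm file descriptor maps a VCPU comm page into userland.
 * The machine id and VCPU id are encoded in the file offset; the page is
 * backed by the machine's comm uobj, so userland and the kernel share it.
 */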
static int
nvmm_mmap(file_t *fp, off_t *offp, size_t size, int prot, int *flagsp,
    int *advicep, struct uvm_object **uobjp, int *maxprotp)
{
	struct nvmm_owner *owner = fp->f_data;
	struct nvmm_machine *mach;
	nvmm_machid_t machid;
	nvmm_cpuid_t cpuid;
	int error;

	if (prot & PROT_EXEC)
		return EACCES;
	if (size != PAGE_SIZE)
		return EINVAL;

	cpuid = NVMM_COMM_CPUID(*offp);
	if (__predict_false(cpuid >= NVMM_MAX_VCPUS))
		return EINVAL;

	machid = NVMM_COMM_MACHID(*offp);
	error = nvmm_machine_get(owner, machid, &mach, false);
	if (error)
		return error;

	uao_reference(mach->commuobj);
	*uobjp = mach->commuobj;
	*offp = cpuid * PAGE_SIZE;
	*maxprotp = prot;
	*advicep = UVM_ADV_RANDOM;

	nvmm_machine_put(mach);
	return 0;
}

static int
nvmm_ioctl(file_t *fp, u_long cmd, void *data)
{
	struct nvmm_owner *owner = fp->f_data;

	KASSERT(owner != NULL);

	switch (cmd) {
	case NVMM_IOC_CAPABILITY:
		return nvmm_capability(owner, data);
	case NVMM_IOC_MACHINE_CREATE:
		return nvmm_machine_create(owner, data);
	case NVMM_IOC_MACHINE_DESTROY:
		return nvmm_machine_destroy(owner, data);
	case NVMM_IOC_MACHINE_CONFIGURE:
		return nvmm_machine_configure(owner, data);
	case NVMM_IOC_VCPU_CREATE:
		return nvmm_vcpu_create(owner, data);
	case NVMM_IOC_VCPU_DESTROY:
		return nvmm_vcpu_destroy(owner, data);
	case NVMM_IOC_VCPU_CONFIGURE:
		return nvmm_vcpu_configure(owner, data);
	case NVMM_IOC_VCPU_SETSTATE:
		return nvmm_vcpu_setstate(owner, data);
	case NVMM_IOC_VCPU_GETSTATE:
		return nvmm_vcpu_getstate(owner, data);
	case NVMM_IOC_VCPU_INJECT:
		return nvmm_vcpu_inject(owner, data);
	case NVMM_IOC_VCPU_RUN:
		return nvmm_vcpu_run(owner, data);
	case NVMM_IOC_GPA_MAP:
		return nvmm_gpa_map(owner, data);
	case NVMM_IOC_GPA_UNMAP:
		return nvmm_gpa_unmap(owner, data);
	case NVMM_IOC_HVA_MAP:
		return nvmm_hva_map(owner, data);
	case NVMM_IOC_HVA_UNMAP:
		return nvmm_hva_unmap(owner, data);
	case NVMM_IOC_CTL:
		return nvmm_ctl(owner, data);
	default:
		return EINVAL;
	}
}

/* -------------------------------------------------------------------------- */

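/*
 * Autoconf and module glue: nvmm attaches as a pseudo-device. When built
 * as a module, the cfdriver, cfattach, cfdata and the character device
 * are registered at load time and torn down again on unload.
 */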
static int nvmm_match(device_t, cfdata_t, void *);
static void nvmm_attach(device_t, device_t, void *);
static int nvmm_detach(device_t, int);

extern struct cfdriver nvmm_cd;

CFATTACH_DECL_NEW(nvmm, 0, nvmm_match, nvmm_attach, nvmm_detach, NULL);

static struct cfdata nvmm_cfdata[] = {
	{
		.cf_name = "nvmm",
		.cf_atname = "nvmm",
		.cf_unit = 0,
		.cf_fstate = FSTATE_STAR,
		.cf_loc = NULL,
		.cf_flags = 0,
		.cf_pspec = NULL,
	},
	{ NULL, NULL, 0, FSTATE_NOTFOUND, NULL, 0, NULL }
};

static int
nvmm_match(device_t self, cfdata_t cfdata, void *arg)
{
	return 1;
}

static void
nvmm_attach(device_t parent, device_t self, void *aux)
{
	int error;

	error = nvmm_init();
	if (error)
		panic("%s: impossible", __func__);
	aprint_normal_dev(self, "attached, using backend %s\n",
	    nvmm_impl->name);
}

static int
nvmm_detach(device_t self, int flags)
{
	if (nmachines > 0)
		return EBUSY;
	nvmm_fini();
	return 0;
}

void
nvmmattach(int nunits)
{
	/* nothing */
}

MODULE(MODULE_CLASS_MISC, nvmm, NULL);

#if defined(_MODULE)
CFDRIVER_DECL(nvmm, DV_VIRTUAL, NULL);
#endif

static int
nvmm_modcmd(modcmd_t cmd, void *arg)
{
#if defined(_MODULE)
	devmajor_t bmajor = NODEVMAJOR;
	devmajor_t cmajor = 345;
#endif
	int error;

	switch (cmd) {
	case MODULE_CMD_INIT:
		if (nvmm_ident() == NULL) {
			aprint_error("%s: cpu not supported\n",
			    nvmm_cd.cd_name);
			return ENOTSUP;
		}
#if defined(_MODULE)
		error = config_cfdriver_attach(&nvmm_cd);
		if (error)
			return error;
#endif
		error = config_cfattach_attach(nvmm_cd.cd_name, &nvmm_ca);
		if (error) {
			config_cfdriver_detach(&nvmm_cd);
			aprint_error("%s: config_cfattach_attach failed\n",
			    nvmm_cd.cd_name);
			return error;
		}

		error = config_cfdata_attach(nvmm_cfdata, 1);
		if (error) {
			config_cfattach_detach(nvmm_cd.cd_name, &nvmm_ca);
			config_cfdriver_detach(&nvmm_cd);
			aprint_error("%s: unable to register cfdata\n",
			    nvmm_cd.cd_name);
			return error;
		}

		if (config_attach_pseudo(nvmm_cfdata) == NULL) {
			aprint_error("%s: config_attach_pseudo failed\n",
			    nvmm_cd.cd_name);
			config_cfattach_detach(nvmm_cd.cd_name, &nvmm_ca);
			config_cfdriver_detach(&nvmm_cd);
			return ENXIO;
		}

#if defined(_MODULE)
		/* mknod /dev/nvmm c 345 0 */
		error = devsw_attach(nvmm_cd.cd_name, NULL, &bmajor,
		    &nvmm_cdevsw, &cmajor);
		if (error) {
			aprint_error("%s: unable to register devsw\n",
			    nvmm_cd.cd_name);
			config_cfattach_detach(nvmm_cd.cd_name, &nvmm_ca);
			config_cfdriver_detach(&nvmm_cd);
			return error;
		}
#endif
		return 0;
	case MODULE_CMD_FINI:
		error = config_cfdata_detach(nvmm_cfdata);
		if (error)
			return error;
		error = config_cfattach_detach(nvmm_cd.cd_name, &nvmm_ca);
		if (error)
			return error;
#if defined(_MODULE)
		config_cfdriver_detach(&nvmm_cd);
		devsw_detach(NULL, &nvmm_cdevsw);
#endif
		return 0;
	case MODULE_CMD_AUTOUNLOAD:
		return EBUSY;
	default:
		return ENOTTY;
	}
}