/*	$NetBSD: sys_machdep.c,v 1.12 2008/04/21 12:56:31 ad Exp $	*/

/*-
 * Copyright (c) 1998, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_machdep.c,v 1.12 2008/04/21 12:56:31 ad Exp $");

#include "opt_compat_netbsd.h"
#include "opt_mtrr.h"
#include "opt_perfctrs.h"
#include "opt_user_ldt.h"
#include "opt_vm86.h"
#include "opt_xen.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/signal.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kauth.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/gdt.h>
#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/sysarch.h>
#include <machine/mtrr.h>
#ifdef __x86_64__
/* Needs to be checked. */
#undef USER_LDT
#undef PERFCTRS
#undef VM86
#undef IOPERM
#else
#if defined(XEN)
#undef IOPERM
#else /* defined(XEN) */
#define IOPERM
#endif /* defined(XEN) */
#endif

#ifdef VM86
#include <machine/vm86.h>
#endif

#ifdef PERFCTRS
#include <machine/pmc.h>
#endif

extern struct vm_map *kernel_map;

int x86_get_ioperm(struct lwp *, void *, register_t *);
int x86_set_ioperm(struct lwp *, void *, register_t *);
int x86_get_mtrr(struct lwp *, void *, register_t *);
int x86_set_mtrr(struct lwp *, void *, register_t *);
int x86_set_sdbase(void *arg, char which);
int x86_get_sdbase(void *arg, char which);

#ifdef LDT_DEBUG
static void x86_print_ldt(int, const struct segment_descriptor *);

static void
x86_print_ldt(int i, const struct segment_descriptor *d)
{
	printf("[%d] lolimit=0x%x, lobase=0x%x, type=%u, dpl=%u, p=%u, "
	    "hilimit=0x%x, xx=%x, def32=%u, gran=%u, hibase=0x%x\n",
	    i, d->sd_lolimit, d->sd_lobase, d->sd_type, d->sd_dpl, d->sd_p,
	    d->sd_hilimit, d->sd_xx, d->sd_def32, d->sd_gran, d->sd_hibase);
}
#endif

int
x86_get_ldt_len(struct lwp *l)
{
#ifndef USER_LDT
	return -1;
#else
	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
	int nldt;

	mutex_enter(&pmap->pm_lock);

	if (pmap->pm_flags & PMF_USER_LDT) {
		nldt = pmap->pm_ldt_len;
	} else {
		nldt = NLDT;
	}
	mutex_exit(&pmap->pm_lock);
	return nldt;
#endif
}
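
/*
 * Note: no sysarch(2) operation in sys_sysarch() below dispatches to
 * x86_get_ldt_len(); it is exported for other kernel code (presumably
 * compat code) that needs the current LDT slot count, for example to
 * size a buffer before calling x86_get_ldt1().
 */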

int
x86_get_ldt(struct lwp *l, void *args, register_t *retval)
{
#ifndef USER_LDT
	return EINVAL;
#else
	struct x86_get_ldt_args ua;
	union descriptor *cp;
	int error;

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return error;

	if (ua.num < 0 || ua.num > 8192)
		return EINVAL;

	cp = malloc(ua.num * sizeof(union descriptor), M_TEMP, M_WAITOK);
	if (cp == NULL)
		return ENOMEM;

	error = x86_get_ldt1(l, &ua, cp);
	*retval = ua.num;
	if (error == 0)
		error = copyout(cp, ua.desc, ua.num * sizeof(*cp));

	free(cp, M_TEMP);
	return error;
#endif
}
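
/*
 * Illustrative sketch, not part of this file: userland reaches
 * x86_get_ldt() through sysarch(2).  The argument layout follows the
 * struct as used above; the buffer size is an arbitrary example.  On
 * success the syscall returns the number of descriptors copied out
 * (ua.num, stored in *retval above).
 *
 *	union descriptor descs[8];
 *	struct x86_get_ldt_args gl;
 *	int n;
 *
 *	gl.start = 0;
 *	gl.num = 8;
 *	gl.desc = descs;
 *	if ((n = sysarch(X86_GET_LDT, &gl)) == -1)
 *		err(1, "X86_GET_LDT");
 */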

int
x86_get_ldt1(struct lwp *l, struct x86_get_ldt_args *ua, union descriptor *cp)
{
#ifndef USER_LDT
	return EINVAL;
#else
	int error;
	struct proc *p = l->l_proc;
	pmap_t pmap = p->p_vmspace->vm_map.pmap;
	int nldt, num;
	union descriptor *lp;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_LDT_GET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

#ifdef LDT_DEBUG
	printf("x86_get_ldt: start=%d num=%d descs=%p\n", ua->start,
	    ua->num, ua->desc);
#endif

	if (ua->start < 0 || ua->num < 0 || ua->start > 8192 || ua->num > 8192 ||
	    ua->start + ua->num > 8192)
		return (EINVAL);

	mutex_enter(&pmap->pm_lock);

	if (pmap->pm_flags & PMF_USER_LDT) {
		nldt = pmap->pm_ldt_len;
		lp = pmap->pm_ldt;
	} else {
		nldt = NLDT;
		lp = ldt;
	}

	if (ua->start > nldt) {
		mutex_exit(&pmap->pm_lock);
		return (EINVAL);
	}

	lp += ua->start;
	num = min(ua->num, nldt - ua->start);
	ua->num = num;
#ifdef LDT_DEBUG
	{
		int i;
		for (i = 0; i < num; i++)
			x86_print_ldt(i, &lp[i].sd);
	}
#endif

	memcpy(cp, lp, num * sizeof(union descriptor));
	mutex_exit(&pmap->pm_lock);

	return 0;
#endif
}

int
x86_set_ldt(struct lwp *l, void *args, register_t *retval)
{
#ifndef USER_LDT
	return EINVAL;
#else
	struct x86_set_ldt_args ua;
	union descriptor *descv;
	int error;

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return (error);

	if (ua.num < 0 || ua.num > 8192)
		return EINVAL;

	descv = malloc(sizeof (*descv) * ua.num, M_TEMP, M_NOWAIT);
	if (descv == NULL)
		return ENOMEM;

	error = copyin(ua.desc, descv, sizeof (*descv) * ua.num);
	if (error == 0)
		error = x86_set_ldt1(l, &ua, descv);
	*retval = ua.start;

	free(descv, M_TEMP);
	return error;
#endif
}
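
/*
 * Illustrative sketch, not part of this file: installing a single flat
 * ring-3 data segment through sysarch(2).  The descriptor fields mirror
 * the one built in x86_set_sdbase() below; the slot number is an
 * arbitrary example.  Anything marked present must have sd_dpl ==
 * SEL_UPL, or x86_set_ldt1() rejects it with EACCES.  On success the
 * syscall returns the starting slot (*retval above).
 *
 *	union descriptor d;
 *	struct x86_set_ldt_args sl;
 *
 *	memset(&d, 0, sizeof(d));
 *	d.sd.sd_lolimit = 0xffff;
 *	d.sd.sd_hilimit = 0xf;
 *	d.sd.sd_type = SDT_MEMRWA;
 *	d.sd.sd_dpl = SEL_UPL;
 *	d.sd.sd_p = 1;
 *	d.sd.sd_def32 = 1;
 *	d.sd.sd_gran = 1;
 *
 *	sl.start = 6;
 *	sl.num = 1;
 *	sl.desc = &d;
 *	if (sysarch(X86_SET_LDT, &sl) == -1)
 *		err(1, "X86_SET_LDT");
 */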

int
x86_set_ldt1(struct lwp *l, struct x86_set_ldt_args *ua,
    union descriptor *descv)
{
#ifndef USER_LDT
	return EINVAL;
#else
	int error, i, n, sel, free_sel;
	struct proc *p = l->l_proc;
	struct pcb *pcb = &l->l_addr->u_pcb;
	pmap_t pmap = p->p_vmspace->vm_map.pmap;
	size_t old_len, new_len, ldt_len, free_len;
	union descriptor *old_ldt, *new_ldt, *free_ldt;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_LDT_SET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	if (ua->start < 0 || ua->num < 0 || ua->start > 8192 || ua->num > 8192 ||
	    ua->start + ua->num > 8192)
		return (EINVAL);

	/* Check descriptors for access violations. */
	for (i = 0; i < ua->num; i++) {
		union descriptor *desc = &descv[i];

		switch (desc->sd.sd_type) {
		case SDT_SYSNULL:
			desc->sd.sd_p = 0;
			break;
		case SDT_SYS286CGT:
		case SDT_SYS386CGT:
			/*
			 * Only allow call gates targeting a segment
			 * in the LDT or a user segment in the fixed
			 * part of the gdt.  Segments in the LDT are
			 * constrained (below) to be user segments.
			 */
			if (desc->gd.gd_p != 0 &&
			    !ISLDT(desc->gd.gd_selector) &&
			    ((IDXSEL(desc->gd.gd_selector) >= NGDT) ||
			     (gdt[IDXSEL(desc->gd.gd_selector)].sd.sd_dpl !=
			     SEL_UPL))) {
				return EACCES;
			}
			break;
		case SDT_MEMEC:
		case SDT_MEMEAC:
		case SDT_MEMERC:
		case SDT_MEMERAC:
			/* Must be "present" if executable and conforming. */
			if (desc->sd.sd_p == 0)
				return EACCES;
			break;
		case SDT_MEMRO:
		case SDT_MEMROA:
		case SDT_MEMRW:
		case SDT_MEMRWA:
		case SDT_MEMROD:
		case SDT_MEMRODA:
		case SDT_MEMRWD:
		case SDT_MEMRWDA:
		case SDT_MEME:
		case SDT_MEMEA:
		case SDT_MEMER:
		case SDT_MEMERA:
			break;
		default:
			/*
			 * Make sure that unknown descriptor types are
			 * not marked present.
			 */
			if (desc->sd.sd_p != 0)
				return EACCES;
			break;
		}

		if (desc->sd.sd_p != 0) {
			/* Only user (ring-3) descriptors may be present. */
			if (desc->sd.sd_dpl != SEL_UPL)
				return EACCES;
		}
	}

	/* allocate user ldt */
	free_sel = -1;
	new_ldt = NULL;
	new_len = 0;
	free_ldt = NULL;
	free_len = 0;
	mutex_enter(&pmap->pm_lock);
	if (pmap->pm_ldt == NULL || (ua->start + ua->num) > pmap->pm_ldt_len) {
		if (pmap->pm_flags & PMF_USER_LDT)
			ldt_len = pmap->pm_ldt_len;
		else
			ldt_len = 512;
		while ((ua->start + ua->num) > ldt_len)
			ldt_len *= 2;
		new_len = ldt_len * sizeof(union descriptor);

		mutex_exit(&pmap->pm_lock);
		new_ldt = (union descriptor *)uvm_km_alloc(kernel_map,
		    new_len, 0, UVM_KMF_WIRED);
		memset(new_ldt, 0, new_len);
		sel = ldt_alloc(new_ldt, new_len);
		mutex_enter(&pmap->pm_lock);

		if (pmap->pm_ldt != NULL && ldt_len <= pmap->pm_ldt_len) {
			/*
			 * Another thread (re)allocated the LDT to
			 * sufficient size while we were blocked in
			 * uvm_km_alloc.  Oh well.  The new entries
			 * will quite probably not be right, but
			 * hey..  not our problem if user applications
			 * have race conditions like that.
			 */
			goto copy;
		}

		old_ldt = pmap->pm_ldt;
		free_ldt = old_ldt;
		free_len = pmap->pm_ldt_len * sizeof(union descriptor);

		if (old_ldt != NULL) {
			old_len = pmap->pm_ldt_len * sizeof(union descriptor);
		} else {
			old_len = NLDT * sizeof(union descriptor);
			old_ldt = ldt;
		}

		memcpy(new_ldt, old_ldt, old_len);
		memset((char *)new_ldt + old_len, 0, new_len - old_len);

		pmap->pm_ldt = new_ldt;
		pmap->pm_ldt_len = ldt_len;

		if (pmap->pm_flags & PMF_USER_LDT)
			free_sel = pmap->pm_ldt_sel;
		else {
			pmap->pm_flags |= PMF_USER_LDT;
			free_sel = -1;
		}
		pmap->pm_ldt_sel = sel;
		pcb->pcb_ldt_sel = pmap->pm_ldt_sel;
		if (pcb == curpcb)
			lldt(pcb->pcb_ldt_sel);
		new_ldt = NULL;
	}
copy:
	/* Now actually replace the descriptors. */
	for (i = 0, n = ua->start; i < ua->num; i++, n++)
		pmap->pm_ldt[n] = descv[i];

	mutex_exit(&pmap->pm_lock);

	if (new_ldt != NULL)
		uvm_km_free(kernel_map, (vaddr_t)new_ldt, new_len,
		    UVM_KMF_WIRED);
	if (free_sel != -1)
		ldt_free(free_sel);
	if (free_ldt != NULL)
		uvm_km_free(kernel_map, (vaddr_t)free_ldt, free_len,
		    UVM_KMF_WIRED);

	return (error);
#endif
}

int
x86_iopl(struct lwp *l, void *args, register_t *retval)
{
	int error;
	struct x86_iopl_args ua;
#ifdef XEN
	int iopl;
#else
	struct trapframe *tf = l->l_md.md_regs;
#endif

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPL,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return error;

#ifdef XEN
	if (ua.iopl)
		iopl = SEL_UPL;
	else
		iopl = SEL_KPL;
	l->l_addr->u_pcb.pcb_iopl = iopl;
	/* Force the change at ring 0. */
#ifdef XEN3
	{
		struct physdev_op physop;
		physop.cmd = PHYSDEVOP_SET_IOPL;
		physop.u.set_iopl.iopl = iopl;
		HYPERVISOR_physdev_op(&physop);
	}
#else /* XEN3 */
	{
		dom0_op_t op;
		op.cmd = DOM0_IOPL;
		op.u.iopl.domain = DOMID_SELF;
		op.u.iopl.iopl = iopl;
		HYPERVISOR_dom0_op(&op);
	}
#endif /* XEN3 */
#elif defined(__x86_64__)
	if (ua.iopl)
		tf->tf_rflags |= PSL_IOPL;
	else
		tf->tf_rflags &= ~PSL_IOPL;
#else
	if (ua.iopl)
		tf->tf_eflags |= PSL_IOPL;
	else
		tf->tf_eflags &= ~PSL_IOPL;
#endif

	return 0;
}
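
/*
 * Illustrative sketch, not part of this file: a privileged process
 * (for example an X server doing direct hardware access) raising its
 * I/O privilege level from userland.  The caller must pass the
 * KAUTH_MACHDEP_IOPL check above.
 *
 *	struct x86_iopl_args io;
 *
 *	io.iopl = 1;
 *	if (sysarch(X86_IOPL, &io) == -1)
 *		err(1, "X86_IOPL");
 */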

int
x86_get_ioperm(struct lwp *l, void *args, register_t *retval)
{
#ifdef IOPERM
	int error;
	struct pcb *pcb = &l->l_addr->u_pcb;
	struct x86_get_ioperm_args ua;
	void *dummymap = NULL;
	void *iomap;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPERM_GET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return (error);

	iomap = pcb->pcb_iomap;
	if (iomap == NULL) {
		iomap = dummymap = kmem_alloc(IOMAPSIZE, KM_SLEEP);
		memset(dummymap, 0xff, IOMAPSIZE);
	}
	error = copyout(iomap, ua.iomap, IOMAPSIZE);
	if (dummymap != NULL) {
		kmem_free(dummymap, IOMAPSIZE);
	}
	return error;
#else
	return EINVAL;
#endif
}

int
x86_set_ioperm(struct lwp *l, void *args, register_t *retval)
{
#ifdef IOPERM
	struct cpu_info *ci;
	int error;
	struct pcb *pcb = &l->l_addr->u_pcb;
	struct x86_set_ioperm_args ua;
	void *new;
	void *old;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPERM_SET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return (error);

	new = kmem_alloc(IOMAPSIZE, KM_SLEEP);
	error = copyin(ua.iomap, new, IOMAPSIZE);
	if (error) {
		kmem_free(new, IOMAPSIZE);
		return error;
	}
	old = pcb->pcb_iomap;
	pcb->pcb_iomap = new;
	if (old != NULL) {
		kmem_free(old, IOMAPSIZE);
	}

	crit_enter();
	ci = curcpu();
	memcpy(ci->ci_iomap, pcb->pcb_iomap, sizeof(ci->ci_iomap));
	ci->ci_tss.tss_iobase =
	    ((uintptr_t)ci->ci_iomap - (uintptr_t)&ci->ci_tss) << 16;
	crit_exit();

	return error;
#else
	return EINVAL;
#endif
}
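
/*
 * In the TSS-format bitmap handled above, a set bit denies access to
 * the corresponding I/O port and a clear bit grants it; that is why
 * x86_get_ioperm() fakes an all-ones map when no per-process map
 * exists.  Illustrative sketch, not part of this file: granting access
 * to one port (the port number is an arbitrary example, and the map
 * must be IOMAPSIZE bytes, matching the kernel):
 *
 *	u_long map[IOMAPSIZE / sizeof(u_long)];
 *	struct x86_set_ioperm_args sp;
 *	int port = 0x378;
 *
 *	memset(map, 0xff, sizeof(map));
 *	map[port / (NBBY * sizeof(u_long))] &=
 *	    ~(1UL << (port % (NBBY * sizeof(u_long))));
 *	sp.iomap = map;
 *	if (sysarch(X86_SET_IOPERM, &sp) == -1)
 *		err(1, "X86_SET_IOPERM");
 */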

int
x86_get_mtrr(struct lwp *l, void *args, register_t *retval)
{
#ifdef MTRR
	struct x86_get_mtrr_args ua;
	int error, n;

	if (mtrr_funcs == NULL)
		return ENOSYS;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_GET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	error = copyin(args, &ua, sizeof ua);
	if (error != 0)
		return error;

	error = copyin(ua.n, &n, sizeof n);
	if (error != 0)
		return error;

	KERNEL_LOCK(1, NULL);
	error = mtrr_get(ua.mtrrp, &n, l->l_proc, MTRR_GETSET_USER);
	KERNEL_UNLOCK_ONE(NULL);

	copyout(&n, ua.n, sizeof (int));

	return error;
#else
	return EINVAL;
#endif
}

int
x86_set_mtrr(struct lwp *l, void *args, register_t *retval)
{
#ifdef MTRR
	int error, n;
	struct x86_set_mtrr_args ua;

	if (mtrr_funcs == NULL)
		return ENOSYS;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_SET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	error = copyin(args, &ua, sizeof ua);
	if (error != 0)
		return error;

	error = copyin(ua.n, &n, sizeof n);
	if (error != 0)
		return error;

	KERNEL_LOCK(1, NULL);
	error = mtrr_set(ua.mtrrp, &n, l->l_proc, MTRR_GETSET_USER);
	if (n != 0)
		mtrr_commit();
	KERNEL_UNLOCK_ONE(NULL);

	copyout(&n, ua.n, sizeof n);

	return error;
#else
	return EINVAL;
#endif
}
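
/*
 * Illustrative sketch, not part of this file: marking a (hypothetical)
 * frame buffer range write-combining through the MTRR interface above.
 * The struct mtrr fields and MTRR_* constants are assumed from
 * <machine/mtrr.h>; base and length are arbitrary examples.  n carries
 * the number of ranges in, and the number actually processed out.
 *
 *	struct mtrr m;
 *	struct x86_set_mtrr_args sm;
 *	int n = 1;
 *
 *	memset(&m, 0, sizeof(m));
 *	m.base = 0xf0000000;
 *	m.len  = 0x01000000;
 *	m.type = MTRR_TYPE_WC;
 *	m.flags = MTRR_VALID;
 *	sm.mtrrp = &m;
 *	sm.n = &n;
 *	if (sysarch(X86_SET_MTRR, &sm) == -1)
 *		err(1, "X86_SET_MTRR");
 */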

int
x86_set_sdbase(void *arg, char which)
{
#ifdef i386
	struct segment_descriptor sd;
	vaddr_t base;
	int error;

	error = copyin(arg, &base, sizeof(base));
	if (error != 0)
		return error;

	sd.sd_lobase = base & 0xffffff;
	sd.sd_hibase = (base >> 24) & 0xff;
	sd.sd_lolimit = 0xffff;
	sd.sd_hilimit = 0xf;
	sd.sd_type = SDT_MEMRWA;
	sd.sd_dpl = SEL_UPL;
	sd.sd_p = 1;
	sd.sd_xx = 0;
	sd.sd_def32 = 1;
	sd.sd_gran = 1;

	crit_enter();
	if (which == 'f') {
		memcpy(&curpcb->pcb_fsd, &sd, sizeof(sd));
		memcpy(&curcpu()->ci_gdt[GUFS_SEL], &sd, sizeof(sd));
	} else /* which == 'g' */ {
		memcpy(&curpcb->pcb_gsd, &sd, sizeof(sd));
		memcpy(&curcpu()->ci_gdt[GUGS_SEL], &sd, sizeof(sd));
	}
	crit_exit();

	return 0;
#else
	return EINVAL;
#endif
}

int
x86_get_sdbase(void *arg, char which)
{
#ifdef i386
	struct segment_descriptor *sd;
	vaddr_t base;

	switch (which) {
	case 'f':
		sd = (struct segment_descriptor *)&curpcb->pcb_fsd;
		break;
	case 'g':
		sd = (struct segment_descriptor *)&curpcb->pcb_gsd;
		break;
	default:
		panic("x86_get_sdbase");
	}

	base = sd->sd_hibase << 24 | sd->sd_lobase;
	/* Copy out to the user-supplied pointer, not to our local copy of it. */
	return copyout(&base, arg, sizeof(base));
#else
	return EINVAL;
#endif
}
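
/*
 * Illustrative sketch, not part of this file: the i386-only
 * FSBASE/GSBASE calls point %fs or %gs at a per-thread block, the
 * usual TLS-style use.  Both directions pass a pointer to the base
 * address, matching the copyin()/copyout() above.
 *
 *	static char tcb[128];
 *	void *base = tcb, *cur;
 *
 *	if (sysarch(X86_SET_FSBASE, &base) == -1)
 *		err(1, "X86_SET_FSBASE");
 *	if (sysarch(X86_GET_FSBASE, &cur) == -1)
 *		err(1, "X86_GET_FSBASE");
 */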

int
sys_sysarch(struct lwp *l, const struct sys_sysarch_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) op;
		syscallarg(void *) parms;
	} */
	int error = 0;

	switch (SCARG(uap, op)) {
	case X86_IOPL:
		error = x86_iopl(l, SCARG(uap, parms), retval);
		break;

	case X86_GET_LDT:
		error = x86_get_ldt(l, SCARG(uap, parms), retval);
		break;

	case X86_SET_LDT:
		error = x86_set_ldt(l, SCARG(uap, parms), retval);
		break;

	case X86_GET_IOPERM:
		error = x86_get_ioperm(l, SCARG(uap, parms), retval);
		break;

	case X86_SET_IOPERM:
		error = x86_set_ioperm(l, SCARG(uap, parms), retval);
		break;

	case X86_GET_MTRR:
		error = x86_get_mtrr(l, SCARG(uap, parms), retval);
		break;

	case X86_SET_MTRR:
		error = x86_set_mtrr(l, SCARG(uap, parms), retval);
		break;

#ifdef VM86
	case X86_VM86:
		error = x86_vm86(l, SCARG(uap, parms), retval);
		break;
#ifdef COMPAT_16
	case X86_OLD_VM86:
		error = compat_16_x86_vm86(l, SCARG(uap, parms), retval);
		break;
#endif
#endif

#ifdef PERFCTRS
	case X86_PMC_INFO:
		KERNEL_LOCK(1, NULL);
		error = pmc_info(l, SCARG(uap, parms), retval);
		KERNEL_UNLOCK_ONE(NULL);
		break;

	case X86_PMC_STARTSTOP:
		KERNEL_LOCK(1, NULL);
		error = pmc_startstop(l, SCARG(uap, parms), retval);
		KERNEL_UNLOCK_ONE(NULL);
		break;

	case X86_PMC_READ:
		KERNEL_LOCK(1, NULL);
		error = pmc_read(l, SCARG(uap, parms), retval);
		KERNEL_UNLOCK_ONE(NULL);
		break;
#endif

	case X86_SET_FSBASE:
		error = x86_set_sdbase(SCARG(uap, parms), 'f');
		break;

	case X86_SET_GSBASE:
		error = x86_set_sdbase(SCARG(uap, parms), 'g');
		break;

	case X86_GET_FSBASE:
		error = x86_get_sdbase(SCARG(uap, parms), 'f');
		break;

	case X86_GET_GSBASE:
		error = x86_get_sdbase(SCARG(uap, parms), 'g');
		break;

	default:
		error = EINVAL;
		break;
	}
	return (error);
}