/*	$NetBSD: sys_machdep.c,v 1.3 2007/08/29 23:38:06 ad Exp $	*/

/*-
 * Copyright (c) 1998, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_machdep.c,v 1.3 2007/08/29 23:38:06 ad Exp $");

#include "opt_compat_netbsd.h"
#include "opt_mtrr.h"
#include "opt_perfctrs.h"
#include "opt_user_ldt.h"
#include "opt_vm86.h"
#include "opt_xen.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/signal.h>
#include <sys/malloc.h>
#include <sys/kauth.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/gdt.h>
#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/sysarch.h>
#include <machine/mtrr.h>

#ifdef __x86_64__
/* These need to be checked on x86_64. */
#undef	USER_LDT
#undef	PERFCTRS
#undef	VM86
#undef	IOPERM
#else
#define	IOPERM
#endif

#ifdef VM86
#include <machine/vm86.h>
#endif

#ifdef PERFCTRS
#include <machine/pmc.h>
#endif

/* XXX needs changes from vmlocking branch */
#define	mutex_enter(x)	/* nothing */
#define	mutex_exit(x)	/* nothing */

extern struct vm_map *kernel_map;

int x86_get_ioperm(struct lwp *, void *, register_t *);
int x86_set_ioperm(struct lwp *, void *, register_t *);
int x86_get_mtrr(struct lwp *, void *, register_t *);
int x86_set_mtrr(struct lwp *, void *, register_t *);

#ifdef LDT_DEBUG
static void x86_print_ldt(int, const struct segment_descriptor *);

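/* Dump the fields of one LDT segment descriptor (LDT_DEBUG only). */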
static void
x86_print_ldt(int i, const struct segment_descriptor *d)
{
	printf("[%d] lolimit=0x%x, lobase=0x%x, type=%u, dpl=%u, p=%u, "
	    "hilimit=0x%x, xx=%x, def32=%u, gran=%u, hibase=0x%x\n",
	    i, d->sd_lolimit, d->sd_lobase, d->sd_type, d->sd_dpl, d->sd_p,
	    d->sd_hilimit, d->sd_xx, d->sd_def32, d->sd_gran, d->sd_hibase);
}
#endif

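/*
 * Return the number of entries in this process's LDT: the size of its
 * private LDT if it has one (PMF_USER_LDT), otherwise the size of the
 * default system LDT.  Returns -1 when USER_LDT is not compiled in.
 */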
int
x86_get_ldt_len(struct lwp *l)
{
#ifndef USER_LDT
	return -1;
#else
	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
	int nldt;

	mutex_enter(&pmap->pm_lock);

	if (pmap->pm_flags & PMF_USER_LDT) {
		nldt = pmap->pm_ldt_len;
	} else {
		nldt = NLDT;
	}
	mutex_exit(&pmap->pm_lock);
	return nldt;
#endif
}

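/*
 * sysarch(X86_GET_LDT): copy the arguments in from userspace, fetch up to
 * ua.num descriptors through x86_get_ldt1(), and copy the result back out
 * to ua.desc.  The number of descriptors returned is reported via *retval.
 */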
int
x86_get_ldt(struct lwp *l, void *args, register_t *retval)
{
#ifndef USER_LDT
	return EINVAL;
#else
	struct x86_get_ldt_args ua;
	union descriptor *cp;
	int error;

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return error;

	if (ua.num < 0 || ua.num > 8192)
		return EINVAL;

	cp = malloc(ua.num * sizeof(union descriptor), M_TEMP, M_WAITOK);
	if (cp == NULL)
		return ENOMEM;

	error = x86_get_ldt1(l, &ua, cp);
	*retval = ua.num;
	if (error == 0)
		error = copyout(cp, ua.desc, ua.num * sizeof(*cp));

	free(cp, M_TEMP);
	return error;
#endif
}

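/*
 * Copy descriptors ua->start .. ua->start + ua->num - 1 from the process's
 * LDT (or from the default LDT if the process has no private one) into the
 * kernel buffer cp, clamping ua->num to the number of entries actually
 * available.  The caller is responsible for copying the result out.
 */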
int
x86_get_ldt1(struct lwp *l, struct x86_get_ldt_args *ua, union descriptor *cp)
{
#ifndef USER_LDT
	return EINVAL;
#else
	int error;
	struct proc *p = l->l_proc;
	pmap_t pmap = p->p_vmspace->vm_map.pmap;
	int nldt, num;
	union descriptor *lp;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_LDT_GET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

#ifdef	LDT_DEBUG
	printf("x86_get_ldt: start=%d num=%d descs=%p\n", ua->start,
	    ua->num, ua->desc);
#endif

	if (ua->start < 0 || ua->num < 0 || ua->start > 8192 || ua->num > 8192 ||
	    ua->start + ua->num > 8192)
		return (EINVAL);

	mutex_enter(&pmap->pm_lock);

	if (pmap->pm_flags & PMF_USER_LDT) {
		nldt = pmap->pm_ldt_len;
		lp = pmap->pm_ldt;
	} else {
		nldt = NLDT;
		lp = ldt;
	}

	if (ua->start > nldt) {
		mutex_exit(&pmap->pm_lock);
		return (EINVAL);
	}

	lp += ua->start;
	num = min(ua->num, nldt - ua->start);
	ua->num = num;
#ifdef LDT_DEBUG
	{
		int i;
		for (i = 0; i < num; i++)
			x86_print_ldt(i, &lp[i].sd);
	}
#endif

	memcpy(cp, lp, num * sizeof(union descriptor));
	mutex_exit(&pmap->pm_lock);

	return 0;
#endif
}

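/*
 * sysarch(X86_SET_LDT): copy the arguments and the new descriptors in from
 * userspace, then install them through x86_set_ldt1().  ua.start is
 * reported back via *retval.
 */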
int
x86_set_ldt(struct lwp *l, void *args, register_t *retval)
{
#ifndef USER_LDT
	return EINVAL;
#else
	struct x86_set_ldt_args ua;
	union descriptor *descv;
	int error;

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return (error);

	if (ua.num < 0 || ua.num > 8192)
		return EINVAL;

	descv = malloc(sizeof (*descv) * ua.num, M_TEMP, M_NOWAIT);
	if (descv == NULL)
		return ENOMEM;

	error = copyin(ua.desc, descv, sizeof (*descv) * ua.num);
	if (error == 0)
		error = x86_set_ldt1(l, &ua, descv);
	*retval = ua.start;

	free(descv, M_TEMP);
	return error;
#endif
}

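/*
 * Validate and install the descriptors in descv at LDT slots ua->start
 * onward.  Descriptors are first checked for privilege violations (only
 * ring-3 segments and call gates into user code may be marked present).
 * If the process's private LDT does not yet exist, or is too small, a
 * larger one is allocated, the old contents are copied across, and the
 * pmap and PCB are switched to the new selector before the entries are
 * written.
 */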
int
x86_set_ldt1(struct lwp *l, struct x86_set_ldt_args *ua,
    union descriptor *descv)
{
#ifndef USER_LDT
	return EINVAL;
#else
	int error, i, n, sel, free_sel;
	struct proc *p = l->l_proc;
	struct pcb *pcb = &l->l_addr->u_pcb;
	pmap_t pmap = p->p_vmspace->vm_map.pmap;
	size_t old_len, new_len, ldt_len, free_len;
	union descriptor *old_ldt, *new_ldt, *free_ldt;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_LDT_SET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	if (ua->start < 0 || ua->num < 0 || ua->start > 8192 || ua->num > 8192 ||
	    ua->start + ua->num > 8192)
		return (EINVAL);

	/* Check descriptors for access violations. */
	for (i = 0; i < ua->num; i++) {
		union descriptor *desc = &descv[i];

		switch (desc->sd.sd_type) {
		case SDT_SYSNULL:
			desc->sd.sd_p = 0;
			break;
		case SDT_SYS286CGT:
		case SDT_SYS386CGT:
			/*
			 * Only allow call gates targeting a segment
			 * in the LDT or a user segment in the fixed
			 * part of the gdt.  Segments in the LDT are
			 * constrained (below) to be user segments.
			 */
			if (desc->gd.gd_p != 0 &&
			    !ISLDT(desc->gd.gd_selector) &&
			    ((IDXSEL(desc->gd.gd_selector) >= NGDT) ||
			     (gdt[IDXSEL(desc->gd.gd_selector)].sd.sd_dpl !=
				 SEL_UPL))) {
				return EACCES;
			}
			break;
		case SDT_MEMEC:
		case SDT_MEMEAC:
		case SDT_MEMERC:
		case SDT_MEMERAC:
			/* Must be "present" if executable and conforming. */
			if (desc->sd.sd_p == 0)
				return EACCES;
			break;
		case SDT_MEMRO:
		case SDT_MEMROA:
		case SDT_MEMRW:
		case SDT_MEMRWA:
		case SDT_MEMROD:
		case SDT_MEMRODA:
		case SDT_MEMRWD:
		case SDT_MEMRWDA:
		case SDT_MEME:
		case SDT_MEMEA:
		case SDT_MEMER:
		case SDT_MEMERA:
			break;
		default:
			/*
			 * Make sure that unknown descriptor types are
			 * not marked present.
			 */
			if (desc->sd.sd_p != 0)
				return EACCES;
			break;
		}

		if (desc->sd.sd_p != 0) {
			/* Only user (ring-3) descriptors may be present. */
			if (desc->sd.sd_dpl != SEL_UPL)
				return EACCES;
		}
	}

	/* allocate user ldt */
	free_sel = -1;
	new_ldt = NULL;
	new_len = 0;
	free_ldt = NULL;
	free_len = 0;
	mutex_enter(&pmap->pm_lock);
	if (pmap->pm_ldt == 0 || (ua->start + ua->num) > pmap->pm_ldt_len) {
		if (pmap->pm_flags & PMF_USER_LDT)
			ldt_len = pmap->pm_ldt_len;
		else
			ldt_len = 512;
		while ((ua->start + ua->num) > ldt_len)
			ldt_len *= 2;
		new_len = ldt_len * sizeof(union descriptor);

		mutex_exit(&pmap->pm_lock);
		new_ldt = (union descriptor *)uvm_km_alloc(kernel_map,
		    new_len, 0, UVM_KMF_WIRED);
		memset(new_ldt, 0, new_len);
		sel = ldt_alloc(new_ldt, new_len);
		mutex_enter(&pmap->pm_lock);

		if (pmap->pm_ldt != NULL && ldt_len <= pmap->pm_ldt_len) {
			/*
			 * Another thread (re)allocated the LDT to
			 * sufficient size while we were blocked in
			 * uvm_km_alloc. Oh well. The new entries
			 * will quite probably not be right, but
			 * hey.. not our problem if user applications
			 * have race conditions like that.
			 */
			goto copy;
		}

		old_ldt = pmap->pm_ldt;
		free_ldt = old_ldt;
		free_len = pmap->pm_ldt_len * sizeof(union descriptor);

		if (old_ldt != NULL) {
			old_len = pmap->pm_ldt_len * sizeof(union descriptor);
		} else {
			old_len = NLDT * sizeof(union descriptor);
			old_ldt = ldt;
		}

		memcpy(new_ldt, old_ldt, old_len);
		memset((char *)new_ldt + old_len, 0, new_len - old_len);

		pmap->pm_ldt = new_ldt;
		pmap->pm_ldt_len = ldt_len;

		if (pmap->pm_flags & PMF_USER_LDT)
			free_sel = pmap->pm_ldt_sel;
		else {
			pmap->pm_flags |= PMF_USER_LDT;
			free_sel = -1;
		}
		pmap->pm_ldt_sel = sel;
		pcb->pcb_ldt_sel = pmap->pm_ldt_sel;
		if (pcb == curpcb)
			lldt(pcb->pcb_ldt_sel);
		new_ldt = NULL;
	}
copy:
	/* Now actually replace the descriptors. */
	for (i = 0, n = ua->start; i < ua->num; i++, n++)
		pmap->pm_ldt[n] = descv[i];

	mutex_exit(&pmap->pm_lock);

	if (new_ldt != NULL)
		uvm_km_free(kernel_map, (vaddr_t)new_ldt, new_len,
		    UVM_KMF_WIRED);
	if (free_sel != -1)
		ldt_free(free_sel);
	if (free_ldt != NULL)
		uvm_km_free(kernel_map, (vaddr_t)free_ldt, free_len,
		    UVM_KMF_WIRED);

	return (error);
#endif
}

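/*
 * sysarch(X86_IOPL): raise or lower the LWP's I/O privilege level.  Under
 * Xen the privilege level kept in the PCB's TSS is adjusted and the
 * hypervisor is asked to apply it; natively, PSL_IOPL is set or cleared
 * in the trapframe's flags register.
 */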
int
x86_iopl(struct lwp *l, void *args, register_t *retval)
{
	int error;
	struct x86_iopl_args ua;
#ifdef XEN
	struct pcb *pcb = &l->l_addr->u_pcb;
#else
	struct trapframe *tf = l->l_md.md_regs;
#endif

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPL,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return error;

#ifdef XEN
	{
		pcb->pcb_tss.tss_ioopt &= ~SEL_RPL;
		if (ua.iopl)
			pcb->pcb_tss.tss_ioopt |= SEL_UPL; /* i/o pl */
		else
			pcb->pcb_tss.tss_ioopt |= SEL_KPL; /* i/o pl */
	}
	/* Force the change at ring 0. */
#ifdef XEN3
	{
		struct physdev_op physop;
		physop.cmd = PHYSDEVOP_SET_IOPL;
		physop.u.set_iopl.iopl = pcb->pcb_tss.tss_ioopt & SEL_RPL;
		HYPERVISOR_physdev_op(&physop);
	}
#else /* XEN3 */
	{
		dom0_op_t op;
		op.cmd = DOM0_IOPL;
		op.u.iopl.domain = DOMID_SELF;
		op.u.iopl.iopl = pcb->pcb_tss.tss_ioopt & SEL_RPL; /* i/o pl */
		HYPERVISOR_dom0_op(&op);
	}
#endif /* XEN3 */
#elif defined(__x86_64__)
	if (ua.iopl)
		tf->tf_rflags |= PSL_IOPL;
	else
		tf->tf_rflags &= ~PSL_IOPL;
#else
	if (ua.iopl)
		tf->tf_eflags |= PSL_IOPL;
	else
		tf->tf_eflags &= ~PSL_IOPL;
#endif

	return 0;
}

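/*
 * sysarch(X86_GET_IOPERM): copy the LWP's I/O permission bitmap out to
 * userspace.  Only built when IOPERM is defined (not on x86_64 here).
 */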
int
x86_get_ioperm(struct lwp *l, void *args, register_t *retval)
{
#ifdef IOPERM
	int error;
	struct pcb *pcb = &l->l_addr->u_pcb;
	struct x86_get_ioperm_args ua;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPERM_GET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return (error);

	return copyout(pcb->pcb_iomap, ua.iomap, sizeof(pcb->pcb_iomap));
#else
	return EINVAL;
#endif
}

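/*
 * sysarch(X86_SET_IOPERM): replace the LWP's I/O permission bitmap with
 * one copied in from userspace.
 */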
int
x86_set_ioperm(struct lwp *l, void *args, register_t *retval)
{
#ifdef IOPERM
	int error;
	struct pcb *pcb = &l->l_addr->u_pcb;
	struct x86_set_ioperm_args ua;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPERM_SET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return (error);

	return copyin(ua.iomap, pcb->pcb_iomap, sizeof(pcb->pcb_iomap));
#else
	return EINVAL;
#endif
}

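/*
 * sysarch(X86_GET_MTRR): read memory type range registers on behalf of a
 * user process.  The entry count is passed in and returned through the
 * user-supplied ua.n.
 */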
int
x86_get_mtrr(struct lwp *l, void *args, register_t *retval)
{
#ifdef MTRR
	struct x86_get_mtrr_args ua;
	int error, n;

	if (mtrr_funcs == NULL)
		return ENOSYS;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_GET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	error = copyin(args, &ua, sizeof ua);
	if (error != 0)
		return error;

	error = copyin(ua.n, &n, sizeof n);
	if (error != 0)
		return error;

	error = mtrr_get(ua.mtrrp, &n, l->l_proc, MTRR_GETSET_USER);

	copyout(&n, ua.n, sizeof (int));

	return error;
#else
	return EINVAL;
#endif
}

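/*
 * sysarch(X86_SET_MTRR): update memory type range registers on behalf of a
 * user process, committing the change if any ranges were accepted.
 */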
int
x86_set_mtrr(struct lwp *l, void *args, register_t *retval)
{
#ifdef MTRR
	int error, n;
	struct x86_set_mtrr_args ua;

	if (mtrr_funcs == NULL)
		return ENOSYS;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_SET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	error = copyin(args, &ua, sizeof ua);
	if (error != 0)
		return error;

	error = copyin(ua.n, &n, sizeof n);
	if (error != 0)
		return error;

	error = mtrr_set(ua.mtrrp, &n, l->l_proc, MTRR_GETSET_USER);
	if (n != 0)
		mtrr_commit();

	copyout(&n, ua.n, sizeof n);

	return error;
#else
	return EINVAL;
#endif
}

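/*
 * The sysarch(2) system call: dispatch machine-dependent operations to the
 * handlers above according to the 'op' argument.  An illustrative, purely
 * hypothetical userland call (assuming <machine/sysarch.h> is included)
 * might look like:
 *
 *	struct x86_iopl_args a = { .iopl = 1 };
 *	if (sysarch(X86_IOPL, &a) == -1)
 *		err(1, "sysarch");
 */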
int
sys_sysarch(struct lwp *l, void *v, register_t *retval)
{
	struct sys_sysarch_args /* {
		syscallarg(int) op;
		syscallarg(void *) parms;
	} */ *uap = v;
	int error = 0;

	switch (SCARG(uap, op)) {
	case X86_IOPL:
		error = x86_iopl(l, SCARG(uap, parms), retval);
		break;

	case X86_GET_LDT:
		error = x86_get_ldt(l, SCARG(uap, parms), retval);
		break;

	case X86_SET_LDT:
		error = x86_set_ldt(l, SCARG(uap, parms), retval);
		break;

	case X86_GET_IOPERM:
		error = x86_get_ioperm(l, SCARG(uap, parms), retval);
		break;

	case X86_SET_IOPERM:
		error = x86_set_ioperm(l, SCARG(uap, parms), retval);
		break;

	case X86_GET_MTRR:
		error = x86_get_mtrr(l, SCARG(uap, parms), retval);
		break;

	case X86_SET_MTRR:
		error = x86_set_mtrr(l, SCARG(uap, parms), retval);
		break;

#ifdef VM86
	case X86_VM86:
		error = x86_vm86(l, SCARG(uap, parms), retval);
		break;
#ifdef COMPAT_16
	case X86_OLD_VM86:
		error = compat_16_x86_vm86(l, SCARG(uap, parms), retval);
		break;
#endif
#endif

#ifdef PERFCTRS
	case X86_PMC_INFO:
		error = pmc_info(l, SCARG(uap, parms), retval);
		break;

	case X86_PMC_STARTSTOP:
		error = pmc_startstop(l, SCARG(uap, parms), retval);
		break;

	case X86_PMC_READ:
		error = pmc_read(l, SCARG(uap, parms), retval);
		break;
#endif

	default:
		error = EINVAL;
		break;
	}
	return (error);
}
    667