/*	$NetBSD: sys_machdep.c,v 1.6 2007/11/10 23:04:29 ad Exp $	*/

/*-
 * Copyright (c) 1998, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_machdep.c,v 1.6 2007/11/10 23:04:29 ad Exp $");

#include "opt_compat_netbsd.h"
#include "opt_mtrr.h"
#include "opt_perfctrs.h"
#include "opt_user_ldt.h"
#include "opt_vm86.h"
#include "opt_xen.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/signal.h>
#include <sys/malloc.h>
#include <sys/kauth.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/gdt.h>
#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/sysarch.h>
#include <machine/mtrr.h>

#ifdef __x86_64__
/* Need to be checked. */
#undef	USER_LDT
#undef	PERFCTRS
#undef	VM86
#undef	IOPERM
#else
#define	IOPERM
#endif

#ifdef VM86
#include <machine/vm86.h>
#endif

#ifdef PERFCTRS
#include <machine/pmc.h>
#endif

/* XXX needs changes from vmlocking branch */
#define	mutex_enter(x)	/* nothing */
#define	mutex_exit(x)	/* nothing */

extern struct vm_map *kernel_map;

int x86_get_ioperm(struct lwp *, void *, register_t *);
int x86_set_ioperm(struct lwp *, void *, register_t *);
int x86_get_mtrr(struct lwp *, void *, register_t *);
int x86_set_mtrr(struct lwp *, void *, register_t *);
int x86_set_sdbase(void *arg, char which);
int x86_get_sdbase(void *arg, char which);

#ifdef LDT_DEBUG
static void x86_print_ldt(int, const struct segment_descriptor *);

static void
x86_print_ldt(int i, const struct segment_descriptor *d)
{
	printf("[%d] lolimit=0x%x, lobase=0x%x, type=%u, dpl=%u, p=%u, "
	    "hilimit=0x%x, xx=%x, def32=%u, gran=%u, hibase=0x%x\n",
	    i, d->sd_lolimit, d->sd_lobase, d->sd_type, d->sd_dpl, d->sd_p,
	    d->sd_hilimit, d->sd_xx, d->sd_def32, d->sd_gran, d->sd_hibase);
}
#endif

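/*
 * Return the number of entries in this process' LDT: the private
 * per-pmap length if a user LDT has been installed, otherwise the
 * size of the default system LDT (NLDT).  Returns -1 when the kernel
 * is built without USER_LDT.
 */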
int
x86_get_ldt_len(struct lwp *l)
{
#ifndef USER_LDT
	return -1;
#else
	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
	int nldt;

	mutex_enter(&pmap->pm_lock);

	if (pmap->pm_flags & PMF_USER_LDT) {
		nldt = pmap->pm_ldt_len;
	} else {
		nldt = NLDT;
	}
	mutex_exit(&pmap->pm_lock);
	return nldt;
#endif
}

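/*
 * X86_GET_LDT: copy in the request, bounce the descriptors through a
 * temporary kernel buffer via x86_get_ldt1(), then copy the result out
 * to ua.desc.  The number of descriptors returned is placed in *retval.
 */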
int
x86_get_ldt(struct lwp *l, void *args, register_t *retval)
{
#ifndef USER_LDT
	return EINVAL;
#else
	struct x86_get_ldt_args ua;
	union descriptor *cp;
	int error;

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return error;

	if (ua.num < 0 || ua.num > 8192)
		return EINVAL;

	cp = malloc(ua.num * sizeof(union descriptor), M_TEMP, M_WAITOK);
	if (cp == NULL)
		return ENOMEM;

	error = x86_get_ldt1(l, &ua, cp);
	*retval = ua.num;
	if (error == 0)
		error = copyout(cp, ua.desc, ua.num * sizeof(*cp));

	free(cp, M_TEMP);
	return error;
#endif
}

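/*
 * Fetch up to ua->num descriptors starting at ua->start from the
 * process' LDT (or the default system LDT) into the kernel buffer cp,
 * clamping ua->num to the entries actually available.  The caller is
 * expected to have validated ua and allocated cp.
 */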
int
x86_get_ldt1(struct lwp *l, struct x86_get_ldt_args *ua, union descriptor *cp)
{
#ifndef USER_LDT
	return EINVAL;
#else
	int error;
	struct proc *p = l->l_proc;
	pmap_t pmap = p->p_vmspace->vm_map.pmap;
	int nldt, num;
	union descriptor *lp;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_LDT_GET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

#ifdef	LDT_DEBUG
	printf("x86_get_ldt: start=%d num=%d descs=%p\n", ua->start,
	    ua->num, ua->desc);
#endif

	if (ua->start < 0 || ua->num < 0 || ua->start > 8192 || ua->num > 8192 ||
	    ua->start + ua->num > 8192)
		return (EINVAL);

	mutex_enter(&pmap->pm_lock);

	if (pmap->pm_flags & PMF_USER_LDT) {
		nldt = pmap->pm_ldt_len;
		lp = pmap->pm_ldt;
	} else {
		nldt = NLDT;
		lp = ldt;
	}

	if (ua->start > nldt) {
		mutex_exit(&pmap->pm_lock);
		return (EINVAL);
	}

	lp += ua->start;
	num = min(ua->num, nldt - ua->start);
	ua->num = num;
#ifdef LDT_DEBUG
	{
		int i;
		for (i = 0; i < num; i++)
			x86_print_ldt(i, &lp[i].sd);
	}
#endif

	memcpy(cp, lp, num * sizeof(union descriptor));
	mutex_exit(&pmap->pm_lock);

	return 0;
#endif
}

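/*
 * X86_SET_LDT: copy in the request and the new descriptors, then hand
 * them to x86_set_ldt1() for validation and installation.  The first
 * modified slot (ua.start) is returned in *retval.
 */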
int
x86_set_ldt(struct lwp *l, void *args, register_t *retval)
{
#ifndef USER_LDT
	return EINVAL;
#else
	struct x86_set_ldt_args ua;
	union descriptor *descv;
	int error;

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return (error);

	if (ua.num < 0 || ua.num > 8192)
		return EINVAL;

	descv = malloc(sizeof (*descv) * ua.num, M_TEMP, M_NOWAIT);
	if (descv == NULL)
		return ENOMEM;

	error = copyin(ua.desc, descv, sizeof (*descv) * ua.num);
	if (error == 0)
		error = x86_set_ldt1(l, &ua, descv);
	*retval = ua.start;

	free(descv, M_TEMP);
	return error;
#endif
}

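/*
 * Validate the supplied descriptors (only user-privilege descriptors
 * of known types may be marked present), grow or allocate a private
 * per-process LDT if needed, and install the new entries.  The new
 * LDT memory, the old selector and the old LDT memory are released
 * only after the pmap lock has been dropped.
 */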
int
x86_set_ldt1(struct lwp *l, struct x86_set_ldt_args *ua,
    union descriptor *descv)
{
#ifndef USER_LDT
	return EINVAL;
#else
	int error, i, n, sel, free_sel;
	struct proc *p = l->l_proc;
	struct pcb *pcb = &l->l_addr->u_pcb;
	pmap_t pmap = p->p_vmspace->vm_map.pmap;
	size_t old_len, new_len, ldt_len, free_len;
	union descriptor *old_ldt, *new_ldt, *free_ldt;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_LDT_SET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	if (ua->start < 0 || ua->num < 0 || ua->start > 8192 || ua->num > 8192 ||
	    ua->start + ua->num > 8192)
		return (EINVAL);

	/* Check descriptors for access violations. */
	for (i = 0; i < ua->num; i++) {
		union descriptor *desc = &descv[i];

		switch (desc->sd.sd_type) {
		case SDT_SYSNULL:
			desc->sd.sd_p = 0;
			break;
		case SDT_SYS286CGT:
		case SDT_SYS386CGT:
			/*
			 * Only allow call gates targeting a segment
			 * in the LDT or a user segment in the fixed
			 * part of the gdt.  Segments in the LDT are
			 * constrained (below) to be user segments.
			 */
			if (desc->gd.gd_p != 0 &&
			    !ISLDT(desc->gd.gd_selector) &&
			    ((IDXSEL(desc->gd.gd_selector) >= NGDT) ||
			     (gdt[IDXSEL(desc->gd.gd_selector)].sd.sd_dpl !=
				 SEL_UPL))) {
				return EACCES;
			}
			break;
		case SDT_MEMEC:
		case SDT_MEMEAC:
		case SDT_MEMERC:
		case SDT_MEMERAC:
			/* Must be "present" if executable and conforming. */
			if (desc->sd.sd_p == 0)
				return EACCES;
			break;
		case SDT_MEMRO:
		case SDT_MEMROA:
		case SDT_MEMRW:
		case SDT_MEMRWA:
		case SDT_MEMROD:
		case SDT_MEMRODA:
		case SDT_MEMRWD:
		case SDT_MEMRWDA:
		case SDT_MEME:
		case SDT_MEMEA:
		case SDT_MEMER:
		case SDT_MEMERA:
			break;
		default:
			/*
			 * Make sure that unknown descriptor types are
			 * not marked present.
			 */
			if (desc->sd.sd_p != 0)
				return EACCES;
			break;
		}

		if (desc->sd.sd_p != 0) {
			/* Only user (ring-3) descriptors may be present. */
			if (desc->sd.sd_dpl != SEL_UPL)
				return EACCES;
		}
	}

	/* allocate user ldt */
	free_sel = -1;
	new_ldt = NULL;
	new_len = 0;
	free_ldt = NULL;
	free_len = 0;
	mutex_enter(&pmap->pm_lock);
	if (pmap->pm_ldt == 0 || (ua->start + ua->num) > pmap->pm_ldt_len) {
		if (pmap->pm_flags & PMF_USER_LDT)
			ldt_len = pmap->pm_ldt_len;
		else
			ldt_len = 512;
		while ((ua->start + ua->num) > ldt_len)
			ldt_len *= 2;
		new_len = ldt_len * sizeof(union descriptor);

		mutex_exit(&pmap->pm_lock);
		new_ldt = (union descriptor *)uvm_km_alloc(kernel_map,
		    new_len, 0, UVM_KMF_WIRED);
		memset(new_ldt, 0, new_len);
		sel = ldt_alloc(new_ldt, new_len);
		mutex_enter(&pmap->pm_lock);

		if (pmap->pm_ldt != NULL && ldt_len <= pmap->pm_ldt_len) {
			/*
			 * Another thread (re)allocated the LDT to
			 * sufficient size while we were blocked in
			 * uvm_km_alloc. Oh well. The new entries
			 * will quite probably not be right, but
			 * hey.. not our problem if user applications
			 * have race conditions like that.
			 */
			goto copy;
		}

		old_ldt = pmap->pm_ldt;
		free_ldt = old_ldt;
		free_len = pmap->pm_ldt_len * sizeof(union descriptor);

		if (old_ldt != NULL) {
			old_len = pmap->pm_ldt_len * sizeof(union descriptor);
		} else {
			old_len = NLDT * sizeof(union descriptor);
			old_ldt = ldt;
		}

		memcpy(new_ldt, old_ldt, old_len);
		memset((char *)new_ldt + old_len, 0, new_len - old_len);

		pmap->pm_ldt = new_ldt;
		pmap->pm_ldt_len = ldt_len;

		if (pmap->pm_flags & PMF_USER_LDT)
			free_sel = pmap->pm_ldt_sel;
		else {
			pmap->pm_flags |= PMF_USER_LDT;
			free_sel = -1;
		}
		pmap->pm_ldt_sel = sel;
		pcb->pcb_ldt_sel = pmap->pm_ldt_sel;
		if (pcb == curpcb)
			lldt(pcb->pcb_ldt_sel);
		new_ldt = NULL;
	}
copy:
	/* Now actually replace the descriptors. */
	for (i = 0, n = ua->start; i < ua->num; i++, n++)
		pmap->pm_ldt[n] = descv[i];

	mutex_exit(&pmap->pm_lock);

	if (new_ldt != NULL)
		uvm_km_free(kernel_map, (vaddr_t)new_ldt, new_len,
		    UVM_KMF_WIRED);
	if (free_sel != -1)
		ldt_free(free_sel);
	if (free_ldt != NULL)
		uvm_km_free(kernel_map, (vaddr_t)free_ldt, free_len,
		    UVM_KMF_WIRED);

	return (error);
#endif
}

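/*
 * X86_IOPL: raise or lower the caller's I/O privilege level.  Under
 * Xen this is done through the hypervisor; natively it toggles
 * PSL_IOPL in the saved eflags/rflags of the trap frame.
 */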
int
x86_iopl(struct lwp *l, void *args, register_t *retval)
{
	int error;
	struct x86_iopl_args ua;
#ifdef XEN
	struct pcb *pcb = &l->l_addr->u_pcb;
#else
	struct trapframe *tf = l->l_md.md_regs;
#endif

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPL,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return error;

#ifdef XEN
	{
		pcb->pcb_tss.tss_ioopt &= ~SEL_RPL;
		if (ua.iopl)
			pcb->pcb_tss.tss_ioopt |= SEL_UPL; /* i/o pl */
		else
			pcb->pcb_tss.tss_ioopt |= SEL_KPL; /* i/o pl */
	}
	/* Force the change at ring 0. */
#ifdef XEN3
	{
		struct physdev_op physop;
		physop.cmd = PHYSDEVOP_SET_IOPL;
		physop.u.set_iopl.iopl = pcb->pcb_tss.tss_ioopt & SEL_RPL;
		HYPERVISOR_physdev_op(&physop);
	}
#else /* XEN3 */
	{
		dom0_op_t op;
		op.cmd = DOM0_IOPL;
		op.u.iopl.domain = DOMID_SELF;
		op.u.iopl.iopl = pcb->pcb_tss.tss_ioopt & SEL_RPL; /* i/o pl */
		HYPERVISOR_dom0_op(&op);
	}
#endif /* XEN3 */
#elif defined(__x86_64__)
	if (ua.iopl)
		tf->tf_rflags |= PSL_IOPL;
	else
		tf->tf_rflags &= ~PSL_IOPL;
#else
	if (ua.iopl)
		tf->tf_eflags |= PSL_IOPL;
	else
		tf->tf_eflags &= ~PSL_IOPL;
#endif

	return 0;
}

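/*
 * X86_GET_IOPERM: copy the per-process I/O permission bitmap from the
 * PCB out to user space.
 */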
int
x86_get_ioperm(struct lwp *l, void *args, register_t *retval)
{
#ifdef IOPERM
	int error;
	struct pcb *pcb = &l->l_addr->u_pcb;
	struct x86_get_ioperm_args ua;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPERM_GET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return (error);

	return copyout(pcb->pcb_iomap, ua.iomap, sizeof(pcb->pcb_iomap));
#else
	return EINVAL;
#endif
}

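/*
 * X86_SET_IOPERM: replace the per-process I/O permission bitmap in the
 * PCB with one supplied from user space.
 */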
int
x86_set_ioperm(struct lwp *l, void *args, register_t *retval)
{
#ifdef IOPERM
	int error;
	struct pcb *pcb = &l->l_addr->u_pcb;
	struct x86_set_ioperm_args ua;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPERM_SET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return (error);

	return copyin(ua.iomap, pcb->pcb_iomap, sizeof(pcb->pcb_iomap));
#else
	return EINVAL;
#endif
}

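/*
 * X86_GET_MTRR: read memory type range registers on behalf of the
 * caller.  The number of ranges actually returned is written back to
 * the user-supplied counter ua.n.
 */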
int
x86_get_mtrr(struct lwp *l, void *args, register_t *retval)
{
#ifdef MTRR
	struct x86_get_mtrr_args ua;
	int error, n;

	if (mtrr_funcs == NULL)
		return ENOSYS;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_GET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	error = copyin(args, &ua, sizeof ua);
	if (error != 0)
		return error;

	error = copyin(ua.n, &n, sizeof n);
	if (error != 0)
		return error;

	error = mtrr_get(ua.mtrrp, &n, l->l_proc, MTRR_GETSET_USER);

	copyout(&n, ua.n, sizeof (int));

	return error;
#else
	return EINVAL;
#endif
}

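/*
 * X86_SET_MTRR: update memory type range registers from a user-supplied
 * list and commit the change if any ranges were set.  The count of
 * ranges processed is written back to ua.n.
 */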
int
x86_set_mtrr(struct lwp *l, void *args, register_t *retval)
{
#ifdef MTRR
	int error, n;
	struct x86_set_mtrr_args ua;

	if (mtrr_funcs == NULL)
		return ENOSYS;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_SET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	error = copyin(args, &ua, sizeof ua);
	if (error != 0)
		return error;

	error = copyin(ua.n, &n, sizeof n);
	if (error != 0)
		return error;

	error = mtrr_set(ua.mtrrp, &n, l->l_proc, MTRR_GETSET_USER);
	if (n != 0)
		mtrr_commit();

	copyout(&n, ua.n, sizeof n);

	return error;
#else
	return EINVAL;
#endif
}

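/*
 * Install a new base address for the user %fs ('f') or %gs ('g')
 * segment: build a flat 4 GB ring-3 data descriptor with the given
 * base and load it into both the PCB and the current CPU's GDT slot.
 * i386 only.
 */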
int
x86_set_sdbase(void *arg, char which)
{
#ifdef i386
	struct segment_descriptor sd;
	vaddr_t base;
	int error;

	error = copyin(arg, &base, sizeof(base));
	if (error != 0)
		return error;

	sd.sd_lobase = base & 0xffffff;
	sd.sd_hibase = (base >> 24) & 0xff;
	sd.sd_lolimit = 0xffff;
	sd.sd_hilimit = 0xf;
	sd.sd_type = SDT_MEMRWA;
	sd.sd_dpl = SEL_UPL;
	sd.sd_p = 1;
	sd.sd_xx = 0;
	sd.sd_def32 = 1;
	sd.sd_gran = 1;

	crit_enter();
	if (which == 'f') {
		memcpy(&curpcb->pcb_fsd, &sd, sizeof(sd));
		memcpy(&curcpu()->ci_gdt[GUFS_SEL], &sd, sizeof(sd));
	} else /* which == 'g' */ {
		memcpy(&curpcb->pcb_gsd, &sd, sizeof(sd));
		memcpy(&curcpu()->ci_gdt[GUGS_SEL], &sd, sizeof(sd));
	}
	crit_exit();

	return 0;
#else
	return EINVAL;
#endif
}

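/*
 * Return the base address currently programmed into the user %fs or
 * %gs descriptor saved in the PCB.  i386 only.
 */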
int
x86_get_sdbase(void *arg, char which)
{
#ifdef i386
	struct segment_descriptor *sd;
	vaddr_t base;

	switch (which) {
	case 'f':
		sd = (struct segment_descriptor *)&curpcb->pcb_fsd;
		break;
	case 'g':
		sd = (struct segment_descriptor *)&curpcb->pcb_gsd;
		break;
	default:
		panic("x86_get_sdbase");
	}

	base = sd->sd_hibase << 24 | sd->sd_lobase;
	return copyout(&base, arg, sizeof(base));
#else
	return EINVAL;
#endif
}

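/*
 * sysarch(2) system call: dispatch machine-dependent operations to the
 * handlers above according to the requested op code.
 *
 * Illustrative userland usage (a sketch only; the i386_set_ldt(3)
 * wrapper is the supported interface, and the hypothetical `slot' and
 * `d' below stand in for a caller-chosen LDT slot and descriptor):
 *
 *	struct x86_set_ldt_args sl = { .start = slot, .desc = &d, .num = 1 };
 *	sysarch(X86_SET_LDT, &sl);
 */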
int
sys_sysarch(struct lwp *l, void *v, register_t *retval)
{
	struct sys_sysarch_args /* {
		syscallarg(int) op;
		syscallarg(void *) parms;
	} */ *uap = v;
	int error = 0;

	switch (SCARG(uap, op)) {
	case X86_IOPL:
		error = x86_iopl(l, SCARG(uap, parms), retval);
		break;

	case X86_GET_LDT:
		error = x86_get_ldt(l, SCARG(uap, parms), retval);
		break;

	case X86_SET_LDT:
		error = x86_set_ldt(l, SCARG(uap, parms), retval);
		break;

	case X86_GET_IOPERM:
		error = x86_get_ioperm(l, SCARG(uap, parms), retval);
		break;

	case X86_SET_IOPERM:
		error = x86_set_ioperm(l, SCARG(uap, parms), retval);
		break;

	case X86_GET_MTRR:
		error = x86_get_mtrr(l, SCARG(uap, parms), retval);
		break;

	case X86_SET_MTRR:
		error = x86_set_mtrr(l, SCARG(uap, parms), retval);
		break;

#ifdef VM86
	case X86_VM86:
		error = x86_vm86(l, SCARG(uap, parms), retval);
		break;
#ifdef COMPAT_16
	case X86_OLD_VM86:
		error = compat_16_x86_vm86(l, SCARG(uap, parms), retval);
		break;
#endif
#endif

#ifdef PERFCTRS
	case X86_PMC_INFO:
		error = pmc_info(l, SCARG(uap, parms), retval);
		break;

	case X86_PMC_STARTSTOP:
		error = pmc_startstop(l, SCARG(uap, parms), retval);
		break;

	case X86_PMC_READ:
		error = pmc_read(l, SCARG(uap, parms), retval);
		break;
#endif

	case X86_SET_FSBASE:
		error = x86_set_sdbase(SCARG(uap, parms), 'f');
		break;

	case X86_SET_GSBASE:
		error = x86_set_sdbase(SCARG(uap, parms), 'g');
		break;

	case X86_GET_FSBASE:
		error = x86_get_sdbase(SCARG(uap, parms), 'f');
		break;

	case X86_GET_GSBASE:
		error = x86_get_sdbase(SCARG(uap, parms), 'g');
		break;

	default:
		error = EINVAL;
		break;
	}
	return (error);
}