/*	$NetBSD: sys_machdep.c,v 1.18 2009/03/29 09:24:52 ad Exp $	*/

/*-
 * Copyright (c) 1998, 2007, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_machdep.c,v 1.18 2009/03/29 09:24:52 ad Exp $");

#include "opt_mtrr.h"
#include "opt_perfctrs.h"
#include "opt_user_ldt.h"
#include "opt_vm86.h"
#include "opt_xen.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/signal.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kauth.h>
#include <sys/cpu.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

#include <machine/cpufunc.h>
#include <machine/gdt.h>
#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/sysarch.h>
#include <machine/mtrr.h>

#ifdef __x86_64__
/* Need to be checked. */
#undef	USER_LDT
#undef	PERFCTRS
#undef	VM86
#undef	IOPERM
#else
#if defined(XEN)
#undef	IOPERM
#else /* defined(XEN) */
#define	IOPERM
#endif /* defined(XEN) */
#endif

#ifdef VM86
#include <machine/vm86.h>
#endif

#ifdef PERFCTRS
#include <machine/pmc.h>
#endif

extern struct vm_map *kernel_map;

int x86_get_ioperm(struct lwp *, void *, register_t *);
int x86_set_ioperm(struct lwp *, void *, register_t *);
int x86_get_mtrr(struct lwp *, void *, register_t *);
int x86_set_mtrr(struct lwp *, void *, register_t *);
int x86_set_sdbase(void *, char, lwp_t *, bool);
int x86_get_sdbase(void *, char);

#ifdef LDT_DEBUG
static void x86_print_ldt(int, const struct segment_descriptor *);

static void
x86_print_ldt(int i, const struct segment_descriptor *d)
{
	printf("[%d] lolimit=0x%x, lobase=0x%x, type=%u, dpl=%u, p=%u, "
	    "hilimit=0x%x, xx=%x, def32=%u, gran=%u, hibase=0x%x\n",
	    i, d->sd_lolimit, d->sd_lobase, d->sd_type, d->sd_dpl, d->sd_p,
	    d->sd_hilimit, d->sd_xx, d->sd_def32, d->sd_gran, d->sd_hibase);
}
#endif

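/*
 * Handle the X86_GET_LDT sysarch call: bounds-check the request, fetch
 * a copy of the process's LDT into a temporary buffer via
 * x86_get_ldt1(), and copy the descriptors out to user space.
 */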
int
x86_get_ldt(struct lwp *l, void *args, register_t *retval)
{
#ifndef USER_LDT
	return EINVAL;
#else
	struct x86_get_ldt_args ua;
	union descriptor *cp;
	int error;

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return error;

	if (ua.num < 0 || ua.num > 8192)
		return EINVAL;

	cp = malloc(ua.num * sizeof(union descriptor), M_TEMP, M_WAITOK);
	if (cp == NULL)
		return ENOMEM;

	error = x86_get_ldt1(l, &ua, cp);
	*retval = ua.num;
	if (error == 0)
		error = copyout(cp, ua.desc, ua.num * sizeof(*cp));

	free(cp, M_TEMP);
	return error;
#endif
}

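/*
 * Copy up to ua->num descriptors, starting at ua->start, from the
 * process's private LDT (or from the default LDT if the process has
 * none) into the kernel buffer supplied by the caller.  The LDT is
 * read with cpu_lock held.
 */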
int
x86_get_ldt1(struct lwp *l, struct x86_get_ldt_args *ua, union descriptor *cp)
{
#ifndef USER_LDT
	return EINVAL;
#else
	int error;
	struct proc *p = l->l_proc;
	pmap_t pmap = p->p_vmspace->vm_map.pmap;
	int nldt, num;
	union descriptor *lp;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_LDT_GET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

#ifdef	LDT_DEBUG
	printf("x86_get_ldt: start=%d num=%d descs=%p\n", ua->start,
	    ua->num, ua->desc);
#endif

	if (ua->start < 0 || ua->num < 0 || ua->start > 8192 || ua->num > 8192 ||
	    ua->start + ua->num > 8192)
		return (EINVAL);

	mutex_enter(&cpu_lock);

	if (pmap->pm_ldt != NULL) {
		nldt = pmap->pm_ldt_len / sizeof(*lp);
		lp = pmap->pm_ldt;
	} else {
		nldt = NLDT;
		lp = ldt;
	}

	if (ua->start > nldt) {
		mutex_exit(&cpu_lock);
		return (EINVAL);
	}

	lp += ua->start;
	num = min(ua->num, nldt - ua->start);
	ua->num = num;
#ifdef LDT_DEBUG
	{
		int i;
		for (i = 0; i < num; i++)
			x86_print_ldt(i, &lp[i].sd);
	}
#endif

	memcpy(cp, lp, num * sizeof(union descriptor));
	mutex_exit(&cpu_lock);

	return 0;
#endif
}

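/*
 * Handle the X86_SET_LDT sysarch call: copy the descriptor array in
 * from user space and hand it to x86_set_ldt1() for validation and
 * installation.
 */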
int
x86_set_ldt(struct lwp *l, void *args, register_t *retval)
{
#ifndef USER_LDT
	return EINVAL;
#else
	struct x86_set_ldt_args ua;
	union descriptor *descv;
	int error;

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return (error);

	if (ua.num < 0 || ua.num > 8192)
		return EINVAL;

	descv = malloc(sizeof (*descv) * ua.num, M_TEMP, M_NOWAIT);
	if (descv == NULL)
		return ENOMEM;

	error = copyin(ua.desc, descv, sizeof (*descv) * ua.num);
	if (error == 0)
		error = x86_set_ldt1(l, &ua, descv);
	*retval = ua.start;

	free(descv, M_TEMP);
	return error;
#endif
}

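/*
 * Validate the supplied descriptors and install them in the process's
 * LDT.  A new LDT is built off-line and then swapped in under cpu_lock,
 * so the update appears atomic to all LWPs sharing the pmap.
 */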
int
x86_set_ldt1(struct lwp *l, struct x86_set_ldt_args *ua,
    union descriptor *descv)
{
#ifndef USER_LDT
	return EINVAL;
#else
	int error, i, n, old_sel, new_sel;
	struct proc *p = l->l_proc;
	pmap_t pmap = p->p_vmspace->vm_map.pmap;
	size_t old_len, new_len;
	union descriptor *old_ldt, *new_ldt;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_LDT_SET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	if (ua->start < 0 || ua->num < 0 || ua->start > 8192 || ua->num > 8192 ||
	    ua->start + ua->num > 8192)
		return (EINVAL);

	/* Check descriptors for access violations. */
	for (i = 0; i < ua->num; i++) {
		union descriptor *desc = &descv[i];

		switch (desc->sd.sd_type) {
		case SDT_SYSNULL:
			desc->sd.sd_p = 0;
			break;
		case SDT_SYS286CGT:
		case SDT_SYS386CGT:
			/*
			 * Only allow call gates targeting a segment
			 * in the LDT or a user segment in the fixed
			 * part of the gdt.  Segments in the LDT are
			 * constrained (below) to be user segments.
			 */
			if (desc->gd.gd_p != 0 &&
			    !ISLDT(desc->gd.gd_selector) &&
			    ((IDXSEL(desc->gd.gd_selector) >= NGDT) ||
			     (gdt[IDXSEL(desc->gd.gd_selector)].sd.sd_dpl !=
				 SEL_UPL))) {
				return EACCES;
			}
			break;
		case SDT_MEMEC:
		case SDT_MEMEAC:
		case SDT_MEMERC:
		case SDT_MEMERAC:
			/* Must be "present" if executable and conforming. */
			if (desc->sd.sd_p == 0)
				return EACCES;
			break;
		case SDT_MEMRO:
		case SDT_MEMROA:
		case SDT_MEMRW:
		case SDT_MEMRWA:
		case SDT_MEMROD:
		case SDT_MEMRODA:
		case SDT_MEMRWD:
		case SDT_MEMRWDA:
		case SDT_MEME:
		case SDT_MEMEA:
		case SDT_MEMER:
		case SDT_MEMERA:
			break;
		default:
			/*
			 * Make sure that unknown descriptor types are
			 * not marked present.
			 */
			if (desc->sd.sd_p != 0)
				return EACCES;
			break;
		}

		if (desc->sd.sd_p != 0) {
			/* Only user (ring-3) descriptors may be present. */
			if (desc->sd.sd_dpl != SEL_UPL)
				return EACCES;
		}
	}

	/*
	 * Install selected changes.  We perform a copy, write, swap dance
	 * here to ensure that all updates happen atomically.
	 */

	/* Allocate a new LDT. */
	for (;;) {
		new_len = (ua->start + ua->num) * sizeof(union descriptor);
		new_len = max(new_len, pmap->pm_ldt_len);
		new_len = max(new_len, NLDT * sizeof(union descriptor));
		new_len = round_page(new_len);
		new_ldt = (union descriptor *)uvm_km_alloc(kernel_map,
		    new_len, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
		mutex_enter(&cpu_lock);
		if (pmap->pm_ldt_len <= new_len) {
			break;
		}
		mutex_exit(&cpu_lock);
		uvm_km_free(kernel_map, (vaddr_t)new_ldt, new_len,
		    UVM_KMF_WIRED);
	}

	/* Copy existing entries, if any. */
	if (pmap->pm_ldt != NULL) {
		old_ldt = pmap->pm_ldt;
		old_len = pmap->pm_ldt_len;
		old_sel = pmap->pm_ldt_sel;
		memcpy(new_ldt, old_ldt, old_len);
	} else {
		old_ldt = NULL;
		old_len = 0;
		old_sel = -1;
		memcpy(new_ldt, ldt, NLDT * sizeof(union descriptor));
	}

	/* Apply requested changes. */
	for (i = 0, n = ua->start; i < ua->num; i++, n++) {
		new_ldt[n] = descv[i];
	}

	/* Allocate LDT selector. */
	new_sel = ldt_alloc(new_ldt, new_len);
	if (new_sel == -1) {
		mutex_exit(&cpu_lock);
		uvm_km_free(kernel_map, (vaddr_t)new_ldt, new_len,
		    UVM_KMF_WIRED);
		return ENOMEM;
	}

	/* All changes are now globally visible.  Swap in the new LDT. */
	pmap->pm_ldt = new_ldt;
	pmap->pm_ldt_len = new_len;
	pmap->pm_ldt_sel = new_sel;

	/* Switch existing users onto new LDT. */
	pmap_ldt_sync(pmap);

	/* Free existing LDT (if any). */
	if (old_ldt != NULL) {
		ldt_free(old_sel);
		uvm_km_free(kernel_map, (vaddr_t)old_ldt, old_len,
		    UVM_KMF_WIRED);
	}
	mutex_exit(&cpu_lock);

	return error;
#endif
}

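/*
 * Handle the X86_IOPL sysarch call: raise or lower the I/O privilege
 * level of the calling lwp, subject to kauth authorization.  Under Xen
 * the change is pushed to the hypervisor; otherwise PSL_IOPL is toggled
 * in the trapframe.
 */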
int
x86_iopl(struct lwp *l, void *args, register_t *retval)
{
	int error;
	struct x86_iopl_args ua;
#ifdef XEN
	int iopl;
#else
	struct trapframe *tf = l->l_md.md_regs;
#endif

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPL,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return error;

#ifdef XEN
	if (ua.iopl)
		iopl = SEL_UPL;
	else
		iopl = SEL_KPL;
	l->l_addr->u_pcb.pcb_iopl = iopl;
	/* Force the change at ring 0. */
#ifdef XEN3
	{
		struct physdev_op physop;
		physop.cmd = PHYSDEVOP_SET_IOPL;
		physop.u.set_iopl.iopl = iopl;
		HYPERVISOR_physdev_op(&physop);
	}
#else /* XEN3 */
	{
		dom0_op_t op;
		op.cmd = DOM0_IOPL;
		op.u.iopl.domain = DOMID_SELF;
		op.u.iopl.iopl = iopl;
		HYPERVISOR_dom0_op(&op);
	}
#endif /* XEN3 */
#elif defined(__x86_64__)
	if (ua.iopl)
		tf->tf_rflags |= PSL_IOPL;
	else
		tf->tf_rflags &= ~PSL_IOPL;
#else
	if (ua.iopl)
		tf->tf_eflags |= PSL_IOPL;
	else
		tf->tf_eflags &= ~PSL_IOPL;
#endif

	return 0;
}

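/*
 * Copy the lwp's I/O permission bitmap out to user space.  If the lwp
 * has no private bitmap yet, a temporary all-ones ("no access") map is
 * returned instead.
 */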
int
x86_get_ioperm(struct lwp *l, void *args, register_t *retval)
{
#ifdef IOPERM
	int error;
	struct pcb *pcb = &l->l_addr->u_pcb;
	struct x86_get_ioperm_args ua;
	void *dummymap = NULL;
	void *iomap;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPERM_GET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return (error);

	iomap = pcb->pcb_iomap;
	if (iomap == NULL) {
		iomap = dummymap = kmem_alloc(IOMAPSIZE, KM_SLEEP);
		memset(dummymap, 0xff, IOMAPSIZE);
	}
	error = copyout(iomap, ua.iomap, IOMAPSIZE);
	if (dummymap != NULL) {
		kmem_free(dummymap, IOMAPSIZE);
	}
	return error;
#else
	return EINVAL;
#endif
}

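/*
 * Install a new I/O permission bitmap for the lwp, then copy it into
 * the current CPU's TSS so that it takes effect immediately for the
 * caller.
 */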
int
x86_set_ioperm(struct lwp *l, void *args, register_t *retval)
{
#ifdef IOPERM
	struct cpu_info *ci;
	int error;
	struct pcb *pcb = &l->l_addr->u_pcb;
	struct x86_set_ioperm_args ua;
	void *new;
	void *old;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPERM_SET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return (error);

	new = kmem_alloc(IOMAPSIZE, KM_SLEEP);
	error = copyin(ua.iomap, new, IOMAPSIZE);
	if (error) {
		kmem_free(new, IOMAPSIZE);
		return error;
	}
	old = pcb->pcb_iomap;
	pcb->pcb_iomap = new;
	if (old != NULL) {
		kmem_free(old, IOMAPSIZE);
	}

	kpreempt_disable();
	ci = curcpu();
	memcpy(ci->ci_iomap, pcb->pcb_iomap, sizeof(ci->ci_iomap));
	ci->ci_tss.tss_iobase =
	    ((uintptr_t)ci->ci_iomap - (uintptr_t)&ci->ci_tss) << 16;
	kpreempt_enable();

	return error;
#else
	return EINVAL;
#endif
}

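/*
 * Handle the X86_GET_MTRR sysarch call: fetch memory type range
 * register settings on behalf of the caller; the entry count is passed
 * in and returned through ua.n.
 */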
int
x86_get_mtrr(struct lwp *l, void *args, register_t *retval)
{
#ifdef MTRR
	struct x86_get_mtrr_args ua;
	int error, n;

	if (mtrr_funcs == NULL)
		return ENOSYS;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_GET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	error = copyin(args, &ua, sizeof ua);
	if (error != 0)
		return error;

	error = copyin(ua.n, &n, sizeof n);
	if (error != 0)
		return error;

	KERNEL_LOCK(1, NULL);
	error = mtrr_get(ua.mtrrp, &n, l->l_proc, MTRR_GETSET_USER);
	KERNEL_UNLOCK_ONE(NULL);

	copyout(&n, ua.n, sizeof (int));

	return error;
#else
	return EINVAL;
#endif
}

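/*
 * Handle the X86_SET_MTRR sysarch call: update memory type range
 * registers as requested and commit the change if any ranges were set.
 */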
int
x86_set_mtrr(struct lwp *l, void *args, register_t *retval)
{
#ifdef MTRR
	int error, n;
	struct x86_set_mtrr_args ua;

	if (mtrr_funcs == NULL)
		return ENOSYS;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_SET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	error = copyin(args, &ua, sizeof ua);
	if (error != 0)
		return error;

	error = copyin(ua.n, &n, sizeof n);
	if (error != 0)
		return error;

	KERNEL_LOCK(1, NULL);
	error = mtrr_set(ua.mtrrp, &n, l->l_proc, MTRR_GETSET_USER);
	if (n != 0)
		mtrr_commit();
	KERNEL_UNLOCK_ONE(NULL);

	copyout(&n, ua.n, sizeof n);

	return error;
#else
	return EINVAL;
#endif
}

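/*
 * Set the %fs or %gs segment base for an lwp (i386 only).  "which" is
 * 'f' or 'g'; "arg" is either the base itself (direct == true, kernel
 * callers) or a user pointer to it (direct == false, sysarch callers).
 * A flat 4GB read/write user descriptor with the requested base is
 * built and installed in the PCB, and also in the current CPU's GDT
 * when the target lwp is the caller.
 */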
int
x86_set_sdbase(void *arg, char which, lwp_t *l, bool direct)
{
#ifdef i386
	struct segment_descriptor sd;
	struct pcb *pcb;
	vaddr_t base;
	int error;

	if (direct) {
		base = (vaddr_t)arg;
	} else {
		error = copyin(arg, &base, sizeof(base));
		if (error != 0)
			return error;
	}

	sd.sd_lobase = base & 0xffffff;
	sd.sd_hibase = (base >> 24) & 0xff;
	sd.sd_lolimit = 0xffff;
	sd.sd_hilimit = 0xf;
	sd.sd_type = SDT_MEMRWA;
	sd.sd_dpl = SEL_UPL;
	sd.sd_p = 1;
	sd.sd_xx = 0;
	sd.sd_def32 = 1;
	sd.sd_gran = 1;

	kpreempt_disable();
	pcb = &l->l_addr->u_pcb;
	if (which == 'f') {
		memcpy(&pcb->pcb_fsd, &sd, sizeof(sd));
		if (l == curlwp) {
			memcpy(&curcpu()->ci_gdt[GUFS_SEL], &sd, sizeof(sd));
		}
	} else /* which == 'g' */ {
		memcpy(&pcb->pcb_gsd, &sd, sizeof(sd));
		if (l == curlwp) {
			memcpy(&curcpu()->ci_gdt[GUGS_SEL], &sd, sizeof(sd));
		}
	}
	kpreempt_enable();

	return 0;
#else
	return EINVAL;
#endif
}

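/*
 * Copy the current lwp's %fs or %gs segment base out to the user
 * pointer "arg" (i386 only).
 */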
int
x86_get_sdbase(void *arg, char which)
{
#ifdef i386
	struct segment_descriptor *sd;
	vaddr_t base;

	switch (which) {
	case 'f':
		sd = (struct segment_descriptor *)&curpcb->pcb_fsd;
		break;
	case 'g':
		sd = (struct segment_descriptor *)&curpcb->pcb_gsd;
		break;
	default:
		panic("x86_get_sdbase");
	}

	base = sd->sd_hibase << 24 | sd->sd_lobase;
	/* Copy the base out to the user pointer itself, not to &arg. */
	return copyout(&base, arg, sizeof(base));
#else
	return EINVAL;
#endif
}

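/*
 * The sysarch(2) system call: dispatch x86-specific operations.  From
 * user space the call takes an operation code and a pointer to an
 * op-specific argument structure.  A minimal userland sketch follows
 * (illustrative only, not part of this file; assumes the usual
 * <machine/sysarch.h> declarations and a kernel built with USER_LDT):
 *
 *	#include <err.h>
 *	#include <machine/sysarch.h>
 *
 *	union descriptor descs[4];
 *	struct x86_get_ldt_args gl = {
 *		.start = 0,
 *		.desc = descs,
 *		.num = 4,
 *	};
 *
 *	if (sysarch(X86_GET_LDT, &gl) == -1)
 *		err(1, "X86_GET_LDT");
 */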
int
sys_sysarch(struct lwp *l, const struct sys_sysarch_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) op;
		syscallarg(void *) parms;
	} */
	int error = 0;

	switch (SCARG(uap, op)) {
	case X86_IOPL:
		error = x86_iopl(l, SCARG(uap, parms), retval);
		break;

	case X86_GET_LDT:
		error = x86_get_ldt(l, SCARG(uap, parms), retval);
		break;

	case X86_SET_LDT:
		error = x86_set_ldt(l, SCARG(uap, parms), retval);
		break;

	case X86_GET_IOPERM:
		error = x86_get_ioperm(l, SCARG(uap, parms), retval);
		break;

	case X86_SET_IOPERM:
		error = x86_set_ioperm(l, SCARG(uap, parms), retval);
		break;

	case X86_GET_MTRR:
		error = x86_get_mtrr(l, SCARG(uap, parms), retval);
		break;
	case X86_SET_MTRR:
		error = x86_set_mtrr(l, SCARG(uap, parms), retval);
		break;

#ifdef VM86
	case X86_VM86:
		error = x86_vm86(l, SCARG(uap, parms), retval);
		break;
	case X86_OLD_VM86:
		error = compat_16_x86_vm86(l, SCARG(uap, parms), retval);
		break;
#endif

#ifdef PERFCTRS
	case X86_PMC_INFO:
		KERNEL_LOCK(1, NULL);
		error = pmc_info(l, SCARG(uap, parms), retval);
		KERNEL_UNLOCK_ONE(NULL);
		break;

	case X86_PMC_STARTSTOP:
		KERNEL_LOCK(1, NULL);
		error = pmc_startstop(l, SCARG(uap, parms), retval);
		KERNEL_UNLOCK_ONE(NULL);
		break;

	case X86_PMC_READ:
		KERNEL_LOCK(1, NULL);
		error = pmc_read(l, SCARG(uap, parms), retval);
		KERNEL_UNLOCK_ONE(NULL);
		break;
#endif

	case X86_SET_FSBASE:
		error = x86_set_sdbase(SCARG(uap, parms), 'f', curlwp, false);
		break;

	case X86_SET_GSBASE:
		error = x86_set_sdbase(SCARG(uap, parms), 'g', curlwp, false);
		break;

	case X86_GET_FSBASE:
		error = x86_get_sdbase(SCARG(uap, parms), 'f');
		break;

	case X86_GET_GSBASE:
		error = x86_get_sdbase(SCARG(uap, parms), 'g');
		break;

	default:
		error = EINVAL;
		break;
	}
	return (error);
}

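/*
 * Set an lwp's private (TLS) pointer: on i386 this becomes the %gs
 * segment base, applied directly without a copyin.
 */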
int
cpu_lwp_setprivate(lwp_t *l, void *addr)
{

	return x86_set_sdbase(addr, 'g', l, true);
}