sys_machdep.c revision 1.49
      1  1.49  riastrad /*	$NetBSD: sys_machdep.c,v 1.49 2018/09/03 16:29:29 riastradh Exp $	*/
      2   1.1        ad 
      3  1.38      maxv /*
      4  1.38      maxv  * Copyright (c) 1998, 2007, 2009, 2017 The NetBSD Foundation, Inc.
      5   1.1        ad  * All rights reserved.
      6   1.1        ad  *
      7   1.1        ad  * This code is derived from software contributed to The NetBSD Foundation
      8  1.38      maxv  * by Charles M. Hannum, by Andrew Doran, and by Maxime Villard.
      9   1.1        ad  *
     10   1.1        ad  * Redistribution and use in source and binary forms, with or without
     11   1.1        ad  * modification, are permitted provided that the following conditions
     12   1.1        ad  * are met:
     13   1.1        ad  * 1. Redistributions of source code must retain the above copyright
     14   1.1        ad  *    notice, this list of conditions and the following disclaimer.
     15   1.1        ad  * 2. Redistributions in binary form must reproduce the above copyright
     16   1.1        ad  *    notice, this list of conditions and the following disclaimer in the
     17   1.1        ad  *    documentation and/or other materials provided with the distribution.
     18   1.1        ad  *
     19   1.1        ad  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20   1.1        ad  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21   1.1        ad  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22   1.1        ad  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23   1.1        ad  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24   1.1        ad  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25   1.1        ad  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26   1.1        ad  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27   1.1        ad  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28   1.1        ad  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29   1.1        ad  * POSSIBILITY OF SUCH DAMAGE.
     30   1.1        ad  */
     31   1.1        ad 
     32   1.1        ad #include <sys/cdefs.h>
     33  1.49  riastrad __KERNEL_RCSID(0, "$NetBSD: sys_machdep.c,v 1.49 2018/09/03 16:29:29 riastradh Exp $");
     34   1.1        ad 
     35   1.1        ad #include "opt_mtrr.h"
     36   1.1        ad #include "opt_user_ldt.h"
     37  1.27  christos #include "opt_compat_netbsd.h"
     38   1.1        ad #include "opt_xen.h"
     39   1.1        ad 
     40   1.1        ad #include <sys/param.h>
     41   1.1        ad #include <sys/systm.h>
     42   1.1        ad #include <sys/ioctl.h>
     43   1.1        ad #include <sys/file.h>
     44   1.1        ad #include <sys/time.h>
     45   1.1        ad #include <sys/proc.h>
     46   1.1        ad #include <sys/uio.h>
     47   1.1        ad #include <sys/kernel.h>
     48   1.1        ad #include <sys/buf.h>
     49   1.1        ad #include <sys/signal.h>
     50   1.1        ad #include <sys/malloc.h>
     51   1.9      yamt #include <sys/kmem.h>
     52   1.1        ad #include <sys/kauth.h>
     53  1.17        ad #include <sys/cpu.h>
     54   1.1        ad #include <sys/mount.h>
     55   1.1        ad #include <sys/syscallargs.h>
     56   1.1        ad 
     57   1.1        ad #include <uvm/uvm_extern.h>
     58   1.1        ad 
     59   1.1        ad #include <machine/cpufunc.h>
     60   1.1        ad #include <machine/gdt.h>
     61   1.1        ad #include <machine/psl.h>
     62   1.1        ad #include <machine/reg.h>
     63   1.1        ad #include <machine/sysarch.h>
     64   1.1        ad #include <machine/mtrr.h>
     65   1.1        ad 
     66   1.1        ad #ifdef __x86_64__
     67  1.40      maxv #undef	IOPERM	/* not implemented */
     68   1.1        ad #else
     69   1.9      yamt #if defined(XEN)
     70   1.9      yamt #undef	IOPERM
     71   1.9      yamt #else /* defined(XEN) */
     72   1.1        ad #define	IOPERM
     73   1.9      yamt #endif /* defined(XEN) */
     74   1.1        ad #endif
     75   1.1        ad 
     76  1.35      maxv #ifdef XEN
     77  1.40      maxv #undef	USER_LDT
     78  1.35      maxv #endif
     79  1.35      maxv 
     80   1.1        ad extern struct vm_map *kernel_map;
     81   1.1        ad 
     82   1.1        ad int x86_get_ioperm(struct lwp *, void *, register_t *);
     83   1.1        ad int x86_set_ioperm(struct lwp *, void *, register_t *);
     84   1.1        ad int x86_get_mtrr(struct lwp *, void *, register_t *);
     85   1.1        ad int x86_set_mtrr(struct lwp *, void *, register_t *);
     86  1.24       chs int x86_set_sdbase32(void *, char, lwp_t *, bool);
     87  1.18        ad int x86_set_sdbase(void *, char, lwp_t *, bool);
     88  1.24       chs int x86_get_sdbase32(void *, char);
     89  1.18        ad int x86_get_sdbase(void *, char);
     90   1.1        ad 
     91   1.1        ad int
     92   1.1        ad x86_get_ldt(struct lwp *l, void *args, register_t *retval)
     93   1.1        ad {
     94   1.2       dsl #ifndef USER_LDT
     95   1.2       dsl 	return EINVAL;
     96   1.2       dsl #else
     97   1.2       dsl 	struct x86_get_ldt_args ua;
     98   1.2       dsl 	union descriptor *cp;
     99   1.2       dsl 	int error;
    100   1.2       dsl 
    101   1.2       dsl 	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
    102   1.2       dsl 		return error;
    103   1.2       dsl 
    104   1.2       dsl 	if (ua.num < 0 || ua.num > 8192)
    105   1.2       dsl 		return EINVAL;
    106   1.2       dsl 
    107   1.2       dsl 	cp = malloc(ua.num * sizeof(union descriptor), M_TEMP, M_WAITOK);
    108   1.2       dsl 	if (cp == NULL)
    109   1.2       dsl 		return ENOMEM;
    110   1.2       dsl 
    111   1.2       dsl 	error = x86_get_ldt1(l, &ua, cp);
    112   1.2       dsl 	*retval = ua.num;
    113   1.2       dsl 	if (error == 0)
    114   1.2       dsl 		error = copyout(cp, ua.desc, ua.num * sizeof(*cp));
    115   1.2       dsl 
    116   1.2       dsl 	free(cp, M_TEMP);
    117   1.2       dsl 	return error;
    118   1.2       dsl #endif
    119   1.2       dsl }
    120   1.2       dsl 
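/*
 * Illustrative sketch, not part of the kernel: userland reaches
 * x86_get_ldt() through sysarch(2) on i386 (on amd64 only via the
 * netbsd32 compat path, see sys_sysarch() below).  The argument
 * structure and X86_GET_LDT come from <machine/sysarch.h> and union
 * descriptor from <machine/segments.h>; header locations are quoted
 * from memory.  On success the syscall returns the number of
 * descriptors copied out (the clamped ua.num).
 *
 *	#include <sys/types.h>
 *	#include <machine/segments.h>
 *	#include <machine/sysarch.h>
 *	#include <err.h>
 *	#include <stdlib.h>
 *
 *	int
 *	dump_ldt_slots(int start, int num)
 *	{
 *		struct x86_get_ldt_args gl;
 *		union descriptor *buf;
 *		int n;
 *
 *		buf = calloc(num, sizeof(*buf));
 *		if (buf == NULL)
 *			err(EXIT_FAILURE, "calloc");
 *
 *		gl.start = start;
 *		gl.num = num;
 *		gl.desc = buf;
 *		n = sysarch(X86_GET_LDT, &gl);	// returns the slot count
 *		if (n == -1)
 *			err(EXIT_FAILURE, "X86_GET_LDT");
 *		free(buf);
 *		return n;
 *	}
 */
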
    121   1.2       dsl int
    122   1.2       dsl x86_get_ldt1(struct lwp *l, struct x86_get_ldt_args *ua, union descriptor *cp)
    123   1.2       dsl {
    124   1.2       dsl #ifndef USER_LDT
    125   1.2       dsl 	return EINVAL;
    126   1.2       dsl #else
    127   1.1        ad 	int error;
    128   1.1        ad 	struct proc *p = l->l_proc;
    129   1.1        ad 	pmap_t pmap = p->p_vmspace->vm_map.pmap;
    130   1.1        ad 	int nldt, num;
    131   1.2       dsl 	union descriptor *lp;
    132   1.1        ad 
    133  1.38      maxv #ifdef __x86_64__
    134  1.38      maxv 	const size_t min_ldt_size = LDT_SIZE;
    135  1.38      maxv #else
    136  1.38      maxv 	const size_t min_ldt_size = NLDT * sizeof(union descriptor);
    137  1.38      maxv #endif
    138  1.38      maxv 
    139   1.1        ad 	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_LDT_GET,
    140   1.1        ad 	    NULL, NULL, NULL, NULL);
    141   1.1        ad 	if (error)
    142  1.38      maxv 		return error;
    143   1.1        ad 
    144   1.2       dsl 	if (ua->start < 0 || ua->num < 0 || ua->start > 8192 || ua->num > 8192 ||
    145   1.2       dsl 	    ua->start + ua->num > 8192)
    146  1.38      maxv 		return EINVAL;
    147   1.1        ad 
    148  1.38      maxv 	if (ua->start * sizeof(union descriptor) < min_ldt_size)
    149  1.32      maxv 		return EINVAL;
    150  1.32      maxv 
    151  1.17        ad 	mutex_enter(&cpu_lock);
    152   1.1        ad 
    153  1.17        ad 	if (pmap->pm_ldt != NULL) {
    154  1.17        ad 		nldt = pmap->pm_ldt_len / sizeof(*lp);
    155   1.1        ad 		lp = pmap->pm_ldt;
    156   1.1        ad 	} else {
    157  1.32      maxv #ifdef __x86_64__
    158  1.32      maxv 		nldt = LDT_SIZE / sizeof(*lp);
    159  1.32      maxv #else
    160   1.1        ad 		nldt = NLDT;
    161  1.32      maxv #endif
    162  1.32      maxv 		lp = (union descriptor *)ldtstore;
    163   1.1        ad 	}
    164   1.1        ad 
    165   1.2       dsl 	if (ua->start > nldt) {
    166  1.17        ad 		mutex_exit(&cpu_lock);
    167  1.38      maxv 		return EINVAL;
    168   1.1        ad 	}
    169   1.1        ad 
    170   1.2       dsl 	lp += ua->start;
    171  1.49  riastrad 	num = uimin(ua->num, nldt - ua->start);
    172   1.2       dsl 	ua->num = num;
    173   1.1        ad 
    174   1.1        ad 	memcpy(cp, lp, num * sizeof(union descriptor));
    175  1.17        ad 	mutex_exit(&cpu_lock);
    176   1.1        ad 
    177   1.2       dsl 	return 0;
    178   1.2       dsl #endif
    179   1.2       dsl }
    180   1.2       dsl 
    181   1.2       dsl int
    182   1.2       dsl x86_set_ldt(struct lwp *l, void *args, register_t *retval)
    183   1.2       dsl {
    184   1.2       dsl #ifndef USER_LDT
    185   1.2       dsl 	return EINVAL;
    186   1.2       dsl #else
    187   1.2       dsl 	struct x86_set_ldt_args ua;
    188   1.2       dsl 	union descriptor *descv;
    189   1.2       dsl 	int error;
    190   1.2       dsl 
    191   1.2       dsl 	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
    192  1.38      maxv 		return error;
    193   1.2       dsl 
    194   1.2       dsl 	if (ua.num < 0 || ua.num > 8192)
    195   1.2       dsl 		return EINVAL;
    196   1.2       dsl 
    197   1.2       dsl 	descv = malloc(sizeof (*descv) * ua.num, M_TEMP, M_NOWAIT);
    198   1.2       dsl 	if (descv == NULL)
    199   1.2       dsl 		return ENOMEM;
    200   1.2       dsl 
    201   1.2       dsl 	error = copyin(ua.desc, descv, sizeof (*descv) * ua.num);
    202   1.1        ad 	if (error == 0)
    203   1.2       dsl 		error = x86_set_ldt1(l, &ua, descv);
    204   1.2       dsl 	*retval = ua.start;
    205   1.1        ad 
    206   1.2       dsl 	free(descv, M_TEMP);
    207   1.2       dsl 	return error;
    208   1.1        ad #endif
    209   1.1        ad }
    210   1.1        ad 
    211   1.1        ad int
    212   1.2       dsl x86_set_ldt1(struct lwp *l, struct x86_set_ldt_args *ua,
    213   1.2       dsl     union descriptor *descv)
    214   1.1        ad {
    215   1.2       dsl #ifndef USER_LDT
    216   1.2       dsl 	return EINVAL;
    217   1.2       dsl #else
    218  1.17        ad 	int error, i, n, old_sel, new_sel;
    219   1.1        ad 	struct proc *p = l->l_proc;
    220   1.1        ad 	pmap_t pmap = p->p_vmspace->vm_map.pmap;
    221  1.17        ad 	size_t old_len, new_len;
    222  1.17        ad 	union descriptor *old_ldt, *new_ldt;
    223   1.1        ad 
    224  1.32      maxv #ifdef __x86_64__
    225  1.32      maxv 	const size_t min_ldt_size = LDT_SIZE;
    226  1.32      maxv #else
    227  1.32      maxv 	const size_t min_ldt_size = NLDT * sizeof(union descriptor);
    228  1.32      maxv #endif
    229  1.32      maxv 
    230   1.1        ad 	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_LDT_SET,
    231   1.1        ad 	    NULL, NULL, NULL, NULL);
    232   1.1        ad 	if (error)
    233  1.38      maxv 		return error;
    234   1.1        ad 
    235   1.2       dsl 	if (ua->start < 0 || ua->num < 0 || ua->start > 8192 || ua->num > 8192 ||
    236   1.2       dsl 	    ua->start + ua->num > 8192)
    237  1.38      maxv 		return EINVAL;
    238   1.1        ad 
    239  1.38      maxv 	if (ua->start * sizeof(union descriptor) < min_ldt_size)
    240  1.32      maxv 		return EINVAL;
    241  1.32      maxv 
    242   1.1        ad 	/* Check descriptors for access violations. */
    243   1.2       dsl 	for (i = 0; i < ua->num; i++) {
    244   1.1        ad 		union descriptor *desc = &descv[i];
    245   1.1        ad 
    246  1.43      maxv #ifdef __x86_64__
    247  1.43      maxv 		if (desc->sd.sd_long != 0)
    248  1.43      maxv 			return EACCES;
    249  1.43      maxv #endif
    250  1.43      maxv 
    251   1.1        ad 		switch (desc->sd.sd_type) {
    252   1.1        ad 		case SDT_SYSNULL:
    253   1.1        ad 			desc->sd.sd_p = 0;
    254   1.1        ad 			break;
    255   1.1        ad 		case SDT_MEMEC:
    256   1.1        ad 		case SDT_MEMEAC:
    257   1.1        ad 		case SDT_MEMERC:
    258   1.1        ad 		case SDT_MEMERAC:
    259   1.1        ad 			/* Must be "present" if executable and conforming. */
    260   1.2       dsl 			if (desc->sd.sd_p == 0)
    261   1.2       dsl 				return EACCES;
    262   1.1        ad 			break;
    263   1.1        ad 		case SDT_MEMRO:
    264   1.1        ad 		case SDT_MEMROA:
    265   1.1        ad 		case SDT_MEMRW:
    266   1.1        ad 		case SDT_MEMRWA:
    267   1.1        ad 		case SDT_MEMROD:
    268   1.1        ad 		case SDT_MEMRODA:
    269   1.1        ad 		case SDT_MEMRWD:
    270   1.1        ad 		case SDT_MEMRWDA:
    271   1.1        ad 		case SDT_MEME:
    272   1.1        ad 		case SDT_MEMEA:
    273   1.1        ad 		case SDT_MEMER:
    274   1.1        ad 		case SDT_MEMERA:
    275   1.1        ad 			break;
    276   1.1        ad 		default:
    277  1.38      maxv 			return EACCES;
    278   1.1        ad 		}
    279   1.1        ad 
    280   1.1        ad 		if (desc->sd.sd_p != 0) {
    281   1.1        ad 			/* Only user (ring-3) descriptors may be present. */
    282   1.2       dsl 			if (desc->sd.sd_dpl != SEL_UPL)
    283   1.2       dsl 				return EACCES;
    284   1.1        ad 		}
    285   1.1        ad 	}
    286   1.1        ad 
    287  1.17        ad 	/*
    288  1.17        ad 	 * Install selected changes.  We perform a copy, write, swap dance
    289  1.17        ad 	 * here to ensure that all updates happen atomically.
    290  1.17        ad 	 */
    291  1.17        ad 
    292  1.17        ad 	/* Allocate a new LDT. */
    293  1.17        ad 	for (;;) {
    294  1.17        ad 		new_len = (ua->start + ua->num) * sizeof(union descriptor);
    295  1.49  riastrad 		new_len = uimax(new_len, pmap->pm_ldt_len);
    296  1.49  riastrad 		new_len = uimax(new_len, min_ldt_size);
    297  1.17        ad 		new_len = round_page(new_len);
    298   1.1        ad 		new_ldt = (union descriptor *)uvm_km_alloc(kernel_map,
    299  1.30  dholland 		    new_len, 0, UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_WAITVA);
    300  1.17        ad 		mutex_enter(&cpu_lock);
    301  1.17        ad 		if (pmap->pm_ldt_len <= new_len) {
    302  1.17        ad 			break;
    303   1.1        ad 		}
    304  1.17        ad 		mutex_exit(&cpu_lock);
    305  1.17        ad 		uvm_km_free(kernel_map, (vaddr_t)new_ldt, new_len,
    306  1.17        ad 		    UVM_KMF_WIRED);
    307  1.17        ad 	}
    308   1.1        ad 
    309  1.17        ad 	/* Copy existing entries, if any. */
    310  1.17        ad 	if (pmap->pm_ldt != NULL) {
    311   1.1        ad 		old_ldt = pmap->pm_ldt;
    312  1.17        ad 		old_len = pmap->pm_ldt_len;
    313  1.17        ad 		old_sel = pmap->pm_ldt_sel;
    314   1.1        ad 		memcpy(new_ldt, old_ldt, old_len);
    315  1.17        ad 	} else {
    316  1.17        ad 		old_ldt = NULL;
    317  1.17        ad 		old_len = 0;
    318  1.17        ad 		old_sel = -1;
    319  1.32      maxv 		memcpy(new_ldt, ldtstore, min_ldt_size);
    320  1.17        ad 	}
    321   1.1        ad 
    322  1.17        ad 	/* Apply requested changes. */
    323  1.17        ad 	for (i = 0, n = ua->start; i < ua->num; i++, n++) {
    324  1.17        ad 		new_ldt[n] = descv[i];
    325  1.17        ad 	}
    326   1.1        ad 
    327  1.17        ad 	/* Allocate LDT selector. */
    328  1.17        ad 	new_sel = ldt_alloc(new_ldt, new_len);
    329  1.17        ad 	if (new_sel == -1) {
    330  1.17        ad 		mutex_exit(&cpu_lock);
    331   1.1        ad 		uvm_km_free(kernel_map, (vaddr_t)new_ldt, new_len,
    332   1.1        ad 		    UVM_KMF_WIRED);
    333  1.17        ad 		return ENOMEM;
    334  1.17        ad 	}
    335  1.17        ad 
    336  1.17        ad 	/* All changes are now globally visible.  Swap in the new LDT. */
    337  1.17        ad 	pmap->pm_ldt_len = new_len;
    338  1.17        ad 	pmap->pm_ldt_sel = new_sel;
     339  1.30  dholland 	/* Store-store barrier: publish pm_ldt_len/pm_ldt_sel before pm_ldt for pmap_fork()'s unlocked read. */
    340  1.30  dholland 	membar_producer();
    341  1.30  dholland 	pmap->pm_ldt = new_ldt;
    342  1.17        ad 
    343  1.17        ad 	/* Switch existing users onto new LDT. */
    344  1.17        ad 	pmap_ldt_sync(pmap);
    345  1.17        ad 
    346  1.17        ad 	/* Free existing LDT (if any). */
    347  1.17        ad 	if (old_ldt != NULL) {
    348  1.17        ad 		ldt_free(old_sel);
    349  1.30  dholland 		/* exit the mutex before free */
    350  1.30  dholland 		mutex_exit(&cpu_lock);
    351  1.17        ad 		uvm_km_free(kernel_map, (vaddr_t)old_ldt, old_len,
    352   1.1        ad 		    UVM_KMF_WIRED);
    353  1.30  dholland 	} else {
    354  1.30  dholland 		mutex_exit(&cpu_lock);
    355  1.17        ad 	}
    356   1.2       dsl 
    357  1.17        ad 	return error;
    358   1.1        ad #endif
    359   1.1        ad }
    360   1.1        ad 
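/*
 * Illustrative sketch, not part of the kernel: a descriptor that passes
 * the validation above must use one of the memory types admitted by the
 * switch, carry DPL SEL_UPL if it is marked present, and land in a slot
 * at or beyond the default LDT (the min_ldt_size check).  The flat
 * 32-bit read/write data segment below is built much as
 * x86_set_sdbase32() builds one further down.  Headers as in the
 * X86_GET_LDT sketch above, plus <string.h>; "slot" is a caller-chosen
 * free LDT slot.  On success sysarch(2) returns ua.start.
 *
 *	int
 *	install_flat_data_segment(int slot)
 *	{
 *		union descriptor d;
 *		struct x86_set_ldt_args sl;
 *
 *		memset(&d, 0, sizeof(d));
 *		d.sd.sd_lobase = 0;
 *		d.sd.sd_hibase = 0;
 *		d.sd.sd_lolimit = 0xffff;	// with sd_gran: 4GB limit
 *		d.sd.sd_hilimit = 0xf;
 *		d.sd.sd_type = SDT_MEMRWA;	// read/write data, accessed
 *		d.sd.sd_dpl = SEL_UPL;		// required when sd_p is set
 *		d.sd.sd_p = 1;
 *		d.sd.sd_def32 = 1;
 *		d.sd.sd_gran = 1;
 *
 *		sl.start = slot;
 *		sl.num = 1;
 *		sl.desc = &d;
 *		return sysarch(X86_SET_LDT, &sl);
 *	}
 */
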
    361   1.1        ad int
    362   1.1        ad x86_iopl(struct lwp *l, void *args, register_t *retval)
    363   1.1        ad {
    364   1.1        ad 	int error;
    365   1.1        ad 	struct x86_iopl_args ua;
    366   1.1        ad #ifdef XEN
    367   1.9      yamt 	int iopl;
    368   1.1        ad #else
    369   1.1        ad 	struct trapframe *tf = l->l_md.md_regs;
    370   1.1        ad #endif
    371   1.1        ad 
    372   1.1        ad 	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPL,
    373   1.1        ad 	    NULL, NULL, NULL, NULL);
    374   1.1        ad 	if (error)
    375  1.38      maxv 		return error;
    376   1.1        ad 
    377   1.1        ad 	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
    378   1.1        ad 		return error;
    379   1.1        ad 
    380   1.1        ad #ifdef XEN
    381   1.9      yamt 	if (ua.iopl)
    382   1.9      yamt 		iopl = SEL_UPL;
    383   1.9      yamt 	else
    384   1.9      yamt 		iopl = SEL_KPL;
    385  1.22     rmind 
    386  1.22     rmind     {
    387  1.22     rmind 	struct physdev_op physop;
    388  1.22     rmind 	struct pcb *pcb;
    389  1.22     rmind 
    390  1.22     rmind 	pcb = lwp_getpcb(l);
    391  1.22     rmind 	pcb->pcb_iopl = iopl;
    392  1.22     rmind 
    393   1.1        ad 	/* Force the change at ring 0. */
    394  1.22     rmind 	physop.cmd = PHYSDEVOP_SET_IOPL;
    395  1.22     rmind 	physop.u.set_iopl.iopl = iopl;
    396  1.22     rmind 	HYPERVISOR_physdev_op(&physop);
    397  1.22     rmind     }
    398   1.1        ad #elif defined(__x86_64__)
    399   1.1        ad 	if (ua.iopl)
    400   1.1        ad 		tf->tf_rflags |= PSL_IOPL;
    401   1.1        ad 	else
    402   1.1        ad 		tf->tf_rflags &= ~PSL_IOPL;
    403   1.1        ad #else
    404   1.1        ad 	if (ua.iopl)
    405   1.1        ad 		tf->tf_eflags |= PSL_IOPL;
    406   1.1        ad 	else
    407   1.1        ad 		tf->tf_eflags &= ~PSL_IOPL;
    408   1.1        ad #endif
    409   1.1        ad 
    410   1.1        ad 	return 0;
    411   1.1        ad }
    412   1.1        ad 
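/*
 * Illustrative sketch, not part of the kernel: raising the I/O privilege
 * level from userland goes through sysarch(2) with a struct
 * x86_iopl_args; any non-zero iopl value means "allow user I/O", and
 * the request is subject to the KAUTH_MACHDEP_IOPL authorization above.
 *
 *	#include <machine/sysarch.h>
 *	#include <err.h>
 *	#include <stdlib.h>
 *
 *	void
 *	enable_user_io(void)
 *	{
 *		struct x86_iopl_args io;
 *
 *		io.iopl = 1;
 *		if (sysarch(X86_IOPL, &io) == -1)
 *			err(EXIT_FAILURE, "X86_IOPL");
 *		// in/out instructions no longer fault for the calling lwp
 *	}
 */
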
    413   1.1        ad int
    414   1.1        ad x86_get_ioperm(struct lwp *l, void *args, register_t *retval)
    415   1.1        ad {
    416   1.1        ad #ifdef IOPERM
    417   1.1        ad 	int error;
    418  1.22     rmind 	struct pcb *pcb = lwp_getpcb(l);
    419   1.1        ad 	struct x86_get_ioperm_args ua;
    420   1.9      yamt 	void *dummymap = NULL;
    421   1.9      yamt 	void *iomap;
    422   1.1        ad 
    423   1.1        ad 	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPERM_GET,
    424   1.1        ad 	    NULL, NULL, NULL, NULL);
    425   1.1        ad 	if (error)
    426  1.38      maxv 		return error;
    427   1.1        ad 
    428   1.1        ad 	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
    429  1.38      maxv 		return error;
    430   1.1        ad 
    431   1.9      yamt 	iomap = pcb->pcb_iomap;
    432   1.9      yamt 	if (iomap == NULL) {
    433   1.9      yamt 		iomap = dummymap = kmem_alloc(IOMAPSIZE, KM_SLEEP);
    434   1.9      yamt 		memset(dummymap, 0xff, IOMAPSIZE);
    435   1.9      yamt 	}
    436   1.9      yamt 	error = copyout(iomap, ua.iomap, IOMAPSIZE);
    437   1.9      yamt 	if (dummymap != NULL) {
    438   1.9      yamt 		kmem_free(dummymap, IOMAPSIZE);
    439   1.9      yamt 	}
    440   1.9      yamt 	return error;
    441   1.1        ad #else
    442   1.1        ad 	return EINVAL;
    443   1.1        ad #endif
    444   1.1        ad }
    445   1.1        ad 
    446   1.1        ad int
    447   1.1        ad x86_set_ioperm(struct lwp *l, void *args, register_t *retval)
    448   1.1        ad {
    449   1.1        ad #ifdef IOPERM
    450   1.9      yamt 	struct cpu_info *ci;
    451   1.1        ad 	int error;
    452  1.22     rmind 	struct pcb *pcb = lwp_getpcb(l);
    453   1.1        ad 	struct x86_set_ioperm_args ua;
    454   1.9      yamt 	void *new;
    455   1.9      yamt 	void *old;
    456   1.1        ad 
    457   1.1        ad   	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPERM_SET,
    458   1.1        ad 	    NULL, NULL, NULL, NULL);
    459   1.1        ad 	if (error)
    460  1.38      maxv 		return error;
    461   1.1        ad 
    462   1.1        ad 	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
    463  1.38      maxv 		return error;
    464   1.1        ad 
    465   1.9      yamt 	new = kmem_alloc(IOMAPSIZE, KM_SLEEP);
    466   1.9      yamt 	error = copyin(ua.iomap, new, IOMAPSIZE);
    467   1.9      yamt 	if (error) {
    468   1.9      yamt 		kmem_free(new, IOMAPSIZE);
    469   1.9      yamt 		return error;
    470   1.9      yamt 	}
    471   1.9      yamt 	old = pcb->pcb_iomap;
    472   1.9      yamt 	pcb->pcb_iomap = new;
    473   1.9      yamt 	if (old != NULL) {
    474   1.9      yamt 		kmem_free(old, IOMAPSIZE);
    475   1.9      yamt 	}
    476   1.9      yamt 
    477  1.46      maxv 	CTASSERT(offsetof(struct cpu_tss, iomap) -
    478  1.46      maxv 	    offsetof(struct cpu_tss, tss) == IOMAP_VALIDOFF);
    479  1.46      maxv 
    480  1.13        ad 	kpreempt_disable();
    481   1.9      yamt 	ci = curcpu();
    482  1.45      maxv 	memcpy(ci->ci_tss->iomap, pcb->pcb_iomap, IOMAPSIZE);
    483  1.46      maxv 	ci->ci_tss->tss.tss_iobase = IOMAP_VALIDOFF << 16;
    484  1.13        ad 	kpreempt_enable();
    485   1.9      yamt 
    486   1.9      yamt 	return error;
    487   1.1        ad #else
    488   1.1        ad 	return EINVAL;
    489   1.1        ad #endif
    490   1.1        ad }
    491   1.1        ad 
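/*
 * Illustrative sketch, not part of the kernel: the ioperm calls move a
 * raw IOMAPSIZE-byte bitmap between userland and the pcb.  Following
 * the TSS convention used above (the dummy map is filled with 0xff), a
 * set bit denies access to the corresponding port and a clear bit
 * grants it.  Whether IOMAPSIZE is visible to userland is an assumption
 * here; if it is not, size the buffer to match the kernel's value.
 * Headers: <machine/sysarch.h>, <stdint.h>, <err.h>, <stdlib.h>.
 *
 *	void
 *	grant_port(unsigned port)
 *	{
 *		struct x86_get_ioperm_args g;
 *		struct x86_set_ioperm_args s;
 *		uint8_t map[IOMAPSIZE];
 *
 *		g.iomap = (void *)map;
 *		if (sysarch(X86_GET_IOPERM, &g) == -1)
 *			err(EXIT_FAILURE, "X86_GET_IOPERM");
 *		map[port / 8] &= ~(1 << (port % 8));	// clear = allow
 *		s.iomap = (void *)map;
 *		if (sysarch(X86_SET_IOPERM, &s) == -1)
 *			err(EXIT_FAILURE, "X86_SET_IOPERM");
 *	}
 */
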
    492   1.1        ad int
    493   1.1        ad x86_get_mtrr(struct lwp *l, void *args, register_t *retval)
    494   1.1        ad {
    495   1.1        ad #ifdef MTRR
    496   1.1        ad 	struct x86_get_mtrr_args ua;
    497   1.1        ad 	int error, n;
    498   1.1        ad 
    499   1.1        ad 	if (mtrr_funcs == NULL)
    500   1.1        ad 		return ENOSYS;
    501   1.1        ad 
    502   1.1        ad  	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_GET,
    503   1.1        ad 	    NULL, NULL, NULL, NULL);
    504   1.1        ad 	if (error)
    505  1.38      maxv 		return error;
    506   1.1        ad 
    507   1.1        ad 	error = copyin(args, &ua, sizeof ua);
    508   1.1        ad 	if (error != 0)
    509   1.1        ad 		return error;
    510   1.1        ad 
    511   1.1        ad 	error = copyin(ua.n, &n, sizeof n);
    512   1.1        ad 	if (error != 0)
    513   1.1        ad 		return error;
    514   1.1        ad 
    515  1.12        ad 	KERNEL_LOCK(1, NULL);
    516   1.1        ad 	error = mtrr_get(ua.mtrrp, &n, l->l_proc, MTRR_GETSET_USER);
    517  1.12        ad 	KERNEL_UNLOCK_ONE(NULL);
    518   1.1        ad 
    519   1.1        ad 	copyout(&n, ua.n, sizeof (int));
    520   1.1        ad 
    521   1.1        ad 	return error;
    522   1.1        ad #else
    523   1.1        ad 	return EINVAL;
    524   1.1        ad #endif
    525   1.1        ad }
    526   1.1        ad 
    527   1.1        ad int
    528   1.1        ad x86_set_mtrr(struct lwp *l, void *args, register_t *retval)
    529   1.1        ad {
    530   1.1        ad #ifdef MTRR
    531   1.1        ad 	int error, n;
    532   1.1        ad 	struct x86_set_mtrr_args ua;
    533   1.1        ad 
    534   1.1        ad 	if (mtrr_funcs == NULL)
    535   1.1        ad 		return ENOSYS;
    536   1.1        ad 
    537   1.1        ad  	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_SET,
    538   1.1        ad 	    NULL, NULL, NULL, NULL);
    539   1.1        ad 	if (error)
    540  1.38      maxv 		return error;
    541   1.1        ad 
    542   1.1        ad 	error = copyin(args, &ua, sizeof ua);
    543   1.1        ad 	if (error != 0)
    544   1.1        ad 		return error;
    545   1.1        ad 
    546   1.1        ad 	error = copyin(ua.n, &n, sizeof n);
    547   1.1        ad 	if (error != 0)
    548   1.1        ad 		return error;
    549   1.1        ad 
    550  1.12        ad 	KERNEL_LOCK(1, NULL);
    551   1.1        ad 	error = mtrr_set(ua.mtrrp, &n, l->l_proc, MTRR_GETSET_USER);
    552   1.1        ad 	if (n != 0)
    553   1.1        ad 		mtrr_commit();
    554  1.12        ad 	KERNEL_UNLOCK_ONE(NULL);
    555   1.1        ad 
    556   1.1        ad 	copyout(&n, ua.n, sizeof n);
    557   1.1        ad 
    558   1.1        ad 	return error;
    559   1.1        ad #else
    560   1.1        ad 	return EINVAL;
    561   1.1        ad #endif
    562   1.1        ad }
    563   1.1        ad 
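/*
 * Illustrative sketch, not part of the kernel: both MTRR calls pass a
 * struct x86_{get,set}_mtrr_args whose mtrrp member points at an array
 * of struct mtrr and whose n member points at an int holding the array
 * length; the kernel writes the resulting count back through n (the
 * copyout above).  The struct mtrr member and constant names below
 * (base, len, type, flags, MTRR_TYPE_WC, MTRR_VALID) are quoted from
 * memory of <machine/mtrr.h> and should be checked against the header.
 *
 *	void
 *	map_write_combining(uint64_t pa, uint64_t len)	// hypothetical region
 *	{
 *		struct mtrr m;
 *		struct x86_set_mtrr_args sm;
 *		int n = 1;
 *
 *		memset(&m, 0, sizeof(m));
 *		m.base = pa;
 *		m.len = len;
 *		m.type = MTRR_TYPE_WC;
 *		m.flags = MTRR_VALID;
 *		sm.mtrrp = &m;
 *		sm.n = &n;
 *		if (sysarch(X86_SET_MTRR, &sm) == -1)
 *			err(EXIT_FAILURE, "X86_SET_MTRR");
 *	}
 */
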
    564  1.24       chs #ifdef __x86_64__
    565  1.24       chs #define pcb_fsd pcb_fs
    566  1.24       chs #define pcb_gsd pcb_gs
    567  1.24       chs #define segment_descriptor mem_segment_descriptor
    568  1.24       chs #endif
    569  1.24       chs 
    570   1.1        ad int
    571  1.24       chs x86_set_sdbase32(void *arg, char which, lwp_t *l, bool direct)
    572   1.5        ad {
    573  1.24       chs 	struct trapframe *tf = l->l_md.md_regs;
    574  1.24       chs 	union descriptor usd;
    575  1.18        ad 	struct pcb *pcb;
    576  1.24       chs 	uint32_t base;
    577   1.6        ad 	int error;
    578   1.5        ad 
    579  1.18        ad 	if (direct) {
    580  1.18        ad 		base = (vaddr_t)arg;
    581  1.18        ad 	} else {
    582  1.18        ad 		error = copyin(arg, &base, sizeof(base));
    583  1.18        ad 		if (error != 0)
    584  1.18        ad 			return error;
    585  1.18        ad 	}
    586   1.5        ad 
    587  1.24       chs 	memset(&usd, 0, sizeof(usd));
    588  1.19    bouyer 	usd.sd.sd_lobase = base & 0xffffff;
    589  1.19    bouyer 	usd.sd.sd_hibase = (base >> 24) & 0xff;
    590  1.19    bouyer 	usd.sd.sd_lolimit = 0xffff;
    591  1.19    bouyer 	usd.sd.sd_hilimit = 0xf;
    592  1.19    bouyer 	usd.sd.sd_type = SDT_MEMRWA;
    593  1.19    bouyer 	usd.sd.sd_dpl = SEL_UPL;
    594  1.19    bouyer 	usd.sd.sd_p = 1;
    595  1.19    bouyer 	usd.sd.sd_def32 = 1;
    596  1.19    bouyer 	usd.sd.sd_gran = 1;
    597   1.6        ad 
    598  1.24       chs 	pcb = lwp_getpcb(l);
    599  1.13        ad 	kpreempt_disable();
    600   1.6        ad 	if (which == 'f') {
    601  1.19    bouyer 		memcpy(&pcb->pcb_fsd, &usd.sd,
    602  1.19    bouyer 		    sizeof(struct segment_descriptor));
    603  1.18        ad 		if (l == curlwp) {
    604  1.19    bouyer 			update_descriptor(&curcpu()->ci_gdt[GUFS_SEL], &usd);
    605  1.18        ad 		}
    606  1.24       chs 		tf->tf_fs = GSEL(GUFS_SEL, SEL_UPL);
    607   1.6        ad 	} else /* which == 'g' */ {
    608  1.19    bouyer 		memcpy(&pcb->pcb_gsd, &usd.sd,
    609  1.19    bouyer 		    sizeof(struct segment_descriptor));
    610  1.18        ad 		if (l == curlwp) {
    611  1.19    bouyer 			update_descriptor(&curcpu()->ci_gdt[GUGS_SEL], &usd);
    612  1.42      maxv #if defined(__x86_64__) && defined(XEN)
    613  1.24       chs 			setusergs(GSEL(GUGS_SEL, SEL_UPL));
    614  1.24       chs #endif
    615  1.18        ad 		}
    616  1.24       chs 		tf->tf_gs = GSEL(GUGS_SEL, SEL_UPL);
    617   1.6        ad 	}
    618  1.13        ad 	kpreempt_enable();
    619  1.24       chs 	return 0;
    620  1.24       chs }
    621   1.5        ad 
    622  1.24       chs int
    623  1.24       chs x86_set_sdbase(void *arg, char which, lwp_t *l, bool direct)
    624  1.24       chs {
    625  1.24       chs #ifdef i386
    626  1.24       chs 	return x86_set_sdbase32(arg, which, l, direct);
    627   1.5        ad #else
    628  1.24       chs 	struct pcb *pcb;
    629  1.24       chs 	vaddr_t base;
    630  1.24       chs 
    631  1.24       chs 	if (l->l_proc->p_flag & PK_32) {
    632  1.24       chs 		return x86_set_sdbase32(arg, which, l, direct);
    633  1.24       chs 	}
    634  1.24       chs 
    635  1.24       chs 	if (direct) {
    636  1.24       chs 		base = (vaddr_t)arg;
    637  1.24       chs 	} else {
    638  1.29  christos 		int error = copyin(arg, &base, sizeof(base));
    639  1.24       chs 		if (error != 0)
    640  1.24       chs 			return error;
    641  1.24       chs 	}
    642  1.24       chs 
    643  1.24       chs 	if (base >= VM_MAXUSER_ADDRESS)
    644  1.24       chs 		return EINVAL;
    645  1.24       chs 
    646  1.24       chs 	pcb = lwp_getpcb(l);
    647  1.24       chs 
    648  1.24       chs 	kpreempt_disable();
    649  1.24       chs 	switch(which) {
    650  1.24       chs 	case 'f':
    651  1.24       chs 		pcb->pcb_fs = base;
    652  1.24       chs 		if (l == curlwp)
    653  1.24       chs 			wrmsr(MSR_FSBASE, pcb->pcb_fs);
    654  1.24       chs 		break;
    655  1.24       chs 	case 'g':
    656  1.24       chs 		pcb->pcb_gs = base;
    657  1.24       chs 		if (l == curlwp)
    658  1.24       chs 			wrmsr(MSR_KERNELGSBASE, pcb->pcb_gs);
    659  1.24       chs 		break;
    660  1.24       chs 	default:
    661  1.28  dholland 		panic("x86_set_sdbase");
    662  1.24       chs 	}
    663  1.24       chs 	kpreempt_enable();
    664  1.24       chs 
    665  1.29  christos 	return 0;
    666   1.5        ad #endif
    667   1.5        ad }
    668   1.5        ad 
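/*
 * Illustrative sketch, not part of the kernel: X86_SET_FSBASE and
 * X86_SET_GSBASE take a pointer to the new base (uint32_t for 32-bit
 * processes, a full vaddr_t below VM_MAXUSER_ADDRESS otherwise, as
 * copied in above), and the matching GET calls copy the current base
 * back out.  Typical use is pointing %fs or %gs at a thread-local
 * block; "struct tcb" is a hypothetical stand-in here.
 *
 *	static struct tcb {
 *		void *self;
 *	} tcb;
 *
 *	void
 *	point_fs_at_tcb(void)		// native amd64
 *	{
 *		uint64_t base;
 *
 *		tcb.self = &tcb;
 *		base = (uint64_t)&tcb;
 *		if (sysarch(X86_SET_FSBASE, &base) == -1)
 *			err(EXIT_FAILURE, "X86_SET_FSBASE");
 *
 *		if (sysarch(X86_GET_FSBASE, &base) == -1)
 *			err(EXIT_FAILURE, "X86_GET_FSBASE");
 *	}
 */
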
    669   1.5        ad int
    670  1.24       chs x86_get_sdbase32(void *arg, char which)
    671   1.5        ad {
    672   1.5        ad 	struct segment_descriptor *sd;
    673  1.24       chs 	uint32_t base;
    674   1.5        ad 
    675   1.5        ad 	switch (which) {
    676   1.5        ad 	case 'f':
    677  1.24       chs 		sd = (void *)&curpcb->pcb_fsd;
    678   1.5        ad 		break;
    679   1.5        ad 	case 'g':
    680  1.24       chs 		sd = (void *)&curpcb->pcb_gsd;
    681   1.5        ad 		break;
    682   1.5        ad 	default:
    683  1.28  dholland 		panic("x86_get_sdbase32");
    684   1.5        ad 	}
    685   1.5        ad 
    686   1.5        ad 	base = sd->sd_hibase << 24 | sd->sd_lobase;
    687  1.21      yamt 	return copyout(&base, arg, sizeof(base));
    688  1.24       chs }
    689  1.24       chs 
    690  1.24       chs int
    691  1.24       chs x86_get_sdbase(void *arg, char which)
    692  1.24       chs {
    693  1.24       chs #ifdef i386
    694  1.24       chs 	return x86_get_sdbase32(arg, which);
    695   1.5        ad #else
    696  1.24       chs 	vaddr_t base;
    697  1.24       chs 	struct pcb *pcb;
    698  1.24       chs 
    699  1.24       chs 	if (curproc->p_flag & PK_32) {
    700  1.24       chs 		return x86_get_sdbase32(arg, which);
    701  1.24       chs 	}
    702  1.24       chs 
    703  1.24       chs 	pcb = lwp_getpcb(curlwp);
    704  1.24       chs 
    705  1.24       chs 	switch(which) {
    706  1.24       chs 	case 'f':
    707  1.24       chs 		base = pcb->pcb_fs;
    708  1.24       chs 		break;
    709  1.24       chs 	case 'g':
    710  1.24       chs 		base = pcb->pcb_gs;
    711  1.24       chs 		break;
    712  1.24       chs 	default:
    713  1.24       chs 		panic("x86_get_sdbase");
    714  1.24       chs 	}
    715  1.24       chs 
    716  1.24       chs 	return copyout(&base, arg, sizeof(base));
    717   1.5        ad #endif
    718   1.5        ad }
    719   1.5        ad 
    720   1.5        ad int
    721   1.8       dsl sys_sysarch(struct lwp *l, const struct sys_sysarch_args *uap, register_t *retval)
    722   1.1        ad {
    723   1.8       dsl 	/* {
    724   1.1        ad 		syscallarg(int) op;
    725   1.1        ad 		syscallarg(void *) parms;
    726   1.8       dsl 	} */
    727   1.1        ad 	int error = 0;
    728   1.1        ad 
    729   1.1        ad 	switch(SCARG(uap, op)) {
    730   1.1        ad 	case X86_IOPL:
    731   1.1        ad 		error = x86_iopl(l, SCARG(uap, parms), retval);
    732   1.1        ad 		break;
    733   1.1        ad 
    734  1.32      maxv #ifdef i386
    735  1.32      maxv 	/*
    736  1.32      maxv 	 * On amd64, this is done via netbsd32_sysarch.
    737  1.32      maxv 	 */
    738   1.1        ad 	case X86_GET_LDT:
    739   1.1        ad 		error = x86_get_ldt(l, SCARG(uap, parms), retval);
    740   1.1        ad 		break;
    741   1.1        ad 
    742   1.1        ad 	case X86_SET_LDT:
    743   1.1        ad 		error = x86_set_ldt(l, SCARG(uap, parms), retval);
    744   1.1        ad 		break;
    745  1.32      maxv #endif
    746   1.1        ad 
    747   1.1        ad 	case X86_GET_IOPERM:
    748   1.1        ad 		error = x86_get_ioperm(l, SCARG(uap, parms), retval);
    749   1.1        ad 		break;
    750   1.1        ad 
    751   1.1        ad 	case X86_SET_IOPERM:
    752   1.1        ad 		error = x86_set_ioperm(l, SCARG(uap, parms), retval);
    753   1.1        ad 		break;
    754   1.1        ad 
    755   1.1        ad 	case X86_GET_MTRR:
    756   1.1        ad 		error = x86_get_mtrr(l, SCARG(uap, parms), retval);
    757   1.1        ad 		break;
    758   1.1        ad 	case X86_SET_MTRR:
    759   1.1        ad 		error = x86_set_mtrr(l, SCARG(uap, parms), retval);
    760   1.1        ad 		break;
    761   1.1        ad 
    762   1.5        ad 	case X86_SET_FSBASE:
    763  1.18        ad 		error = x86_set_sdbase(SCARG(uap, parms), 'f', curlwp, false);
    764   1.5        ad 		break;
    765   1.5        ad 
    766   1.5        ad 	case X86_SET_GSBASE:
    767  1.18        ad 		error = x86_set_sdbase(SCARG(uap, parms), 'g', curlwp, false);
    768   1.5        ad 		break;
    769   1.5        ad 
    770   1.5        ad 	case X86_GET_FSBASE:
    771   1.5        ad 		error = x86_get_sdbase(SCARG(uap, parms), 'f');
    772   1.5        ad 		break;
    773   1.5        ad 
    774   1.5        ad 	case X86_GET_GSBASE:
    775   1.5        ad 		error = x86_get_sdbase(SCARG(uap, parms), 'g');
    776   1.5        ad 		break;
    777   1.5        ad 
    778   1.1        ad 	default:
    779   1.1        ad 		error = EINVAL;
    780   1.1        ad 		break;
    781   1.1        ad 	}
    782  1.38      maxv 	return error;
    783   1.1        ad }
    784  1.18        ad 
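/*
 * Machine-dependent hook for setting an lwp's private (TLS) pointer:
 * native 64-bit processes get it as their %fs base, 32-bit processes
 * as their %gs base, via the direct (non-copyin) form of
 * x86_set_sdbase().
 */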
    785  1.18        ad int
    786  1.18        ad cpu_lwp_setprivate(lwp_t *l, void *addr)
    787  1.18        ad {
    788  1.18        ad 
    789  1.24       chs #ifdef __x86_64__
    790  1.24       chs 	if ((l->l_proc->p_flag & PK_32) == 0) {
    791  1.24       chs 		return x86_set_sdbase(addr, 'f', l, true);
    792  1.24       chs 	}
    793  1.24       chs #endif
    794  1.18        ad 	return x86_set_sdbase(addr, 'g', l, true);
    795  1.18        ad }
    796