      1  1.31      maxv /*	$NetBSD: sys_machdep.c,v 1.32 2017/02/14 09:11:05 maxv Exp $	*/
      2   1.1        ad 
      3   1.1        ad /*-
      4  1.17        ad  * Copyright (c) 1998, 2007, 2009 The NetBSD Foundation, Inc.
      5   1.1        ad  * All rights reserved.
      6   1.1        ad  *
      7   1.1        ad  * This code is derived from software contributed to The NetBSD Foundation
      8   1.1        ad  * by Charles M. Hannum, and by Andrew Doran.
      9   1.1        ad  *
     10   1.1        ad  * Redistribution and use in source and binary forms, with or without
     11   1.1        ad  * modification, are permitted provided that the following conditions
     12   1.1        ad  * are met:
     13   1.1        ad  * 1. Redistributions of source code must retain the above copyright
     14   1.1        ad  *    notice, this list of conditions and the following disclaimer.
     15   1.1        ad  * 2. Redistributions in binary form must reproduce the above copyright
     16   1.1        ad  *    notice, this list of conditions and the following disclaimer in the
     17   1.1        ad  *    documentation and/or other materials provided with the distribution.
     18   1.1        ad  *
     19   1.1        ad  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20   1.1        ad  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21   1.1        ad  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22   1.1        ad  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23   1.1        ad  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24   1.1        ad  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25   1.1        ad  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26   1.1        ad  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27   1.1        ad  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28   1.1        ad  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29   1.1        ad  * POSSIBILITY OF SUCH DAMAGE.
     30   1.1        ad  */
     31   1.1        ad 
     32   1.1        ad #include <sys/cdefs.h>
     33  1.31      maxv __KERNEL_RCSID(0, "$NetBSD: sys_machdep.c,v 1.32 2017/02/14 09:11:05 maxv Exp $");
     34   1.1        ad 
     35   1.1        ad #include "opt_mtrr.h"
     36   1.1        ad #include "opt_perfctrs.h"
     37   1.1        ad #include "opt_user_ldt.h"
     38  1.27  christos #include "opt_compat_netbsd.h"
     39  1.26       dsl #ifdef i386
     40   1.1        ad #include "opt_vm86.h"
     41  1.26       dsl #endif
     42   1.1        ad #include "opt_xen.h"
     43   1.1        ad 
     44   1.1        ad #include <sys/param.h>
     45   1.1        ad #include <sys/systm.h>
     46   1.1        ad #include <sys/ioctl.h>
     47   1.1        ad #include <sys/file.h>
     48   1.1        ad #include <sys/time.h>
     49   1.1        ad #include <sys/proc.h>
     50   1.1        ad #include <sys/uio.h>
     51   1.1        ad #include <sys/kernel.h>
     52   1.1        ad #include <sys/buf.h>
     53   1.1        ad #include <sys/signal.h>
     54   1.1        ad #include <sys/malloc.h>
     55   1.9      yamt #include <sys/kmem.h>
     56   1.1        ad #include <sys/kauth.h>
     57  1.17        ad #include <sys/cpu.h>
     58   1.1        ad #include <sys/mount.h>
     59   1.1        ad #include <sys/syscallargs.h>
     60   1.1        ad 
     61   1.1        ad #include <uvm/uvm_extern.h>
     62   1.1        ad 
     63   1.1        ad #include <machine/cpufunc.h>
     64   1.1        ad #include <machine/gdt.h>
     65   1.1        ad #include <machine/psl.h>
     66   1.1        ad #include <machine/reg.h>
     67   1.1        ad #include <machine/sysarch.h>
     68   1.1        ad #include <machine/mtrr.h>
     69   1.1        ad 
     70   1.1        ad #ifdef __x86_64__
     71  1.32      maxv /*
     72  1.32      maxv  * The code for USER_LDT on amd64 is mostly functional, but it is still not
     73  1.32      maxv  * enabled.
     74  1.32      maxv  *
      75  1.32      maxv  * On amd64 we allow only 8-byte entries in the LDT, and we do not allow the
      76  1.32      maxv  * user to overwrite the existing entries (below LDT_SIZE). Note that USER_LDT
      77  1.32      maxv  * is used only by 32-bit applications, under compat_netbsd32.
     78  1.32      maxv  * This is theoretically enough for Wine to work.
     79  1.32      maxv  *
      80  1.32      maxv  * However, letting the segment registers point at a different location breaks
      81  1.32      maxv  * amd64's Thread Local Storage: %fs and %gs must be reloaded when returning
      82  1.32      maxv  * to userland. See the tech-kern@ archive from February 2017. A patch has
      83  1.32      maxv  * been proposed to fix that, but Wine still crashes randomly; it is not clear
      84  1.32      maxv  * whether the issues come from Wine, from netbsd32, or from the patch itself.
     85  1.32      maxv  */
     86  1.32      maxv #undef	USER_LDT
      87   1.1        ad /* PERFCTRS and IOPERM still need to be checked on amd64. */
     88   1.1        ad #undef	PERFCTRS
     89   1.1        ad #undef	IOPERM
     90   1.1        ad #else
     91   1.9      yamt #if defined(XEN)
     92   1.9      yamt #undef	IOPERM
     93   1.9      yamt #else /* defined(XEN) */
     94   1.1        ad #define	IOPERM
     95   1.9      yamt #endif /* defined(XEN) */
     96   1.1        ad #endif
     97   1.1        ad 
     98   1.1        ad #ifdef VM86
     99   1.1        ad #include <machine/vm86.h>
    100   1.1        ad #endif
    101   1.1        ad 
    102   1.1        ad #ifdef PERFCTRS
    103   1.1        ad #include <machine/pmc.h>
    104   1.1        ad #endif
    105   1.1        ad 
    106   1.1        ad extern struct vm_map *kernel_map;
    107   1.1        ad 
    108   1.1        ad int x86_get_ioperm(struct lwp *, void *, register_t *);
    109   1.1        ad int x86_set_ioperm(struct lwp *, void *, register_t *);
    110   1.1        ad int x86_get_mtrr(struct lwp *, void *, register_t *);
    111   1.1        ad int x86_set_mtrr(struct lwp *, void *, register_t *);
    112  1.24       chs int x86_set_sdbase32(void *, char, lwp_t *, bool);
    113  1.18        ad int x86_set_sdbase(void *, char, lwp_t *, bool);
    114  1.24       chs int x86_get_sdbase32(void *, char);
    115  1.18        ad int x86_get_sdbase(void *, char);
    116   1.1        ad 
    117  1.25  jakllsch #if defined(USER_LDT) && defined(LDT_DEBUG)
    118   1.1        ad static void x86_print_ldt(int, const struct segment_descriptor *);
    119   1.1        ad 
    120   1.1        ad static void
    121   1.1        ad x86_print_ldt(int i, const struct segment_descriptor *d)
    122   1.1        ad {
    123   1.1        ad 	printf("[%d] lolimit=0x%x, lobase=0x%x, type=%u, dpl=%u, p=%u, "
    124   1.1        ad 	    "hilimit=0x%x, xx=%x, def32=%u, gran=%u, hibase=0x%x\n",
    125   1.1        ad 	    i, d->sd_lolimit, d->sd_lobase, d->sd_type, d->sd_dpl, d->sd_p,
    126   1.1        ad 	    d->sd_hilimit, d->sd_xx, d->sd_def32, d->sd_gran, d->sd_hibase);
    127   1.1        ad }
    128   1.1        ad #endif
    129   1.1        ad 
    130   1.1        ad int
    131   1.1        ad x86_get_ldt(struct lwp *l, void *args, register_t *retval)
    132   1.1        ad {
    133   1.2       dsl #ifndef USER_LDT
    134   1.2       dsl 	return EINVAL;
    135   1.2       dsl #else
    136   1.2       dsl 	struct x86_get_ldt_args ua;
    137   1.2       dsl 	union descriptor *cp;
    138   1.2       dsl 	int error;
    139   1.2       dsl 
    140   1.2       dsl 	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
    141   1.2       dsl 		return error;
    142   1.2       dsl 
    143   1.2       dsl 	if (ua.num < 0 || ua.num > 8192)
    144   1.2       dsl 		return EINVAL;
    145   1.2       dsl 
    146   1.2       dsl 	cp = malloc(ua.num * sizeof(union descriptor), M_TEMP, M_WAITOK);
    147   1.2       dsl 	if (cp == NULL)
    148   1.2       dsl 		return ENOMEM;
    149   1.2       dsl 
    150   1.2       dsl 	error = x86_get_ldt1(l, &ua, cp);
    151   1.2       dsl 	*retval = ua.num;
    152   1.2       dsl 	if (error == 0)
    153   1.2       dsl 		error = copyout(cp, ua.desc, ua.num * sizeof(*cp));
    154   1.2       dsl 
    155   1.2       dsl 	free(cp, M_TEMP);
    156   1.2       dsl 	return error;
    157   1.2       dsl #endif
    158   1.2       dsl }
    159   1.2       dsl 
    160   1.2       dsl int
    161   1.2       dsl x86_get_ldt1(struct lwp *l, struct x86_get_ldt_args *ua, union descriptor *cp)
    162   1.2       dsl {
    163   1.2       dsl #ifndef USER_LDT
    164   1.2       dsl 	return EINVAL;
    165   1.2       dsl #else
    166   1.1        ad 	int error;
    167   1.1        ad 	struct proc *p = l->l_proc;
    168   1.1        ad 	pmap_t pmap = p->p_vmspace->vm_map.pmap;
    169   1.1        ad 	int nldt, num;
    170   1.2       dsl 	union descriptor *lp;
    171   1.1        ad 
    172   1.1        ad 	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_LDT_GET,
    173   1.1        ad 	    NULL, NULL, NULL, NULL);
    174   1.1        ad 	if (error)
    175   1.1        ad 		return (error);
    176   1.1        ad 
    177   1.1        ad #ifdef	LDT_DEBUG
    178   1.2       dsl 	printf("x86_get_ldt: start=%d num=%d descs=%p\n", ua->start,
    179   1.2       dsl 	    ua->num, ua->desc);
    180   1.1        ad #endif
    181   1.1        ad 
    182   1.2       dsl 	if (ua->start < 0 || ua->num < 0 || ua->start > 8192 || ua->num > 8192 ||
    183   1.2       dsl 	    ua->start + ua->num > 8192)
    184   1.1        ad 		return (EINVAL);
    185   1.1        ad 
    186  1.32      maxv #ifdef __x86_64__
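                         	/* The default entries (below LDT_SIZE) may not be read back. */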
    187  1.32      maxv 	if (ua->start * sizeof(union descriptor) < LDT_SIZE)
    188  1.32      maxv 		return EINVAL;
    189  1.32      maxv #endif
    190  1.32      maxv 
    191  1.17        ad 	mutex_enter(&cpu_lock);
    192   1.1        ad 
    193  1.17        ad 	if (pmap->pm_ldt != NULL) {
    194  1.17        ad 		nldt = pmap->pm_ldt_len / sizeof(*lp);
    195   1.1        ad 		lp = pmap->pm_ldt;
    196   1.1        ad 	} else {
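                         		/*
                         		 * No private LDT: report entries from the default
                         		 * system LDT (ldtstore).
                         		 */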
    197  1.32      maxv #ifdef __x86_64__
    198  1.32      maxv 		nldt = LDT_SIZE / sizeof(*lp);
    199  1.32      maxv #else
    200   1.1        ad 		nldt = NLDT;
    201  1.32      maxv #endif
    202  1.32      maxv 		lp = (union descriptor *)ldtstore;
    203   1.1        ad 	}
    204   1.1        ad 
    205   1.2       dsl 	if (ua->start > nldt) {
    206  1.17        ad 		mutex_exit(&cpu_lock);
    207   1.1        ad 		return (EINVAL);
    208   1.1        ad 	}
    209   1.1        ad 
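                         	/* Clamp the request to the entries actually present. */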
    210   1.2       dsl 	lp += ua->start;
    211   1.2       dsl 	num = min(ua->num, nldt - ua->start);
    212   1.2       dsl 	ua->num = num;
    213   1.1        ad #ifdef LDT_DEBUG
    214   1.1        ad 	{
    215   1.1        ad 		int i;
    216   1.1        ad 		for (i = 0; i < num; i++)
    217   1.1        ad 			x86_print_ldt(i, &lp[i].sd);
    218   1.1        ad 	}
    219   1.1        ad #endif
    220   1.1        ad 
    221   1.1        ad 	memcpy(cp, lp, num * sizeof(union descriptor));
    222  1.17        ad 	mutex_exit(&cpu_lock);
    223   1.1        ad 
    224   1.2       dsl 	return 0;
    225   1.2       dsl #endif
    226   1.2       dsl }
    227   1.2       dsl 
    228   1.2       dsl int
    229   1.2       dsl x86_set_ldt(struct lwp *l, void *args, register_t *retval)
    230   1.2       dsl {
    231   1.2       dsl #ifndef USER_LDT
    232   1.2       dsl 	return EINVAL;
    233   1.2       dsl #else
    234   1.2       dsl 	struct x86_set_ldt_args ua;
    235   1.2       dsl 	union descriptor *descv;
    236   1.2       dsl 	int error;
    237   1.2       dsl 
    238   1.2       dsl 	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
    239   1.2       dsl 		return (error);
    240   1.2       dsl 
    241   1.2       dsl 	if (ua.num < 0 || ua.num > 8192)
    242   1.2       dsl 		return EINVAL;
    243   1.2       dsl 
    244   1.2       dsl 	descv = malloc(sizeof (*descv) * ua.num, M_TEMP, M_NOWAIT);
    245   1.2       dsl 	if (descv == NULL)
    246   1.2       dsl 		return ENOMEM;
    247   1.2       dsl 
    248   1.2       dsl 	error = copyin(ua.desc, descv, sizeof (*descv) * ua.num);
    249   1.1        ad 	if (error == 0)
    250   1.2       dsl 		error = x86_set_ldt1(l, &ua, descv);
    251   1.2       dsl 	*retval = ua.start;
    252   1.1        ad 
    253   1.2       dsl 	free(descv, M_TEMP);
    254   1.2       dsl 	return error;
    255   1.1        ad #endif
    256   1.1        ad }
    257   1.1        ad 
    258   1.1        ad int
    259   1.2       dsl x86_set_ldt1(struct lwp *l, struct x86_set_ldt_args *ua,
    260   1.2       dsl     union descriptor *descv)
    261   1.1        ad {
    262   1.2       dsl #ifndef USER_LDT
    263   1.2       dsl 	return EINVAL;
    264   1.2       dsl #else
    265  1.17        ad 	int error, i, n, old_sel, new_sel;
    266   1.1        ad 	struct proc *p = l->l_proc;
    267   1.1        ad 	pmap_t pmap = p->p_vmspace->vm_map.pmap;
    268  1.17        ad 	size_t old_len, new_len;
    269  1.17        ad 	union descriptor *old_ldt, *new_ldt;
    270   1.1        ad 
    271  1.32      maxv #ifdef __x86_64__
    272  1.32      maxv 	const size_t min_ldt_size = LDT_SIZE;
    273  1.32      maxv #else
    274  1.32      maxv 	const size_t min_ldt_size = NLDT * sizeof(union descriptor);
    275  1.32      maxv #endif
    276  1.32      maxv 
    277   1.1        ad 	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_LDT_SET,
    278   1.1        ad 	    NULL, NULL, NULL, NULL);
    279   1.1        ad 	if (error)
    280   1.1        ad 		return (error);
    281   1.1        ad 
    282   1.2       dsl 	if (ua->start < 0 || ua->num < 0 || ua->start > 8192 || ua->num > 8192 ||
    283   1.2       dsl 	    ua->start + ua->num > 8192)
    284   1.1        ad 		return (EINVAL);
    285   1.1        ad 
    286  1.32      maxv #ifdef __x86_64__
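                         	/* The default entries (below LDT_SIZE) may not be overwritten. */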
    287  1.32      maxv 	if (ua->start * sizeof(union descriptor) < LDT_SIZE)
    288  1.32      maxv 		return EINVAL;
    289  1.32      maxv #endif
    290  1.32      maxv 
    291   1.1        ad 	/* Check descriptors for access violations. */
    292   1.2       dsl 	for (i = 0; i < ua->num; i++) {
    293   1.1        ad 		union descriptor *desc = &descv[i];
    294   1.1        ad 
    295   1.1        ad 		switch (desc->sd.sd_type) {
    296   1.1        ad 		case SDT_SYSNULL:
    297   1.1        ad 			desc->sd.sd_p = 0;
    298   1.1        ad 			break;
    299  1.32      maxv #ifdef __x86_64__
    300  1.32      maxv 		case SDT_SYS286CGT:
    301  1.32      maxv 		case SDT_SYS386CGT:
    302  1.32      maxv 			/* We don't allow these on amd64. */
    303  1.32      maxv 			return EACCES;
    304  1.32      maxv #else
    305   1.1        ad 		case SDT_SYS286CGT:
    306   1.1        ad 		case SDT_SYS386CGT:
    307   1.1        ad 			/*
    308   1.1        ad 			 * Only allow call gates targeting a segment
    309   1.1        ad 			 * in the LDT or a user segment in the fixed
    310   1.1        ad 			 * part of the gdt.  Segments in the LDT are
    311   1.1        ad 			 * constrained (below) to be user segments.
    312   1.1        ad 			 */
    313   1.1        ad 			if (desc->gd.gd_p != 0 &&
    314   1.1        ad 			    !ISLDT(desc->gd.gd_selector) &&
    315   1.1        ad 			    ((IDXSEL(desc->gd.gd_selector) >= NGDT) ||
    316  1.31      maxv 			     (gdtstore[IDXSEL(desc->gd.gd_selector)].sd.sd_dpl !=
    317   1.1        ad 				 SEL_UPL))) {
    318   1.2       dsl 				return EACCES;
    319   1.1        ad 			}
    320   1.1        ad 			break;
    321  1.32      maxv #endif
    322   1.1        ad 		case SDT_MEMEC:
    323   1.1        ad 		case SDT_MEMEAC:
    324   1.1        ad 		case SDT_MEMERC:
    325   1.1        ad 		case SDT_MEMERAC:
    326   1.1        ad 			/* Must be "present" if executable and conforming. */
    327   1.2       dsl 			if (desc->sd.sd_p == 0)
    328   1.2       dsl 				return EACCES;
    329   1.1        ad 			break;
    330   1.1        ad 		case SDT_MEMRO:
    331   1.1        ad 		case SDT_MEMROA:
    332   1.1        ad 		case SDT_MEMRW:
    333   1.1        ad 		case SDT_MEMRWA:
    334   1.1        ad 		case SDT_MEMROD:
    335   1.1        ad 		case SDT_MEMRODA:
    336   1.1        ad 		case SDT_MEMRWD:
    337   1.1        ad 		case SDT_MEMRWDA:
    338   1.1        ad 		case SDT_MEME:
    339   1.1        ad 		case SDT_MEMEA:
    340   1.1        ad 		case SDT_MEMER:
    341   1.1        ad 		case SDT_MEMERA:
    342   1.1        ad 			break;
    343   1.1        ad 		default:
    344   1.1        ad 			/*
    345   1.1        ad 			 * Make sure that unknown descriptor types are
    346   1.1        ad 			 * not marked present.
    347   1.1        ad 			 */
    348   1.2       dsl 			if (desc->sd.sd_p != 0)
    349   1.2       dsl 				return EACCES;
    350   1.1        ad 			break;
    351   1.1        ad 		}
    352   1.1        ad 
    353   1.1        ad 		if (desc->sd.sd_p != 0) {
    354   1.1        ad 			/* Only user (ring-3) descriptors may be present. */
    355   1.2       dsl 			if (desc->sd.sd_dpl != SEL_UPL)
    356   1.2       dsl 				return EACCES;
    357   1.1        ad 		}
    358   1.1        ad 	}
    359   1.1        ad 
    360  1.17        ad 	/*
    361  1.17        ad 	 * Install selected changes.  We perform a copy, write, swap dance
    362  1.17        ad 	 * here to ensure that all updates happen atomically.
    363  1.17        ad 	 */
    364  1.17        ad 
    365  1.17        ad 	/* Allocate a new LDT. */
    366  1.17        ad 	for (;;) {
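                         		/*
                         		 * The buffer is allocated with cpu_lock released, so the
                         		 * pmap's LDT may grow in the meantime; if the buffer then
                         		 * turns out to be too small, free it and retry.
                         		 */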
    367  1.17        ad 		new_len = (ua->start + ua->num) * sizeof(union descriptor);
    368  1.17        ad 		new_len = max(new_len, pmap->pm_ldt_len);
    369  1.32      maxv 		new_len = max(new_len, min_ldt_size);
    370  1.17        ad 		new_len = round_page(new_len);
    371   1.1        ad 		new_ldt = (union descriptor *)uvm_km_alloc(kernel_map,
    372  1.30  dholland 		    new_len, 0, UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_WAITVA);
    373  1.17        ad 		mutex_enter(&cpu_lock);
    374  1.17        ad 		if (pmap->pm_ldt_len <= new_len) {
    375  1.17        ad 			break;
    376   1.1        ad 		}
    377  1.17        ad 		mutex_exit(&cpu_lock);
    378  1.17        ad 		uvm_km_free(kernel_map, (vaddr_t)new_ldt, new_len,
    379  1.17        ad 		    UVM_KMF_WIRED);
    380  1.17        ad 	}
    381   1.1        ad 
    382  1.17        ad 	/* Copy existing entries, if any. */
    383  1.17        ad 	if (pmap->pm_ldt != NULL) {
    384   1.1        ad 		old_ldt = pmap->pm_ldt;
    385  1.17        ad 		old_len = pmap->pm_ldt_len;
    386  1.17        ad 		old_sel = pmap->pm_ldt_sel;
    387   1.1        ad 		memcpy(new_ldt, old_ldt, old_len);
    388  1.17        ad 	} else {
    389  1.17        ad 		old_ldt = NULL;
    390  1.17        ad 		old_len = 0;
    391  1.17        ad 		old_sel = -1;
    392  1.32      maxv 		memcpy(new_ldt, ldtstore, min_ldt_size);
    393  1.17        ad 	}
    394   1.1        ad 
    395  1.17        ad 	/* Apply requested changes. */
    396  1.17        ad 	for (i = 0, n = ua->start; i < ua->num; i++, n++) {
    397  1.17        ad 		new_ldt[n] = descv[i];
    398  1.17        ad 	}
    399   1.1        ad 
    400  1.17        ad 	/* Allocate LDT selector. */
    401  1.17        ad 	new_sel = ldt_alloc(new_ldt, new_len);
    402  1.17        ad 	if (new_sel == -1) {
    403  1.17        ad 		mutex_exit(&cpu_lock);
    404   1.1        ad 		uvm_km_free(kernel_map, (vaddr_t)new_ldt, new_len,
    405   1.1        ad 		    UVM_KMF_WIRED);
    406  1.17        ad 		return ENOMEM;
    407  1.17        ad 	}
    408  1.17        ad 
    409  1.17        ad 	/* All changes are now globally visible.  Swap in the new LDT. */
    410  1.17        ad 	pmap->pm_ldt_len = new_len;
    411  1.17        ad 	pmap->pm_ldt_sel = new_sel;
    412  1.30  dholland 	/* membar_store_store for pmap_fork() to read these unlocked safely */
    413  1.30  dholland 	membar_producer();
    414  1.30  dholland 	pmap->pm_ldt = new_ldt;
    415  1.17        ad 
    416  1.17        ad 	/* Switch existing users onto new LDT. */
    417  1.17        ad 	pmap_ldt_sync(pmap);
    418  1.17        ad 
    419  1.17        ad 	/* Free existing LDT (if any). */
    420  1.17        ad 	if (old_ldt != NULL) {
    421  1.17        ad 		ldt_free(old_sel);
    422  1.30  dholland 		/* exit the mutex before free */
    423  1.30  dholland 		mutex_exit(&cpu_lock);
    424  1.17        ad 		uvm_km_free(kernel_map, (vaddr_t)old_ldt, old_len,
    425   1.1        ad 		    UVM_KMF_WIRED);
    426  1.30  dholland 	} else {
    427  1.30  dholland 		mutex_exit(&cpu_lock);
    428  1.17        ad 	}
    429   1.2       dsl 
    430  1.17        ad 	return error;
    431   1.1        ad #endif
    432   1.1        ad }
    433   1.1        ad 
    434   1.1        ad int
    435   1.1        ad x86_iopl(struct lwp *l, void *args, register_t *retval)
    436   1.1        ad {
    437   1.1        ad 	int error;
    438   1.1        ad 	struct x86_iopl_args ua;
    439   1.1        ad #ifdef XEN
    440   1.9      yamt 	int iopl;
    441   1.1        ad #else
    442   1.1        ad 	struct trapframe *tf = l->l_md.md_regs;
    443   1.1        ad #endif
    444   1.1        ad 
    445   1.1        ad 	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPL,
    446   1.1        ad 	    NULL, NULL, NULL, NULL);
    447   1.1        ad 	if (error)
    448   1.1        ad 		return (error);
    449   1.1        ad 
    450   1.1        ad 	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
    451   1.1        ad 		return error;
    452   1.1        ad 
    453   1.1        ad #ifdef XEN
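                         	/*
                         	 * Under Xen the saved eflags do not control IOPL; remember the
                         	 * requested level in the pcb and ask the hypervisor to apply it.
                         	 */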
    454   1.9      yamt 	if (ua.iopl)
    455   1.9      yamt 		iopl = SEL_UPL;
    456   1.9      yamt 	else
    457   1.9      yamt 		iopl = SEL_KPL;
    458  1.22     rmind 
    459  1.22     rmind     {
    460  1.22     rmind 	struct physdev_op physop;
    461  1.22     rmind 	struct pcb *pcb;
    462  1.22     rmind 
    463  1.22     rmind 	pcb = lwp_getpcb(l);
    464  1.22     rmind 	pcb->pcb_iopl = iopl;
    465  1.22     rmind 
    466   1.1        ad 	/* Force the change at ring 0. */
    467  1.22     rmind 	physop.cmd = PHYSDEVOP_SET_IOPL;
    468  1.22     rmind 	physop.u.set_iopl.iopl = iopl;
    469  1.22     rmind 	HYPERVISOR_physdev_op(&physop);
    470  1.22     rmind     }
    471   1.1        ad #elif defined(__x86_64__)
    472   1.1        ad 	if (ua.iopl)
    473   1.1        ad 		tf->tf_rflags |= PSL_IOPL;
    474   1.1        ad 	else
    475   1.1        ad 		tf->tf_rflags &= ~PSL_IOPL;
    476   1.1        ad #else
    477   1.1        ad 	if (ua.iopl)
    478   1.1        ad 		tf->tf_eflags |= PSL_IOPL;
    479   1.1        ad 	else
    480   1.1        ad 		tf->tf_eflags &= ~PSL_IOPL;
    481   1.1        ad #endif
    482   1.1        ad 
    483   1.1        ad 	return 0;
    484   1.1        ad }
    485   1.1        ad 
    486   1.1        ad int
    487   1.1        ad x86_get_ioperm(struct lwp *l, void *args, register_t *retval)
    488   1.1        ad {
    489   1.1        ad #ifdef IOPERM
    490   1.1        ad 	int error;
    491  1.22     rmind 	struct pcb *pcb = lwp_getpcb(l);
    492   1.1        ad 	struct x86_get_ioperm_args ua;
    493   1.9      yamt 	void *dummymap = NULL;
    494   1.9      yamt 	void *iomap;
    495   1.1        ad 
    496   1.1        ad 	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPERM_GET,
    497   1.1        ad 	    NULL, NULL, NULL, NULL);
    498   1.1        ad 	if (error)
    499   1.1        ad 		return (error);
    500   1.1        ad 
    501   1.1        ad 	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
    502   1.1        ad 		return (error);
    503   1.1        ad 
    504   1.9      yamt 	iomap = pcb->pcb_iomap;
    505   1.9      yamt 	if (iomap == NULL) {
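                         		/*
                         		 * No I/O bitmap installed for this lwp yet: return an
                         		 * all-ones map, i.e. every port denied.
                         		 */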
    506   1.9      yamt 		iomap = dummymap = kmem_alloc(IOMAPSIZE, KM_SLEEP);
    507   1.9      yamt 		memset(dummymap, 0xff, IOMAPSIZE);
    508   1.9      yamt 	}
    509   1.9      yamt 	error = copyout(iomap, ua.iomap, IOMAPSIZE);
    510   1.9      yamt 	if (dummymap != NULL) {
    511   1.9      yamt 		kmem_free(dummymap, IOMAPSIZE);
    512   1.9      yamt 	}
    513   1.9      yamt 	return error;
    514   1.1        ad #else
    515   1.1        ad 	return EINVAL;
    516   1.1        ad #endif
    517   1.1        ad }
    518   1.1        ad 
    519   1.1        ad int
    520   1.1        ad x86_set_ioperm(struct lwp *l, void *args, register_t *retval)
    521   1.1        ad {
    522   1.1        ad #ifdef IOPERM
    523   1.9      yamt 	struct cpu_info *ci;
    524   1.1        ad 	int error;
    525  1.22     rmind 	struct pcb *pcb = lwp_getpcb(l);
    526   1.1        ad 	struct x86_set_ioperm_args ua;
    527   1.9      yamt 	void *new;
    528   1.9      yamt 	void *old;
    529   1.1        ad 
     530   1.1        ad 	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPERM_SET,
    531   1.1        ad 	    NULL, NULL, NULL, NULL);
    532   1.1        ad 	if (error)
    533   1.1        ad 		return (error);
    534   1.1        ad 
    535   1.1        ad 	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
    536   1.1        ad 		return (error);
    537   1.1        ad 
    538   1.9      yamt 	new = kmem_alloc(IOMAPSIZE, KM_SLEEP);
    539   1.9      yamt 	error = copyin(ua.iomap, new, IOMAPSIZE);
    540   1.9      yamt 	if (error) {
    541   1.9      yamt 		kmem_free(new, IOMAPSIZE);
    542   1.9      yamt 		return error;
    543   1.9      yamt 	}
    544   1.9      yamt 	old = pcb->pcb_iomap;
    545   1.9      yamt 	pcb->pcb_iomap = new;
    546   1.9      yamt 	if (old != NULL) {
    547   1.9      yamt 		kmem_free(old, IOMAPSIZE);
    548   1.9      yamt 	}
    549   1.9      yamt 
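                         	/*
                         	 * Install the new bitmap on this CPU.  The 16-bit I/O map base
                         	 * sits in the upper half of tss_iobase, hence the shift by 16.
                         	 */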
    550  1.13        ad 	kpreempt_disable();
    551   1.9      yamt 	ci = curcpu();
    552   1.9      yamt 	memcpy(ci->ci_iomap, pcb->pcb_iomap, sizeof(ci->ci_iomap));
    553   1.9      yamt 	ci->ci_tss.tss_iobase =
    554   1.9      yamt 	    ((uintptr_t)ci->ci_iomap - (uintptr_t)&ci->ci_tss) << 16;
    555  1.13        ad 	kpreempt_enable();
    556   1.9      yamt 
    557   1.9      yamt 	return error;
    558   1.1        ad #else
    559   1.1        ad 	return EINVAL;
    560   1.1        ad #endif
    561   1.1        ad }
    562   1.1        ad 
    563   1.1        ad int
    564   1.1        ad x86_get_mtrr(struct lwp *l, void *args, register_t *retval)
    565   1.1        ad {
    566   1.1        ad #ifdef MTRR
    567   1.1        ad 	struct x86_get_mtrr_args ua;
    568   1.1        ad 	int error, n;
    569   1.1        ad 
    570   1.1        ad 	if (mtrr_funcs == NULL)
    571   1.1        ad 		return ENOSYS;
    572   1.1        ad 
     573   1.1        ad 	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_GET,
    574   1.1        ad 	    NULL, NULL, NULL, NULL);
    575   1.1        ad 	if (error)
    576   1.1        ad 		return (error);
    577   1.1        ad 
    578   1.1        ad 	error = copyin(args, &ua, sizeof ua);
    579   1.1        ad 	if (error != 0)
    580   1.1        ad 		return error;
    581   1.1        ad 
    582   1.1        ad 	error = copyin(ua.n, &n, sizeof n);
    583   1.1        ad 	if (error != 0)
    584   1.1        ad 		return error;
    585   1.1        ad 
    586  1.12        ad 	KERNEL_LOCK(1, NULL);
    587   1.1        ad 	error = mtrr_get(ua.mtrrp, &n, l->l_proc, MTRR_GETSET_USER);
    588  1.12        ad 	KERNEL_UNLOCK_ONE(NULL);
    589   1.1        ad 
    590   1.1        ad 	copyout(&n, ua.n, sizeof (int));
    591   1.1        ad 
    592   1.1        ad 	return error;
    593   1.1        ad #else
    594   1.1        ad 	return EINVAL;
    595   1.1        ad #endif
    596   1.1        ad }
    597   1.1        ad 
    598   1.1        ad int
    599   1.1        ad x86_set_mtrr(struct lwp *l, void *args, register_t *retval)
    600   1.1        ad {
    601   1.1        ad #ifdef MTRR
    602   1.1        ad 	int error, n;
    603   1.1        ad 	struct x86_set_mtrr_args ua;
    604   1.1        ad 
    605   1.1        ad 	if (mtrr_funcs == NULL)
    606   1.1        ad 		return ENOSYS;
    607   1.1        ad 
     608   1.1        ad 	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_SET,
    609   1.1        ad 	    NULL, NULL, NULL, NULL);
    610   1.1        ad 	if (error)
    611   1.1        ad 		return (error);
    612   1.1        ad 
    613   1.1        ad 	error = copyin(args, &ua, sizeof ua);
    614   1.1        ad 	if (error != 0)
    615   1.1        ad 		return error;
    616   1.1        ad 
    617   1.1        ad 	error = copyin(ua.n, &n, sizeof n);
    618   1.1        ad 	if (error != 0)
    619   1.1        ad 		return error;
    620   1.1        ad 
    621  1.12        ad 	KERNEL_LOCK(1, NULL);
    622   1.1        ad 	error = mtrr_set(ua.mtrrp, &n, l->l_proc, MTRR_GETSET_USER);
    623   1.1        ad 	if (n != 0)
    624   1.1        ad 		mtrr_commit();
    625  1.12        ad 	KERNEL_UNLOCK_ONE(NULL);
    626   1.1        ad 
    627   1.1        ad 	copyout(&n, ua.n, sizeof n);
    628   1.1        ad 
    629   1.1        ad 	return error;
    630   1.1        ad #else
    631   1.1        ad 	return EINVAL;
    632   1.1        ad #endif
    633   1.1        ad }
    634   1.1        ad 
    635  1.24       chs #ifdef __x86_64__
    636  1.24       chs #define pcb_fsd pcb_fs
    637  1.24       chs #define pcb_gsd pcb_gs
    638  1.24       chs #define segment_descriptor mem_segment_descriptor
    639  1.24       chs #endif
    640  1.24       chs 
    641   1.1        ad int
    642  1.24       chs x86_set_sdbase32(void *arg, char which, lwp_t *l, bool direct)
    643   1.5        ad {
    644  1.24       chs 	struct trapframe *tf = l->l_md.md_regs;
    645  1.24       chs 	union descriptor usd;
    646  1.18        ad 	struct pcb *pcb;
    647  1.24       chs 	uint32_t base;
    648   1.6        ad 	int error;
    649   1.5        ad 
    650  1.18        ad 	if (direct) {
    651  1.18        ad 		base = (vaddr_t)arg;
    652  1.18        ad 	} else {
    653  1.18        ad 		error = copyin(arg, &base, sizeof(base));
    654  1.18        ad 		if (error != 0)
    655  1.18        ad 			return error;
    656  1.18        ad 	}
    657   1.5        ad 
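                         	/*
                         	 * Build a flat 4GB, ring-3, read/write data segment with the
                         	 * requested base (limit 0xfffff, 4KB granularity).
                         	 */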
    658  1.24       chs 	memset(&usd, 0, sizeof(usd));
    659  1.19    bouyer 	usd.sd.sd_lobase = base & 0xffffff;
    660  1.19    bouyer 	usd.sd.sd_hibase = (base >> 24) & 0xff;
    661  1.19    bouyer 	usd.sd.sd_lolimit = 0xffff;
    662  1.19    bouyer 	usd.sd.sd_hilimit = 0xf;
    663  1.19    bouyer 	usd.sd.sd_type = SDT_MEMRWA;
    664  1.19    bouyer 	usd.sd.sd_dpl = SEL_UPL;
    665  1.19    bouyer 	usd.sd.sd_p = 1;
    666  1.19    bouyer 	usd.sd.sd_def32 = 1;
    667  1.19    bouyer 	usd.sd.sd_gran = 1;
    668   1.6        ad 
    669  1.24       chs 	pcb = lwp_getpcb(l);
    670  1.13        ad 	kpreempt_disable();
    671   1.6        ad 	if (which == 'f') {
    672  1.19    bouyer 		memcpy(&pcb->pcb_fsd, &usd.sd,
    673  1.19    bouyer 		    sizeof(struct segment_descriptor));
    674  1.18        ad 		if (l == curlwp) {
    675  1.19    bouyer 			update_descriptor(&curcpu()->ci_gdt[GUFS_SEL], &usd);
    676  1.24       chs #ifdef __x86_64__
    677  1.24       chs 			setfs(GSEL(GUFS_SEL, SEL_UPL));
    678  1.24       chs #endif
    679  1.18        ad 		}
    680  1.24       chs 		tf->tf_fs = GSEL(GUFS_SEL, SEL_UPL);
    681   1.6        ad 	} else /* which == 'g' */ {
    682  1.19    bouyer 		memcpy(&pcb->pcb_gsd, &usd.sd,
    683  1.19    bouyer 		    sizeof(struct segment_descriptor));
    684  1.18        ad 		if (l == curlwp) {
    685  1.19    bouyer 			update_descriptor(&curcpu()->ci_gdt[GUGS_SEL], &usd);
    686  1.24       chs #ifdef __x86_64__
    687  1.24       chs #ifndef XEN
    688  1.24       chs 			setusergs(GSEL(GUGS_SEL, SEL_UPL));
    689  1.24       chs #else
    690  1.24       chs 			HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL,
    691  1.24       chs 						    GSEL(GUGS_SEL, SEL_UPL));
    692  1.24       chs #endif
    693  1.24       chs #endif
    694  1.18        ad 		}
    695  1.24       chs 		tf->tf_gs = GSEL(GUGS_SEL, SEL_UPL);
    696   1.6        ad 	}
    697  1.13        ad 	kpreempt_enable();
    698  1.24       chs 	return 0;
    699  1.24       chs }
    700   1.5        ad 
    701  1.24       chs int
    702  1.24       chs x86_set_sdbase(void *arg, char which, lwp_t *l, bool direct)
    703  1.24       chs {
    704  1.24       chs #ifdef i386
    705  1.24       chs 	return x86_set_sdbase32(arg, which, l, direct);
    706   1.5        ad #else
    707  1.24       chs 	struct pcb *pcb;
    708  1.24       chs 	vaddr_t base;
    709  1.24       chs 
    710  1.24       chs 	if (l->l_proc->p_flag & PK_32) {
    711  1.24       chs 		return x86_set_sdbase32(arg, which, l, direct);
    712  1.24       chs 	}
    713  1.24       chs 
    714  1.24       chs 	if (direct) {
    715  1.24       chs 		base = (vaddr_t)arg;
    716  1.24       chs 	} else {
    717  1.29  christos 		int error = copyin(arg, &base, sizeof(base));
    718  1.24       chs 		if (error != 0)
    719  1.24       chs 			return error;
    720  1.24       chs 	}
    721  1.24       chs 
    722  1.24       chs 	if (base >= VM_MAXUSER_ADDRESS)
    723  1.24       chs 		return EINVAL;
    724  1.24       chs 
    725  1.24       chs 	pcb = lwp_getpcb(l);
    726  1.24       chs 
    727  1.24       chs 	kpreempt_disable();
    728  1.24       chs 	switch(which) {
    729  1.24       chs 	case 'f':
    730  1.24       chs 		pcb->pcb_fs = base;
    731  1.24       chs 		if (l == curlwp)
    732  1.24       chs 			wrmsr(MSR_FSBASE, pcb->pcb_fs);
    733  1.24       chs 		break;
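                         	/*
                         	 * While in the kernel the user %gs base is kept in
                         	 * MSR_KERNELGSBASE; swapgs on return to userland makes it
                         	 * the active GS base.
                         	 */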
    734  1.24       chs 	case 'g':
    735  1.24       chs 		pcb->pcb_gs = base;
    736  1.24       chs 		if (l == curlwp)
    737  1.24       chs 			wrmsr(MSR_KERNELGSBASE, pcb->pcb_gs);
    738  1.24       chs 		break;
    739  1.24       chs 	default:
    740  1.28  dholland 		panic("x86_set_sdbase");
    741  1.24       chs 	}
    742  1.24       chs 	kpreempt_enable();
    743  1.24       chs 
    744  1.29  christos 	return 0;
    745   1.5        ad #endif
    746   1.5        ad }
    747   1.5        ad 
    748   1.5        ad int
    749  1.24       chs x86_get_sdbase32(void *arg, char which)
    750   1.5        ad {
    751   1.5        ad 	struct segment_descriptor *sd;
    752  1.24       chs 	uint32_t base;
    753   1.5        ad 
    754   1.5        ad 	switch (which) {
    755   1.5        ad 	case 'f':
    756  1.24       chs 		sd = (void *)&curpcb->pcb_fsd;
    757   1.5        ad 		break;
    758   1.5        ad 	case 'g':
    759  1.24       chs 		sd = (void *)&curpcb->pcb_gsd;
    760   1.5        ad 		break;
    761   1.5        ad 	default:
    762  1.28  dholland 		panic("x86_get_sdbase32");
    763   1.5        ad 	}
    764   1.5        ad 
    765   1.5        ad 	base = sd->sd_hibase << 24 | sd->sd_lobase;
    766  1.21      yamt 	return copyout(&base, arg, sizeof(base));
    767  1.24       chs }
    768  1.24       chs 
    769  1.24       chs int
    770  1.24       chs x86_get_sdbase(void *arg, char which)
    771  1.24       chs {
    772  1.24       chs #ifdef i386
    773  1.24       chs 	return x86_get_sdbase32(arg, which);
    774   1.5        ad #else
    775  1.24       chs 	vaddr_t base;
    776  1.24       chs 	struct pcb *pcb;
    777  1.24       chs 
    778  1.24       chs 	if (curproc->p_flag & PK_32) {
    779  1.24       chs 		return x86_get_sdbase32(arg, which);
    780  1.24       chs 	}
    781  1.24       chs 
    782  1.24       chs 	pcb = lwp_getpcb(curlwp);
    783  1.24       chs 
    784  1.24       chs 	switch(which) {
    785  1.24       chs 	case 'f':
    786  1.24       chs 		base = pcb->pcb_fs;
    787  1.24       chs 		break;
    788  1.24       chs 	case 'g':
    789  1.24       chs 		base = pcb->pcb_gs;
    790  1.24       chs 		break;
    791  1.24       chs 	default:
    792  1.24       chs 		panic("x86_get_sdbase");
    793  1.24       chs 	}
    794  1.24       chs 
    795  1.24       chs 	return copyout(&base, arg, sizeof(base));
    796   1.5        ad #endif
    797   1.5        ad }
    798   1.5        ad 
    799   1.5        ad int
    800   1.8       dsl sys_sysarch(struct lwp *l, const struct sys_sysarch_args *uap, register_t *retval)
    801   1.1        ad {
    802   1.8       dsl 	/* {
    803   1.1        ad 		syscallarg(int) op;
    804   1.1        ad 		syscallarg(void *) parms;
    805   1.8       dsl 	} */
    806   1.1        ad 	int error = 0;
    807   1.1        ad 
    808   1.1        ad 	switch(SCARG(uap, op)) {
    809   1.1        ad 	case X86_IOPL:
    810   1.1        ad 		error = x86_iopl(l, SCARG(uap, parms), retval);
    811   1.1        ad 		break;
    812   1.1        ad 
    813  1.32      maxv #ifdef i386
    814  1.32      maxv 	/*
    815  1.32      maxv 	 * On amd64, this is done via netbsd32_sysarch.
    816  1.32      maxv 	 */
    817   1.1        ad 	case X86_GET_LDT:
    818   1.1        ad 		error = x86_get_ldt(l, SCARG(uap, parms), retval);
    819   1.1        ad 		break;
    820   1.1        ad 
    821   1.1        ad 	case X86_SET_LDT:
    822   1.1        ad 		error = x86_set_ldt(l, SCARG(uap, parms), retval);
    823   1.1        ad 		break;
    824  1.32      maxv #endif
    825   1.1        ad 
    826   1.1        ad 	case X86_GET_IOPERM:
    827   1.1        ad 		error = x86_get_ioperm(l, SCARG(uap, parms), retval);
    828   1.1        ad 		break;
    829   1.1        ad 
    830   1.1        ad 	case X86_SET_IOPERM:
    831   1.1        ad 		error = x86_set_ioperm(l, SCARG(uap, parms), retval);
    832   1.1        ad 		break;
    833   1.1        ad 
    834   1.1        ad 	case X86_GET_MTRR:
    835   1.1        ad 		error = x86_get_mtrr(l, SCARG(uap, parms), retval);
    836   1.1        ad 		break;
    837   1.1        ad 	case X86_SET_MTRR:
    838   1.1        ad 		error = x86_set_mtrr(l, SCARG(uap, parms), retval);
    839   1.1        ad 		break;
    840   1.1        ad 
    841   1.1        ad #ifdef VM86
    842   1.1        ad 	case X86_VM86:
    843   1.1        ad 		error = x86_vm86(l, SCARG(uap, parms), retval);
    844   1.1        ad 		break;
    845   1.1        ad 	case X86_OLD_VM86:
    846   1.1        ad 		error = compat_16_x86_vm86(l, SCARG(uap, parms), retval);
    847   1.1        ad 		break;
    848   1.1        ad #endif
    849   1.1        ad 
    850   1.1        ad #ifdef PERFCTRS
    851   1.1        ad 	case X86_PMC_INFO:
    852  1.12        ad 		KERNEL_LOCK(1, NULL);
    853   1.1        ad 		error = pmc_info(l, SCARG(uap, parms), retval);
    854  1.12        ad 		KERNEL_UNLOCK_ONE(NULL);
    855   1.1        ad 		break;
    856   1.1        ad 
    857   1.1        ad 	case X86_PMC_STARTSTOP:
    858  1.12        ad 		KERNEL_LOCK(1, NULL);
    859   1.1        ad 		error = pmc_startstop(l, SCARG(uap, parms), retval);
    860  1.12        ad 		KERNEL_UNLOCK_ONE(NULL);
    861   1.1        ad 		break;
    862   1.1        ad 
    863   1.1        ad 	case X86_PMC_READ:
    864  1.12        ad 		KERNEL_LOCK(1, NULL);
    865   1.1        ad 		error = pmc_read(l, SCARG(uap, parms), retval);
    866  1.12        ad 		KERNEL_UNLOCK_ONE(NULL);
    867   1.1        ad 		break;
    868   1.1        ad #endif
    869   1.1        ad 
    870   1.5        ad 	case X86_SET_FSBASE:
    871  1.18        ad 		error = x86_set_sdbase(SCARG(uap, parms), 'f', curlwp, false);
    872   1.5        ad 		break;
    873   1.5        ad 
    874   1.5        ad 	case X86_SET_GSBASE:
    875  1.18        ad 		error = x86_set_sdbase(SCARG(uap, parms), 'g', curlwp, false);
    876   1.5        ad 		break;
    877   1.5        ad 
    878   1.5        ad 	case X86_GET_FSBASE:
    879   1.5        ad 		error = x86_get_sdbase(SCARG(uap, parms), 'f');
    880   1.5        ad 		break;
    881   1.5        ad 
    882   1.5        ad 	case X86_GET_GSBASE:
    883   1.5        ad 		error = x86_get_sdbase(SCARG(uap, parms), 'g');
    884   1.5        ad 		break;
    885   1.5        ad 
    886   1.1        ad 	default:
    887   1.1        ad 		error = EINVAL;
    888   1.1        ad 		break;
    889   1.1        ad 	}
    890   1.1        ad 	return (error);
    891   1.1        ad }
    892  1.18        ad 
    893  1.18        ad int
    894  1.18        ad cpu_lwp_setprivate(lwp_t *l, void *addr)
    895  1.18        ad {
    896  1.18        ad 
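                         	/*
                         	 * The TLS pointer: native 64-bit processes use %fs, while i386
                         	 * and 32-bit (PK_32) processes use %gs.
                         	 */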
    897  1.24       chs #ifdef __x86_64__
    898  1.24       chs 	if ((l->l_proc->p_flag & PK_32) == 0) {
    899  1.24       chs 		return x86_set_sdbase(addr, 'f', l, true);
    900  1.24       chs 	}
    901  1.24       chs #endif
    902  1.18        ad 	return x86_set_sdbase(addr, 'g', l, true);
    903  1.18        ad }