/* $NetBSD: sys_machdep.c,v 1.17 2009/03/21 14:41:30 ad Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 2007, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum, and by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: sys_machdep.c,v 1.17 2009/03/21 14:41:30 ad Exp $");
34
35 #include "opt_mtrr.h"
36 #include "opt_perfctrs.h"
37 #include "opt_user_ldt.h"
38 #include "opt_vm86.h"
39 #include "opt_xen.h"
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/ioctl.h>
44 #include <sys/file.h>
45 #include <sys/time.h>
46 #include <sys/proc.h>
47 #include <sys/user.h>
48 #include <sys/uio.h>
49 #include <sys/kernel.h>
50 #include <sys/buf.h>
51 #include <sys/signal.h>
52 #include <sys/malloc.h>
53 #include <sys/kmem.h>
54 #include <sys/kauth.h>
55 #include <sys/cpu.h>
56 #include <sys/mount.h>
57 #include <sys/syscallargs.h>
58
59 #include <uvm/uvm_extern.h>
60
61 #include <machine/cpufunc.h>
62 #include <machine/gdt.h>
63 #include <machine/psl.h>
64 #include <machine/reg.h>
65 #include <machine/sysarch.h>
66 #include <machine/mtrr.h>
67
68 #ifdef __x86_64__
69 /* Need to be checked. */
70 #undef USER_LDT
71 #undef PERFCTRS
72 #undef VM86
73 #undef IOPERM
74 #else
75 #if defined(XEN)
76 #undef IOPERM
77 #else /* defined(XEN) */
78 #define IOPERM
79 #endif /* defined(XEN) */
80 #endif
81
82 #ifdef VM86
83 #include <machine/vm86.h>
84 #endif
85
86 #ifdef PERFCTRS
87 #include <machine/pmc.h>
88 #endif
89
90 extern struct vm_map *kernel_map;
91
92 int x86_get_ioperm(struct lwp *, void *, register_t *);
93 int x86_set_ioperm(struct lwp *, void *, register_t *);
94 int x86_get_mtrr(struct lwp *, void *, register_t *);
95 int x86_set_mtrr(struct lwp *, void *, register_t *);
96 int x86_set_sdbase(void *arg, char which);
97 int x86_get_sdbase(void *arg, char which);
98
99 #ifdef LDT_DEBUG
100 static void x86_print_ldt(int, const struct segment_descriptor *);
101
/*
 * Dump one LDT segment descriptor, field by field, to the console.
 * Debug helper; compiled only under LDT_DEBUG.
 */
static void
x86_print_ldt(int i, const struct segment_descriptor *d)
{
	printf("[%d] lolimit=0x%x, lobase=0x%x, type=%u, dpl=%u, p=%u, "
	    "hilimit=0x%x, xx=%x, def32=%u, gran=%u, hibase=0x%x\n",
	    i, d->sd_lolimit, d->sd_lobase, d->sd_type, d->sd_dpl, d->sd_p,
	    d->sd_hilimit, d->sd_xx, d->sd_def32, d->sd_gran, d->sd_hibase);
}
110 #endif
111
112 int
113 x86_get_ldt(struct lwp *l, void *args, register_t *retval)
114 {
115 #ifndef USER_LDT
116 return EINVAL;
117 #else
118 struct x86_get_ldt_args ua;
119 union descriptor *cp;
120 int error;
121
122 if ((error = copyin(args, &ua, sizeof(ua))) != 0)
123 return error;
124
125 if (ua.num < 0 || ua.num > 8192)
126 return EINVAL;
127
128 cp = malloc(ua.num * sizeof(union descriptor), M_TEMP, M_WAITOK);
129 if (cp == NULL)
130 return ENOMEM;
131
132 error = x86_get_ldt1(l, &ua, cp);
133 *retval = ua.num;
134 if (error == 0)
135 error = copyout(cp, ua.desc, ua.num * sizeof(*cp));
136
137 free(cp, M_TEMP);
138 return error;
139 #endif
140 }
141
/*
 * Fetch up to ua->num LDT entries starting at slot ua->start into the
 * caller-supplied kernel buffer cp.  On success ua->num is updated to
 * the number of entries actually copied.  Performs its own kauth
 * authorization check.
 */
int
x86_get_ldt1(struct lwp *l, struct x86_get_ldt_args *ua, union descriptor *cp)
{
#ifndef USER_LDT
	return EINVAL;
#else
	int error;
	struct proc *p = l->l_proc;
	pmap_t pmap = p->p_vmspace->vm_map.pmap;
	int nldt, num;
	union descriptor *lp;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_LDT_GET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

#ifdef LDT_DEBUG
	printf("x86_get_ldt: start=%d num=%d descs=%p\n", ua->start,
	    ua->num, ua->desc);
#endif

	/* Bound range, count, and their sum to the 8192-slot LDT limit. */
	if (ua->start < 0 || ua->num < 0 || ua->start > 8192 || ua->num > 8192 ||
	    ua->start + ua->num > 8192)
		return (EINVAL);

	/* cpu_lock serializes against x86_set_ldt1() swapping pm_ldt. */
	mutex_enter(&cpu_lock);

	if (pmap->pm_ldt != NULL) {
		/* Process has a private LDT. */
		nldt = pmap->pm_ldt_len / sizeof(*lp);
		lp = pmap->pm_ldt;
	} else {
		/* No private LDT: read from the system default table. */
		nldt = NLDT;
		lp = ldt;
	}

	if (ua->start > nldt) {
		mutex_exit(&cpu_lock);
		return (EINVAL);
	}

	lp += ua->start;
	/* Clip the request to the entries that actually exist. */
	num = min(ua->num, nldt - ua->start);
	ua->num = num;
#ifdef LDT_DEBUG
	{
		int i;
		for (i = 0; i < num; i++)
			x86_print_ldt(i, &lp[i].sd);
	}
#endif

	memcpy(cp, lp, num * sizeof(union descriptor));
	mutex_exit(&cpu_lock);

	return 0;
#endif
}
200
/*
 * sysarch(X86_SET_LDT): install user-supplied LDT entries.  Copies the
 * arguments and the descriptor array into kernel memory, then hands
 * off to x86_set_ldt1() for validation and installation.
 */
int
x86_set_ldt(struct lwp *l, void *args, register_t *retval)
{
#ifndef USER_LDT
	return EINVAL;
#else
	struct x86_set_ldt_args ua;
	union descriptor *descv;
	int error;

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return (error);

	/* Reject bogus counts before sizing the bounce buffer. */
	if (ua.num < 0 || ua.num > 8192)
		return EINVAL;

	/*
	 * NOTE(review): M_NOWAIT here (vs. M_WAITOK in x86_get_ldt())
	 * means a transient memory shortage surfaces as ENOMEM to the
	 * caller; confirm this asymmetry is intentional.
	 */
	descv = malloc(sizeof (*descv) * ua.num, M_TEMP, M_NOWAIT);
	if (descv == NULL)
		return ENOMEM;

	error = copyin(ua.desc, descv, sizeof (*descv) * ua.num);
	if (error == 0)
		error = x86_set_ldt1(l, &ua, descv);
	/* Report the starting slot of the requested range to userland. */
	*retval = ua.start;

	free(descv, M_TEMP);
	return error;
#endif
}
230
/*
 * Validate and install a set of user-supplied LDT entries.  User
 * visible state is changed with a copy/write/swap dance under
 * cpu_lock: a new LDT image is allocated, populated, given a selector,
 * and only then substituted for the old one, so updates appear atomic.
 * Descriptors are vetted first so userland cannot install anything
 * usable above ring 3.
 */
int
x86_set_ldt1(struct lwp *l, struct x86_set_ldt_args *ua,
    union descriptor *descv)
{
#ifndef USER_LDT
	return EINVAL;
#else
	int error, i, n, old_sel, new_sel;
	struct proc *p = l->l_proc;
	pmap_t pmap = p->p_vmspace->vm_map.pmap;
	size_t old_len, new_len;
	union descriptor *old_ldt, *new_ldt;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_LDT_SET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	/* Bound range, count, and their sum to the 8192-slot LDT limit. */
	if (ua->start < 0 || ua->num < 0 || ua->start > 8192 || ua->num > 8192 ||
	    ua->start + ua->num > 8192)
		return (EINVAL);

	/* Check descriptors for access violations. */
	for (i = 0; i < ua->num; i++) {
		union descriptor *desc = &descv[i];

		switch (desc->sd.sd_type) {
		case SDT_SYSNULL:
			/* Null descriptors are forced not-present. */
			desc->sd.sd_p = 0;
			break;
		case SDT_SYS286CGT:
		case SDT_SYS386CGT:
			/*
			 * Only allow call gates targeting a segment
			 * in the LDT or a user segment in the fixed
			 * part of the gdt.  Segments in the LDT are
			 * constrained (below) to be user segments.
			 */
			if (desc->gd.gd_p != 0 &&
			    !ISLDT(desc->gd.gd_selector) &&
			    ((IDXSEL(desc->gd.gd_selector) >= NGDT) ||
			    (gdt[IDXSEL(desc->gd.gd_selector)].sd.sd_dpl !=
			    SEL_UPL))) {
				return EACCES;
			}
			break;
		case SDT_MEMEC:
		case SDT_MEMEAC:
		case SDT_MEMERC:
		case SDT_MEMERAC:
			/* Must be "present" if executable and conforming. */
			if (desc->sd.sd_p == 0)
				return EACCES;
			break;
		case SDT_MEMRO:
		case SDT_MEMROA:
		case SDT_MEMRW:
		case SDT_MEMRWA:
		case SDT_MEMROD:
		case SDT_MEMRODA:
		case SDT_MEMRWD:
		case SDT_MEMRWDA:
		case SDT_MEME:
		case SDT_MEMEA:
		case SDT_MEMER:
		case SDT_MEMERA:
			/* Ordinary user memory segments are acceptable. */
			break;
		default:
			/*
			 * Make sure that unknown descriptor types are
			 * not marked present.
			 */
			if (desc->sd.sd_p != 0)
				return EACCES;
			break;
		}

		if (desc->sd.sd_p != 0) {
			/* Only user (ring-3) descriptors may be present. */
			if (desc->sd.sd_dpl != SEL_UPL)
				return EACCES;
		}
	}

	/*
	 * Install selected changes.  We perform a copy, write, swap dance
	 * here to ensure that all updates happen atomically.
	 */

	/*
	 * Allocate a new LDT.  Retry if another thread enlarged the
	 * LDT while we were allocating (detected under cpu_lock).
	 * NOTE(review): the uvm_km_alloc() return value is not checked
	 * for failure here -- confirm that this call cannot fail with
	 * these flags.
	 */
	for (;;) {
		new_len = (ua->start + ua->num) * sizeof(union descriptor);
		new_len = max(new_len, pmap->pm_ldt_len);
		new_len = max(new_len, NLDT * sizeof(union descriptor));
		new_len = round_page(new_len);
		new_ldt = (union descriptor *)uvm_km_alloc(kernel_map,
		    new_len, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
		mutex_enter(&cpu_lock);
		if (pmap->pm_ldt_len <= new_len) {
			break;
		}
		mutex_exit(&cpu_lock);
		uvm_km_free(kernel_map, (vaddr_t)new_ldt, new_len,
		    UVM_KMF_WIRED);
	}

	/* Copy existing entries, if any. */
	if (pmap->pm_ldt != NULL) {
		old_ldt = pmap->pm_ldt;
		old_len = pmap->pm_ldt_len;
		old_sel = pmap->pm_ldt_sel;
		memcpy(new_ldt, old_ldt, old_len);
	} else {
		/* First private LDT: seed it from the system default. */
		old_ldt = NULL;
		old_len = 0;
		old_sel = -1;
		memcpy(new_ldt, ldt, NLDT * sizeof(union descriptor));
	}

	/* Apply requested changes. */
	for (i = 0, n = ua->start; i < ua->num; i++, n++) {
		new_ldt[n] = descv[i];
	}

	/* Allocate LDT selector. */
	new_sel = ldt_alloc(new_ldt, new_len);
	if (new_sel == -1) {
		mutex_exit(&cpu_lock);
		uvm_km_free(kernel_map, (vaddr_t)new_ldt, new_len,
		    UVM_KMF_WIRED);
		return ENOMEM;
	}

	/* All changes are now globally visible.  Swap in the new LDT. */
	pmap->pm_ldt = new_ldt;
	pmap->pm_ldt_len = new_len;
	pmap->pm_ldt_sel = new_sel;

	/* Switch existing users onto new LDT. */
	pmap_ldt_sync(pmap);

	/* Free existing LDT (if any). */
	if (old_ldt != NULL) {
		ldt_free(old_sel);
		uvm_km_free(kernel_map, (vaddr_t)old_ldt, old_len,
		    UVM_KMF_WIRED);
	}
	mutex_exit(&cpu_lock);

	return error;
#endif
}
383
/*
 * sysarch(X86_IOPL): raise or lower the I/O privilege level of the
 * calling LWP.  Requires KAUTH_MACHDEP_IOPL authorization.  On native
 * x86 this toggles PSL_IOPL in the saved eflags/rflags; under Xen the
 * setting is recorded in the PCB and pushed to the hypervisor.
 */
int
x86_iopl(struct lwp *l, void *args, register_t *retval)
{
	int error;
	struct x86_iopl_args ua;
#ifdef XEN
	int iopl;
#else
	struct trapframe *tf = l->l_md.md_regs;
#endif

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPL,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return error;

#ifdef XEN
	/* Nonzero means "ring 3 may touch I/O ports". */
	if (ua.iopl)
		iopl = SEL_UPL;
	else
		iopl = SEL_KPL;
	l->l_addr->u_pcb.pcb_iopl = iopl;
	/* Force the change at ring 0. */
#ifdef XEN3
	{
		struct physdev_op physop;
		physop.cmd = PHYSDEVOP_SET_IOPL;
		physop.u.set_iopl.iopl = iopl;
		HYPERVISOR_physdev_op(&physop);
	}
#else /* XEN3 */
	{
		dom0_op_t op;
		op.cmd = DOM0_IOPL;
		op.u.iopl.domain = DOMID_SELF;
		op.u.iopl.iopl = iopl;
		HYPERVISOR_dom0_op(&op);
	}
#endif /* XEN3 */
#elif defined(__x86_64__)
	/* Native amd64: flip the IOPL bits in the saved rflags. */
	if (ua.iopl)
		tf->tf_rflags |= PSL_IOPL;
	else
		tf->tf_rflags &= ~PSL_IOPL;
#else
	/* Native i386: flip the IOPL bits in the saved eflags. */
	if (ua.iopl)
		tf->tf_eflags |= PSL_IOPL;
	else
		tf->tf_eflags &= ~PSL_IOPL;
#endif

	return 0;
}
440
441 int
442 x86_get_ioperm(struct lwp *l, void *args, register_t *retval)
443 {
444 #ifdef IOPERM
445 int error;
446 struct pcb *pcb = &l->l_addr->u_pcb;
447 struct x86_get_ioperm_args ua;
448 void *dummymap = NULL;
449 void *iomap;
450
451 error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPERM_GET,
452 NULL, NULL, NULL, NULL);
453 if (error)
454 return (error);
455
456 if ((error = copyin(args, &ua, sizeof(ua))) != 0)
457 return (error);
458
459 iomap = pcb->pcb_iomap;
460 if (iomap == NULL) {
461 iomap = dummymap = kmem_alloc(IOMAPSIZE, KM_SLEEP);
462 memset(dummymap, 0xff, IOMAPSIZE);
463 }
464 error = copyout(iomap, ua.iomap, IOMAPSIZE);
465 if (dummymap != NULL) {
466 kmem_free(dummymap, IOMAPSIZE);
467 }
468 return error;
469 #else
470 return EINVAL;
471 #endif
472 }
473
/*
 * sysarch(X86_SET_IOPERM): install a new I/O permission bitmap for the
 * calling LWP and make it live on the current CPU's TSS.
 */
int
x86_set_ioperm(struct lwp *l, void *args, register_t *retval)
{
#ifdef IOPERM
	struct cpu_info *ci;
	int error;
	struct pcb *pcb = &l->l_addr->u_pcb;
	struct x86_set_ioperm_args ua;
	void *new;
	void *old;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPERM_SET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return (error);

	/* Stage the user bitmap in fresh kernel memory first. */
	new = kmem_alloc(IOMAPSIZE, KM_SLEEP);
	error = copyin(ua.iomap, new, IOMAPSIZE);
	if (error) {
		kmem_free(new, IOMAPSIZE);
		return error;
	}
	/* Swap in the new map and release the previous one, if any. */
	old = pcb->pcb_iomap;
	pcb->pcb_iomap = new;
	if (old != NULL) {
		kmem_free(old, IOMAPSIZE);
	}

	/*
	 * Activate on the current CPU: copy the bitmap into the per-CPU
	 * area and point the TSS I/O map base at it.  Preemption is
	 * disabled so we cannot migrate mid-update.
	 */
	kpreempt_disable();
	ci = curcpu();
	memcpy(ci->ci_iomap, pcb->pcb_iomap, sizeof(ci->ci_iomap));
	ci->ci_tss.tss_iobase =
	    ((uintptr_t)ci->ci_iomap - (uintptr_t)&ci->ci_tss) << 16;
	kpreempt_enable();

	return error;
#else
	return EINVAL;
#endif
}
517
518 int
519 x86_get_mtrr(struct lwp *l, void *args, register_t *retval)
520 {
521 #ifdef MTRR
522 struct x86_get_mtrr_args ua;
523 int error, n;
524
525 if (mtrr_funcs == NULL)
526 return ENOSYS;
527
528 error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_GET,
529 NULL, NULL, NULL, NULL);
530 if (error)
531 return (error);
532
533 error = copyin(args, &ua, sizeof ua);
534 if (error != 0)
535 return error;
536
537 error = copyin(ua.n, &n, sizeof n);
538 if (error != 0)
539 return error;
540
541 KERNEL_LOCK(1, NULL);
542 error = mtrr_get(ua.mtrrp, &n, l->l_proc, MTRR_GETSET_USER);
543 KERNEL_UNLOCK_ONE(NULL);
544
545 copyout(&n, ua.n, sizeof (int));
546
547 return error;
548 #else
549 return EINVAL;
550 #endif
551 }
552
553 int
554 x86_set_mtrr(struct lwp *l, void *args, register_t *retval)
555 {
556 #ifdef MTRR
557 int error, n;
558 struct x86_set_mtrr_args ua;
559
560 if (mtrr_funcs == NULL)
561 return ENOSYS;
562
563 error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_SET,
564 NULL, NULL, NULL, NULL);
565 if (error)
566 return (error);
567
568 error = copyin(args, &ua, sizeof ua);
569 if (error != 0)
570 return error;
571
572 error = copyin(ua.n, &n, sizeof n);
573 if (error != 0)
574 return error;
575
576 KERNEL_LOCK(1, NULL);
577 error = mtrr_set(ua.mtrrp, &n, l->l_proc, MTRR_GETSET_USER);
578 if (n != 0)
579 mtrr_commit();
580 KERNEL_UNLOCK_ONE(NULL);
581
582 copyout(&n, ua.n, sizeof n);
583
584 return error;
585 #else
586 return EINVAL;
587 #endif
588 }
589
/*
 * Set the user %fs or %gs segment base for the current LWP (i386
 * only).  'which' is 'f' for %fs or 'g' for %gs.  Builds a flat 4GB
 * ring-3 read/write data descriptor at the requested base and installs
 * it both in the PCB (for context switch) and in the live per-CPU GDT
 * slot.
 */
int
x86_set_sdbase(void *arg, char which)
{
#ifdef i386
	struct segment_descriptor sd;
	vaddr_t base;
	int error;

	error = copyin(arg, &base, sizeof(base));
	if (error != 0)
		return error;

	/* Split the 32-bit base across the descriptor's base fields. */
	sd.sd_lobase = base & 0xffffff;
	sd.sd_hibase = (base >> 24) & 0xff;
	/* Limit 0xfffff with 4K granularity == full 4GB segment. */
	sd.sd_lolimit = 0xffff;
	sd.sd_hilimit = 0xf;
	sd.sd_type = SDT_MEMRWA;
	sd.sd_dpl = SEL_UPL;
	sd.sd_p = 1;
	sd.sd_xx = 0;
	sd.sd_def32 = 1;
	sd.sd_gran = 1;

	/* No migration between updating the PCB and this CPU's GDT. */
	kpreempt_disable();
	if (which == 'f') {
		memcpy(&curpcb->pcb_fsd, &sd, sizeof(sd));
		memcpy(&curcpu()->ci_gdt[GUFS_SEL], &sd, sizeof(sd));
	} else /* which == 'g' */ {
		memcpy(&curpcb->pcb_gsd, &sd, sizeof(sd));
		memcpy(&curcpu()->ci_gdt[GUGS_SEL], &sd, sizeof(sd));
	}
	kpreempt_enable();

	return 0;
#else
	return EINVAL;
#endif
}
628
629 int
630 x86_get_sdbase(void *arg, char which)
631 {
632 #ifdef i386
633 struct segment_descriptor *sd;
634 vaddr_t base;
635
636 switch (which) {
637 case 'f':
638 sd = (struct segment_descriptor *)&curpcb->pcb_fsd;
639 break;
640 case 'g':
641 sd = (struct segment_descriptor *)&curpcb->pcb_gsd;
642 break;
643 default:
644 panic("x86_get_sdbase");
645 }
646
647 base = sd->sd_hibase << 24 | sd->sd_lobase;
648 return copyout(&base, &arg, sizeof(base));
649 #else
650 return EINVAL;
651 #endif
652 }
653
/*
 * sysarch(2): dispatch x86-specific operations.  Each sub-function
 * performs its own argument copyin and kauth authorization, so this
 * routine is a pure switchboard.
 */
int
sys_sysarch(struct lwp *l, const struct sys_sysarch_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) op;
		syscallarg(void *) parms;
	} */
	int error = 0;

	switch(SCARG(uap, op)) {
	case X86_IOPL:
		error = x86_iopl(l, SCARG(uap, parms), retval);
		break;

	case X86_GET_LDT:
		error = x86_get_ldt(l, SCARG(uap, parms), retval);
		break;

	case X86_SET_LDT:
		error = x86_set_ldt(l, SCARG(uap, parms), retval);
		break;

	case X86_GET_IOPERM:
		error = x86_get_ioperm(l, SCARG(uap, parms), retval);
		break;

	case X86_SET_IOPERM:
		error = x86_set_ioperm(l, SCARG(uap, parms), retval);
		break;

	case X86_GET_MTRR:
		error = x86_get_mtrr(l, SCARG(uap, parms), retval);
		break;
	case X86_SET_MTRR:
		error = x86_set_mtrr(l, SCARG(uap, parms), retval);
		break;

#ifdef VM86
	case X86_VM86:
		error = x86_vm86(l, SCARG(uap, parms), retval);
		break;
	case X86_OLD_VM86:
		/* Pre-1.6 vm86 interface, kept for binary compat. */
		error = compat_16_x86_vm86(l, SCARG(uap, parms), retval);
		break;
#endif

#ifdef PERFCTRS
	/* Performance counter ops still require the big kernel lock. */
	case X86_PMC_INFO:
		KERNEL_LOCK(1, NULL);
		error = pmc_info(l, SCARG(uap, parms), retval);
		KERNEL_UNLOCK_ONE(NULL);
		break;

	case X86_PMC_STARTSTOP:
		KERNEL_LOCK(1, NULL);
		error = pmc_startstop(l, SCARG(uap, parms), retval);
		KERNEL_UNLOCK_ONE(NULL);
		break;

	case X86_PMC_READ:
		KERNEL_LOCK(1, NULL);
		error = pmc_read(l, SCARG(uap, parms), retval);
		KERNEL_UNLOCK_ONE(NULL);
		break;
#endif

	case X86_SET_FSBASE:
		error = x86_set_sdbase(SCARG(uap, parms), 'f');
		break;

	case X86_SET_GSBASE:
		error = x86_set_sdbase(SCARG(uap, parms), 'g');
		break;

	case X86_GET_FSBASE:
		error = x86_get_sdbase(SCARG(uap, parms), 'f');
		break;

	case X86_GET_GSBASE:
		error = x86_get_sdbase(SCARG(uap, parms), 'g');
		break;

	default:
		error = EINVAL;
		break;
	}
	return (error);
}
742