sys_machdep.c revision 1.21 1 /* $NetBSD: sys_machdep.c,v 1.21 2009/11/11 13:38:53 yamt Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 2007, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum, and by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: sys_machdep.c,v 1.21 2009/11/11 13:38:53 yamt Exp $");
34
35 #include "opt_mtrr.h"
36 #include "opt_perfctrs.h"
37 #include "opt_user_ldt.h"
38 #include "opt_vm86.h"
39 #include "opt_xen.h"
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/ioctl.h>
44 #include <sys/file.h>
45 #include <sys/time.h>
46 #include <sys/proc.h>
47 #include <sys/user.h>
48 #include <sys/uio.h>
49 #include <sys/kernel.h>
50 #include <sys/buf.h>
51 #include <sys/signal.h>
52 #include <sys/malloc.h>
53 #include <sys/kmem.h>
54 #include <sys/kauth.h>
55 #include <sys/cpu.h>
56 #include <sys/mount.h>
57 #include <sys/syscallargs.h>
58
59 #include <uvm/uvm_extern.h>
60
61 #include <machine/cpufunc.h>
62 #include <machine/gdt.h>
63 #include <machine/psl.h>
64 #include <machine/reg.h>
65 #include <machine/sysarch.h>
66 #include <machine/mtrr.h>
67
68 #ifdef __x86_64__
69 /* Need to be checked. */
70 #undef USER_LDT
71 #undef PERFCTRS
72 #undef VM86
73 #undef IOPERM
74 #else
75 #if defined(XEN)
76 #undef IOPERM
77 #else /* defined(XEN) */
78 #define IOPERM
79 #endif /* defined(XEN) */
80 #endif
81
82 #ifdef VM86
83 #include <machine/vm86.h>
84 #endif
85
86 #ifdef PERFCTRS
87 #include <machine/pmc.h>
88 #endif
89
90 extern struct vm_map *kernel_map;
91
92 int x86_get_ioperm(struct lwp *, void *, register_t *);
93 int x86_set_ioperm(struct lwp *, void *, register_t *);
94 int x86_get_mtrr(struct lwp *, void *, register_t *);
95 int x86_set_mtrr(struct lwp *, void *, register_t *);
96 int x86_set_sdbase(void *, char, lwp_t *, bool);
97 int x86_get_sdbase(void *, char);
98
#ifdef LDT_DEBUG
static void x86_print_ldt(int, const struct segment_descriptor *);

/*
 * Debug helper: dump every field of one LDT segment descriptor to the
 * console, prefixed with its slot index 'i'.  Compiled in only when
 * LDT_DEBUG is defined.
 */
static void
x86_print_ldt(int i, const struct segment_descriptor *d)
{
	printf("[%d] lolimit=0x%x, lobase=0x%x, type=%u, dpl=%u, p=%u, "
	    "hilimit=0x%x, xx=%x, def32=%u, gran=%u, hibase=0x%x\n",
	    i, d->sd_lolimit, d->sd_lobase, d->sd_type, d->sd_dpl, d->sd_p,
	    d->sd_hilimit, d->sd_xx, d->sd_def32, d->sd_gran, d->sd_hibase);
}
#endif
111
112 int
113 x86_get_ldt(struct lwp *l, void *args, register_t *retval)
114 {
115 #ifndef USER_LDT
116 return EINVAL;
117 #else
118 struct x86_get_ldt_args ua;
119 union descriptor *cp;
120 int error;
121
122 if ((error = copyin(args, &ua, sizeof(ua))) != 0)
123 return error;
124
125 if (ua.num < 0 || ua.num > 8192)
126 return EINVAL;
127
128 cp = malloc(ua.num * sizeof(union descriptor), M_TEMP, M_WAITOK);
129 if (cp == NULL)
130 return ENOMEM;
131
132 error = x86_get_ldt1(l, &ua, cp);
133 *retval = ua.num;
134 if (error == 0)
135 error = copyout(cp, ua.desc, ua.num * sizeof(*cp));
136
137 free(cp, M_TEMP);
138 return error;
139 #endif
140 }
141
/*
 * Guts of x86_get_ldt(): copy up to ua->num descriptors, starting at
 * slot ua->start, from the process's LDT (or the system default LDT)
 * into the caller-supplied kernel buffer 'cp'.  On success ua->num is
 * updated to the number of entries actually copied.
 */
int
x86_get_ldt1(struct lwp *l, struct x86_get_ldt_args *ua, union descriptor *cp)
{
#ifndef USER_LDT
	return EINVAL;
#else
	int error;
	struct proc *p = l->l_proc;
	pmap_t pmap = p->p_vmspace->vm_map.pmap;
	int nldt, num;
	union descriptor *lp;

	/* Reading the LDT is privileged; defer the decision to kauth(9). */
	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_LDT_GET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

#ifdef LDT_DEBUG
	printf("x86_get_ldt: start=%d num=%d descs=%p\n", ua->start,
	    ua->num, ua->desc);
#endif

	/* Reject negative or out-of-range requests (8192 = max LDT slots). */
	if (ua->start < 0 || ua->num < 0 || ua->start > 8192 || ua->num > 8192 ||
	    ua->start + ua->num > 8192)
		return (EINVAL);

	/* cpu_lock serializes against concurrent LDT replacement. */
	mutex_enter(&cpu_lock);

	if (pmap->pm_ldt != NULL) {
		/* The process has its own private LDT. */
		nldt = pmap->pm_ldt_len / sizeof(*lp);
		lp = pmap->pm_ldt;
	} else {
		/* No private LDT: read from the system default LDT. */
		nldt = NLDT;
		lp = ldt;
	}

	if (ua->start > nldt) {
		mutex_exit(&cpu_lock);
		return (EINVAL);
	}

	lp += ua->start;
	/* Clamp the count to the entries actually present past 'start'. */
	num = min(ua->num, nldt - ua->start);
	ua->num = num;
#ifdef LDT_DEBUG
	{
		int i;
		for (i = 0; i < num; i++)
			x86_print_ldt(i, &lp[i].sd);
	}
#endif

	/* Snapshot under the lock; the copyout happens later, unlocked. */
	memcpy(cp, lp, num * sizeof(union descriptor));
	mutex_exit(&cpu_lock);

	return 0;
#endif
}
200
201 int
202 x86_set_ldt(struct lwp *l, void *args, register_t *retval)
203 {
204 #ifndef USER_LDT
205 return EINVAL;
206 #else
207 struct x86_set_ldt_args ua;
208 union descriptor *descv;
209 int error;
210
211 if ((error = copyin(args, &ua, sizeof(ua))) != 0)
212 return (error);
213
214 if (ua.num < 0 || ua.num > 8192)
215 return EINVAL;
216
217 descv = malloc(sizeof (*descv) * ua.num, M_TEMP, M_NOWAIT);
218 if (descv == NULL)
219 return ENOMEM;
220
221 error = copyin(ua.desc, descv, sizeof (*descv) * ua.num);
222 if (error == 0)
223 error = x86_set_ldt1(l, &ua, descv);
224 *retval = ua.start;
225
226 free(descv, M_TEMP);
227 return error;
228 #endif
229 }
230
/*
 * Guts of x86_set_ldt(): validate the ua->num descriptors in 'descv'
 * and install them at slot ua->start of the process's LDT.  The update
 * is done copy/write/swap so other LWPs never observe a half-written
 * LDT.  Takes and releases cpu_lock internally; may sleep.
 */
int
x86_set_ldt1(struct lwp *l, struct x86_set_ldt_args *ua,
    union descriptor *descv)
{
#ifndef USER_LDT
	return EINVAL;
#else
	int error, i, n, old_sel, new_sel;
	struct proc *p = l->l_proc;
	pmap_t pmap = p->p_vmspace->vm_map.pmap;
	size_t old_len, new_len;
	union descriptor *old_ldt, *new_ldt;

	/* Writing the LDT is privileged; defer the decision to kauth(9). */
	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_LDT_SET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	/* Reject negative or out-of-range requests (8192 = max LDT slots). */
	if (ua->start < 0 || ua->num < 0 || ua->start > 8192 || ua->num > 8192 ||
	    ua->start + ua->num > 8192)
		return (EINVAL);

	/* Check descriptors for access violations. */
	for (i = 0; i < ua->num; i++) {
		union descriptor *desc = &descv[i];

		switch (desc->sd.sd_type) {
		case SDT_SYSNULL:
			/* Null descriptors are forced non-present. */
			desc->sd.sd_p = 0;
			break;
		case SDT_SYS286CGT:
		case SDT_SYS386CGT:
			/*
			 * Only allow call gates targeting a segment
			 * in the LDT or a user segment in the fixed
			 * part of the gdt. Segments in the LDT are
			 * constrained (below) to be user segments.
			 */
			if (desc->gd.gd_p != 0 &&
			    !ISLDT(desc->gd.gd_selector) &&
			    ((IDXSEL(desc->gd.gd_selector) >= NGDT) ||
			    (gdt[IDXSEL(desc->gd.gd_selector)].sd.sd_dpl !=
			    SEL_UPL))) {
				return EACCES;
			}
			break;
		case SDT_MEMEC:
		case SDT_MEMEAC:
		case SDT_MEMERC:
		case SDT_MEMERAC:
			/* Must be "present" if executable and conforming. */
			if (desc->sd.sd_p == 0)
				return EACCES;
			break;
		case SDT_MEMRO:
		case SDT_MEMROA:
		case SDT_MEMRW:
		case SDT_MEMRWA:
		case SDT_MEMROD:
		case SDT_MEMRODA:
		case SDT_MEMRWD:
		case SDT_MEMRWDA:
		case SDT_MEME:
		case SDT_MEMEA:
		case SDT_MEMER:
		case SDT_MEMERA:
			/* Plain memory segments are acceptable as-is. */
			break;
		default:
			/*
			 * Make sure that unknown descriptor types are
			 * not marked present.
			 */
			if (desc->sd.sd_p != 0)
				return EACCES;
			break;
		}

		if (desc->sd.sd_p != 0) {
			/* Only user (ring-3) descriptors may be present. */
			if (desc->sd.sd_dpl != SEL_UPL)
				return EACCES;
		}
	}

	/*
	 * Install selected changes. We perform a copy, write, swap dance
	 * here to ensure that all updates happen atomically.
	 */

	/*
	 * Allocate a new LDT.  The allocation is done without cpu_lock
	 * held (it may sleep), then re-checked under the lock: if the
	 * pmap's LDT grew past our buffer in the meantime, release the
	 * buffer and start over with the larger size.
	 *
	 * NOTE(review): the uvm_km_alloc() result is not checked for
	 * failure before use below — confirm it cannot fail here.
	 */
	for (;;) {
		new_len = (ua->start + ua->num) * sizeof(union descriptor);
		new_len = max(new_len, pmap->pm_ldt_len);
		new_len = max(new_len, NLDT * sizeof(union descriptor));
		new_len = round_page(new_len);
		new_ldt = (union descriptor *)uvm_km_alloc(kernel_map,
		    new_len, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
		mutex_enter(&cpu_lock);
		if (pmap->pm_ldt_len <= new_len) {
			break;
		}
		mutex_exit(&cpu_lock);
		uvm_km_free(kernel_map, (vaddr_t)new_ldt, new_len,
		    UVM_KMF_WIRED);
	}

	/* Copy existing entries, if any. */
	if (pmap->pm_ldt != NULL) {
		old_ldt = pmap->pm_ldt;
		old_len = pmap->pm_ldt_len;
		old_sel = pmap->pm_ldt_sel;
		memcpy(new_ldt, old_ldt, old_len);
	} else {
		/* First private LDT: seed it from the system default. */
		old_ldt = NULL;
		old_len = 0;
		old_sel = -1;
		memcpy(new_ldt, ldt, NLDT * sizeof(union descriptor));
	}

	/* Apply requested changes. */
	for (i = 0, n = ua->start; i < ua->num; i++, n++) {
		new_ldt[n] = descv[i];
	}

	/* Allocate LDT selector. */
	new_sel = ldt_alloc(new_ldt, new_len);
	if (new_sel == -1) {
		mutex_exit(&cpu_lock);
		uvm_km_free(kernel_map, (vaddr_t)new_ldt, new_len,
		    UVM_KMF_WIRED);
		return ENOMEM;
	}

	/* All changes are now globally visible. Swap in the new LDT. */
	pmap->pm_ldt = new_ldt;
	pmap->pm_ldt_len = new_len;
	pmap->pm_ldt_sel = new_sel;

	/* Switch existing users onto new LDT. */
	pmap_ldt_sync(pmap);

	/* Free existing LDT (if any). */
	if (old_ldt != NULL) {
		ldt_free(old_sel);
		uvm_km_free(kernel_map, (vaddr_t)old_ldt, old_len,
		    UVM_KMF_WIRED);
	}
	mutex_exit(&cpu_lock);

	return error;
#endif
}
383
384 int
385 x86_iopl(struct lwp *l, void *args, register_t *retval)
386 {
387 int error;
388 struct x86_iopl_args ua;
389 #ifdef XEN
390 int iopl;
391 #else
392 struct trapframe *tf = l->l_md.md_regs;
393 #endif
394
395 error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPL,
396 NULL, NULL, NULL, NULL);
397 if (error)
398 return (error);
399
400 if ((error = copyin(args, &ua, sizeof(ua))) != 0)
401 return error;
402
403 #ifdef XEN
404 if (ua.iopl)
405 iopl = SEL_UPL;
406 else
407 iopl = SEL_KPL;
408 l->l_addr->u_pcb.pcb_iopl = iopl;
409 /* Force the change at ring 0. */
410 {
411 struct physdev_op physop;
412 physop.cmd = PHYSDEVOP_SET_IOPL;
413 physop.u.set_iopl.iopl = iopl;
414 HYPERVISOR_physdev_op(&physop);
415 }
416 #elif defined(__x86_64__)
417 if (ua.iopl)
418 tf->tf_rflags |= PSL_IOPL;
419 else
420 tf->tf_rflags &= ~PSL_IOPL;
421 #else
422 if (ua.iopl)
423 tf->tf_eflags |= PSL_IOPL;
424 else
425 tf->tf_eflags &= ~PSL_IOPL;
426 #endif
427
428 return 0;
429 }
430
431 int
432 x86_get_ioperm(struct lwp *l, void *args, register_t *retval)
433 {
434 #ifdef IOPERM
435 int error;
436 struct pcb *pcb = &l->l_addr->u_pcb;
437 struct x86_get_ioperm_args ua;
438 void *dummymap = NULL;
439 void *iomap;
440
441 error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPERM_GET,
442 NULL, NULL, NULL, NULL);
443 if (error)
444 return (error);
445
446 if ((error = copyin(args, &ua, sizeof(ua))) != 0)
447 return (error);
448
449 iomap = pcb->pcb_iomap;
450 if (iomap == NULL) {
451 iomap = dummymap = kmem_alloc(IOMAPSIZE, KM_SLEEP);
452 memset(dummymap, 0xff, IOMAPSIZE);
453 }
454 error = copyout(iomap, ua.iomap, IOMAPSIZE);
455 if (dummymap != NULL) {
456 kmem_free(dummymap, IOMAPSIZE);
457 }
458 return error;
459 #else
460 return EINVAL;
461 #endif
462 }
463
/*
 * Install a new I/O permission bitmap for the LWP.  The bitmap is
 * staged in a fresh kernel buffer, swapped into the PCB, and then
 * copied into the current CPU's TSS so it takes effect immediately.
 */
int
x86_set_ioperm(struct lwp *l, void *args, register_t *retval)
{
#ifdef IOPERM
	struct cpu_info *ci;
	int error;
	struct pcb *pcb = &l->l_addr->u_pcb;
	struct x86_set_ioperm_args ua;
	void *new;
	void *old;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPERM_SET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return (error);

	/* Stage the new bitmap fully before touching the PCB. */
	new = kmem_alloc(IOMAPSIZE, KM_SLEEP);
	error = copyin(ua.iomap, new, IOMAPSIZE);
	if (error) {
		kmem_free(new, IOMAPSIZE);
		return error;
	}
	old = pcb->pcb_iomap;
	pcb->pcb_iomap = new;
	if (old != NULL) {
		kmem_free(old, IOMAPSIZE);
	}

	/*
	 * Activate the map on the CPU we are running on.  Preemption is
	 * disabled so we cannot migrate between reading curcpu() and
	 * updating its TSS.
	 *
	 * NOTE(review): the << 16 suggests the bitmap offset lives in
	 * the upper 16 bits of tss_iobase — confirm against the TSS
	 * structure layout.
	 */
	kpreempt_disable();
	ci = curcpu();
	memcpy(ci->ci_iomap, pcb->pcb_iomap, sizeof(ci->ci_iomap));
	ci->ci_tss.tss_iobase =
	    ((uintptr_t)ci->ci_iomap - (uintptr_t)&ci->ci_tss) << 16;
	kpreempt_enable();

	return error;
#else
	return EINVAL;
#endif
}
507
508 int
509 x86_get_mtrr(struct lwp *l, void *args, register_t *retval)
510 {
511 #ifdef MTRR
512 struct x86_get_mtrr_args ua;
513 int error, n;
514
515 if (mtrr_funcs == NULL)
516 return ENOSYS;
517
518 error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_GET,
519 NULL, NULL, NULL, NULL);
520 if (error)
521 return (error);
522
523 error = copyin(args, &ua, sizeof ua);
524 if (error != 0)
525 return error;
526
527 error = copyin(ua.n, &n, sizeof n);
528 if (error != 0)
529 return error;
530
531 KERNEL_LOCK(1, NULL);
532 error = mtrr_get(ua.mtrrp, &n, l->l_proc, MTRR_GETSET_USER);
533 KERNEL_UNLOCK_ONE(NULL);
534
535 copyout(&n, ua.n, sizeof (int));
536
537 return error;
538 #else
539 return EINVAL;
540 #endif
541 }
542
543 int
544 x86_set_mtrr(struct lwp *l, void *args, register_t *retval)
545 {
546 #ifdef MTRR
547 int error, n;
548 struct x86_set_mtrr_args ua;
549
550 if (mtrr_funcs == NULL)
551 return ENOSYS;
552
553 error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_SET,
554 NULL, NULL, NULL, NULL);
555 if (error)
556 return (error);
557
558 error = copyin(args, &ua, sizeof ua);
559 if (error != 0)
560 return error;
561
562 error = copyin(ua.n, &n, sizeof n);
563 if (error != 0)
564 return error;
565
566 KERNEL_LOCK(1, NULL);
567 error = mtrr_set(ua.mtrrp, &n, l->l_proc, MTRR_GETSET_USER);
568 if (n != 0)
569 mtrr_commit();
570 KERNEL_UNLOCK_ONE(NULL);
571
572 copyout(&n, ua.n, sizeof n);
573
574 return error;
575 #else
576 return EINVAL;
577 #endif
578 }
579
580 int
581 x86_set_sdbase(void *arg, char which, lwp_t *l, bool direct)
582 {
583 #ifdef i386
584 union descriptor usd;
585 struct pcb *pcb;
586 vaddr_t base;
587 int error;
588
589 if (direct) {
590 base = (vaddr_t)arg;
591 } else {
592 error = copyin(arg, &base, sizeof(base));
593 if (error != 0)
594 return error;
595 }
596
597 usd.sd.sd_lobase = base & 0xffffff;
598 usd.sd.sd_hibase = (base >> 24) & 0xff;
599 usd.sd.sd_lolimit = 0xffff;
600 usd.sd.sd_hilimit = 0xf;
601 usd.sd.sd_type = SDT_MEMRWA;
602 usd.sd.sd_dpl = SEL_UPL;
603 usd.sd.sd_p = 1;
604 usd.sd.sd_xx = 0;
605 usd.sd.sd_def32 = 1;
606 usd.sd.sd_gran = 1;
607
608 kpreempt_disable();
609 pcb = &l->l_addr->u_pcb;
610 if (which == 'f') {
611 memcpy(&pcb->pcb_fsd, &usd.sd,
612 sizeof(struct segment_descriptor));
613 if (l == curlwp) {
614 update_descriptor(&curcpu()->ci_gdt[GUFS_SEL], &usd);
615 }
616 } else /* which == 'g' */ {
617 memcpy(&pcb->pcb_gsd, &usd.sd,
618 sizeof(struct segment_descriptor));
619 if (l == curlwp) {
620 update_descriptor(&curcpu()->ci_gdt[GUGS_SEL], &usd);
621 }
622 }
623 kpreempt_enable();
624
625 return 0;
626 #else
627 return EINVAL;
628 #endif
629 }
630
631 int
632 x86_get_sdbase(void *arg, char which)
633 {
634 #ifdef i386
635 struct segment_descriptor *sd;
636 vaddr_t base;
637
638 switch (which) {
639 case 'f':
640 sd = (struct segment_descriptor *)&curpcb->pcb_fsd;
641 break;
642 case 'g':
643 sd = (struct segment_descriptor *)&curpcb->pcb_gsd;
644 break;
645 default:
646 panic("x86_get_sdbase");
647 }
648
649 base = sd->sd_hibase << 24 | sd->sd_lobase;
650 return copyout(&base, arg, sizeof(base));
651 #else
652 return EINVAL;
653 #endif
654 }
655
656 int
657 sys_sysarch(struct lwp *l, const struct sys_sysarch_args *uap, register_t *retval)
658 {
659 /* {
660 syscallarg(int) op;
661 syscallarg(void *) parms;
662 } */
663 int error = 0;
664
665 switch(SCARG(uap, op)) {
666 case X86_IOPL:
667 error = x86_iopl(l, SCARG(uap, parms), retval);
668 break;
669
670 case X86_GET_LDT:
671 error = x86_get_ldt(l, SCARG(uap, parms), retval);
672 break;
673
674 case X86_SET_LDT:
675 error = x86_set_ldt(l, SCARG(uap, parms), retval);
676 break;
677
678 case X86_GET_IOPERM:
679 error = x86_get_ioperm(l, SCARG(uap, parms), retval);
680 break;
681
682 case X86_SET_IOPERM:
683 error = x86_set_ioperm(l, SCARG(uap, parms), retval);
684 break;
685
686 case X86_GET_MTRR:
687 error = x86_get_mtrr(l, SCARG(uap, parms), retval);
688 break;
689 case X86_SET_MTRR:
690 error = x86_set_mtrr(l, SCARG(uap, parms), retval);
691 break;
692
693 #ifdef VM86
694 case X86_VM86:
695 error = x86_vm86(l, SCARG(uap, parms), retval);
696 break;
697 case X86_OLD_VM86:
698 error = compat_16_x86_vm86(l, SCARG(uap, parms), retval);
699 break;
700 #endif
701
702 #ifdef PERFCTRS
703 case X86_PMC_INFO:
704 KERNEL_LOCK(1, NULL);
705 error = pmc_info(l, SCARG(uap, parms), retval);
706 KERNEL_UNLOCK_ONE(NULL);
707 break;
708
709 case X86_PMC_STARTSTOP:
710 KERNEL_LOCK(1, NULL);
711 error = pmc_startstop(l, SCARG(uap, parms), retval);
712 KERNEL_UNLOCK_ONE(NULL);
713 break;
714
715 case X86_PMC_READ:
716 KERNEL_LOCK(1, NULL);
717 error = pmc_read(l, SCARG(uap, parms), retval);
718 KERNEL_UNLOCK_ONE(NULL);
719 break;
720 #endif
721
722 case X86_SET_FSBASE:
723 error = x86_set_sdbase(SCARG(uap, parms), 'f', curlwp, false);
724 break;
725
726 case X86_SET_GSBASE:
727 error = x86_set_sdbase(SCARG(uap, parms), 'g', curlwp, false);
728 break;
729
730 case X86_GET_FSBASE:
731 error = x86_get_sdbase(SCARG(uap, parms), 'f');
732 break;
733
734 case X86_GET_GSBASE:
735 error = x86_get_sdbase(SCARG(uap, parms), 'g');
736 break;
737
738 default:
739 error = EINVAL;
740 break;
741 }
742 return (error);
743 }
744
/*
 * MI hook: set the LWP's private (TLS) pointer.  On i386 the TLS base
 * lives in the %gs segment, so delegate to x86_set_sdbase() in direct
 * mode ('addr' is the base address itself, not a userland pointer).
 */
int
cpu_lwp_setprivate(lwp_t *l, void *addr)
{

	return x86_set_sdbase(addr, 'g', l, true);
}
751