/*	$NetBSD: sys_machdep.c,v 1.56 2020/06/19 16:20:22 maxv Exp $	*/
2
3 /*
4 * Copyright (c) 1998, 2007, 2009, 2017 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum, by Andrew Doran, and by Maxime Villard.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: sys_machdep.c,v 1.56 2020/06/19 16:20:22 maxv Exp $");
34
35 #include "opt_mtrr.h"
36 #include "opt_user_ldt.h"
37 #include "opt_compat_netbsd.h"
38 #include "opt_xen.h"
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/ioctl.h>
43 #include <sys/file.h>
44 #include <sys/time.h>
45 #include <sys/proc.h>
46 #include <sys/uio.h>
47 #include <sys/kernel.h>
48 #include <sys/buf.h>
49 #include <sys/signal.h>
50 #include <sys/malloc.h>
51 #include <sys/kmem.h>
52 #include <sys/kauth.h>
53 #include <sys/cpu.h>
54 #include <sys/mount.h>
55 #include <sys/syscallargs.h>
56
57 #include <uvm/uvm_extern.h>
58
59 #include <machine/cpufunc.h>
60 #include <machine/gdt.h>
61 #include <machine/psl.h>
62 #include <machine/reg.h>
63 #include <machine/sysarch.h>
64 #include <machine/mtrr.h>
65
66 #if defined(__x86_64__) || defined(XENPV)
67 #undef IOPERM /* not implemented */
68 #else
69 #define IOPERM
70 #endif
71
72 #if defined(XENPV) && defined(USER_LDT)
73 #error "USER_LDT not supported on XENPV"
74 #endif
75
76 extern struct vm_map *kernel_map;
77
78 static int x86_get_ioperm(struct lwp *, void *, register_t *);
79 static int x86_set_ioperm(struct lwp *, void *, register_t *);
80 static int x86_set_sdbase32(void *, char, lwp_t *, bool);
81 int x86_set_sdbase(void *, char, lwp_t *, bool);
82 static int x86_get_sdbase32(void *, char);
83 int x86_get_sdbase(void *, char);
84
#ifdef i386
/*
 * Fetch a copy of the process' LDT entries into a user-supplied buffer.
 * Kernel half of the i386 X86_GET_LDT sysarch call; amd64 reaches the
 * same functionality through netbsd32_sysarch instead.
 */
static int
x86_get_ldt(struct lwp *l, void *args, register_t *retval)
{
#ifndef USER_LDT
	return EINVAL;
#else
	struct x86_get_ldt_args ua;
	union descriptor *descs;
	int error;

	error = copyin(args, &ua, sizeof(ua));
	if (error != 0)
		return error;

	if (ua.num < 0 || ua.num > MAX_USERLDT_SLOTS)
		return EINVAL;

	descs = malloc(ua.num * sizeof(*descs), M_TEMP, M_WAITOK);
	if (descs == NULL)
		return ENOMEM;

	error = x86_get_ldt1(l, &ua, descs);
	/* x86_get_ldt1() clamps ua.num to the slots actually copied. */
	*retval = ua.num;
	if (error == 0)
		error = copyout(descs, ua.desc, ua.num * sizeof(*descs));

	free(descs, M_TEMP);
	return error;
#endif
}
#endif
116
/*
 * Copy LDT entries from the process' private LDT (or from the system
 * default LDT if the process has none) into the kernel buffer 'cp'.
 * On success, ua->num is clamped to the number of entries actually
 * copied.  Shared by the native i386 path and the netbsd32 compat path.
 * Requires KAUTH_MACHDEP_LDT_GET authorization.
 */
int
x86_get_ldt1(struct lwp *l, struct x86_get_ldt_args *ua, union descriptor *cp)
{
#ifndef USER_LDT
	return EINVAL;
#else
	int error;
	struct proc *p = l->l_proc;
	pmap_t pmap = p->p_vmspace->vm_map.pmap;
	int nldt, num;
	union descriptor *lp;

#ifdef __x86_64__
	const size_t min_ldt_size = LDT_SIZE;
#else
	const size_t min_ldt_size = NLDT * sizeof(union descriptor);
#endif

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_LDT_GET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return error;

	/* Reject negative or out-of-range slot specifications. */
	if (ua->start < 0 || ua->num < 0 ||
	    ua->start > MAX_USERLDT_SLOTS || ua->num > MAX_USERLDT_SLOTS ||
	    ua->start + ua->num > MAX_USERLDT_SLOTS)
		return EINVAL;

	/* The area below min_ldt_size holds reserved system slots. */
	if (ua->start * sizeof(union descriptor) < min_ldt_size)
		return EINVAL;

	/* cpu_lock serializes against a concurrent x86_set_ldt1(). */
	mutex_enter(&cpu_lock);

	if (pmap->pm_ldt != NULL) {
		/* The process has a private LDT installed. */
		nldt = MAX_USERLDT_SIZE / sizeof(*lp);
		lp = pmap->pm_ldt;
	} else {
		/* No private LDT: read from the system default. */
#ifdef __x86_64__
		nldt = LDT_SIZE / sizeof(*lp);
#else
		nldt = NLDT;
#endif
		lp = (union descriptor *)ldtstore;
	}

	if (ua->start > nldt) {
		mutex_exit(&cpu_lock);
		return EINVAL;
	}

	lp += ua->start;
	/* Clamp the count to the entries actually available. */
	num = uimin(ua->num, nldt - ua->start);
	ua->num = num;

	memcpy(cp, lp, num * sizeof(union descriptor));
	mutex_exit(&cpu_lock);

	return 0;
#endif
}
177
#ifdef i386
/*
 * Replace entries in the process' LDT with descriptors supplied by
 * the user.  Kernel half of the i386 X86_SET_LDT sysarch call; amd64
 * reaches the same functionality through netbsd32_sysarch instead.
 */
static int
x86_set_ldt(struct lwp *l, void *args, register_t *retval)
{
#ifndef USER_LDT
	return EINVAL;
#else
	struct x86_set_ldt_args ua;
	union descriptor *descs;
	int error;

	error = copyin(args, &ua, sizeof(ua));
	if (error != 0)
		return error;

	if (ua.num < 0 || ua.num > MAX_USERLDT_SLOTS)
		return EINVAL;

	descs = malloc(sizeof(*descs) * ua.num, M_TEMP, M_WAITOK);
	error = copyin(ua.desc, descs, sizeof(*descs) * ua.num);
	if (error == 0)
		error = x86_set_ldt1(l, &ua, descs);
	*retval = ua.start;

	free(descs, M_TEMP);
	return error;
#endif
}
#endif
206
/*
 * Validate and install user-supplied LDT entries for the process owning
 * lwp 'l'.  A whole new LDT is allocated, populated from the old one
 * (or the system default) plus the requested changes, and swapped in
 * under cpu_lock; running users are migrated with pmap_ldt_sync().
 * Shared by the native i386 path and the netbsd32 compat path.
 * Requires KAUTH_MACHDEP_LDT_SET authorization.
 */
int
x86_set_ldt1(struct lwp *l, struct x86_set_ldt_args *ua,
    union descriptor *descv)
{
#ifndef USER_LDT
	return EINVAL;
#else
	int error, i, n, old_sel, new_sel;
	struct proc *p = l->l_proc;
	pmap_t pmap = p->p_vmspace->vm_map.pmap;
	union descriptor *old_ldt, *new_ldt;

#ifdef __x86_64__
	const size_t min_ldt_size = LDT_SIZE;
#else
	const size_t min_ldt_size = NLDT * sizeof(union descriptor);
#endif

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_LDT_SET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return error;

	/* Reject negative or out-of-range slot specifications. */
	if (ua->start < 0 || ua->num < 0 ||
	    ua->start > MAX_USERLDT_SLOTS || ua->num > MAX_USERLDT_SLOTS ||
	    ua->start + ua->num > MAX_USERLDT_SLOTS)
		return EINVAL;

	/* The slots below min_ldt_size are reserved and not writable. */
	if (ua->start * sizeof(union descriptor) < min_ldt_size)
		return EINVAL;

	/* Check descriptors for access violations. */
	for (i = 0; i < ua->num; i++) {
		union descriptor *desc = &descv[i];

#ifdef __x86_64__
		/* Long-mode (64-bit) segments are never allowed here. */
		if (desc->sd.sd_long != 0)
			return EACCES;
#endif

		switch (desc->sd.sd_type) {
		case SDT_SYSNULL:
			desc->sd.sd_p = 0;
			break;
		case SDT_MEMEC:
		case SDT_MEMEAC:
		case SDT_MEMERC:
		case SDT_MEMERAC:
			/* Must be "present" if executable and conforming. */
			if (desc->sd.sd_p == 0)
				return EACCES;
			break;
		case SDT_MEMRO:
		case SDT_MEMROA:
		case SDT_MEMRW:
		case SDT_MEMRWA:
		case SDT_MEMROD:
		case SDT_MEMRODA:
		case SDT_MEMRWD:
		case SDT_MEMRWDA:
		case SDT_MEME:
		case SDT_MEMEA:
		case SDT_MEMER:
		case SDT_MEMERA:
			break;
		default:
			/* Only plain memory segment types are permitted. */
			return EACCES;
		}

		if (desc->sd.sd_p != 0) {
			/* Only user (ring-3) descriptors may be present. */
			if (desc->sd.sd_dpl != SEL_UPL)
				return EACCES;
		}
	}

	/*
	 * Install selected changes.
	 */

	/* Allocate a new LDT. */
	new_ldt = (union descriptor *)uvm_km_alloc(kernel_map,
	    MAX_USERLDT_SIZE, 0, UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_WAITVA);

	mutex_enter(&cpu_lock);

	/* Copy existing entries, if any. */
	if (pmap->pm_ldt != NULL) {
		old_ldt = pmap->pm_ldt;
		old_sel = pmap->pm_ldt_sel;
		memcpy(new_ldt, old_ldt, MAX_USERLDT_SIZE);
	} else {
		old_ldt = NULL;
		old_sel = -1;
		memcpy(new_ldt, ldtstore, min_ldt_size);
	}

	/* Apply requested changes. */
	for (i = 0, n = ua->start; i < ua->num; i++, n++) {
		new_ldt[n] = descv[i];
	}

	/* Allocate LDT selector. */
	new_sel = ldt_alloc(new_ldt, MAX_USERLDT_SIZE);
	if (new_sel == -1) {
		mutex_exit(&cpu_lock);
		uvm_km_free(kernel_map, (vaddr_t)new_ldt, MAX_USERLDT_SIZE,
		    UVM_KMF_WIRED);
		return ENOMEM;
	}

	/* All changes are now globally visible.  Swap in the new LDT. */
	atomic_store_relaxed(&pmap->pm_ldt_sel, new_sel);
	/* membar_store_store for pmap_fork() to read these unlocked safely */
	membar_producer();
	atomic_store_relaxed(&pmap->pm_ldt, new_ldt);

	/* Switch existing users onto new LDT. */
	pmap_ldt_sync(pmap);

	/* Free existing LDT (if any). */
	if (old_ldt != NULL) {
		ldt_free(old_sel);
		/* exit the mutex before free */
		mutex_exit(&cpu_lock);
		uvm_km_free(kernel_map, (vaddr_t)old_ldt, MAX_USERLDT_SIZE,
		    UVM_KMF_WIRED);
	} else {
		mutex_exit(&cpu_lock);
	}

	return error;
#endif
}
341
/*
 * Grant (ua.iopl nonzero) or revoke user-level access to I/O
 * instructions for the calling lwp.  Requires KAUTH_MACHDEP_IOPL
 * authorization.  The mechanism differs per platform: Xen PV records
 * the level in the PCB and asks the hypervisor to apply it; native
 * amd64/i386 toggle the IOPL bits in the saved flags register.
 */
int
x86_iopl(struct lwp *l, void *args, register_t *retval)
{
	int error;
	struct x86_iopl_args ua;
#ifdef XENPV
	int iopl;
#else
	struct trapframe *tf = l->l_md.md_regs;
#endif

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPL,
	    NULL, NULL, NULL, NULL);
	if (error)
		return error;

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return error;

#ifdef XENPV
	/*
	 * No direct EFLAGS.IOPL access under Xen PV: remember the level
	 * in the PCB (for context switch) and have the hypervisor apply
	 * it now.
	 */
	if (ua.iopl)
		iopl = SEL_UPL;
	else
		iopl = SEL_KPL;

	{
		struct pcb *pcb;

		pcb = lwp_getpcb(l);
		pcb->pcb_iopl = iopl;

		/* Force the change at ring 0. */
		struct physdev_set_iopl set_iopl;
		set_iopl.iopl = iopl;
		HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
	}
#elif defined(__x86_64__)
	/* Takes effect when the trapframe is restored on return. */
	if (ua.iopl)
		tf->tf_rflags |= PSL_IOPL;
	else
		tf->tf_rflags &= ~PSL_IOPL;
#else
	if (ua.iopl)
		tf->tf_eflags |= PSL_IOPL;
	else
		tf->tf_eflags &= ~PSL_IOPL;
#endif

	return 0;
}
392
393 static int
394 x86_get_ioperm(struct lwp *l, void *args, register_t *retval)
395 {
396 #ifdef IOPERM
397 int error;
398 struct pcb *pcb = lwp_getpcb(l);
399 struct x86_get_ioperm_args ua;
400 void *dummymap = NULL;
401 void *iomap;
402
403 error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPERM_GET,
404 NULL, NULL, NULL, NULL);
405 if (error)
406 return error;
407
408 if ((error = copyin(args, &ua, sizeof(ua))) != 0)
409 return error;
410
411 iomap = pcb->pcb_iomap;
412 if (iomap == NULL) {
413 iomap = dummymap = kmem_alloc(IOMAPSIZE, KM_SLEEP);
414 memset(dummymap, 0xff, IOMAPSIZE);
415 }
416 error = copyout(iomap, ua.iomap, IOMAPSIZE);
417 if (dummymap != NULL) {
418 kmem_free(dummymap, IOMAPSIZE);
419 }
420 return error;
421 #else
422 return EINVAL;
423 #endif
424 }
425
426 static int
427 x86_set_ioperm(struct lwp *l, void *args, register_t *retval)
428 {
429 #ifdef IOPERM
430 struct cpu_info *ci;
431 int error;
432 struct pcb *pcb = lwp_getpcb(l);
433 struct x86_set_ioperm_args ua;
434 void *new;
435 void *old;
436
437 error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPERM_SET,
438 NULL, NULL, NULL, NULL);
439 if (error)
440 return error;
441
442 if ((error = copyin(args, &ua, sizeof(ua))) != 0)
443 return error;
444
445 new = kmem_alloc(IOMAPSIZE, KM_SLEEP);
446 error = copyin(ua.iomap, new, IOMAPSIZE);
447 if (error) {
448 kmem_free(new, IOMAPSIZE);
449 return error;
450 }
451 old = pcb->pcb_iomap;
452 pcb->pcb_iomap = new;
453 if (old != NULL) {
454 kmem_free(old, IOMAPSIZE);
455 }
456
457 CTASSERT(offsetof(struct cpu_tss, iomap) -
458 offsetof(struct cpu_tss, tss) == IOMAP_VALIDOFF);
459
460 kpreempt_disable();
461 ci = curcpu();
462 memcpy(ci->ci_tss->iomap, pcb->pcb_iomap, IOMAPSIZE);
463 ci->ci_tss->tss.tss_iobase = IOMAP_VALIDOFF << 16;
464 kpreempt_enable();
465
466 return error;
467 #else
468 return EINVAL;
469 #endif
470 }
471
472 static int
473 x86_get_mtrr(struct lwp *l, void *args, register_t *retval)
474 {
475 #ifdef MTRR
476 struct x86_get_mtrr_args ua;
477 int error, n;
478
479 if (mtrr_funcs == NULL)
480 return ENOSYS;
481
482 error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_GET,
483 NULL, NULL, NULL, NULL);
484 if (error)
485 return error;
486
487 error = copyin(args, &ua, sizeof ua);
488 if (error != 0)
489 return error;
490
491 error = copyin(ua.n, &n, sizeof n);
492 if (error != 0)
493 return error;
494
495 KERNEL_LOCK(1, NULL);
496 error = mtrr_get(ua.mtrrp, &n, l->l_proc, MTRR_GETSET_USER);
497 KERNEL_UNLOCK_ONE(NULL);
498
499 copyout(&n, ua.n, sizeof (int));
500
501 return error;
502 #else
503 return EINVAL;
504 #endif
505 }
506
507 static int
508 x86_set_mtrr(struct lwp *l, void *args, register_t *retval)
509 {
510 #ifdef MTRR
511 int error, n;
512 struct x86_set_mtrr_args ua;
513
514 if (mtrr_funcs == NULL)
515 return ENOSYS;
516
517 error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_SET,
518 NULL, NULL, NULL, NULL);
519 if (error)
520 return error;
521
522 error = copyin(args, &ua, sizeof ua);
523 if (error != 0)
524 return error;
525
526 error = copyin(ua.n, &n, sizeof n);
527 if (error != 0)
528 return error;
529
530 KERNEL_LOCK(1, NULL);
531 error = mtrr_set(ua.mtrrp, &n, l->l_proc, MTRR_GETSET_USER);
532 if (n != 0)
533 mtrr_commit();
534 KERNEL_UNLOCK_ONE(NULL);
535
536 copyout(&n, ua.n, sizeof n);
537
538 return error;
539 #else
540 return EINVAL;
541 #endif
542 }
543
544 #ifdef __x86_64__
545 #define pcb_fsd pcb_fs
546 #define pcb_gsd pcb_gs
547 #define segment_descriptor mem_segment_descriptor
548 #endif
549
/*
 * Install a new 32-bit %fs or %gs segment base for lwp 'l' by building
 * a flat 4GB ring-3 data descriptor with the given base and storing it
 * in the PCB (and, when 'l' is the current lwp, loading it into the
 * per-CPU GDT user slot so it takes effect immediately).  'which'
 * selects the segment ('f' or 'g'); 'arg' is either the base value
 * itself (direct == true) or a user pointer to it.
 */
static int
x86_set_sdbase32(void *arg, char which, lwp_t *l, bool direct)
{
	struct trapframe *tf = l->l_md.md_regs;
	union descriptor usd;
	struct pcb *pcb;
	uint32_t base;
	int error;

	if (direct) {
		base = (vaddr_t)arg;
	} else {
		error = copyin(arg, &base, sizeof(base));
		if (error != 0)
			return error;
	}

	/* Flat 4GB, ring-3, read/write, 32-bit, page-granular segment. */
	memset(&usd, 0, sizeof(usd));
	usd.sd.sd_lobase = base & 0xffffff;
	usd.sd.sd_hibase = (base >> 24) & 0xff;
	usd.sd.sd_lolimit = 0xffff;
	usd.sd.sd_hilimit = 0xf;
	usd.sd.sd_type = SDT_MEMRWA;
	usd.sd.sd_dpl = SEL_UPL;
	usd.sd.sd_p = 1;
	usd.sd.sd_def32 = 1;
	usd.sd.sd_gran = 1;

	pcb = lwp_getpcb(l);
	/* Keep PCB and GDT consistent: no preemption between the two. */
	kpreempt_disable();
	if (which == 'f') {
		memcpy(&pcb->pcb_fsd, &usd.sd,
		    sizeof(struct segment_descriptor));
		if (l == curlwp) {
			/* Make it live on this CPU right away. */
			update_descriptor(&curcpu()->ci_gdt[GUFS_SEL], &usd);
		}
		tf->tf_fs = GSEL(GUFS_SEL, SEL_UPL);
	} else /* which == 'g' */ {
		memcpy(&pcb->pcb_gsd, &usd.sd,
		    sizeof(struct segment_descriptor));
		if (l == curlwp) {
			update_descriptor(&curcpu()->ci_gdt[GUGS_SEL], &usd);
#if defined(__x86_64__) && defined(XENPV)
			setusergs(GSEL(GUGS_SEL, SEL_UPL));
#endif
		}
		tf->tf_gs = GSEL(GUGS_SEL, SEL_UPL);
	}
	kpreempt_enable();
	return 0;
}
601
/*
 * Set the %fs ('f') or %gs ('g') base for lwp 'l'.  On i386, and for
 * 32-bit processes on amd64, this goes through the descriptor-based
 * 32-bit path; native 64-bit lwps store the base in the PCB and, when
 * current, write the corresponding MSR directly.  'direct' means 'arg'
 * holds the base value itself rather than a user pointer to it.
 */
int
x86_set_sdbase(void *arg, char which, lwp_t *l, bool direct)
{
#ifdef i386
	return x86_set_sdbase32(arg, which, l, direct);
#else
	struct pcb *pcb;
	vaddr_t base;

	if (l->l_proc->p_flag & PK_32) {
		return x86_set_sdbase32(arg, which, l, direct);
	}

	if (direct) {
		base = (vaddr_t)arg;
	} else {
		int error = copyin(arg, &base, sizeof(base));
		if (error != 0)
			return error;
	}

	/* Kernel addresses must not be reachable through %fs/%gs. */
	if (base >= VM_MAXUSER_ADDRESS)
		return EINVAL;

	pcb = lwp_getpcb(l);

	/* Keep PCB and MSR consistent: no preemption between the two. */
	kpreempt_disable();
	switch(which) {
	case 'f':
		pcb->pcb_fs = base;
		if (l == curlwp)
			wrmsr(MSR_FSBASE, pcb->pcb_fs);
		break;
	case 'g':
		pcb->pcb_gs = base;
		if (l == curlwp)
			wrmsr(MSR_KERNELGSBASE, pcb->pcb_gs);
		break;
	default:
		panic("x86_set_sdbase");
	}
	kpreempt_enable();

	return 0;
#endif
}
648
649 static int
650 x86_get_sdbase32(void *arg, char which)
651 {
652 struct segment_descriptor *sd;
653 uint32_t base;
654
655 switch (which) {
656 case 'f':
657 sd = (void *)&curpcb->pcb_fsd;
658 break;
659 case 'g':
660 sd = (void *)&curpcb->pcb_gsd;
661 break;
662 default:
663 panic("x86_get_sdbase32");
664 }
665
666 base = sd->sd_hibase << 24 | sd->sd_lobase;
667 return copyout(&base, arg, sizeof(base));
668 }
669
670 int
671 x86_get_sdbase(void *arg, char which)
672 {
673 #ifdef i386
674 return x86_get_sdbase32(arg, which);
675 #else
676 vaddr_t base;
677 struct pcb *pcb;
678
679 if (curproc->p_flag & PK_32) {
680 return x86_get_sdbase32(arg, which);
681 }
682
683 pcb = lwp_getpcb(curlwp);
684
685 switch(which) {
686 case 'f':
687 base = pcb->pcb_fs;
688 break;
689 case 'g':
690 base = pcb->pcb_gs;
691 break;
692 default:
693 panic("x86_get_sdbase");
694 }
695
696 return copyout(&base, arg, sizeof(base));
697 #endif
698 }
699
700 int
701 sys_sysarch(struct lwp *l, const struct sys_sysarch_args *uap, register_t *retval)
702 {
703 /* {
704 syscallarg(int) op;
705 syscallarg(void *) parms;
706 } */
707 int error = 0;
708
709 switch(SCARG(uap, op)) {
710 case X86_IOPL:
711 error = x86_iopl(l, SCARG(uap, parms), retval);
712 break;
713
714 #ifdef i386
715 /*
716 * On amd64, this is done via netbsd32_sysarch.
717 */
718 case X86_GET_LDT:
719 error = x86_get_ldt(l, SCARG(uap, parms), retval);
720 break;
721
722 case X86_SET_LDT:
723 error = x86_set_ldt(l, SCARG(uap, parms), retval);
724 break;
725 #endif
726
727 case X86_GET_IOPERM:
728 error = x86_get_ioperm(l, SCARG(uap, parms), retval);
729 break;
730
731 case X86_SET_IOPERM:
732 error = x86_set_ioperm(l, SCARG(uap, parms), retval);
733 break;
734
735 case X86_GET_MTRR:
736 error = x86_get_mtrr(l, SCARG(uap, parms), retval);
737 break;
738 case X86_SET_MTRR:
739 error = x86_set_mtrr(l, SCARG(uap, parms), retval);
740 break;
741
742 case X86_SET_FSBASE:
743 error = x86_set_sdbase(SCARG(uap, parms), 'f', curlwp, false);
744 break;
745
746 case X86_SET_GSBASE:
747 error = x86_set_sdbase(SCARG(uap, parms), 'g', curlwp, false);
748 break;
749
750 case X86_GET_FSBASE:
751 error = x86_get_sdbase(SCARG(uap, parms), 'f');
752 break;
753
754 case X86_GET_GSBASE:
755 error = x86_get_sdbase(SCARG(uap, parms), 'g');
756 break;
757
758 default:
759 error = EINVAL;
760 break;
761 }
762 return error;
763 }
764
765 int
766 cpu_lwp_setprivate(lwp_t *l, void *addr)
767 {
768
769 #ifdef __x86_64__
770 if ((l->l_proc->p_flag & PK_32) == 0) {
771 return x86_set_sdbase(addr, 'f', l, true);
772 }
773 #endif
774 return x86_set_sdbase(addr, 'g', l, true);
775 }
776