/*	$NetBSD: sys_machdep.c,v 1.58 2022/08/20 23:49:31 riastradh Exp $	*/

/*
 * Copyright (c) 1998, 2007, 2009, 2017 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, by Andrew Doran, and by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
30 1.1 ad */ 31 1.1 ad 32 1.1 ad #include <sys/cdefs.h> 33 1.58 riastrad __KERNEL_RCSID(0, "$NetBSD: sys_machdep.c,v 1.58 2022/08/20 23:49:31 riastradh Exp $"); 34 1.1 ad 35 1.1 ad #include "opt_mtrr.h" 36 1.1 ad #include "opt_user_ldt.h" 37 1.27 christos #include "opt_compat_netbsd.h" 38 1.1 ad #include "opt_xen.h" 39 1.1 ad 40 1.1 ad #include <sys/param.h> 41 1.1 ad #include <sys/systm.h> 42 1.1 ad #include <sys/ioctl.h> 43 1.1 ad #include <sys/file.h> 44 1.1 ad #include <sys/time.h> 45 1.1 ad #include <sys/proc.h> 46 1.1 ad #include <sys/uio.h> 47 1.1 ad #include <sys/kernel.h> 48 1.1 ad #include <sys/buf.h> 49 1.1 ad #include <sys/signal.h> 50 1.1 ad #include <sys/malloc.h> 51 1.9 yamt #include <sys/kmem.h> 52 1.1 ad #include <sys/kauth.h> 53 1.17 ad #include <sys/cpu.h> 54 1.1 ad #include <sys/mount.h> 55 1.1 ad #include <sys/syscallargs.h> 56 1.1 ad 57 1.1 ad #include <uvm/uvm_extern.h> 58 1.1 ad 59 1.1 ad #include <machine/cpufunc.h> 60 1.1 ad #include <machine/gdt.h> 61 1.1 ad #include <machine/psl.h> 62 1.1 ad #include <machine/reg.h> 63 1.1 ad #include <machine/sysarch.h> 64 1.1 ad #include <machine/mtrr.h> 65 1.58 riastrad #include <machine/pmap_private.h> 66 1.1 ad 67 1.51 cherry #if defined(__x86_64__) || defined(XENPV) 68 1.40 maxv #undef IOPERM /* not implemented */ 69 1.1 ad #else 70 1.1 ad #define IOPERM 71 1.1 ad #endif 72 1.1 ad 73 1.51 cherry #if defined(XENPV) && defined(USER_LDT) 74 1.51 cherry #error "USER_LDT not supported on XENPV" 75 1.35 maxv #endif 76 1.35 maxv 77 1.1 ad extern struct vm_map *kernel_map; 78 1.1 ad 79 1.56 maxv static int x86_get_ioperm(struct lwp *, void *, register_t *); 80 1.56 maxv static int x86_set_ioperm(struct lwp *, void *, register_t *); 81 1.56 maxv static int x86_set_sdbase32(void *, char, lwp_t *, bool); 82 1.18 ad int x86_set_sdbase(void *, char, lwp_t *, bool); 83 1.56 maxv static int x86_get_sdbase32(void *, char); 84 1.18 ad int x86_get_sdbase(void *, char); 85 1.1 ad 86 1.56 maxv #ifdef i386 87 1.56 maxv 
static int 88 1.1 ad x86_get_ldt(struct lwp *l, void *args, register_t *retval) 89 1.1 ad { 90 1.2 dsl #ifndef USER_LDT 91 1.2 dsl return EINVAL; 92 1.2 dsl #else 93 1.2 dsl struct x86_get_ldt_args ua; 94 1.2 dsl union descriptor *cp; 95 1.2 dsl int error; 96 1.2 dsl 97 1.2 dsl if ((error = copyin(args, &ua, sizeof(ua))) != 0) 98 1.2 dsl return error; 99 1.2 dsl 100 1.54 maxv if (ua.num < 0 || ua.num > MAX_USERLDT_SLOTS) 101 1.2 dsl return EINVAL; 102 1.2 dsl 103 1.2 dsl cp = malloc(ua.num * sizeof(union descriptor), M_TEMP, M_WAITOK); 104 1.2 dsl if (cp == NULL) 105 1.2 dsl return ENOMEM; 106 1.2 dsl 107 1.2 dsl error = x86_get_ldt1(l, &ua, cp); 108 1.2 dsl *retval = ua.num; 109 1.2 dsl if (error == 0) 110 1.2 dsl error = copyout(cp, ua.desc, ua.num * sizeof(*cp)); 111 1.2 dsl 112 1.2 dsl free(cp, M_TEMP); 113 1.2 dsl return error; 114 1.2 dsl #endif 115 1.2 dsl } 116 1.56 maxv #endif 117 1.2 dsl 118 1.2 dsl int 119 1.2 dsl x86_get_ldt1(struct lwp *l, struct x86_get_ldt_args *ua, union descriptor *cp) 120 1.2 dsl { 121 1.2 dsl #ifndef USER_LDT 122 1.2 dsl return EINVAL; 123 1.2 dsl #else 124 1.1 ad int error; 125 1.1 ad struct proc *p = l->l_proc; 126 1.1 ad pmap_t pmap = p->p_vmspace->vm_map.pmap; 127 1.1 ad int nldt, num; 128 1.2 dsl union descriptor *lp; 129 1.1 ad 130 1.38 maxv #ifdef __x86_64__ 131 1.38 maxv const size_t min_ldt_size = LDT_SIZE; 132 1.38 maxv #else 133 1.38 maxv const size_t min_ldt_size = NLDT * sizeof(union descriptor); 134 1.38 maxv #endif 135 1.38 maxv 136 1.1 ad error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_LDT_GET, 137 1.1 ad NULL, NULL, NULL, NULL); 138 1.1 ad if (error) 139 1.38 maxv return error; 140 1.1 ad 141 1.54 maxv if (ua->start < 0 || ua->num < 0 || 142 1.54 maxv ua->start > MAX_USERLDT_SLOTS || ua->num > MAX_USERLDT_SLOTS || 143 1.54 maxv ua->start + ua->num > MAX_USERLDT_SLOTS) 144 1.38 maxv return EINVAL; 145 1.1 ad 146 1.38 maxv if (ua->start * sizeof(union descriptor) < min_ldt_size) 147 1.32 maxv return 
EINVAL; 148 1.32 maxv 149 1.17 ad mutex_enter(&cpu_lock); 150 1.1 ad 151 1.17 ad if (pmap->pm_ldt != NULL) { 152 1.54 maxv nldt = MAX_USERLDT_SIZE / sizeof(*lp); 153 1.1 ad lp = pmap->pm_ldt; 154 1.1 ad } else { 155 1.32 maxv #ifdef __x86_64__ 156 1.32 maxv nldt = LDT_SIZE / sizeof(*lp); 157 1.32 maxv #else 158 1.1 ad nldt = NLDT; 159 1.32 maxv #endif 160 1.32 maxv lp = (union descriptor *)ldtstore; 161 1.1 ad } 162 1.1 ad 163 1.2 dsl if (ua->start > nldt) { 164 1.17 ad mutex_exit(&cpu_lock); 165 1.38 maxv return EINVAL; 166 1.1 ad } 167 1.1 ad 168 1.2 dsl lp += ua->start; 169 1.49 riastrad num = uimin(ua->num, nldt - ua->start); 170 1.2 dsl ua->num = num; 171 1.1 ad 172 1.1 ad memcpy(cp, lp, num * sizeof(union descriptor)); 173 1.17 ad mutex_exit(&cpu_lock); 174 1.1 ad 175 1.2 dsl return 0; 176 1.2 dsl #endif 177 1.2 dsl } 178 1.2 dsl 179 1.56 maxv #ifdef i386 180 1.56 maxv static int 181 1.2 dsl x86_set_ldt(struct lwp *l, void *args, register_t *retval) 182 1.2 dsl { 183 1.2 dsl #ifndef USER_LDT 184 1.2 dsl return EINVAL; 185 1.2 dsl #else 186 1.2 dsl struct x86_set_ldt_args ua; 187 1.2 dsl union descriptor *descv; 188 1.2 dsl int error; 189 1.2 dsl 190 1.2 dsl if ((error = copyin(args, &ua, sizeof(ua))) != 0) 191 1.38 maxv return error; 192 1.2 dsl 193 1.54 maxv if (ua.num < 0 || ua.num > MAX_USERLDT_SLOTS) 194 1.2 dsl return EINVAL; 195 1.2 dsl 196 1.52 chs descv = malloc(sizeof (*descv) * ua.num, M_TEMP, M_WAITOK); 197 1.2 dsl error = copyin(ua.desc, descv, sizeof (*descv) * ua.num); 198 1.1 ad if (error == 0) 199 1.2 dsl error = x86_set_ldt1(l, &ua, descv); 200 1.2 dsl *retval = ua.start; 201 1.1 ad 202 1.2 dsl free(descv, M_TEMP); 203 1.2 dsl return error; 204 1.1 ad #endif 205 1.1 ad } 206 1.56 maxv #endif 207 1.1 ad 208 1.1 ad int 209 1.2 dsl x86_set_ldt1(struct lwp *l, struct x86_set_ldt_args *ua, 210 1.2 dsl union descriptor *descv) 211 1.1 ad { 212 1.2 dsl #ifndef USER_LDT 213 1.2 dsl return EINVAL; 214 1.2 dsl #else 215 1.17 ad int error, i, n, 
old_sel, new_sel; 216 1.1 ad struct proc *p = l->l_proc; 217 1.1 ad pmap_t pmap = p->p_vmspace->vm_map.pmap; 218 1.17 ad union descriptor *old_ldt, *new_ldt; 219 1.1 ad 220 1.32 maxv #ifdef __x86_64__ 221 1.32 maxv const size_t min_ldt_size = LDT_SIZE; 222 1.32 maxv #else 223 1.32 maxv const size_t min_ldt_size = NLDT * sizeof(union descriptor); 224 1.32 maxv #endif 225 1.32 maxv 226 1.1 ad error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_LDT_SET, 227 1.1 ad NULL, NULL, NULL, NULL); 228 1.1 ad if (error) 229 1.38 maxv return error; 230 1.1 ad 231 1.54 maxv if (ua->start < 0 || ua->num < 0 || 232 1.54 maxv ua->start > MAX_USERLDT_SLOTS || ua->num > MAX_USERLDT_SLOTS || 233 1.54 maxv ua->start + ua->num > MAX_USERLDT_SLOTS) 234 1.38 maxv return EINVAL; 235 1.1 ad 236 1.38 maxv if (ua->start * sizeof(union descriptor) < min_ldt_size) 237 1.32 maxv return EINVAL; 238 1.32 maxv 239 1.1 ad /* Check descriptors for access violations. */ 240 1.2 dsl for (i = 0; i < ua->num; i++) { 241 1.1 ad union descriptor *desc = &descv[i]; 242 1.1 ad 243 1.43 maxv #ifdef __x86_64__ 244 1.43 maxv if (desc->sd.sd_long != 0) 245 1.43 maxv return EACCES; 246 1.43 maxv #endif 247 1.43 maxv 248 1.1 ad switch (desc->sd.sd_type) { 249 1.1 ad case SDT_SYSNULL: 250 1.1 ad desc->sd.sd_p = 0; 251 1.1 ad break; 252 1.1 ad case SDT_MEMEC: 253 1.1 ad case SDT_MEMEAC: 254 1.1 ad case SDT_MEMERC: 255 1.1 ad case SDT_MEMERAC: 256 1.1 ad /* Must be "present" if executable and conforming. 
*/ 257 1.2 dsl if (desc->sd.sd_p == 0) 258 1.2 dsl return EACCES; 259 1.1 ad break; 260 1.1 ad case SDT_MEMRO: 261 1.1 ad case SDT_MEMROA: 262 1.1 ad case SDT_MEMRW: 263 1.1 ad case SDT_MEMRWA: 264 1.1 ad case SDT_MEMROD: 265 1.1 ad case SDT_MEMRODA: 266 1.1 ad case SDT_MEMRWD: 267 1.1 ad case SDT_MEMRWDA: 268 1.1 ad case SDT_MEME: 269 1.1 ad case SDT_MEMEA: 270 1.1 ad case SDT_MEMER: 271 1.1 ad case SDT_MEMERA: 272 1.1 ad break; 273 1.1 ad default: 274 1.38 maxv return EACCES; 275 1.1 ad } 276 1.1 ad 277 1.1 ad if (desc->sd.sd_p != 0) { 278 1.1 ad /* Only user (ring-3) descriptors may be present. */ 279 1.2 dsl if (desc->sd.sd_dpl != SEL_UPL) 280 1.2 dsl return EACCES; 281 1.1 ad } 282 1.1 ad } 283 1.1 ad 284 1.17 ad /* 285 1.54 maxv * Install selected changes. 286 1.17 ad */ 287 1.17 ad 288 1.17 ad /* Allocate a new LDT. */ 289 1.54 maxv new_ldt = (union descriptor *)uvm_km_alloc(kernel_map, 290 1.54 maxv MAX_USERLDT_SIZE, 0, UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_WAITVA); 291 1.54 maxv 292 1.54 maxv mutex_enter(&cpu_lock); 293 1.1 ad 294 1.17 ad /* Copy existing entries, if any. */ 295 1.17 ad if (pmap->pm_ldt != NULL) { 296 1.1 ad old_ldt = pmap->pm_ldt; 297 1.17 ad old_sel = pmap->pm_ldt_sel; 298 1.54 maxv memcpy(new_ldt, old_ldt, MAX_USERLDT_SIZE); 299 1.17 ad } else { 300 1.17 ad old_ldt = NULL; 301 1.17 ad old_sel = -1; 302 1.32 maxv memcpy(new_ldt, ldtstore, min_ldt_size); 303 1.17 ad } 304 1.1 ad 305 1.17 ad /* Apply requested changes. */ 306 1.17 ad for (i = 0, n = ua->start; i < ua->num; i++, n++) { 307 1.17 ad new_ldt[n] = descv[i]; 308 1.17 ad } 309 1.1 ad 310 1.17 ad /* Allocate LDT selector. */ 311 1.54 maxv new_sel = ldt_alloc(new_ldt, MAX_USERLDT_SIZE); 312 1.17 ad if (new_sel == -1) { 313 1.17 ad mutex_exit(&cpu_lock); 314 1.54 maxv uvm_km_free(kernel_map, (vaddr_t)new_ldt, MAX_USERLDT_SIZE, 315 1.1 ad UVM_KMF_WIRED); 316 1.17 ad return ENOMEM; 317 1.17 ad } 318 1.17 ad 319 1.17 ad /* All changes are now globally visible. Swap in the new LDT. 
*/ 320 1.54 maxv atomic_store_relaxed(&pmap->pm_ldt_sel, new_sel); 321 1.30 dholland /* membar_store_store for pmap_fork() to read these unlocked safely */ 322 1.30 dholland membar_producer(); 323 1.54 maxv atomic_store_relaxed(&pmap->pm_ldt, new_ldt); 324 1.17 ad 325 1.17 ad /* Switch existing users onto new LDT. */ 326 1.17 ad pmap_ldt_sync(pmap); 327 1.17 ad 328 1.17 ad /* Free existing LDT (if any). */ 329 1.17 ad if (old_ldt != NULL) { 330 1.17 ad ldt_free(old_sel); 331 1.30 dholland /* exit the mutex before free */ 332 1.30 dholland mutex_exit(&cpu_lock); 333 1.54 maxv uvm_km_free(kernel_map, (vaddr_t)old_ldt, MAX_USERLDT_SIZE, 334 1.1 ad UVM_KMF_WIRED); 335 1.30 dholland } else { 336 1.30 dholland mutex_exit(&cpu_lock); 337 1.17 ad } 338 1.2 dsl 339 1.17 ad return error; 340 1.1 ad #endif 341 1.1 ad } 342 1.1 ad 343 1.1 ad int 344 1.1 ad x86_iopl(struct lwp *l, void *args, register_t *retval) 345 1.1 ad { 346 1.1 ad int error; 347 1.1 ad struct x86_iopl_args ua; 348 1.51 cherry #ifdef XENPV 349 1.9 yamt int iopl; 350 1.1 ad #else 351 1.1 ad struct trapframe *tf = l->l_md.md_regs; 352 1.1 ad #endif 353 1.1 ad 354 1.1 ad error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPL, 355 1.1 ad NULL, NULL, NULL, NULL); 356 1.1 ad if (error) 357 1.38 maxv return error; 358 1.1 ad 359 1.1 ad if ((error = copyin(args, &ua, sizeof(ua))) != 0) 360 1.1 ad return error; 361 1.1 ad 362 1.51 cherry #ifdef XENPV 363 1.9 yamt if (ua.iopl) 364 1.9 yamt iopl = SEL_UPL; 365 1.9 yamt else 366 1.9 yamt iopl = SEL_KPL; 367 1.22 rmind 368 1.22 rmind { 369 1.22 rmind struct pcb *pcb; 370 1.22 rmind 371 1.22 rmind pcb = lwp_getpcb(l); 372 1.22 rmind pcb->pcb_iopl = iopl; 373 1.22 rmind 374 1.1 ad /* Force the change at ring 0. 
*/ 375 1.53 jdolecek struct physdev_set_iopl set_iopl; 376 1.53 jdolecek set_iopl.iopl = iopl; 377 1.53 jdolecek HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl); 378 1.22 rmind } 379 1.1 ad #elif defined(__x86_64__) 380 1.1 ad if (ua.iopl) 381 1.1 ad tf->tf_rflags |= PSL_IOPL; 382 1.1 ad else 383 1.1 ad tf->tf_rflags &= ~PSL_IOPL; 384 1.1 ad #else 385 1.1 ad if (ua.iopl) 386 1.1 ad tf->tf_eflags |= PSL_IOPL; 387 1.1 ad else 388 1.1 ad tf->tf_eflags &= ~PSL_IOPL; 389 1.1 ad #endif 390 1.1 ad 391 1.1 ad return 0; 392 1.1 ad } 393 1.1 ad 394 1.56 maxv static int 395 1.1 ad x86_get_ioperm(struct lwp *l, void *args, register_t *retval) 396 1.1 ad { 397 1.1 ad #ifdef IOPERM 398 1.1 ad int error; 399 1.22 rmind struct pcb *pcb = lwp_getpcb(l); 400 1.1 ad struct x86_get_ioperm_args ua; 401 1.9 yamt void *dummymap = NULL; 402 1.9 yamt void *iomap; 403 1.1 ad 404 1.1 ad error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPERM_GET, 405 1.1 ad NULL, NULL, NULL, NULL); 406 1.1 ad if (error) 407 1.38 maxv return error; 408 1.1 ad 409 1.1 ad if ((error = copyin(args, &ua, sizeof(ua))) != 0) 410 1.38 maxv return error; 411 1.1 ad 412 1.9 yamt iomap = pcb->pcb_iomap; 413 1.9 yamt if (iomap == NULL) { 414 1.9 yamt iomap = dummymap = kmem_alloc(IOMAPSIZE, KM_SLEEP); 415 1.9 yamt memset(dummymap, 0xff, IOMAPSIZE); 416 1.9 yamt } 417 1.9 yamt error = copyout(iomap, ua.iomap, IOMAPSIZE); 418 1.9 yamt if (dummymap != NULL) { 419 1.9 yamt kmem_free(dummymap, IOMAPSIZE); 420 1.9 yamt } 421 1.9 yamt return error; 422 1.1 ad #else 423 1.1 ad return EINVAL; 424 1.1 ad #endif 425 1.1 ad } 426 1.1 ad 427 1.56 maxv static int 428 1.1 ad x86_set_ioperm(struct lwp *l, void *args, register_t *retval) 429 1.1 ad { 430 1.1 ad #ifdef IOPERM 431 1.9 yamt struct cpu_info *ci; 432 1.1 ad int error; 433 1.22 rmind struct pcb *pcb = lwp_getpcb(l); 434 1.1 ad struct x86_set_ioperm_args ua; 435 1.9 yamt void *new; 436 1.9 yamt void *old; 437 1.1 ad 438 1.57 msaitoh error = 
kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPERM_SET, 439 1.1 ad NULL, NULL, NULL, NULL); 440 1.1 ad if (error) 441 1.38 maxv return error; 442 1.1 ad 443 1.1 ad if ((error = copyin(args, &ua, sizeof(ua))) != 0) 444 1.38 maxv return error; 445 1.1 ad 446 1.9 yamt new = kmem_alloc(IOMAPSIZE, KM_SLEEP); 447 1.9 yamt error = copyin(ua.iomap, new, IOMAPSIZE); 448 1.9 yamt if (error) { 449 1.9 yamt kmem_free(new, IOMAPSIZE); 450 1.9 yamt return error; 451 1.9 yamt } 452 1.9 yamt old = pcb->pcb_iomap; 453 1.9 yamt pcb->pcb_iomap = new; 454 1.9 yamt if (old != NULL) { 455 1.9 yamt kmem_free(old, IOMAPSIZE); 456 1.9 yamt } 457 1.9 yamt 458 1.46 maxv CTASSERT(offsetof(struct cpu_tss, iomap) - 459 1.46 maxv offsetof(struct cpu_tss, tss) == IOMAP_VALIDOFF); 460 1.46 maxv 461 1.13 ad kpreempt_disable(); 462 1.9 yamt ci = curcpu(); 463 1.45 maxv memcpy(ci->ci_tss->iomap, pcb->pcb_iomap, IOMAPSIZE); 464 1.46 maxv ci->ci_tss->tss.tss_iobase = IOMAP_VALIDOFF << 16; 465 1.13 ad kpreempt_enable(); 466 1.9 yamt 467 1.9 yamt return error; 468 1.1 ad #else 469 1.1 ad return EINVAL; 470 1.1 ad #endif 471 1.1 ad } 472 1.1 ad 473 1.56 maxv static int 474 1.1 ad x86_get_mtrr(struct lwp *l, void *args, register_t *retval) 475 1.1 ad { 476 1.1 ad #ifdef MTRR 477 1.1 ad struct x86_get_mtrr_args ua; 478 1.1 ad int error, n; 479 1.1 ad 480 1.1 ad if (mtrr_funcs == NULL) 481 1.1 ad return ENOSYS; 482 1.1 ad 483 1.57 msaitoh error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_GET, 484 1.1 ad NULL, NULL, NULL, NULL); 485 1.1 ad if (error) 486 1.38 maxv return error; 487 1.1 ad 488 1.1 ad error = copyin(args, &ua, sizeof ua); 489 1.1 ad if (error != 0) 490 1.1 ad return error; 491 1.1 ad 492 1.1 ad error = copyin(ua.n, &n, sizeof n); 493 1.1 ad if (error != 0) 494 1.1 ad return error; 495 1.1 ad 496 1.12 ad KERNEL_LOCK(1, NULL); 497 1.1 ad error = mtrr_get(ua.mtrrp, &n, l->l_proc, MTRR_GETSET_USER); 498 1.12 ad KERNEL_UNLOCK_ONE(NULL); 499 1.1 ad 500 1.1 ad copyout(&n, ua.n, sizeof 
(int)); 501 1.1 ad 502 1.1 ad return error; 503 1.1 ad #else 504 1.1 ad return EINVAL; 505 1.1 ad #endif 506 1.1 ad } 507 1.1 ad 508 1.56 maxv static int 509 1.1 ad x86_set_mtrr(struct lwp *l, void *args, register_t *retval) 510 1.1 ad { 511 1.1 ad #ifdef MTRR 512 1.1 ad int error, n; 513 1.1 ad struct x86_set_mtrr_args ua; 514 1.1 ad 515 1.1 ad if (mtrr_funcs == NULL) 516 1.1 ad return ENOSYS; 517 1.1 ad 518 1.57 msaitoh error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_SET, 519 1.1 ad NULL, NULL, NULL, NULL); 520 1.1 ad if (error) 521 1.38 maxv return error; 522 1.1 ad 523 1.1 ad error = copyin(args, &ua, sizeof ua); 524 1.1 ad if (error != 0) 525 1.1 ad return error; 526 1.1 ad 527 1.1 ad error = copyin(ua.n, &n, sizeof n); 528 1.1 ad if (error != 0) 529 1.1 ad return error; 530 1.1 ad 531 1.12 ad KERNEL_LOCK(1, NULL); 532 1.1 ad error = mtrr_set(ua.mtrrp, &n, l->l_proc, MTRR_GETSET_USER); 533 1.1 ad if (n != 0) 534 1.1 ad mtrr_commit(); 535 1.12 ad KERNEL_UNLOCK_ONE(NULL); 536 1.1 ad 537 1.1 ad copyout(&n, ua.n, sizeof n); 538 1.1 ad 539 1.1 ad return error; 540 1.1 ad #else 541 1.1 ad return EINVAL; 542 1.1 ad #endif 543 1.1 ad } 544 1.1 ad 545 1.24 chs #ifdef __x86_64__ 546 1.24 chs #define pcb_fsd pcb_fs 547 1.24 chs #define pcb_gsd pcb_gs 548 1.24 chs #define segment_descriptor mem_segment_descriptor 549 1.24 chs #endif 550 1.24 chs 551 1.56 maxv static int 552 1.24 chs x86_set_sdbase32(void *arg, char which, lwp_t *l, bool direct) 553 1.5 ad { 554 1.24 chs struct trapframe *tf = l->l_md.md_regs; 555 1.24 chs union descriptor usd; 556 1.18 ad struct pcb *pcb; 557 1.24 chs uint32_t base; 558 1.6 ad int error; 559 1.5 ad 560 1.18 ad if (direct) { 561 1.18 ad base = (vaddr_t)arg; 562 1.18 ad } else { 563 1.18 ad error = copyin(arg, &base, sizeof(base)); 564 1.18 ad if (error != 0) 565 1.18 ad return error; 566 1.18 ad } 567 1.5 ad 568 1.24 chs memset(&usd, 0, sizeof(usd)); 569 1.19 bouyer usd.sd.sd_lobase = base & 0xffffff; 570 1.19 bouyer 
usd.sd.sd_hibase = (base >> 24) & 0xff; 571 1.19 bouyer usd.sd.sd_lolimit = 0xffff; 572 1.19 bouyer usd.sd.sd_hilimit = 0xf; 573 1.19 bouyer usd.sd.sd_type = SDT_MEMRWA; 574 1.19 bouyer usd.sd.sd_dpl = SEL_UPL; 575 1.19 bouyer usd.sd.sd_p = 1; 576 1.19 bouyer usd.sd.sd_def32 = 1; 577 1.19 bouyer usd.sd.sd_gran = 1; 578 1.6 ad 579 1.24 chs pcb = lwp_getpcb(l); 580 1.13 ad kpreempt_disable(); 581 1.6 ad if (which == 'f') { 582 1.19 bouyer memcpy(&pcb->pcb_fsd, &usd.sd, 583 1.19 bouyer sizeof(struct segment_descriptor)); 584 1.18 ad if (l == curlwp) { 585 1.19 bouyer update_descriptor(&curcpu()->ci_gdt[GUFS_SEL], &usd); 586 1.18 ad } 587 1.24 chs tf->tf_fs = GSEL(GUFS_SEL, SEL_UPL); 588 1.6 ad } else /* which == 'g' */ { 589 1.19 bouyer memcpy(&pcb->pcb_gsd, &usd.sd, 590 1.19 bouyer sizeof(struct segment_descriptor)); 591 1.18 ad if (l == curlwp) { 592 1.19 bouyer update_descriptor(&curcpu()->ci_gdt[GUGS_SEL], &usd); 593 1.51 cherry #if defined(__x86_64__) && defined(XENPV) 594 1.24 chs setusergs(GSEL(GUGS_SEL, SEL_UPL)); 595 1.24 chs #endif 596 1.18 ad } 597 1.24 chs tf->tf_gs = GSEL(GUGS_SEL, SEL_UPL); 598 1.6 ad } 599 1.13 ad kpreempt_enable(); 600 1.24 chs return 0; 601 1.24 chs } 602 1.5 ad 603 1.24 chs int 604 1.24 chs x86_set_sdbase(void *arg, char which, lwp_t *l, bool direct) 605 1.24 chs { 606 1.24 chs #ifdef i386 607 1.24 chs return x86_set_sdbase32(arg, which, l, direct); 608 1.5 ad #else 609 1.24 chs struct pcb *pcb; 610 1.24 chs vaddr_t base; 611 1.24 chs 612 1.24 chs if (l->l_proc->p_flag & PK_32) { 613 1.24 chs return x86_set_sdbase32(arg, which, l, direct); 614 1.24 chs } 615 1.24 chs 616 1.24 chs if (direct) { 617 1.24 chs base = (vaddr_t)arg; 618 1.24 chs } else { 619 1.29 christos int error = copyin(arg, &base, sizeof(base)); 620 1.24 chs if (error != 0) 621 1.24 chs return error; 622 1.24 chs } 623 1.24 chs 624 1.24 chs if (base >= VM_MAXUSER_ADDRESS) 625 1.24 chs return EINVAL; 626 1.24 chs 627 1.24 chs pcb = lwp_getpcb(l); 628 1.24 chs 629 1.24 
chs kpreempt_disable(); 630 1.57 msaitoh switch (which) { 631 1.24 chs case 'f': 632 1.24 chs pcb->pcb_fs = base; 633 1.24 chs if (l == curlwp) 634 1.24 chs wrmsr(MSR_FSBASE, pcb->pcb_fs); 635 1.24 chs break; 636 1.24 chs case 'g': 637 1.24 chs pcb->pcb_gs = base; 638 1.24 chs if (l == curlwp) 639 1.24 chs wrmsr(MSR_KERNELGSBASE, pcb->pcb_gs); 640 1.24 chs break; 641 1.24 chs default: 642 1.28 dholland panic("x86_set_sdbase"); 643 1.24 chs } 644 1.24 chs kpreempt_enable(); 645 1.24 chs 646 1.29 christos return 0; 647 1.5 ad #endif 648 1.5 ad } 649 1.5 ad 650 1.56 maxv static int 651 1.24 chs x86_get_sdbase32(void *arg, char which) 652 1.5 ad { 653 1.5 ad struct segment_descriptor *sd; 654 1.24 chs uint32_t base; 655 1.5 ad 656 1.5 ad switch (which) { 657 1.5 ad case 'f': 658 1.24 chs sd = (void *)&curpcb->pcb_fsd; 659 1.5 ad break; 660 1.5 ad case 'g': 661 1.24 chs sd = (void *)&curpcb->pcb_gsd; 662 1.5 ad break; 663 1.5 ad default: 664 1.28 dholland panic("x86_get_sdbase32"); 665 1.5 ad } 666 1.5 ad 667 1.5 ad base = sd->sd_hibase << 24 | sd->sd_lobase; 668 1.21 yamt return copyout(&base, arg, sizeof(base)); 669 1.24 chs } 670 1.24 chs 671 1.24 chs int 672 1.24 chs x86_get_sdbase(void *arg, char which) 673 1.24 chs { 674 1.24 chs #ifdef i386 675 1.24 chs return x86_get_sdbase32(arg, which); 676 1.5 ad #else 677 1.24 chs vaddr_t base; 678 1.24 chs struct pcb *pcb; 679 1.24 chs 680 1.24 chs if (curproc->p_flag & PK_32) { 681 1.24 chs return x86_get_sdbase32(arg, which); 682 1.24 chs } 683 1.24 chs 684 1.24 chs pcb = lwp_getpcb(curlwp); 685 1.24 chs 686 1.57 msaitoh switch (which) { 687 1.24 chs case 'f': 688 1.24 chs base = pcb->pcb_fs; 689 1.24 chs break; 690 1.24 chs case 'g': 691 1.24 chs base = pcb->pcb_gs; 692 1.24 chs break; 693 1.24 chs default: 694 1.24 chs panic("x86_get_sdbase"); 695 1.24 chs } 696 1.24 chs 697 1.24 chs return copyout(&base, arg, sizeof(base)); 698 1.5 ad #endif 699 1.5 ad } 700 1.5 ad 701 1.5 ad int 702 1.57 msaitoh sys_sysarch(struct lwp 
*l, const struct sys_sysarch_args *uap, 703 1.57 msaitoh register_t *retval) 704 1.1 ad { 705 1.8 dsl /* { 706 1.1 ad syscallarg(int) op; 707 1.1 ad syscallarg(void *) parms; 708 1.8 dsl } */ 709 1.1 ad int error = 0; 710 1.1 ad 711 1.57 msaitoh switch (SCARG(uap, op)) { 712 1.57 msaitoh case X86_IOPL: 713 1.1 ad error = x86_iopl(l, SCARG(uap, parms), retval); 714 1.1 ad break; 715 1.1 ad 716 1.32 maxv #ifdef i386 717 1.32 maxv /* 718 1.32 maxv * On amd64, this is done via netbsd32_sysarch. 719 1.32 maxv */ 720 1.57 msaitoh case X86_GET_LDT: 721 1.1 ad error = x86_get_ldt(l, SCARG(uap, parms), retval); 722 1.1 ad break; 723 1.1 ad 724 1.57 msaitoh case X86_SET_LDT: 725 1.1 ad error = x86_set_ldt(l, SCARG(uap, parms), retval); 726 1.1 ad break; 727 1.32 maxv #endif 728 1.1 ad 729 1.57 msaitoh case X86_GET_IOPERM: 730 1.1 ad error = x86_get_ioperm(l, SCARG(uap, parms), retval); 731 1.1 ad break; 732 1.1 ad 733 1.57 msaitoh case X86_SET_IOPERM: 734 1.1 ad error = x86_set_ioperm(l, SCARG(uap, parms), retval); 735 1.1 ad break; 736 1.1 ad 737 1.1 ad case X86_GET_MTRR: 738 1.1 ad error = x86_get_mtrr(l, SCARG(uap, parms), retval); 739 1.1 ad break; 740 1.1 ad case X86_SET_MTRR: 741 1.1 ad error = x86_set_mtrr(l, SCARG(uap, parms), retval); 742 1.1 ad break; 743 1.1 ad 744 1.5 ad case X86_SET_FSBASE: 745 1.18 ad error = x86_set_sdbase(SCARG(uap, parms), 'f', curlwp, false); 746 1.5 ad break; 747 1.5 ad 748 1.5 ad case X86_SET_GSBASE: 749 1.18 ad error = x86_set_sdbase(SCARG(uap, parms), 'g', curlwp, false); 750 1.5 ad break; 751 1.5 ad 752 1.5 ad case X86_GET_FSBASE: 753 1.5 ad error = x86_get_sdbase(SCARG(uap, parms), 'f'); 754 1.5 ad break; 755 1.5 ad 756 1.5 ad case X86_GET_GSBASE: 757 1.5 ad error = x86_get_sdbase(SCARG(uap, parms), 'g'); 758 1.5 ad break; 759 1.5 ad 760 1.1 ad default: 761 1.1 ad error = EINVAL; 762 1.1 ad break; 763 1.1 ad } 764 1.38 maxv return error; 765 1.1 ad } 766 1.18 ad 767 1.18 ad int 768 1.18 ad cpu_lwp_setprivate(lwp_t *l, void *addr) 769 
1.18 ad { 770 1.18 ad 771 1.24 chs #ifdef __x86_64__ 772 1.24 chs if ((l->l_proc->p_flag & PK_32) == 0) { 773 1.24 chs return x86_set_sdbase(addr, 'f', l, true); 774 1.24 chs } 775 1.57 msaitoh #endif 776 1.18 ad return x86_set_sdbase(addr, 'g', l, true); 777 1.18 ad } 778