1 /* $NetBSD: machdep.c,v 1.343 2025/04/24 01:52:38 riastradh Exp $ */ 2 3 /*- 4 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 9 * NASA Ames Research Center. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 /* 34 * Copyright (c) 1992, 1993 35 * The Regents of the University of California. All rights reserved. 36 * 37 * This software was developed by the Computer Systems Engineering group 38 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 39 * contributed to Berkeley. 
40 * 41 * All advertising materials mentioning features or use of this software 42 * must display the following acknowledgement: 43 * This product includes software developed by the University of 44 * California, Lawrence Berkeley Laboratory. 45 * 46 * Redistribution and use in source and binary forms, with or without 47 * modification, are permitted provided that the following conditions 48 * are met: 49 * 1. Redistributions of source code must retain the above copyright 50 * notice, this list of conditions and the following disclaimer. 51 * 2. Redistributions in binary form must reproduce the above copyright 52 * notice, this list of conditions and the following disclaimer in the 53 * documentation and/or other materials provided with the distribution. 54 * 3. Neither the name of the University nor the names of its contributors 55 * may be used to endorse or promote products derived from this software 56 * without specific prior written permission. 57 * 58 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 59 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 60 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 61 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 62 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 63 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 64 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 65 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 66 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 67 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 68 * SUCH DAMAGE. 
69 * 70 * @(#)machdep.c 8.6 (Berkeley) 1/14/94 71 */ 72 73 #include <sys/cdefs.h> 74 __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.343 2025/04/24 01:52:38 riastradh Exp $"); 75 76 #include "opt_compat_netbsd.h" 77 #include "opt_compat_sunos.h" 78 #include "opt_sparc_arch.h" 79 #include "opt_modular.h" 80 #include "opt_multiprocessor.h" 81 82 #include <sys/param.h> 83 #include <sys/signal.h> 84 #include <sys/signalvar.h> 85 #include <sys/proc.h> 86 #include <sys/vmem.h> 87 #include <sys/cpu.h> 88 #include <sys/buf.h> 89 #include <sys/device.h> 90 #include <sys/reboot.h> 91 #include <sys/systm.h> 92 #include <sys/kernel.h> 93 #include <sys/conf.h> 94 #include <sys/file.h> 95 #include <sys/kmem.h> 96 #include <sys/mbuf.h> 97 #include <sys/mount.h> 98 #include <sys/msgbuf.h> 99 #include <sys/syscallargs.h> 100 #include <sys/exec.h> 101 #include <sys/exec_aout.h> 102 #include <sys/ucontext.h> 103 #include <sys/module.h> 104 #include <sys/mutex.h> 105 #include <sys/ras.h> 106 107 #include <dev/mm.h> 108 109 #include <uvm/uvm.h> /* we use uvm.kernel_object */ 110 111 #include <sys/sysctl.h> 112 113 #ifdef COMPAT_13 114 #include <compat/sys/signal.h> 115 #include <compat/sys/signalvar.h> 116 #endif 117 118 #define _SPARC_BUS_DMA_PRIVATE 119 #include <machine/autoconf.h> 120 #include <sys/bus.h> 121 #include <machine/frame.h> 122 #include <machine/cpu.h> 123 #include <machine/pcb.h> 124 #include <machine/pmap.h> 125 #include <machine/oldmon.h> 126 #include <machine/bsd_openprom.h> 127 #include <machine/bootinfo.h> 128 #include <machine/eeprom.h> 129 130 #include <sparc/sparc/asm.h> 131 #include <sparc/sparc/cache.h> 132 #include <sparc/sparc/vaddrs.h> 133 #include <sparc/sparc/cpuvar.h> 134 135 #include "fb.h" 136 #include "power.h" 137 138 #if NPOWER > 0 139 #include <sparc/dev/power.h> 140 #endif 141 142 kmutex_t fpu_mtx; 143 144 /* 145 * dvmamap24 is used to manage DVMA memory for devices that have the upper 146 * eight address bits wired to all-ones (e.g. 
 `le' and `ie')
 */
vmem_t *dvmamap24;

void	dumpsys(void);
void	stackdump(void);

/*
 * Machine-dependent startup code.
 *
 * Re-homes the message buffer, prints the banner, tunes buffer-cache
 * limits on MMUs with few PMEGs (sun4/sun4c), and creates the 24-bit
 * DVMA arena used by `le'/`ie'-style devices.
 */
void
cpu_startup(void)
{
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;
#endif
	struct pcb *pcb;
	vsize_t size;
	paddr_t pa;
	char pbuf[9];		/* output buffer for format_bytes() */

#ifdef DEBUG
	/* Silence pmap tracing while we rearrange early mappings. */
	pmapdebug = 0;
#endif

	/* XXX */
	pcb = lwp_getpcb(&lwp0);
	if (pcb && pcb->pcb_psr == 0)
		pcb->pcb_psr = getpsr();

	/*
	 * Re-map the message buffer from its temporary address
	 * at KERNBASE to MSGBUF_VA.
	 */
#if !defined(MSGBUFSIZE) || MSGBUFSIZE <= 8192
	/*
	 * We use the free page(s) in front of the kernel load address.
	 */
	size = 8192;

	/* Get physical address of the message buffer */
	pmap_extract(pmap_kernel(), (vaddr_t)KERNBASE, &pa);

	/* Invalidate the current mapping at KERNBASE. */
	pmap_kremove((vaddr_t)KERNBASE, size);
	pmap_update(pmap_kernel());

	/* Enter the new mapping */
	pmap_map(MSGBUF_VA, pa, pa + size, VM_PROT_READ|VM_PROT_WRITE);

	/*
	 * Re-initialize the message buffer.
	 */
	initmsgbuf((void *)MSGBUF_VA, size);
#else /* MSGBUFSIZE */
	{
	struct pglist mlist;
	struct vm_page *m;
	vaddr_t va0, va;

	/*
	 * We use the free page(s) in front of the kernel load address,
	 * and then allocate some more.
	 */
	size = round_page(MSGBUFSIZE);

	/* Get physical address of first 8192 chunk of the message buffer */
	pmap_extract(pmap_kernel(), (vaddr_t)KERNBASE, &pa);

	/* Allocate additional physical pages */
	if (uvm_pglistalloc(size - 8192,
			    vm_first_phys, vm_first_phys+vm_num_phys,
			    0, 0, &mlist, 1, 0) != 0)
		panic("cpu_start: no memory for message buffer");

	/* Invalidate the current mapping at KERNBASE. */
	pmap_kremove((vaddr_t)KERNBASE, 8192);
	pmap_update(pmap_kernel());

	/* Allocate virtual memory space */
	va0 = va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY);
	if (va == 0)
		panic("cpu_start: no virtual memory for message buffer");

	/* Map first 8192: the pages that were at KERNBASE */
	while (va < va0 + 8192) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	/* Map the rest of the pages */
	TAILQ_FOREACH(m, &mlist ,pageq.queue) {
		if (va >= va0 + size)
			panic("cpu_start: memory buffer size botch");
		pa = VM_PAGE_TO_PHYS(m);
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
		va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	/*
	 * Re-initialize the message buffer.
	 */
	initmsgbuf((void *)va0, size);
	}
#endif /* MSGBUFSIZE */

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	/*identifycpu();*/
	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * Tune buffer cache variables based on the capabilities of the MMU
	 * to cut down on VM space allocated for the buffer caches that
	 * would lead to MMU resource shortage.
	 */
	if (CPU_ISSUN4 || CPU_ISSUN4C) {
		/* Clip UBC windows */
		if (cpuinfo.mmu_nsegment <= 128) {
			/*
			 * ubc_nwins and ubc_winshift control the amount
			 * of VM used by the UBC. Normally, this VM is
			 * not wired in the kernel map, hence non-locked
			 * `PMEGs' (see pmap.c) are used for this space.
			 * We still limit possible fragmentation to prevent
			 * the occasional wired UBC mappings from tying up
			 * too many PMEGs.
			 *
			 * Set the upper limit to 9 segments (default
			 * winshift = 13).
			 */
			ubc_nwins = 512;

			/*
			 * buf_setvalimit() allocates a submap for buffer
			 * allocation. We use it to limit the number of locked
			 * `PMEGs' (see pmap.c) dedicated to the buffer cache.
			 *
			 * Set the upper limit to 12 segments (3MB), which
			 * corresponds approximately to the size of the
			 * traditional 5% rule (assuming a maximum 64MB of
			 * memory in small sun4c machines).
			 */
			buf_setvalimit(12 * 256*1024);
		}

		/* Clip max data & stack to avoid running into the MMU hole */
#if MAXDSIZ > 256*1024*1024
		maxdmap = 256*1024*1024;
#endif
#if MAXSSIZ > 256*1024*1024
		maxsmap = 256*1024*1024;
#endif
	}

	if (CPU_ISSUN4 || CPU_ISSUN4C) {
		/*
		 * Allocate DMA map for 24-bit devices (le, ie)
		 * [dvma_base - dvma_end] is for VME devices..
		 */
		dvmamap24 = vmem_create("dvmamap24",
					D24_DVMA_BASE,
					D24_DVMA_END - D24_DVMA_BASE,
					PAGE_SIZE,	/* quantum */
					NULL,		/* importfn */
					NULL,		/* releasefn */
					NULL,		/* source */
					0,		/* qcache_max */
					VM_SLEEP,
					IPL_VM);
		if (dvmamap24 == NULL)
			panic("unable to allocate DVMA map");
	}

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false)));
	printf("avail memory = %s\n", pbuf);

	pmap_redzone();

	mutex_init(&fpu_mtx, MUTEX_DEFAULT, IPL_SCHED);
}

/*
 * Set up registers on exec.
340 * 341 * XXX this entire mess must be fixed 342 */ 343 /* ARGSUSED */ 344 void 345 setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack) 346 { 347 struct trapframe *tf = l->l_md.md_tf; 348 struct fpstate *fs; 349 int psr; 350 351 /* Don't allow unaligned data references by default */ 352 l->l_proc->p_md.md_flags &= ~MDP_FIXALIGN; 353 354 /* 355 * Set the registers to 0 except for: 356 * %o6: stack pointer, built in exec()) 357 * %psr: (retain CWP and PSR_S bits) 358 * %g1: p->p_psstrp (used by crt0) 359 * %pc,%npc: entry point of program 360 */ 361 psr = tf->tf_psr & (PSR_S | PSR_CWP); 362 if ((fs = l->l_md.md_fpstate) != NULL) { 363 struct cpu_info *cpi; 364 int s; 365 /* 366 * We hold an FPU state. If we own *some* FPU chip state 367 * we must get rid of it, and the only way to do that is 368 * to save it. In any case, get rid of our FPU state. 369 */ 370 FPU_LOCK(s); 371 if ((cpi = l->l_md.md_fpu) != NULL) { 372 if (cpi->fplwp != l) 373 panic("FPU(%d): fplwp %p", 374 cpi->ci_cpuid, cpi->fplwp); 375 if (l == cpuinfo.fplwp) 376 savefpstate(fs); 377 #if defined(MULTIPROCESSOR) 378 else 379 XCALL1(ipi_savefpstate, fs, 1 << cpi->ci_cpuid); 380 #endif 381 cpi->fplwp = NULL; 382 } 383 l->l_md.md_fpu = NULL; 384 FPU_UNLOCK(s); 385 kmem_free(fs, sizeof(struct fpstate)); 386 l->l_md.md_fpstate = NULL; 387 } 388 memset((void *)tf, 0, sizeof *tf); 389 tf->tf_psr = psr; 390 tf->tf_global[1] = l->l_proc->p_psstrp; 391 tf->tf_pc = pack->ep_entry & ~3; 392 tf->tf_npc = tf->tf_pc + 4; 393 stack -= sizeof(struct rwindow); 394 tf->tf_out[6] = stack; 395 } 396 397 #ifdef DEBUG 398 int sigdebug = 0; 399 int sigpid = 0; 400 #define SDB_FOLLOW 0x01 401 #define SDB_KSTACK 0x02 402 #define SDB_FPSTATE 0x04 403 #endif 404 405 /* 406 * machine dependent system variables. 
407 */ 408 static int 409 sysctl_machdep_boot(SYSCTLFN_ARGS) 410 { 411 struct sysctlnode node = *rnode; 412 struct btinfo_kernelfile *bi_file; 413 const char *cp; 414 415 416 switch (node.sysctl_num) { 417 case CPU_BOOTED_KERNEL: 418 if ((bi_file = lookup_bootinfo(BTINFO_KERNELFILE)) != NULL) 419 cp = bi_file->name; 420 else 421 cp = prom_getbootfile(); 422 if (cp != NULL && cp[0] == '\0') 423 cp = "netbsd"; 424 break; 425 case CPU_BOOTED_DEVICE: 426 cp = prom_getbootpath(); 427 break; 428 case CPU_BOOT_ARGS: 429 cp = prom_getbootargs(); 430 break; 431 default: 432 return (EINVAL); 433 } 434 435 if (cp == NULL || cp[0] == '\0') 436 return (ENOENT); 437 438 node.sysctl_data = __UNCONST(cp); 439 node.sysctl_size = strlen(cp) + 1; 440 return (sysctl_lookup(SYSCTLFN_CALL(&node))); 441 } 442 443 SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup") 444 { 445 446 sysctl_createv(clog, 0, NULL, NULL, 447 CTLFLAG_PERMANENT, 448 CTLTYPE_NODE, "machdep", NULL, 449 NULL, 0, NULL, 0, 450 CTL_MACHDEP, CTL_EOL); 451 452 sysctl_createv(clog, 0, NULL, NULL, 453 CTLFLAG_PERMANENT, 454 CTLTYPE_STRING, "booted_kernel", NULL, 455 sysctl_machdep_boot, 0, NULL, 0, 456 CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL); 457 sysctl_createv(clog, 0, NULL, NULL, 458 CTLFLAG_PERMANENT, 459 CTLTYPE_STRING, "booted_device", NULL, 460 sysctl_machdep_boot, 0, NULL, 0, 461 CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL); 462 sysctl_createv(clog, 0, NULL, NULL, 463 CTLFLAG_PERMANENT, 464 CTLTYPE_STRING, "boot_args", NULL, 465 sysctl_machdep_boot, 0, NULL, 0, 466 CTL_MACHDEP, CPU_BOOT_ARGS, CTL_EOL); 467 sysctl_createv(clog, 0, NULL, NULL, 468 CTLFLAG_PERMANENT, 469 CTLTYPE_INT, "cpu_arch", NULL, 470 NULL, 0, &cpu_arch, 0, 471 CTL_MACHDEP, CPU_ARCH, CTL_EOL); 472 } 473 474 /* 475 * Send an interrupt to process. 
 */
/* On-stack layout of a delivered signal: siginfo followed by ucontext. */
struct sigframe {
	siginfo_t sf_si;
	ucontext_t sf_uc;
};

/*
 * Deliver signal `ksi' to the current LWP: build a sigframe on the
 * user stack (or the alternate signal stack), then point the trapframe
 * at the handler/trampoline.  Called with p->p_lock held.
 */
void
sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct sigacts *ps = p->p_sigacts;
	struct trapframe *tf;
	ucontext_t uc;
	struct sigframe *fp;
	u_int onstack, oldsp, newsp;
	u_int catcher;
	int sig, error;
	size_t ucsz;

	sig = ksi->ksi_signo;
	catcher = (u_int)SIGACTION(p, sig).sa_handler;

	tf = l->l_md.md_tf;
	oldsp = tf->tf_out[6];

	/*
	 * Compute new user stack addresses, subtract off
	 * one signal frame, and align.
	 */
	onstack =
	    (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;

	if (onstack)
		fp = (struct sigframe *)
			((char *)l->l_sigstk.ss_sp +
			l->l_sigstk.ss_size);
	else
		fp = (struct sigframe *)oldsp;

	fp = (struct sigframe *)((int)(fp - 1) & ~STACK_ALIGNBYTES);

#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig: %s[%d] sig %d newusp %p si %p uc %p\n",
		    p->p_comm, p->p_pid, sig, fp, &fp->sf_si, &fp->sf_uc);
#endif

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	uc.uc_flags = _UC_SIGMASK |
		((l->l_sigstk.ss_flags & SS_ONSTACK)
			? _UC_SETSTACK : _UC_CLRSTACK);
	uc.uc_sigmask = *mask;
	uc.uc_link = l->l_ctxlink;
	memset(&uc.uc_stack, 0, sizeof(uc.uc_stack));

	/*
	 * Now copy the stack contents out to user space.
	 * We need to make sure that when we start the signal handler,
	 * its %i6 (%fp), which is loaded from the newly allocated stack area,
	 * joins seamlessly with the frame it was in when the signal occurred,
	 * so that the debugger and _longjmp code can back up through it.
	 * Since we're calling the handler directly, allocate a full size
	 * C stack frame.
	 */
	sendsig_reset(l, sig);
	/* Drop p_lock around the copyouts; they may fault and sleep. */
	mutex_exit(p->p_lock);
	newsp = (int)fp - sizeof(struct frame);
	cpu_getmcontext(l, &uc.uc_mcontext, &uc.uc_flags);
	/* Only copy out the part of the ucontext before the padding. */
	ucsz = (int)&uc.__uc_pad - (int)&uc;
	error = (copyout(&ksi->ksi_info, &fp->sf_si, sizeof ksi->ksi_info) ||
	    copyout(&uc, &fp->sf_uc, ucsz) ||
	    ustore_int((u_int *)&((struct rwindow *)newsp)->rw_in[6], oldsp));
	mutex_enter(p->p_lock);

	if (error) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
#ifdef DEBUG
		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
			printf("sendsig: window save or copyout error\n");
#endif
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	switch (ps->sa_sigdesc[sig].sd_vers) {
	default:
		/* Unsupported trampoline version; kill the process. */
		sigexit(l, SIGILL);
	case __SIGTRAMP_SIGINFO_VERSION:
		/*
		 * Arrange to continue execution at the user's handler.
		 * It needs a new stack pointer, a return address and
		 * three arguments: (signo, siginfo *, ucontext *).
		 */

		tf->tf_pc = catcher;
		tf->tf_npc = catcher + 4;
		tf->tf_out[0] = sig;
		tf->tf_out[1] = (int)&fp->sf_si;
		tf->tf_out[2] = (int)&fp->sf_uc;
		tf->tf_out[6] = newsp;
		/* %o7 + 8 is the return pc, hence the -8 bias here. */
		tf->tf_out[7] = (int)ps->sa_sigdesc[sig].sd_tramp - 8;
		break;
	}

	/* Remember that we're now on the signal stack.
*/ 589 if (onstack) 590 l->l_sigstk.ss_flags |= SS_ONSTACK; 591 592 #ifdef DEBUG 593 if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid) 594 printf("sendsig: about to return to catcher\n"); 595 #endif 596 } 597 598 void 599 cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags) 600 { 601 struct trapframe *tf = (struct trapframe *)l->l_md.md_tf; 602 __greg_t *r = mcp->__gregs; 603 __greg_t ras_pc; 604 #ifdef FPU_CONTEXT 605 __fpregset_t *f = &mcp->__fpregs; 606 struct fpstate *fps = l->l_md.md_fpstate; 607 #endif 608 609 /* 610 * Put the stack in a consistent state before we whack away 611 * at it. Note that write_user_windows may just dump the 612 * registers into the pcb; we need them in the process's memory. 613 */ 614 write_user_windows(); 615 if (rwindow_save(l)) { 616 mutex_enter(l->l_proc->p_lock); 617 sigexit(l, SIGILL); 618 } 619 620 /* 621 * Get the general purpose registers 622 */ 623 r[_REG_PSR] = tf->tf_psr; 624 r[_REG_PC] = tf->tf_pc; 625 r[_REG_nPC] = tf->tf_npc; 626 r[_REG_Y] = tf->tf_y; 627 r[_REG_G1] = tf->tf_global[1]; 628 r[_REG_G2] = tf->tf_global[2]; 629 r[_REG_G3] = tf->tf_global[3]; 630 r[_REG_G4] = tf->tf_global[4]; 631 r[_REG_G5] = tf->tf_global[5]; 632 r[_REG_G6] = tf->tf_global[6]; 633 r[_REG_G7] = tf->tf_global[7]; 634 r[_REG_O0] = tf->tf_out[0]; 635 r[_REG_O1] = tf->tf_out[1]; 636 r[_REG_O2] = tf->tf_out[2]; 637 r[_REG_O3] = tf->tf_out[3]; 638 r[_REG_O4] = tf->tf_out[4]; 639 r[_REG_O5] = tf->tf_out[5]; 640 r[_REG_O6] = tf->tf_out[6]; 641 r[_REG_O7] = tf->tf_out[7]; 642 643 if ((ras_pc = (__greg_t)ras_lookup(l->l_proc, 644 (void *) r[_REG_PC])) != -1) { 645 r[_REG_PC] = ras_pc; 646 r[_REG_nPC] = ras_pc + 4; 647 } 648 649 *flags |= (_UC_CPU|_UC_TLSBASE); 650 651 #ifdef FPU_CONTEXT 652 /* 653 * Get the floating point registers 654 */ 655 memcpy(f->__fpu_regs, fps->fs_regs, sizeof(fps->fs_regs)); 656 f->__fp_nqsize = sizeof(struct fp_qentry); 657 f->__fp_nqel = fps->fs_qsize; 658 f->__fp_fsr = fps->fs_fsr; 659 if (f->__fp_q != 
NULL) { 660 size_t sz = f->__fp_nqel * f->__fp_nqsize; 661 if (sz > sizeof(fps->fs_queue)) { 662 #ifdef DIAGNOSTIC 663 printf("getcontext: fp_queue too large\n"); 664 #endif 665 return; 666 } 667 if (copyout(fps->fs_queue, f->__fp_q, sz) != 0) { 668 #ifdef DIAGNOSTIC 669 printf("getcontext: copy of fp_queue failed %d\n", 670 error); 671 #endif 672 return; 673 } 674 } 675 f->fp_busy = 0; /* XXX: How do we determine that? */ 676 *flags |= _UC_FPU; 677 #endif 678 679 return; 680 } 681 682 int 683 cpu_mcontext_validate(struct lwp *l, const mcontext_t *mc) 684 { 685 const __greg_t *gr = mc->__gregs; 686 687 /* 688 * Only the icc bits in the psr are used, so it need not be 689 * verified. pc and npc must be multiples of 4. This is all 690 * that is required; if it holds, just do it. 691 */ 692 if (((gr[_REG_PC] | gr[_REG_nPC]) & 3) != 0 || 693 gr[_REG_PC] == 0 || gr[_REG_nPC] == 0) 694 return EINVAL; 695 696 return 0; 697 } 698 699 /* 700 * Set to mcontext specified. 701 * Return to previous pc and psl as specified by 702 * context left by sendsig. Check carefully to 703 * make sure that the user has not modified the 704 * psl to gain improper privileges or to cause 705 * a machine fault. 706 * This is almost like sigreturn() and it shows. 
 */
int
cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
{
	struct trapframe *tf;
	const __greg_t *r = mcp->__gregs;
	struct proc *p = l->l_proc;
	int error;
#ifdef FPU_CONTEXT
	/*
	 * NOTE(review): this drops const from `mcp' — under FPU_CONTEXT
	 * this assignment would need a cast/const cleanup; confirm before
	 * enabling that option.
	 */
	__fpregset_t *f = &mcp->__fpregs;
	struct fpstate *fps = l->l_md.md_fpstate;
#endif

	/* Flush user register windows to the stack before overwriting. */
	write_user_windows();
	if (rwindow_save(l)) {
		mutex_enter(p->p_lock);
		sigexit(l, SIGILL);
	}

#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("__setmcontext: %s[%d], __mcontext %p\n",
		    l->l_proc->p_comm, l->l_proc->p_pid, mcp);
#endif

	if (flags & _UC_CPU) {
		/* Validate */
		error = cpu_mcontext_validate(l, mcp);
		if (error)
			return error;

		/* Restore register context. */
		tf = (struct trapframe *)l->l_md.md_tf;

		/* take only psr ICC field */
		tf->tf_psr = (tf->tf_psr & ~PSR_ICC) |
		    (r[_REG_PSR] & PSR_ICC);
		tf->tf_pc = r[_REG_PC];
		tf->tf_npc = r[_REG_nPC];
		tf->tf_y = r[_REG_Y];

		/* Restore everything */
		tf->tf_global[1] = r[_REG_G1];
		tf->tf_global[2] = r[_REG_G2];
		tf->tf_global[3] = r[_REG_G3];
		tf->tf_global[4] = r[_REG_G4];
		tf->tf_global[5] = r[_REG_G5];
		tf->tf_global[6] = r[_REG_G6];
		/* done in lwp_setprivate */
		/* tf->tf_global[7] = r[_REG_G7]; */

		tf->tf_out[0] = r[_REG_O0];
		tf->tf_out[1] = r[_REG_O1];
		tf->tf_out[2] = r[_REG_O2];
		tf->tf_out[3] = r[_REG_O3];
		tf->tf_out[4] = r[_REG_O4];
		tf->tf_out[5] = r[_REG_O5];
		tf->tf_out[6] = r[_REG_O6];
		tf->tf_out[7] = r[_REG_O7];

		if (flags & _UC_TLSBASE)
			lwp_setprivate(l, (void *)(uintptr_t)r[_REG_G7]);
	}

#ifdef FPU_CONTEXT
	if (flags & _UC_FPU) {
		/*
		 * Set the floating point registers
		 */
		int error;
		size_t sz = f->__fp_nqel * f->__fp_nqsize;
		if (sz > sizeof(fps->fs_queue)) {
#ifdef DIAGNOSTIC
			printf("setmcontext: fp_queue too large\n");
#endif
			return (EINVAL);
		}
		memcpy(fps->fs_regs, f->__fpu_regs, sizeof(fps->fs_regs));
		fps->fs_qsize = f->__fp_nqel;
		fps->fs_fsr = f->__fp_fsr;
		if (f->__fp_q != NULL) {
			if ((error = copyin(f->__fp_q, fps->fs_queue, sz)) != 0) {
#ifdef DIAGNOSTIC
				printf("setmcontext: fp_queue copy failed\n");
#endif
				return (error);
			}
		}
	}
#endif

	mutex_enter(p->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(p->p_lock);

	return (0);
}

int waittime = -1;

/*
 * Machine-dependent reboot/halt/powerdown.  Syncs filesystems (unless
 * RB_NOSYNC), optionally dumps core, runs shutdown hooks and then hands
 * control to the PROM.
 */
void
cpu_reboot(int howto, char *user_boot_string)
{
	int i;
	char opts[4];
	static char str[128];

	/* If system is cold, just halt. */
	if (cold) {
		howto |= RB_HALT;
		goto haltsys;
	}

#if NFB > 0
	fb_unblank();
#endif
	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {

		/* XXX protect against curlwp->p_stats.foo refs in sync() */
		if (curlwp == NULL)
			curlwp = &lwp0;
		waittime = 0;
		vfs_shutdown();
	}

	/* Disable interrupts. But still allow IPI on MP systems */
	if (sparc_ncpus > 1)
		(void)splsched();
	else
		(void)splhigh();

#if defined(MULTIPROCESSOR)
	/* Direct system interrupts to this CPU, since dump uses polled I/O */
	if (CPU_ISSUN4M)
		*((u_int *)ICR_ITR) = cpuinfo.mid - 8;
#endif

	/* If rebooting and a dump is requested, do it. */
#if 0
	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
#else
	if (howto & RB_DUMP)
#endif
		dumpsys();

 haltsys:

	/* Run any shutdown hooks. */
	doshutdownhooks();

	pmf_system_shutdown(boothowto);

	/* If powerdown was requested, do it.
 */
	if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
		prom_interpret("power-off");
#if NPOWER > 0
		/* Fall back on `power' device if the PROM can't do it */
		powerdown();
#endif
		/* Reached only if neither method actually cut the power. */
		printf("WARNING: powerdown not supported\n");
		/*
		 * RB_POWERDOWN implies RB_HALT... fall into it...
		 */
	}

	if (howto & RB_HALT) {
#if defined(MULTIPROCESSOR)
		mp_halt_cpus();
		printf("cpu%d halted\n\n", cpu_number());
#else
		printf("halted\n\n");
#endif
		prom_halt();
	}

	printf("rebooting\n\n");

	/* Build a "-sd"-style option string from the RB_* flags. */
	i = 1;
	if (howto & RB_SINGLE)
		opts[i++] = 's';
	if (howto & RB_KDB)
		opts[i++] = 'd';
	opts[i] = '\0';
	opts[0] = (i > 1) ? '-' : '\0';

	if (user_boot_string && *user_boot_string) {
		i = strlen(user_boot_string);
		if (i > sizeof(str) - sizeof(opts) - 1)
			prom_boot(user_boot_string);	/* XXX */
		memcpy(str, user_boot_string, i);
		if (opts[0] != '\0')
			str[i] = ' ';
	}
	strcat(str, opts);
	prom_boot(str);
	/*NOTREACHED*/
}

uint32_t dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* also for savecore */
long	dumplo = 0;		/* dump start block on dumpdev */

/*
 * Size and place the crash dump at the end of the dump partition,
 * leaving room at the front for a possible disklabel.
 */
void
cpu_dumpconf(void)
{
	int nblks, dumpblks;

	if (dumpdev == NODEV)
		return;
	nblks = bdev_size(dumpdev);

	dumpblks = ctod(physmem) + pmap_dumpsize();
	if (dumpblks > (nblks - ctod(1)))
		/*
		 * dump size is too big for the partition.
		 * Note, we safeguard a click at the front for a
		 * possible disk label.
		 */
		return;

	/* Put the dump at the end of the partition */
	dumplo = nblks - dumpblks;

	/*
	 * savecore(8) expects dumpsize to be the number of pages
	 * of actual core dumped (i.e. excluding the MMU stuff).
	 */
	dumpsize = physmem;
}

#define	BYTES_PER_DUMP	(32 * 1024)	/* must be a multiple of pagesize */
static vaddr_t dumpspace;		/* VA window used to map dump pages */
struct pcb dumppcb;			/* register snapshot for the dump */

/*
 * Record the VA reserved for the dump window; returns the address just
 * past the reservation for the caller to continue allocating from.
 */
void *
reserve_dumppages(void *p)
{

	dumpspace = (vaddr_t)p;
	return ((char *)p + BYTES_PER_DUMP);
}

/*
 * Write a crash dump.
 */
void
dumpsys(void)
{
	const struct bdevsw *bdev;
	int psize;
	daddr_t blkno;
	int (*dump)(dev_t, daddr_t, void *, size_t);
	int error = 0;
	struct memarr *mp;
	int nmem;

	/* copy registers to memory */
	snapshot(cpuinfo.curpcb);
	memcpy(&dumppcb, cpuinfo.curpcb, sizeof dumppcb);
	stackdump();

	if (dumpdev == NODEV)
		return;
	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL || bdev->d_psize == NULL)
		return;

	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		cpu_dumpconf();
	if (dumplo <= 0) {
		printf("\ndump to dev %u,%u not possible\n",
		    major(dumpdev), minor(dumpdev));
		return;
	}
	printf("\ndumping to dev %u,%u offset %ld\n",
	    major(dumpdev), minor(dumpdev), dumplo);

	psize = bdev_size(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}
	blkno = dumplo;
	dump = bdev->d_dump;

	/* First the MMU state, then each physical memory range. */
	error = pmap_dumpmmu(dump, blkno);
	blkno += pmap_dumpsize();

	for (mp = pmemarr, nmem = npmemarr; --nmem >= 0 && error == 0; mp++) {
		unsigned i = 0, n;
		int maddr = mp->addr;

		if (maddr == 0) {
			/* Skip first page at physical address 0 */
			maddr += PAGE_SIZE;
			i += PAGE_SIZE;
			blkno += btodb(PAGE_SIZE);
		}

		for (; i < mp->len; i += n) {
			n = mp->len - i;
			if (n > BYTES_PER_DUMP)
				n = BYTES_PER_DUMP;

			/* print out how many MBs we have dumped */
			if (i && (i % (1024*1024)) == 0)
				printf_nolog("%d ", i / (1024*1024));

			/* Map the chunk at dumpspace, write it, unmap it. */
			(void) pmap_map(dumpspace, maddr, maddr + n,
					VM_PROT_READ);
			error = (*dump)(dumpdev, blkno,
					(void *)dumpspace, (int)n);
			pmap_kremove(dumpspace, n);
			pmap_update(pmap_kernel());
			if (error)
				break;
			maddr += n;
			blkno += btodb(n);
		}
	}

	switch (error) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	case 0:
		printf("succeeded\n");
		break;

	default:
		printf("error %d\n", error);
		break;
	}
}

/*
 * get the fp and dump the stack as best we can. don't leave the
 * current stack page
 */
void
stackdump(void)
{
	struct frame *fp = getfp(), *sfp;

	sfp = fp;
	printf("Frame pointer is at %p\n", fp);
	printf("Call traceback:\n");
	/* Stop as soon as the frame chain leaves the starting page. */
	while (fp && ((u_long)fp >> PGSHIFT) == ((u_long)sfp >> PGSHIFT)) {
		printf(" pc = 0x%x args = (0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x) fp = %p\n",
		    fp->fr_pc, fp->fr_arg[0], fp->fr_arg[1], fp->fr_arg[2],
		    fp->fr_arg[3], fp->fr_arg[4], fp->fr_arg[5], fp->fr_fp);
		fp = fp->fr_fp;
	}
}

/* a.out executables are not supported on this port. */
int
cpu_exec_aout_makecmds(struct lwp *l, struct exec_package *epp)
{

	return (ENOEXEC);
}

#if defined(SUN4)
/*
 * Stack trace helper for the old sun4 PROM monitor's `w' command.
 */
void
oldmon_w_trace(u_long va)
{
	struct cpu_info * const ci = curcpu();
	u_long stop;
	struct frame *fp;

	printf("curlwp = %p, pid %d\n", curlwp, curproc->p_pid);

	printf("uvm: cpu%u: swtch %"PRIu64", trap %"PRIu64", sys %"PRIu64", "
	    "intr %"PRIu64", soft %"PRIu64", faults %"PRIu64"\n",
	    cpu_index(ci), ci->ci_data.cpu_nswtch, ci->ci_data.cpu_ntrap,
	    ci->ci_data.cpu_nsyscall, ci->ci_data.cpu_nintr,
	    ci->ci_data.cpu_nsoft, ci->ci_data.cpu_nfault);
	write_user_windows();

#define round_up(x) (( (x) + (PAGE_SIZE-1) ) & (~(PAGE_SIZE-1)) )

	printf("\nstack trace with sp = 0x%lx\n", va);
	stop = round_up(va);
	printf("stop at 0x%lx\n", stop);
	fp = (struct frame *) va;
	/* Walk frames until the chain leaves the starting page. */
	while (round_up((u_long) fp) == stop) {
		printf(" 0x%x(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x) fp %p\n", fp->fr_pc,
		    fp->fr_arg[0], fp->fr_arg[1], fp->fr_arg[2], fp->fr_arg[3],
		    fp->fr_arg[4], fp->fr_arg[5], fp->fr_fp);
		fp = fp->fr_fp;
		if (fp == NULL)
			break;
	}
	printf("end of stack trace\n");
}

/*
 * Dispatch for the old PROM monitor's `w' command.
 */
void
oldmon_w_cmd(u_long va, char *ar)
{
	switch (*ar) {
	case '\0':
		switch (va) {
		case 0:
			panic("g0 panic");
		case 4:
			printf("w: case 4\n");
			break;
		default:
			printf("w: unknown case %ld\n", va);
			break;
		}
		break;
	case 't':
		oldmon_w_trace(va);
		break;
	default:
		printf("w: arg not allowed\n");
	}
}

/*
 * Load a byte from control space (sun4/sun4c only), catching any fault
 * via the pcb_onfault mechanism; saves and restores the previous
 * onfault handler around the access.
 */
int
ldcontrolb(void *addr)
{
	struct pcb *xpcb;
	u_long saveonfault;
	int res;
	int s;

	if (CPU_ISSUN4M || CPU_ISSUN4D) {
		printf("warning: ldcontrolb called on sun4m/sun4d\n");
		return 0;
	}

	s = splhigh();
	xpcb = lwp_getpcb(curlwp);

	saveonfault = (u_long)xpcb->pcb_onfault;
	res = xldcontrolb(addr, xpcb);
	xpcb->pcb_onfault = (void *)saveonfault;

	splx(s);
	return (res);
}
#endif /* SUN4 */

/*
 * Zero `l' bytes at `vb' using 16-bit stores where possible (some
 * device memory only supports halfword access).
 */
void
wzero(void *vb, u_int l)
{
	u_char *b = vb;
	u_char *be = b + l;
	u_short *sp;

	if (l == 0)
		return;

	/* front, */
	if ((u_long)b & 1)
		*b++ = 0;

	/* back, */
	if (b != be && ((u_long)be & 1) != 0) {
		be--;
		*be = 0;
	}

	/* and middle.
*/ 1196 sp = (u_short *)b; 1197 while (sp != (u_short *)be) 1198 *sp++ = 0; 1199 } 1200 1201 void 1202 wcopy(const void *vb1, void *vb2, u_int l) 1203 { 1204 const u_char *b1e, *b1 = vb1; 1205 u_char *b2 = vb2; 1206 const u_short *sp; 1207 int bstore = 0; 1208 1209 if (l == 0) 1210 return; 1211 1212 /* front, */ 1213 if ((u_long)b1 & 1) { 1214 *b2++ = *b1++; 1215 l--; 1216 } 1217 1218 /* middle, */ 1219 sp = (const u_short *)b1; 1220 b1e = b1 + l; 1221 if (l & 1) 1222 b1e--; 1223 bstore = (u_long)b2 & 1; 1224 1225 while (sp < (const u_short *)b1e) { 1226 if (bstore) { 1227 b2[1] = *sp & 0xff; 1228 b2[0] = *sp >> 8; 1229 } else 1230 *((short *)b2) = *sp; 1231 sp++; 1232 b2 += 2; 1233 } 1234 1235 /* and back. */ 1236 if (l & 1) 1237 *b2 = *b1e; 1238 } 1239 1240 #ifdef MODULAR 1241 void 1242 module_init_md(void) 1243 { 1244 } 1245 #endif 1246 1247 static size_t 1248 _bus_dmamap_mapsize(int const nsegments) 1249 { 1250 KASSERT(nsegments > 0); 1251 return sizeof(struct sparc_bus_dmamap) + 1252 (sizeof(bus_dma_segment_t) * (nsegments - 1)); 1253 } 1254 1255 /* 1256 * Common function for DMA map creation. May be called by bus-specific 1257 * DMA map creation functions. 1258 */ 1259 int 1260 _bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments, 1261 bus_size_t maxsegsz, bus_size_t boundary, int flags, 1262 bus_dmamap_t *dmamp) 1263 { 1264 struct sparc_bus_dmamap *map; 1265 void *mapstore; 1266 1267 /* 1268 * Allocate and initialize the DMA map. The end of the map 1269 * is a variable-sized array of segments, so we allocate enough 1270 * room for them in one shot. 1271 * 1272 * Note we don't preserve the WAITOK or NOWAIT flags. Preservation 1273 * of ALLOCNOW notifies others that we've reserved these resources, 1274 * and they are not to be freed. 1275 * 1276 * The bus_dmamap_t includes one bus_dma_segment_t, hence 1277 * the (nsegments - 1). 1278 */ 1279 if ((mapstore = kmem_zalloc(_bus_dmamap_mapsize(nsegments), 1280 (flags & BUS_DMA_NOWAIT) ? 
KM_NOSLEEP : KM_SLEEP)) == NULL) 1281 return (ENOMEM); 1282 1283 map = (struct sparc_bus_dmamap *)mapstore; 1284 map->_dm_size = size; 1285 map->_dm_segcnt = nsegments; 1286 map->_dm_maxmaxsegsz = maxsegsz; 1287 map->_dm_boundary = boundary; 1288 map->_dm_align = PAGE_SIZE; 1289 map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT); 1290 map->dm_maxsegsz = maxsegsz; 1291 map->dm_mapsize = 0; /* no valid mappings */ 1292 map->dm_nsegs = 0; 1293 1294 *dmamp = map; 1295 return (0); 1296 } 1297 1298 /* 1299 * Common function for DMA map destruction. May be called by bus-specific 1300 * DMA map destruction functions. 1301 */ 1302 void 1303 _bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map) 1304 { 1305 1306 kmem_free(map, _bus_dmamap_mapsize(map->_dm_segcnt)); 1307 } 1308 1309 /* 1310 * Like _bus_dmamap_load(), but for mbufs. 1311 */ 1312 int 1313 _bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, 1314 struct mbuf *m, int flags) 1315 { 1316 1317 panic("_bus_dmamap_load_mbuf: not implemented"); 1318 } 1319 1320 /* 1321 * Like _bus_dmamap_load(), but for uios. 1322 */ 1323 int 1324 _bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, 1325 struct uio *uio, int flags) 1326 { 1327 1328 panic("_bus_dmamap_load_uio: not implemented"); 1329 } 1330 1331 /* 1332 * Like _bus_dmamap_load(), but for raw memory allocated with 1333 * bus_dmamem_alloc(). 1334 */ 1335 int 1336 _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, 1337 bus_dma_segment_t *segs, int nsegs, bus_size_t size, 1338 int flags) 1339 { 1340 1341 panic("_bus_dmamap_load_raw: not implemented"); 1342 } 1343 1344 /* 1345 * Common function for DMA map synchronization. May be called 1346 * by bus-specific DMA map synchronization functions. 1347 */ 1348 void 1349 _bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, 1350 bus_addr_t offset, bus_size_t len, int ops) 1351 { 1352 } 1353 1354 /* 1355 * Common function for DMA-safe memory allocation. 
May be called 1356 * by bus-specific DMA memory allocation functions. 1357 */ 1358 int 1359 _bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, 1360 bus_size_t alignment, bus_size_t boundary, 1361 bus_dma_segment_t *segs, int nsegs, int *rsegs, 1362 int flags) 1363 { 1364 vaddr_t low, high; 1365 struct pglist *mlist; 1366 int error; 1367 1368 /* Always round the size. */ 1369 size = round_page(size); 1370 low = vm_first_phys; 1371 high = vm_first_phys + vm_num_phys - PAGE_SIZE; 1372 1373 if ((mlist = kmem_alloc(sizeof(*mlist), 1374 (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL) 1375 return (ENOMEM); 1376 1377 /* 1378 * Allocate pages from the VM system. 1379 */ 1380 error = uvm_pglistalloc(size, low, high, 0, 0, 1381 mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0); 1382 if (error) { 1383 kmem_free(mlist, sizeof(*mlist)); 1384 return (error); 1385 } 1386 1387 /* 1388 * Simply keep a pointer around to the linked list, so 1389 * bus_dmamap_free() can return it. 1390 * 1391 * NOBODY SHOULD TOUCH THE pageq.queue FIELDS WHILE THESE PAGES 1392 * ARE IN OUR CUSTODY. 1393 */ 1394 segs[0]._ds_mlist = mlist; 1395 1396 /* 1397 * We now have physical pages, but no DVMA addresses yet. These 1398 * will be allocated in bus_dmamap_load*() routines. Hence we 1399 * save any alignment and boundary requirements in this DMA 1400 * segment. 1401 */ 1402 segs[0].ds_addr = 0; 1403 segs[0].ds_len = 0; 1404 segs[0]._ds_va = 0; 1405 *rsegs = 1; 1406 return (0); 1407 } 1408 1409 /* 1410 * Common function for freeing DMA-safe memory. May be called by 1411 * bus-specific DMA memory free functions. 1412 */ 1413 void 1414 _bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs) 1415 { 1416 struct pglist *mlist = segs[0]._ds_mlist; 1417 1418 if (nsegs != 1) 1419 panic("bus_dmamem_free: nsegs = %d", nsegs); 1420 1421 /* 1422 * Return the list of pages back to the VM system. 
1423 */ 1424 uvm_pglistfree(mlist); 1425 kmem_free(mlist, sizeof(*mlist)); 1426 } 1427 1428 /* 1429 * Common function for unmapping DMA-safe memory. May be called by 1430 * bus-specific DMA memory unmapping functions. 1431 */ 1432 void 1433 _bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size) 1434 { 1435 1436 #ifdef DIAGNOSTIC 1437 if ((u_long)kva & PAGE_MASK) 1438 panic("_bus_dmamem_unmap"); 1439 #endif 1440 1441 size = round_page(size); 1442 pmap_kremove((vaddr_t)kva, size); 1443 pmap_update(pmap_kernel()); 1444 uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY); 1445 } 1446 1447 /* 1448 * Common function for mmap(2)'ing DMA-safe memory. May be called by 1449 * bus-specific DMA mmap(2)'ing functions. 1450 */ 1451 paddr_t 1452 _bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, 1453 off_t off, int prot, int flags) 1454 { 1455 1456 panic("_bus_dmamem_mmap: not implemented"); 1457 } 1458 1459 /* 1460 * Utility to allocate an aligned kernel virtual address range 1461 */ 1462 vaddr_t 1463 _bus_dma_valloc_skewed(size_t size, u_long boundary, u_long align, u_long skew) 1464 { 1465 size_t oversize; 1466 vaddr_t va, sva; 1467 1468 /* 1469 * Find a region of kernel virtual addresses that is aligned 1470 * to the given address modulo the requested alignment, i.e. 
1471 * 1472 * (va - skew) == 0 mod align 1473 * 1474 * The following conditions apply to the arguments: 1475 * 1476 * - `size' must be a multiple of the VM page size 1477 * - `align' must be a power of two 1478 * and greater than or equal to the VM page size 1479 * - `skew' must be smaller than `align' 1480 * - `size' must be smaller than `boundary' 1481 */ 1482 1483 #ifdef DIAGNOSTIC 1484 if ((size & PAGE_MASK) != 0) 1485 panic("_bus_dma_valloc_skewed: invalid size %lx", size); 1486 if ((align & PAGE_MASK) != 0) 1487 panic("_bus_dma_valloc_skewed: invalid alignment %lx", align); 1488 if (align < skew) 1489 panic("_bus_dma_valloc_skewed: align %lx < skew %lx", 1490 align, skew); 1491 #endif 1492 1493 /* XXX - Implement this! */ 1494 if (boundary) { 1495 printf("_bus_dma_valloc_skewed: " 1496 "boundary check not implemented"); 1497 return (0); 1498 } 1499 1500 /* 1501 * First, find a region large enough to contain any aligned chunk 1502 */ 1503 oversize = size + align - PAGE_SIZE; 1504 sva = vm_map_min(kernel_map); 1505 if (uvm_map(kernel_map, &sva, oversize, NULL, UVM_UNKNOWN_OFFSET, 1506 align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, 1507 UVM_ADV_RANDOM, UVM_FLAG_NOWAIT))) 1508 return (0); 1509 1510 /* 1511 * Compute start of aligned region 1512 */ 1513 va = sva; 1514 va += (skew + align - va) & (align - 1); 1515 1516 /* 1517 * Return excess virtual addresses 1518 */ 1519 if (va != sva) 1520 (void)uvm_unmap(kernel_map, sva, va); 1521 if (va + size != sva + oversize) 1522 (void)uvm_unmap(kernel_map, va + size, sva + oversize); 1523 1524 return (va); 1525 } 1526 1527 /* sun4/sun4c DMA map functions */ 1528 int sun4_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, 1529 bus_size_t, struct proc *, int); 1530 int sun4_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t, 1531 bus_dma_segment_t *, int, bus_size_t, int); 1532 void sun4_dmamap_unload(bus_dma_tag_t, bus_dmamap_t); 1533 int sun4_dmamem_map(bus_dma_tag_t, bus_dma_segment_t *, 1534 int, size_t, void **, 
int); 1535 1536 /* 1537 * sun4/sun4c: load DMA map with a linear buffer. 1538 */ 1539 int 1540 sun4_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, 1541 void *buf, bus_size_t buflen, 1542 struct proc *p, int flags) 1543 { 1544 bus_size_t sgsize; 1545 vaddr_t va = (vaddr_t)buf; 1546 int pagesz = PAGE_SIZE; 1547 vaddr_t dva; 1548 pmap_t pmap; 1549 1550 /* 1551 * Make sure that on error condition we return "no valid mappings". 1552 */ 1553 map->dm_nsegs = 0; 1554 1555 if (buflen > map->_dm_size) 1556 return (EINVAL); 1557 1558 cache_flush(buf, buflen); 1559 1560 if ((map->_dm_flags & BUS_DMA_24BIT) == 0) { 1561 /* 1562 * XXX Need to implement "don't DMA across this boundary". 1563 */ 1564 if (map->_dm_boundary != 0) { 1565 bus_addr_t baddr; 1566 1567 /* Calculate first boundary line after `buf' */ 1568 baddr = ((bus_addr_t)va + map->_dm_boundary) & 1569 -map->_dm_boundary; 1570 1571 /* 1572 * If the requested segment crosses the boundary, 1573 * we can't grant a direct map. For now, steal some 1574 * space from the `24BIT' map instead. 1575 * 1576 * (XXX - no overflow detection here) 1577 */ 1578 if (buflen > (baddr - (bus_addr_t)va)) 1579 goto no_fit; 1580 } 1581 map->dm_mapsize = buflen; 1582 map->dm_nsegs = 1; 1583 map->dm_segs[0].ds_addr = (bus_addr_t)va; 1584 map->dm_segs[0].ds_len = buflen; 1585 map->_dm_flags |= _BUS_DMA_DIRECTMAP; 1586 return (0); 1587 } 1588 1589 no_fit: 1590 sgsize = round_page(buflen + (va & (pagesz - 1))); 1591 1592 const vm_flag_t vmflags = VM_BESTFIT | 1593 ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP); 1594 1595 if (vmem_xalloc(dvmamap24, sgsize, 1596 0, /* alignment */ 1597 0, /* phase */ 1598 map->_dm_boundary, /* nocross */ 1599 VMEM_ADDR_MIN, /* minaddr */ 1600 VMEM_ADDR_MAX, /* maxaddr */ 1601 vmflags, 1602 &dva) != 0) { 1603 return (ENOMEM); 1604 } 1605 1606 /* 1607 * We always use just one segment. 
1608 */ 1609 map->dm_mapsize = buflen; 1610 map->dm_segs[0].ds_addr = dva + (va & (pagesz - 1)); 1611 map->dm_segs[0].ds_len = buflen; 1612 map->dm_segs[0]._ds_sgsize = sgsize; 1613 1614 if (p != NULL) 1615 pmap = p->p_vmspace->vm_map.pmap; 1616 else 1617 pmap = pmap_kernel(); 1618 1619 for (; buflen > 0; ) { 1620 paddr_t pa; 1621 1622 /* 1623 * Get the physical address for this page. 1624 */ 1625 (void) pmap_extract(pmap, va, &pa); 1626 1627 /* 1628 * Compute the segment size, and adjust counts. 1629 */ 1630 sgsize = pagesz - (va & (pagesz - 1)); 1631 if (buflen < sgsize) 1632 sgsize = buflen; 1633 1634 #ifdef notyet 1635 #if defined(SUN4) 1636 if (have_iocache) 1637 pa |= PG_IOC; 1638 #endif 1639 #endif 1640 pmap_kenter_pa(dva, (pa & -pagesz) | PMAP_NC, 1641 VM_PROT_READ | VM_PROT_WRITE, 0); 1642 1643 dva += pagesz; 1644 va += sgsize; 1645 buflen -= sgsize; 1646 } 1647 pmap_update(pmap_kernel()); 1648 1649 map->dm_nsegs = 1; 1650 return (0); 1651 } 1652 1653 /* 1654 * Like _bus_dmamap_load(), but for raw memory allocated with 1655 * bus_dmamem_alloc(). 1656 */ 1657 int 1658 sun4_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, 1659 bus_dma_segment_t *segs, int nsegs, bus_size_t size, 1660 int flags) 1661 { 1662 struct vm_page *m; 1663 paddr_t pa; 1664 vaddr_t dva; 1665 bus_size_t sgsize; 1666 struct pglist *mlist; 1667 int pagesz = PAGE_SIZE; 1668 int error; 1669 1670 map->dm_nsegs = 0; 1671 sgsize = (size + pagesz - 1) & -pagesz; 1672 1673 /* Allocate DVMA addresses */ 1674 if ((map->_dm_flags & BUS_DMA_24BIT) != 0) { 1675 const vm_flag_t vmflags = VM_BESTFIT | 1676 ((flags & BUS_DMA_NOWAIT) ? 
VM_NOSLEEP : VM_SLEEP); 1677 1678 error = vmem_xalloc(dvmamap24, sgsize, 1679 0, /* alignment */ 1680 0, /* phase */ 1681 map->_dm_boundary, /* nocross */ 1682 VMEM_ADDR_MIN, /* minaddr */ 1683 VMEM_ADDR_MAX, /* maxaddr */ 1684 vmflags, 1685 &dva); 1686 if (error) 1687 return (error); 1688 } else { 1689 /* Any properly aligned virtual address will do */ 1690 dva = _bus_dma_valloc_skewed(sgsize, map->_dm_boundary, 1691 pagesz, 0); 1692 if (dva == 0) 1693 return (ENOMEM); 1694 } 1695 1696 map->dm_segs[0].ds_addr = dva; 1697 map->dm_segs[0].ds_len = size; 1698 map->dm_segs[0]._ds_sgsize = sgsize; 1699 1700 /* Map physical pages into IOMMU */ 1701 mlist = segs[0]._ds_mlist; 1702 for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq.queue)) { 1703 if (sgsize == 0) 1704 panic("sun4_dmamap_load_raw: size botch"); 1705 pa = VM_PAGE_TO_PHYS(m); 1706 #ifdef notyet 1707 #if defined(SUN4) 1708 if (have_iocache) 1709 pa |= PG_IOC; 1710 #endif 1711 #endif 1712 pmap_kenter_pa(dva, (pa & -pagesz) | PMAP_NC, 1713 VM_PROT_READ | VM_PROT_WRITE, 0); 1714 1715 dva += pagesz; 1716 sgsize -= pagesz; 1717 } 1718 pmap_update(pmap_kernel()); 1719 1720 map->dm_nsegs = 1; 1721 map->dm_mapsize = size; 1722 1723 return (0); 1724 } 1725 1726 /* 1727 * sun4/sun4c function for unloading a DMA map. 
1728 */ 1729 void 1730 sun4_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map) 1731 { 1732 bus_dma_segment_t *segs = map->dm_segs; 1733 int nsegs = map->dm_nsegs; 1734 int flags = map->_dm_flags; 1735 vaddr_t dva; 1736 bus_size_t len; 1737 int i; 1738 1739 map->dm_maxsegsz = map->_dm_maxmaxsegsz; 1740 1741 if ((flags & _BUS_DMA_DIRECTMAP) != 0) { 1742 /* Nothing to release */ 1743 map->dm_mapsize = 0; 1744 map->dm_nsegs = 0; 1745 map->_dm_flags &= ~_BUS_DMA_DIRECTMAP; 1746 return; 1747 } 1748 1749 for (i = 0; i < nsegs; i++) { 1750 dva = segs[i].ds_addr & -PAGE_SIZE; 1751 len = segs[i]._ds_sgsize; 1752 1753 pmap_kremove(dva, len); 1754 1755 if ((flags & BUS_DMA_24BIT) != 0) { 1756 vmem_xfree(dvmamap24, dva, len); 1757 } else { 1758 uvm_unmap(kernel_map, dva, dva + len); 1759 } 1760 } 1761 pmap_update(pmap_kernel()); 1762 1763 /* Mark the mappings as invalid. */ 1764 map->dm_mapsize = 0; 1765 map->dm_nsegs = 0; 1766 } 1767 1768 /* 1769 * Common function for mapping DMA-safe memory. May be called by 1770 * bus-specific DMA memory map functions. 1771 */ 1772 int 1773 sun4_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, 1774 size_t size, void **kvap, int flags) 1775 { 1776 struct vm_page *m; 1777 vaddr_t va; 1778 struct pglist *mlist; 1779 const uvm_flag_t kmflags = 1780 (flags & BUS_DMA_NOWAIT) != 0 ? 
UVM_KMF_NOWAIT : 0; 1781 1782 if (nsegs != 1) 1783 panic("sun4_dmamem_map: nsegs = %d", nsegs); 1784 1785 size = round_page(size); 1786 1787 va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags); 1788 if (va == 0) 1789 return (ENOMEM); 1790 1791 segs[0]._ds_va = va; 1792 *kvap = (void *)va; 1793 1794 mlist = segs[0]._ds_mlist; 1795 TAILQ_FOREACH(m, mlist, pageq.queue) { 1796 paddr_t pa; 1797 1798 if (size == 0) 1799 panic("sun4_dmamem_map: size botch"); 1800 1801 pa = VM_PAGE_TO_PHYS(m); 1802 pmap_kenter_pa(va, pa | PMAP_NC, 1803 VM_PROT_READ | VM_PROT_WRITE, 0); 1804 1805 va += PAGE_SIZE; 1806 size -= PAGE_SIZE; 1807 } 1808 pmap_update(pmap_kernel()); 1809 1810 return (0); 1811 } 1812 1813 1814 struct sparc_bus_dma_tag mainbus_dma_tag = { 1815 NULL, 1816 _bus_dmamap_create, 1817 _bus_dmamap_destroy, 1818 sun4_dmamap_load, 1819 _bus_dmamap_load_mbuf, 1820 _bus_dmamap_load_uio, 1821 sun4_dmamap_load_raw, 1822 sun4_dmamap_unload, 1823 _bus_dmamap_sync, 1824 1825 _bus_dmamem_alloc, 1826 _bus_dmamem_free, 1827 sun4_dmamem_map, 1828 _bus_dmamem_unmap, 1829 _bus_dmamem_mmap 1830 }; 1831 1832 1833 /* 1834 * Base bus space handlers. 
1835 */ 1836 static int sparc_bus_map(bus_space_tag_t, bus_addr_t, 1837 bus_size_t, int, vaddr_t, 1838 bus_space_handle_t *); 1839 static int sparc_bus_unmap(bus_space_tag_t, bus_space_handle_t, 1840 bus_size_t); 1841 static int sparc_bus_subregion(bus_space_tag_t, bus_space_handle_t, 1842 bus_size_t, bus_size_t, 1843 bus_space_handle_t *); 1844 static paddr_t sparc_bus_mmap(bus_space_tag_t, bus_addr_t, off_t, 1845 int, int); 1846 static void *sparc_mainbus_intr_establish(bus_space_tag_t, int, int, 1847 int (*)(void *), 1848 void *, 1849 void (*)(void)); 1850 static void sparc_bus_barrier(bus_space_tag_t, bus_space_handle_t, 1851 bus_size_t, bus_size_t, int); 1852 1853 int 1854 bus_space_map( 1855 bus_space_tag_t t, 1856 bus_addr_t a, 1857 bus_size_t s, 1858 int f, 1859 bus_space_handle_t *hp) 1860 { 1861 return (*t->sparc_bus_map)(t, a, s, f, (vaddr_t)0, hp); 1862 } 1863 1864 int 1865 bus_space_map2( 1866 bus_space_tag_t t, 1867 bus_addr_t a, 1868 bus_size_t s, 1869 int f, 1870 vaddr_t v, 1871 bus_space_handle_t *hp) 1872 { 1873 return (*t->sparc_bus_map)(t, a, s, f, v, hp); 1874 } 1875 1876 void 1877 bus_space_unmap( 1878 bus_space_tag_t t, 1879 bus_space_handle_t h, 1880 bus_size_t s) 1881 { 1882 (*t->sparc_bus_unmap)(t, h, s); 1883 } 1884 1885 int 1886 bus_space_subregion( 1887 bus_space_tag_t t, 1888 bus_space_handle_t h, 1889 bus_size_t o, 1890 bus_size_t s, 1891 bus_space_handle_t *hp) 1892 { 1893 return (*t->sparc_bus_subregion)(t, h, o, s, hp); 1894 } 1895 1896 paddr_t 1897 bus_space_mmap( 1898 bus_space_tag_t t, 1899 bus_addr_t a, 1900 off_t o, 1901 int p, 1902 int f) 1903 { 1904 return (*t->sparc_bus_mmap)(t, a, o, p, f); 1905 } 1906 1907 void * 1908 bus_intr_establish( 1909 bus_space_tag_t t, 1910 int p, 1911 int l, 1912 int (*h)(void *), 1913 void *a) 1914 { 1915 return (*t->sparc_intr_establish)(t, p, l, h, a, NULL); 1916 } 1917 1918 void * 1919 bus_intr_establish2( 1920 bus_space_tag_t t, 1921 int p, 1922 int l, 1923 int (*h)(void *), 1924 void *a, 
1925 void (*v)(void)) 1926 { 1927 return (*t->sparc_intr_establish)(t, p, l, h, a, v); 1928 } 1929 1930 void 1931 bus_space_barrier( 1932 bus_space_tag_t t, 1933 bus_space_handle_t h, 1934 bus_size_t o, 1935 bus_size_t s, 1936 int f) 1937 { 1938 (*t->sparc_bus_barrier)(t, h, o, s, f); 1939 } 1940 1941 void 1942 bus_space_write_multi_stream_2( 1943 bus_space_tag_t t, 1944 bus_space_handle_t h, 1945 bus_size_t o, 1946 const uint16_t *a, 1947 bus_size_t c) 1948 { 1949 while (c-- > 0) 1950 bus_space_write_2_real(t, h, o, *a++); 1951 } 1952 1953 void 1954 bus_space_write_multi_stream_4( 1955 bus_space_tag_t t, 1956 bus_space_handle_t h, 1957 bus_size_t o, 1958 const uint32_t *a, 1959 bus_size_t c) 1960 { 1961 while (c-- > 0) 1962 bus_space_write_4_real(t, h, o, *a++); 1963 } 1964 1965 void 1966 bus_space_write_multi_stream_8( 1967 bus_space_tag_t t, 1968 bus_space_handle_t h, 1969 bus_size_t o, 1970 const uint64_t *a, 1971 bus_size_t c) 1972 { 1973 while (c-- > 0) 1974 bus_space_write_8_real(t, h, o, *a++); 1975 } 1976 1977 1978 /* 1979 * void bus_space_set_multi_N(bus_space_tag_t tag, 1980 * bus_space_handle_t bsh, bus_size_t offset, u_intN_t val, 1981 * bus_size_t count); 1982 * 1983 * Write the 1, 2, 4, or 8 byte value `val' to bus space described 1984 * by tag/handle/offset `count' times. 
1985 */ 1986 void 1987 bus_space_set_multi_1( 1988 bus_space_tag_t t, 1989 bus_space_handle_t h, 1990 bus_size_t o, 1991 const uint8_t v, 1992 bus_size_t c) 1993 { 1994 while (c-- > 0) 1995 bus_space_write_1(t, h, o, v); 1996 } 1997 1998 void 1999 bus_space_set_multi_2( 2000 bus_space_tag_t t, 2001 bus_space_handle_t h, 2002 bus_size_t o, 2003 const uint16_t v, 2004 bus_size_t c) 2005 { 2006 while (c-- > 0) 2007 bus_space_write_2(t, h, o, v); 2008 } 2009 2010 void 2011 bus_space_set_multi_4( 2012 bus_space_tag_t t, 2013 bus_space_handle_t h, 2014 bus_size_t o, 2015 const uint32_t v, 2016 bus_size_t c) 2017 { 2018 while (c-- > 0) 2019 bus_space_write_4(t, h, o, v); 2020 } 2021 2022 void 2023 bus_space_set_multi_8( 2024 bus_space_tag_t t, 2025 bus_space_handle_t h, 2026 bus_size_t o, 2027 const uint64_t v, 2028 bus_size_t c) 2029 { 2030 while (c-- > 0) 2031 bus_space_write_8(t, h, o, v); 2032 } 2033 2034 2035 /* 2036 * void bus_space_read_region_N(bus_space_tag_t tag, 2037 * bus_space_handle_t bsh, bus_size_t off, 2038 * u_intN_t *addr, bus_size_t count); 2039 * 2040 */ 2041 void 2042 bus_space_read_region_1( 2043 bus_space_tag_t t, 2044 bus_space_handle_t h, 2045 bus_size_t o, 2046 uint8_t *a, 2047 bus_size_t c) 2048 { 2049 for (; c; a++, c--, o++) 2050 *a = bus_space_read_1(t, h, o); 2051 } 2052 2053 void 2054 bus_space_read_region_2( 2055 bus_space_tag_t t, 2056 bus_space_handle_t h, 2057 bus_size_t o, 2058 uint16_t *a, 2059 bus_size_t c) 2060 { 2061 for (; c; a++, c--, o+=2) 2062 *a = bus_space_read_2(t, h, o); 2063 } 2064 2065 void 2066 bus_space_read_region_4( 2067 bus_space_tag_t t, 2068 bus_space_handle_t h, 2069 bus_size_t o, 2070 uint32_t *a, 2071 bus_size_t c) 2072 { 2073 for (; c; a++, c--, o+=4) 2074 *a = bus_space_read_4(t, h, o); 2075 } 2076 2077 void 2078 bus_space_read_region_8( 2079 bus_space_tag_t t, 2080 bus_space_handle_t h, 2081 bus_size_t o, 2082 uint64_t *a, 2083 bus_size_t c) 2084 { 2085 for (; c; a++, c--, o+=8) 2086 *a = bus_space_read_8(t, 
h, o); 2087 } 2088 2089 /* 2090 * void bus_space_write_region_N(bus_space_tag_t tag, 2091 * bus_space_handle_t bsh, bus_size_t off, 2092 * u_intN_t *addr, bus_size_t count); 2093 * 2094 */ 2095 void 2096 bus_space_write_region_1( 2097 bus_space_tag_t t, 2098 bus_space_handle_t h, 2099 bus_size_t o, 2100 const uint8_t *a, 2101 bus_size_t c) 2102 { 2103 for (; c; a++, c--, o++) 2104 bus_space_write_1(t, h, o, *a); 2105 } 2106 2107 void 2108 bus_space_write_region_2( 2109 bus_space_tag_t t, 2110 bus_space_handle_t h, 2111 bus_size_t o, 2112 const uint16_t *a, 2113 bus_size_t c) 2114 { 2115 for (; c; a++, c--, o+=2) 2116 bus_space_write_2(t, h, o, *a); 2117 } 2118 2119 void 2120 bus_space_write_region_4( 2121 bus_space_tag_t t, 2122 bus_space_handle_t h, 2123 bus_size_t o, 2124 const uint32_t *a, 2125 bus_size_t c) 2126 { 2127 for (; c; a++, c--, o+=4) 2128 bus_space_write_4(t, h, o, *a); 2129 } 2130 2131 void 2132 bus_space_write_region_8( 2133 bus_space_tag_t t, 2134 bus_space_handle_t h, 2135 bus_size_t o, 2136 const uint64_t *a, 2137 bus_size_t c) 2138 { 2139 for (; c; a++, c--, o+=8) 2140 bus_space_write_8(t, h, o, *a); 2141 } 2142 2143 2144 /* 2145 * void bus_space_set_region_N(bus_space_tag_t tag, 2146 * bus_space_handle_t bsh, bus_size_t off, 2147 * u_intN_t *addr, bus_size_t count); 2148 * 2149 */ 2150 void 2151 bus_space_set_region_1( 2152 bus_space_tag_t t, 2153 bus_space_handle_t h, 2154 bus_size_t o, 2155 const uint8_t v, 2156 bus_size_t c) 2157 { 2158 for (; c; c--, o++) 2159 bus_space_write_1(t, h, o, v); 2160 } 2161 2162 void 2163 bus_space_set_region_2( 2164 bus_space_tag_t t, 2165 bus_space_handle_t h, 2166 bus_size_t o, 2167 const uint16_t v, 2168 bus_size_t c) 2169 { 2170 for (; c; c--, o+=2) 2171 bus_space_write_2(t, h, o, v); 2172 } 2173 2174 void 2175 bus_space_set_region_4( 2176 bus_space_tag_t t, 2177 bus_space_handle_t h, 2178 bus_size_t o, 2179 const uint32_t v, 2180 bus_size_t c) 2181 { 2182 for (; c; c--, o+=4) 2183 bus_space_write_4(t, h, 
o, v); 2184 } 2185 2186 void 2187 bus_space_set_region_8( 2188 bus_space_tag_t t, 2189 bus_space_handle_t h, 2190 bus_size_t o, 2191 const uint64_t v, 2192 bus_size_t c) 2193 { 2194 for (; c; c--, o+=8) 2195 bus_space_write_8(t, h, o, v); 2196 } 2197 2198 2199 /* 2200 * void bus_space_copy_region_N(bus_space_tag_t tag, 2201 * bus_space_handle_t bsh1, bus_size_t off1, 2202 * bus_space_handle_t bsh2, bus_size_t off2, 2203 * bus_size_t count); 2204 * 2205 * Copy `count' 1, 2, 4, or 8 byte values from bus space starting 2206 * at tag/bsh1/off1 to bus space starting at tag/bsh2/off2. 2207 */ 2208 void 2209 bus_space_copy_region_1( 2210 bus_space_tag_t t, 2211 bus_space_handle_t h1, 2212 bus_size_t o1, 2213 bus_space_handle_t h2, 2214 bus_size_t o2, 2215 bus_size_t c) 2216 { 2217 for (; c; c--, o1++, o2++) 2218 bus_space_write_1(t, h1, o1, bus_space_read_1(t, h2, o2)); 2219 } 2220 2221 void 2222 bus_space_copy_region_2( 2223 bus_space_tag_t t, 2224 bus_space_handle_t h1, 2225 bus_size_t o1, 2226 bus_space_handle_t h2, 2227 bus_size_t o2, 2228 bus_size_t c) 2229 { 2230 for (; c; c--, o1+=2, o2+=2) 2231 bus_space_write_2(t, h1, o1, bus_space_read_2(t, h2, o2)); 2232 } 2233 2234 void 2235 bus_space_copy_region_4( 2236 bus_space_tag_t t, 2237 bus_space_handle_t h1, 2238 bus_size_t o1, 2239 bus_space_handle_t h2, 2240 bus_size_t o2, 2241 bus_size_t c) 2242 { 2243 for (; c; c--, o1+=4, o2+=4) 2244 bus_space_write_4(t, h1, o1, bus_space_read_4(t, h2, o2)); 2245 } 2246 2247 void 2248 bus_space_copy_region_8( 2249 bus_space_tag_t t, 2250 bus_space_handle_t h1, 2251 bus_size_t o1, 2252 bus_space_handle_t h2, 2253 bus_size_t o2, 2254 bus_size_t c) 2255 { 2256 for (; c; c--, o1+=8, o2+=8) 2257 bus_space_write_8(t, h1, o1, bus_space_read_8(t, h2, o2)); 2258 } 2259 2260 /* 2261 * void bus_space_read_region_stream_N(bus_space_tag_t tag, 2262 * bus_space_handle_t bsh, bus_size_t off, 2263 * u_intN_t *addr, bus_size_t count); 2264 * 2265 */ 2266 void 2267 bus_space_read_region_stream_1( 
2268 bus_space_tag_t t, 2269 bus_space_handle_t h, 2270 bus_size_t o, 2271 uint8_t *a, 2272 bus_size_t c) 2273 { 2274 for (; c; a++, c--, o++) 2275 *a = bus_space_read_stream_1(t, h, o); 2276 } 2277 void 2278 bus_space_read_region_stream_2( 2279 bus_space_tag_t t, 2280 bus_space_handle_t h, 2281 bus_size_t o, 2282 uint16_t *a, 2283 bus_size_t c) 2284 { 2285 for (; c; a++, c--, o+=2) 2286 *a = bus_space_read_stream_2(t, h, o); 2287 } 2288 void 2289 bus_space_read_region_stream_4( 2290 bus_space_tag_t t, 2291 bus_space_handle_t h, 2292 bus_size_t o, 2293 uint32_t *a, 2294 bus_size_t c) 2295 { 2296 for (; c; a++, c--, o+=4) 2297 *a = bus_space_read_stream_4(t, h, o); 2298 } 2299 void 2300 bus_space_read_region_stream_8( 2301 bus_space_tag_t t, 2302 bus_space_handle_t h, 2303 bus_size_t o, 2304 uint64_t *a, 2305 bus_size_t c) 2306 { 2307 for (; c; a++, c--, o+=8) 2308 *a = bus_space_read_stream_8(t, h, o); 2309 } 2310 2311 /* 2312 * void bus_space_write_region_stream_N(bus_space_tag_t tag, 2313 * bus_space_handle_t bsh, bus_size_t off, 2314 * u_intN_t *addr, bus_size_t count); 2315 * 2316 */ 2317 void 2318 bus_space_write_region_stream_1( 2319 bus_space_tag_t t, 2320 bus_space_handle_t h, 2321 bus_size_t o, 2322 const uint8_t *a, 2323 bus_size_t c) 2324 { 2325 for (; c; a++, c--, o++) 2326 bus_space_write_stream_1(t, h, o, *a); 2327 } 2328 2329 void 2330 bus_space_write_region_stream_2( 2331 bus_space_tag_t t, 2332 bus_space_handle_t h, 2333 bus_size_t o, 2334 const uint16_t *a, 2335 bus_size_t c) 2336 { 2337 for (; c; a++, c--, o+=2) 2338 bus_space_write_stream_2(t, h, o, *a); 2339 } 2340 2341 void 2342 bus_space_write_region_stream_4( 2343 bus_space_tag_t t, 2344 bus_space_handle_t h, 2345 bus_size_t o, 2346 const uint32_t *a, 2347 bus_size_t c) 2348 { 2349 for (; c; a++, c--, o+=4) 2350 bus_space_write_stream_4(t, h, o, *a); 2351 } 2352 2353 void 2354 bus_space_write_region_stream_8( 2355 bus_space_tag_t t, 2356 bus_space_handle_t h, 2357 bus_size_t o, 2358 const 
uint64_t *a, 2359 bus_size_t c) 2360 { 2361 for (; c; a++, c--, o+=8) 2362 bus_space_write_stream_8(t, h, o, *a); 2363 } 2364 2365 2366 /* 2367 * void bus_space_set_region_stream_N(bus_space_tag_t tag, 2368 * bus_space_handle_t bsh, bus_size_t off, 2369 * u_intN_t *addr, bus_size_t count); 2370 * 2371 */ 2372 void 2373 bus_space_set_region_stream_1( 2374 bus_space_tag_t t, 2375 bus_space_handle_t h, 2376 bus_size_t o, 2377 const uint8_t v, 2378 bus_size_t c) 2379 { 2380 for (; c; c--, o++) 2381 bus_space_write_stream_1(t, h, o, v); 2382 } 2383 2384 void 2385 bus_space_set_region_stream_2( 2386 bus_space_tag_t t, 2387 bus_space_handle_t h, 2388 bus_size_t o, 2389 const uint16_t v, 2390 bus_size_t c) 2391 { 2392 for (; c; c--, o+=2) 2393 bus_space_write_stream_2(t, h, o, v); 2394 } 2395 2396 void 2397 bus_space_set_region_stream_4( 2398 bus_space_tag_t t, 2399 bus_space_handle_t h, 2400 bus_size_t o, 2401 const uint32_t v, 2402 bus_size_t c) 2403 { 2404 for (; c; c--, o+=4) 2405 bus_space_write_stream_4(t, h, o, v); 2406 } 2407 2408 void 2409 bus_space_set_region_stream_8( 2410 bus_space_tag_t t, 2411 bus_space_handle_t h, 2412 bus_size_t o, 2413 const uint64_t v, 2414 bus_size_t c) 2415 { 2416 for (; c; c--, o+=8) 2417 bus_space_write_stream_8(t, h, o, v); 2418 } 2419 2420 /* 2421 * void bus_space_copy_region_stream_N(bus_space_tag_t tag, 2422 * bus_space_handle_t bsh1, bus_size_t off1, 2423 * bus_space_handle_t bsh2, bus_size_t off2, 2424 * bus_size_t count); 2425 * 2426 * Copy `count' 1, 2, 4, or 8 byte values from bus space starting 2427 * at tag/bsh1/off1 to bus space starting at tag/bsh2/off2. 
2428 */ 2429 2430 void 2431 bus_space_copy_region_stream_1( 2432 bus_space_tag_t t, 2433 bus_space_handle_t h1, 2434 bus_size_t o1, 2435 bus_space_handle_t h2, 2436 bus_size_t o2, 2437 bus_size_t c) 2438 { 2439 for (; c; c--, o1++, o2++) 2440 bus_space_write_stream_1(t, h1, o1, bus_space_read_stream_1(t, h2, o2)); 2441 } 2442 2443 void 2444 bus_space_copy_region_stream_2( 2445 bus_space_tag_t t, 2446 bus_space_handle_t h1, 2447 bus_size_t o1, 2448 bus_space_handle_t h2, 2449 bus_size_t o2, 2450 bus_size_t c) 2451 { 2452 for (; c; c--, o1+=2, o2+=2) 2453 bus_space_write_stream_2(t, h1, o1, bus_space_read_stream_2(t, h2, o2)); 2454 } 2455 2456 void 2457 bus_space_copy_region_stream_4( 2458 bus_space_tag_t t, 2459 bus_space_handle_t h1, 2460 bus_size_t o1, 2461 bus_space_handle_t h2, 2462 bus_size_t o2, 2463 bus_size_t c) 2464 { 2465 for (; c; c--, o1+=4, o2+=4) 2466 bus_space_write_stream_4(t, h1, o1, bus_space_read_stream_4(t, h2, o2)); 2467 } 2468 2469 void 2470 bus_space_copy_region_stream_8( 2471 bus_space_tag_t t, 2472 bus_space_handle_t h1, 2473 bus_size_t o1, 2474 bus_space_handle_t h2, 2475 bus_size_t o2, 2476 bus_size_t c) 2477 { 2478 for (; c; c--, o1+=8, o2+=8) 2479 bus_space_write_stream_8(t, h1, o1, bus_space_read_8(t, h2, o2)); 2480 } 2481 2482 void 2483 bus_space_write_1( 2484 bus_space_tag_t t, 2485 bus_space_handle_t h, 2486 bus_size_t o, 2487 uint8_t v) 2488 { 2489 (*t->sparc_write_1)(t, h, o, v); 2490 } 2491 2492 void 2493 bus_space_write_2( 2494 bus_space_tag_t t, 2495 bus_space_handle_t h, 2496 bus_size_t o, 2497 uint16_t v) 2498 { 2499 (*t->sparc_write_2)(t, h, o, v); 2500 } 2501 2502 void 2503 bus_space_write_4( 2504 bus_space_tag_t t, 2505 bus_space_handle_t h, 2506 bus_size_t o, 2507 uint32_t v) 2508 { 2509 (*t->sparc_write_4)(t, h, o, v); 2510 } 2511 2512 void 2513 bus_space_write_8( 2514 bus_space_tag_t t, 2515 bus_space_handle_t h, 2516 bus_size_t o, 2517 uint64_t v) 2518 { 2519 (*t->sparc_write_8)(t, h, o, v); 2520 } 2521 2522 #if 
__SLIM_SPARC_BUS_SPACE 2523 2524 void 2525 bus_space_write_1( 2526 bus_space_tag_t t, 2527 bus_space_handle_t h, 2528 bus_size_t o, 2529 uint8_t v) 2530 { 2531 __insn_barrier(); 2532 bus_space_write_1_real(t, h, o, v); 2533 } 2534 2535 void 2536 bus_space_write_2( 2537 bus_space_tag_t t, 2538 bus_space_handle_t h, 2539 bus_size_t o, 2540 uint16_t v) 2541 { 2542 __insn_barrier(); 2543 bus_space_write_2_real(t, h, o, v); 2544 } 2545 2546 void 2547 bus_space_write_4( 2548 bus_space_tag_t t, 2549 bus_space_handle_t h, 2550 bus_size_t o, 2551 uint32_t v) 2552 { 2553 __insn_barrier(); 2554 bus_space_write_4_real(t, h, o, v); 2555 } 2556 2557 void 2558 bus_space_write_8( 2559 bus_space_tag_t t, 2560 bus_space_handle_t h, 2561 bus_size_t o, 2562 uint64_t v) 2563 { 2564 __insn_barrier(); 2565 bus_space_write_8_real(t, h, o, v); 2566 } 2567 2568 #endif /* __SLIM_SPARC_BUS_SPACE */ 2569 2570 uint8_t 2571 bus_space_read_1( 2572 bus_space_tag_t t, 2573 bus_space_handle_t h, 2574 bus_size_t o) 2575 { 2576 return (*t->sparc_read_1)(t, h, o); 2577 } 2578 2579 uint16_t 2580 bus_space_read_2( 2581 bus_space_tag_t t, 2582 bus_space_handle_t h, 2583 bus_size_t o) 2584 { 2585 return (*t->sparc_read_2)(t, h, o); 2586 } 2587 2588 uint32_t 2589 bus_space_read_4( 2590 bus_space_tag_t t, 2591 bus_space_handle_t h, 2592 bus_size_t o) 2593 { 2594 return (*t->sparc_read_4)(t, h, o); 2595 } 2596 2597 uint64_t 2598 bus_space_read_8( 2599 bus_space_tag_t t, 2600 bus_space_handle_t h, 2601 bus_size_t o) 2602 { 2603 return (*t->sparc_read_8)(t, h, o); 2604 } 2605 2606 #if __SLIM_SPARC_BUS_SPACE 2607 uint8_t 2608 bus_space_read_1( 2609 bus_space_tag_t t, 2610 bus_space_handle_t h, 2611 bus_size_t o) 2612 { 2613 __insn_barrier(); 2614 return bus_space_read_1_real(t, h, o); 2615 } 2616 2617 uint16_t 2618 bus_space_read_2( 2619 bus_space_tag_t t, 2620 bus_space_handle_t h, 2621 bus_size_t o) 2622 { 2623 __insn_barrier(); 2624 return bus_space_read_2_real(t, h, o); 2625 } 2626 2627 uint32_t 2628 
bus_space_read_4( 2629 bus_space_tag_t t, 2630 bus_space_handle_t h, 2631 bus_size_t o) 2632 { 2633 __insn_barrier(); 2634 return bus_space_read_4_real(t, h, o); 2635 } 2636 2637 uint64_t 2638 bus_space_read_8( 2639 bus_space_tag_t t, 2640 bus_space_handle_t h, 2641 bus_size_t o) 2642 { 2643 __insn_barrier(); 2644 return bus_space_read_8_real(t, h, o); 2645 } 2646 2647 #endif /* __SLIM_SPARC_BUS_SPACE */ 2648 2649 void 2650 bus_space_read_multi_1( 2651 bus_space_tag_t t, 2652 bus_space_handle_t h, 2653 bus_size_t o, 2654 uint8_t *a, 2655 bus_size_t c) 2656 { 2657 while (c-- > 0) 2658 *a++ = bus_space_read_1(t, h, o); 2659 } 2660 2661 void 2662 bus_space_read_multi_2( 2663 bus_space_tag_t t, 2664 bus_space_handle_t h, 2665 bus_size_t o, 2666 uint16_t *a, 2667 bus_size_t c) 2668 { 2669 while (c-- > 0) 2670 *a++ = bus_space_read_2(t, h, o); 2671 } 2672 2673 void 2674 bus_space_read_multi_4( 2675 bus_space_tag_t t, 2676 bus_space_handle_t h, 2677 bus_size_t o, 2678 uint32_t *a, 2679 bus_size_t c) 2680 { 2681 while (c-- > 0) 2682 *a++ = bus_space_read_4(t, h, o); 2683 } 2684 2685 void 2686 bus_space_read_multi_8( 2687 bus_space_tag_t t, 2688 bus_space_handle_t h, 2689 bus_size_t o, 2690 uint64_t *a, 2691 bus_size_t c) 2692 { 2693 while (c-- > 0) 2694 *a++ = bus_space_read_8(t, h, o); 2695 } 2696 2697 /* 2698 * void bus_space_read_multi_N(bus_space_tag_t tag, 2699 * bus_space_handle_t bsh, bus_size_t offset, 2700 * u_intN_t *addr, bus_size_t count); 2701 * 2702 * Read `count' 1, 2, 4, or 8 byte quantities from bus space 2703 * described by tag/handle/offset and copy into buffer provided. 
2704 */ 2705 void 2706 bus_space_read_multi_stream_2( 2707 bus_space_tag_t t, 2708 bus_space_handle_t h, 2709 bus_size_t o, 2710 uint16_t *a, 2711 bus_size_t c) 2712 { 2713 while (c-- > 0) 2714 *a++ = bus_space_read_2_real(t, h, o); 2715 } 2716 2717 void 2718 bus_space_read_multi_stream_4( 2719 bus_space_tag_t t, 2720 bus_space_handle_t h, 2721 bus_size_t o, 2722 uint32_t *a, 2723 bus_size_t c) 2724 { 2725 while (c-- > 0) 2726 *a++ = bus_space_read_4_real(t, h, o); 2727 } 2728 2729 void 2730 bus_space_read_multi_stream_8( 2731 bus_space_tag_t t, 2732 bus_space_handle_t h, 2733 bus_size_t o, 2734 uint64_t *a, 2735 bus_size_t c) 2736 { 2737 while (c-- > 0) 2738 *a++ = bus_space_read_8_real(t, h, o); 2739 } 2740 2741 /* 2742 * void bus_space_write_multi_N(bus_space_tag_t tag, 2743 * bus_space_handle_t bsh, bus_size_t offset, 2744 * const u_intN_t *addr, bus_size_t count); 2745 * 2746 * Write `count' 1, 2, 4, or 8 byte quantities from the buffer 2747 * provided to bus space described by tag/handle/offset. 
 */
void
bus_space_write_multi_1(
	bus_space_tag_t t,
	bus_space_handle_t h,
	bus_size_t o,
	const uint8_t *a,
	bus_size_t c)
{

	/* Offset is intentionally not advanced (FIFO-style register). */
	while (c-- > 0)
		bus_space_write_1(t, h, o, *a++);
}

void
bus_space_write_multi_2(
	bus_space_tag_t t,
	bus_space_handle_t h,
	bus_size_t o,
	const uint16_t *a,
	bus_size_t c)
{

	while (c-- > 0)
		bus_space_write_2(t, h, o, *a++);
}

void
bus_space_write_multi_4(
	bus_space_tag_t t,
	bus_space_handle_t h,
	bus_size_t o,
	const uint32_t *a,
	bus_size_t c)
{

	while (c-- > 0)
		bus_space_write_4(t, h, o, *a++);
}

void
bus_space_write_multi_8(
	bus_space_tag_t t,
	bus_space_handle_t h,
	bus_size_t o,
	const uint64_t *a,
	bus_size_t c)
{

	while (c-- > 0)
		bus_space_write_8(t, h, o, *a++);
}

/*
 * Allocate a new bus tag and have it inherit the methods of the
 * given parent.
 *
 * May sleep (KM_SLEEP).  The child starts with the parent's methods
 * (structure copy) but with its own empty `ranges' table; the caller
 * fills in cookie-specific overrides afterwards.
 */
bus_space_tag_t
bus_space_tag_alloc(bus_space_tag_t parent, void *cookie)
{
	struct sparc_bus_space_tag *sbt;

	sbt = kmem_zalloc(sizeof(*sbt), KM_SLEEP);

	if (parent) {
		memcpy(sbt, parent, sizeof(*sbt));
		sbt->parent = parent;
		sbt->ranges = NULL;
		sbt->nranges = 0;
	}

	sbt->cookie = cookie;
	return (sbt);
}

/*
 * Generic routine to translate an address using OpenPROM `ranges'.
 *
 * On success *bap is rewritten into the parent bus' address space and
 * 0 is returned; EINVAL if no range matches the child's IO space.
 */
int
bus_space_translate_address_generic(struct openprom_range *ranges, int nranges,
    bus_addr_t *bap)
{
	int i, space = BUS_ADDR_IOSPACE(*bap);

	for (i = 0; i < nranges; i++) {
		struct openprom_range *rp = &ranges[i];

		if (rp->or_child_space != space)
			continue;

		/* We've found the connection to the parent bus. */
		*bap = BUS_ADDR(rp->or_parent_space,
		    rp->or_parent_base + BUS_ADDR_PADDR(*bap));
		return (0);
	}

	return (EINVAL);
}

/*
 * Map a device at bus address `ba' into kernel virtual space.  If
 * `va' is zero, virtual space is carved out of the static IODEV
 * range (never reclaimed); otherwise the caller-supplied va is used.
 * The mapping is entered uncached (PMAP_NC), read/write.
 */
static int
sparc_bus_map_iodev(bus_space_tag_t t, bus_addr_t ba, bus_size_t size, int flags,
	vaddr_t va, bus_space_handle_t *hp)
{
	vaddr_t v;
	paddr_t pa;
	unsigned int pmtype;
	bus_space_tag_t pt;
	static vaddr_t iobase;	/* next free VA in the IODEV range */

	/*
	 * This base class bus map function knows about address range
	 * translation so bus drivers that need no other special
	 * handling can just keep this method in their tags.
	 *
	 * We expect to resolve range translations iteratively, but allow
	 * for recursion just in case.
	 */
	while ((pt = t->parent) != NULL) {
		if (t->ranges != NULL) {
			int error;

			if ((error = bus_space_translate_address_generic(
					t->ranges, t->nranges, &ba)) != 0)
				return (error);
		}
		/* Parent overrides the map method: delegate to it. */
		if (pt->sparc_bus_map != sparc_bus_map)
			return (bus_space_map2(pt, ba, size, flags, va, hp));
		t = pt;
	}

	if (iobase == 0)
		iobase = IODEV_BASE;

	size = round_page(size);
	if (size == 0) {
		printf("sparc_bus_map: zero size\n");
		return (EINVAL);
	}

	if (va)
		v = trunc_page(va);
	else {
		v = iobase;
		iobase += size;
		if (iobase > IODEV_END)	/* unlikely */
			panic("sparc_bus_map: iobase=0x%lx", iobase);
	}

	pmtype = PMAP_IOENC(BUS_ADDR_IOSPACE(ba));
	pa = BUS_ADDR_PADDR(ba);

	/* note: preserve page offset */
	*hp = (bus_space_handle_t)(v | ((u_long)pa & PGOFSET));

	pa = trunc_page(pa);
	do {
		pmap_kenter_pa(v, pa | pmtype | PMAP_NC,
		    VM_PROT_READ | VM_PROT_WRITE, 0);
		v += PAGE_SIZE;
		pa += PAGE_SIZE;
	} while ((size -= PAGE_SIZE) > 0);

	pmap_update(pmap_kernel());
	return (0);
}

/*
 * BUS_SPACE_MAP_LARGE path: reserve virtual space from kernel_map
 * instead of the fixed IODEV range, then map through the iodev path.
 * NOTE(review): returns -1 rather than an errno on uvm_map failure,
 * unlike the other map routines -- callers only check for non-zero.
 */
static int
sparc_bus_map_large(bus_space_tag_t t, bus_addr_t ba,
		    bus_size_t size, int flags, bus_space_handle_t *hp)
{
	vaddr_t v = 0;

	if (uvm_map(kernel_map, &v, size, NULL, 0, PAGE_SIZE,
	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_SHARE, UVM_ADV_NORMAL,
	    0)) == 0) {
		return sparc_bus_map_iodev(t, ba, size, flags, v, hp);
	}
	return -1;
}

/*
 * Base class bus_space map method: choose the large-mapping path or
 * the fixed IODEV path based on BUS_SPACE_MAP_LARGE.
 */
int
sparc_bus_map(bus_space_tag_t t, bus_addr_t ba,
	      bus_size_t size, int flags, vaddr_t va,
	      bus_space_handle_t *hp)
{

	if (flags & BUS_SPACE_MAP_LARGE) {
		return sparc_bus_map_large(t, ba, size, flags, hp);
	} else
		return sparc_bus_map_iodev(t, ba, size, flags, va, hp);

}

/* Remove the kernel mapping established by sparc_bus_map(). */
int
sparc_bus_unmap(bus_space_tag_t t, bus_space_handle_t bh, bus_size_t size)
{
	vaddr_t va = trunc_page((vaddr_t)bh);

	/*
	 * XXX
	 * mappings with BUS_SPACE_MAP_LARGE need additional care here
	 * we can just check if the VA is in the IODEV range
	 */

	pmap_kremove(va, round_page(size));
	pmap_update(pmap_kernel());
	return (0);
}

/*
 * Create a sub-handle; handles are plain virtual addresses, so this
 * is just pointer arithmetic.  Never fails.
 */
int
sparc_bus_subregion(bus_space_tag_t tag, bus_space_handle_t handle,
		    bus_size_t offset, bus_size_t size,
		    bus_space_handle_t *nhandlep)
{

	*nhandlep = handle + offset;
	return (0);
}

/*
 * Base class bus mmap function; see also sparc_bus_map.
 * Returns a page-aligned physical cookie with the pmap IO-encoding
 * and no-cache bits or'ed in, or -1 on translation failure.
 */
paddr_t
sparc_bus_mmap(bus_space_tag_t t, bus_addr_t ba, off_t off,
	       int prot, int flags)
{
	u_int pmtype;
	paddr_t pa;
	bus_space_tag_t pt;

	/*
	 * Base class bus mmap function; see also sparc_bus_map
	 */
	while ((pt = t->parent) != NULL) {
		if (t->ranges != NULL) {
			int error;

			if ((error = bus_space_translate_address_generic(
					t->ranges, t->nranges, &ba)) != 0)
				return (-1);
		}
		if (pt->sparc_bus_mmap != sparc_bus_mmap)
			return (bus_space_mmap(pt, ba, off, prot, flags));
		t = pt;
	}

	pmtype = PMAP_IOENC(BUS_ADDR_IOSPACE(ba));
	pa = trunc_page(BUS_ADDR_PADDR(ba) + off);

	return (paddr_t)(pa | pmtype | PMAP_NC);
}

/*
 * Establish a temporary bus mapping for device probing.
 *
 * Maps the device at the fixed TMPMAP_VA scratch address, does a
 * fault-tolerant access via probeget() (which yields -1 on a bus
 * fault), optionally runs `callback', then tears the mapping down.
 * Returns non-zero iff the device responded (and callback, if any,
 * succeeded).
 */
int
bus_space_probe(bus_space_tag_t tag, bus_addr_t paddr, bus_size_t size,
		size_t offset, int flags,
		int (*callback)(void *, void *), void *arg)
{
	bus_space_handle_t bh;
	void *tmp;
	int result;

	if (bus_space_map2(tag, paddr, size, flags, TMPMAP_VA, &bh) != 0)
		return (0);

	tmp = (void *)bh;
	result = (probeget((char *)tmp + offset, size) != -1);
	if (result && callback != NULL)
		result = (*callback)(tmp, arg);
	bus_space_unmap(tag, bh, size);
	return (result);
}


/*
 * Mainbus interrupt establish method: allocate an intrhand (may
 * sleep) and hook it up at the given priority level.  The returned
 * cookie is the intrhand itself.
 */
void *
sparc_mainbus_intr_establish(bus_space_tag_t t, int pil, int level,
			     int (*handler)(void *), void *arg,
			     void (*fastvec)(void))
{
	struct intrhand *ih;

	ih = kmem_alloc(sizeof(struct intrhand), KM_SLEEP);
	ih->ih_fun = handler;
	ih->ih_arg = arg;
	intr_establish(pil, level, ih, fastvec, false);
	return (ih);
}

/* Default barrier method: intentionally a no-op on this platform. */
void sparc_bus_barrier (bus_space_tag_t t, bus_space_handle_t h,
			bus_size_t offset, bus_size_t size, int flags)
{

	/* No default barrier action defined */
	return;
}

/*
 * Default tag access methods: direct (_real) accesses, installed in
 * mainbus_space_tag below and inherited by child tags that don't
 * override them.
 */
static uint8_t
sparc_bus_space_read_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
{

	return bus_space_read_1_real(t, h, o);
}

static uint16_t
sparc_bus_space_read_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
{

	return bus_space_read_2_real(t, h, o);
}

static uint32_t
sparc_bus_space_read_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
{

	return bus_space_read_4_real(t, h, o);
}

static uint64_t
sparc_bus_space_read_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
{

	return bus_space_read_8_real(t, h, o);
}

static void
sparc_bus_space_write_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
			uint8_t v)
{

	bus_space_write_1_real(t, h, o, v);
}

static void
sparc_bus_space_write_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
			uint16_t v)
{

	bus_space_write_2_real(t, h, o, v);
}

static void
sparc_bus_space_write_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
			uint32_t v)
{

	bus_space_write_4_real(t, h, o, v);
}

static void
sparc_bus_space_write_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
			uint64_t v)
{

	bus_space_write_8_real(t, h, o, v);
}

/* Root bus tag: base-class methods, no parent, no ranges. */
struct sparc_bus_space_tag mainbus_space_tag = {
	NULL,				/* cookie */
	NULL,				/* parent bus tag */
	NULL,				/* ranges */
	0,				/* nranges */
	sparc_bus_map,			/* bus_space_map */
	sparc_bus_unmap,		/* bus_space_unmap */
	sparc_bus_subregion,		/* bus_space_subregion */
	sparc_bus_barrier,		/* bus_space_barrier */
	sparc_bus_mmap,			/* bus_space_mmap */
	sparc_mainbus_intr_establish,	/* bus_intr_establish */

	sparc_bus_space_read_1,		/* bus_space_read_1 */
	sparc_bus_space_read_2,		/* bus_space_read_2 */
	sparc_bus_space_read_4,		/* bus_space_read_4 */
	sparc_bus_space_read_8,		/* bus_space_read_8 */
	sparc_bus_space_write_1,	/* bus_space_write_1 */
	sparc_bus_space_write_2,	/* bus_space_write_2 */
	sparc_bus_space_write_4,	/* bus_space_write_4 */
	sparc_bus_space_write_8		/* bus_space_write_8 */
};

/* /dev/mem access check: physical page must exist. */
int
mm_md_physacc(paddr_t pa, vm_prot_t prot)
{

	return pmap_pa_exists(pa) ? 0 : EFAULT;
}

/*
 * /dev/kmem access check: allow the message buffer page, and the
 * PROM virtual range read-only; everything else falls through to the
 * generic handler (*handled == false).
 */
int
mm_md_kernacc(void *ptr, vm_prot_t prot, bool *handled)
{
	const vaddr_t v = (vaddr_t)ptr;

	*handled = (v >= MSGBUF_VA && v < MSGBUF_VA + PAGE_SIZE) ||
	    (v >= prom_vstart && v < prom_vend && (prot & VM_PROT_WRITE) == 0);
	return 0;
}

/* Machine-dependent minor devices of /dev/mem: sun4 EEPROM only. */
int
mm_md_readwrite(dev_t dev, struct uio *uio)
{

	switch (minor(dev)) {
#if defined(SUN4)
	case DEV_EEPROM:
		if (cputyp == CPU_SUN4)
			return eeprom_uio(uio);
#endif
	}
	return ENXIO;
}