Home | History | Annotate | Line # | Download | only in sparc64
machdep.c revision 1.278
      1 /*	$NetBSD: machdep.c,v 1.277 2014/05/13 19:39:40 palle Exp $ */
      2 
      3 /*-
      4  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  * POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 
     33 /*
     34  * Copyright (c) 1992, 1993
     35  *	The Regents of the University of California.  All rights reserved.
     36  *
     37  * This software was developed by the Computer Systems Engineering group
     38  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
     39  * contributed to Berkeley.
     40  *
     41  * All advertising materials mentioning features or use of this software
     42  * must display the following acknowledgement:
     43  *	This product includes software developed by the University of
     44  *	California, Lawrence Berkeley Laboratory.
     45  *
     46  * Redistribution and use in source and binary forms, with or without
     47  * modification, are permitted provided that the following conditions
     48  * are met:
     49  * 1. Redistributions of source code must retain the above copyright
     50  *    notice, this list of conditions and the following disclaimer.
     51  * 2. Redistributions in binary form must reproduce the above copyright
     52  *    notice, this list of conditions and the following disclaimer in the
     53  *    documentation and/or other materials provided with the distribution.
     54  * 3. Neither the name of the University nor the names of its contributors
     55  *    may be used to endorse or promote products derived from this software
     56  *    without specific prior written permission.
     57  *
     58  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     59  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     60  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     61  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     62  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     63  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     64  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     65  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     66  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     67  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     68  * SUCH DAMAGE.
     69  *
     70  *	@(#)machdep.c	8.6 (Berkeley) 1/14/94
     71  */
     72 
     73 #include <sys/cdefs.h>
     74 __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.277 2014/05/13 19:39:40 palle Exp $");
     75 
     76 #include "opt_ddb.h"
     77 #include "opt_multiprocessor.h"
     78 #include "opt_modular.h"
     79 #include "opt_compat_netbsd.h"
     80 #include "opt_compat_svr4.h"
     81 #include "opt_compat_sunos.h"
     82 
     83 #include <sys/param.h>
     84 #include <sys/extent.h>
     85 #include <sys/signal.h>
     86 #include <sys/signalvar.h>
     87 #include <sys/proc.h>
     88 #include <sys/buf.h>
     89 #include <sys/device.h>
     90 #include <sys/ras.h>
     91 #include <sys/reboot.h>
     92 #include <sys/systm.h>
     93 #include <sys/kernel.h>
     94 #include <sys/conf.h>
     95 #include <sys/file.h>
     96 #include <sys/malloc.h>
     97 #include <sys/mbuf.h>
     98 #include <sys/mount.h>
     99 #include <sys/msgbuf.h>
    100 #include <sys/syscallargs.h>
    101 #include <sys/exec.h>
    102 #include <sys/ucontext.h>
    103 #include <sys/cpu.h>
    104 #include <sys/module.h>
    105 #include <sys/ksyms.h>
    106 
    107 #include <sys/exec_aout.h>
    108 
    109 #include <dev/mm.h>
    110 
    111 #include <uvm/uvm.h>
    112 
    113 #include <sys/sysctl.h>
    114 #ifndef	ELFSIZE
    115 #ifdef __arch64__
    116 #define	ELFSIZE	64
    117 #else
    118 #define	ELFSIZE	32
    119 #endif
    120 #endif
    121 #include <sys/exec_elf.h>
    122 
    123 #define _SPARC_BUS_DMA_PRIVATE
    124 #include <machine/autoconf.h>
    125 #include <sys/bus.h>
    126 #include <machine/frame.h>
    127 #include <machine/cpu.h>
    128 #include <machine/pcb.h>
    129 #include <machine/pmap.h>
    130 #include <machine/openfirm.h>
    131 #include <machine/sparc64.h>
    132 
    133 #include <sparc64/sparc64/cache.h>
    134 
    135 /* #include "fb.h" */
    136 #include "ksyms.h"
    137 
    138 int bus_space_debug = 0; /* This may be used by macros elsewhere. */
    139 #ifdef DEBUG
    140 #define DPRINTF(l, s)   do { if (bus_space_debug & l) printf s; } while (0)
    141 #else
    142 #define DPRINTF(l, s)
    143 #endif
    144 
    145 #if defined(COMPAT_16) || defined(COMPAT_SVR4) || defined(COMPAT_SVR4_32) || defined(COMPAT_SUNOS)
    146 #ifdef DEBUG
    147 /* See <sparc64/sparc64/sigdebug.h> */
    148 int sigdebug = 0x0;
    149 int sigpid = 0;
    150 #endif
    151 #endif
    152 
    153 extern vaddr_t avail_end;
    154 #ifdef MODULAR
    155 vaddr_t module_start, module_end;
    156 static struct vm_map module_map_store;
    157 #endif
    158 
    159 /*
    160  * Maximum number of DMA segments we'll allow in dmamem_load()
    161  * routines.  Can be overridden in config files, etc.
    162  */
    163 #ifndef MAX_DMA_SEGS
    164 #define MAX_DMA_SEGS	20
    165 #endif
    166 
    167 void	dumpsys(void);
    168 void	stackdump(void);
    169 
    170 
    171 /*
    172  * Machine-dependent startup code
    173  */
    174 void
    175 cpu_startup(void)
    176 {
    177 #ifdef DEBUG
    178 	extern int pmapdebug;
    179 	int opmapdebug = pmapdebug;
    180 #endif
    181 	char pbuf[9];
    182 
    183 #ifdef DEBUG
    184 	pmapdebug = 0;
    185 #endif
    186 
    187 	/*
    188 	 * Good {morning,afternoon,evening,night}.
    189 	 */
    190 	printf("%s%s", copyright, version);
    191 	/*identifycpu();*/
    192 	format_bytes(pbuf, sizeof(pbuf), ctob((uint64_t)physmem));
    193 	printf("total memory = %s\n", pbuf);
    194 
    195 #ifdef DEBUG
    196 	pmapdebug = opmapdebug;
    197 #endif
    198 	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
    199 	printf("avail memory = %s\n", pbuf);
    200 
    201 #if 0
    202 	pmap_redzone();
    203 #endif
    204 
    205 #ifdef MODULAR
    206 	uvm_map_setup(&module_map_store, module_start, module_end, 0);
    207 	module_map_store.pmap = pmap_kernel();
    208 	module_map = &module_map_store;
    209 #endif
    210 }
    211 
    212 /*
    213  * Set up registers on exec.
    214  */
    215 
    216 #ifdef __arch64__
    217 #define STACK_OFFSET	BIAS
    218 #undef CCFSZ
    219 #define CCFSZ	CC64FSZ
    220 #else
    221 #define STACK_OFFSET	0
    222 #endif
    223 
/* ARGSUSED */
/*
 * Set up the user registers of lwp `l' for a freshly exec'd image:
 * build a clean trapframe so execution starts at the image entry
 * point with the memory model requested in the ELF header (64-bit).
 * Any FPU state held over from the previous image is discarded.
 */
void
setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
{
	struct trapframe64 *tf = l->l_md.md_tf;
	struct fpstate64 *fs;
	int64_t tstate;
	int pstate = PSTATE_USER;
#ifdef __arch64__
	Elf_Ehdr *eh = pack->ep_hdr;
#endif

	/* Clear the P_32 flag. */
	l->l_proc->p_flag &= ~PK_32;

	/* Don't allow misaligned code by default */
	l->l_proc->p_md.md_flags &= ~MDP_FIXALIGN;

	/*
	 * Set the registers to 0 except for:
	 *	%o6: stack pointer, built in exec())
	 *	%tstate: (retain icc and xcc and cwp bits)
	 *	%g1: p->p_psstrp (used by crt0)
	 *	%tpc,%tnpc: entry point of program
	 */
#ifdef __arch64__
	/* Check what memory model is requested */
	switch ((eh->e_flags & EF_SPARCV9_MM)) {
	default:
		printf("Unknown memory model %d\n",
		       (eh->e_flags & EF_SPARCV9_MM));
		/* FALLTHROUGH */
	case EF_SPARCV9_TSO:
		pstate = PSTATE_MM_TSO|PSTATE_IE;
		break;
	case EF_SPARCV9_PSO:
		pstate = PSTATE_MM_PSO|PSTATE_IE;
		break;
	case EF_SPARCV9_RMO:
		pstate = PSTATE_MM_RMO|PSTATE_IE;
		break;
	}
#endif
	tstate = ((int64_t)ASI_PRIMARY_NO_FAULT << TSTATE_ASI_SHIFT) |
	    (pstate << TSTATE_PSTATE_SHIFT) | (tf->tf_tstate & TSTATE_CWP);
	if ((fs = l->l_md.md_fpstate) != NULL) {
		/*
		 * We hold an FPU state.  If we own *the* FPU chip state
		 * we must get rid of it, and the only way to do that is
		 * to save it.  In any case, get rid of our FPU state.
		 */
		fpusave_lwp(l, false);
		pool_cache_put(fpstate_cache, fs);
		l->l_md.md_fpstate = NULL;
	}
	memset(tf, 0, sizeof *tf);
	tf->tf_tstate = tstate;
	tf->tf_global[1] = l->l_proc->p_psstrp;
	/* %g4 needs to point to the start of the data segment */
	tf->tf_global[4] = 0;
	tf->tf_pc = pack->ep_entry & ~3;	/* instructions are 4-byte aligned */
	tf->tf_npc = tf->tf_pc + 4;
	stack -= sizeof(struct rwindow);	/* leave room for an initial register window */
	tf->tf_out[6] = stack - STACK_OFFSET;
	tf->tf_out[7] = 0UL;	/* clear %o7: no return address for the entry frame */
#ifdef NOTDEF_DEBUG
	printf("setregs: setting tf %p sp %p pc %p\n", (long)tf,
	       (long)tf->tf_out[6], (long)tf->tf_pc);
#ifdef DDB
	Debugger();
#endif
#endif
}
    297 
    298 static char *parse_bootfile(char *);
    299 static char *parse_bootargs(char *);
    300 
/*
 * Extract the kernel file name from the firmware boot line.
 *
 * bootargs is of the form: [kernelname] [args...]
 * It can be the empty string if we booted from the default
 * kernel name.  The buffer is cut in place (NUL written at the end
 * of the name) and a pointer to its start is returned; the result
 * is the empty string when the line begins with a boot option.
 *
 * Fix: dropped the dead `cp = args;' assignment that preceded the
 * for-loop, which re-initializes cp itself.
 */
static char *
parse_bootfile(char *args)
{
	char *cp;

	for (cp = args; *cp != 0 && *cp != ' ' && *cp != '\t'; cp++) {
		if (*cp == '-') {
			int c;
			/*
			 * If this `-' is most likely the start of boot
			 * options, we're done.
			 */
			if (cp == args)
				break;
			if ((c = *(cp-1)) == ' ' || c == '\t')
				break;
		}
	}
	/* Now we've separated out the kernel name from the args */
	*cp = '\0';
	return (args);
}
    329 
/*
 * Return a pointer to the boot options inside the firmware boot
 * line `args' (the first `-' that starts the line or follows
 * whitespace).  When no options are present, a pointer to the
 * terminating NUL is returned, i.e. the empty string.
 */
static char *
parse_bootargs(char *args)
{
	char *p;

	for (p = args; *p != '\0'; p++) {
		if (*p != '-')
			continue;
		/*
		 * A `-' opens the options unless it is embedded in
		 * the kernel name (not at the start, not after a
		 * space or tab).
		 */
		if (p == args || *(p - 1) == ' ' || *(p - 1) == '\t')
			break;
	}
	return args + (p - args);
}
    350 
    351 /*
    352  * machine dependent system variables.
    353  */
    354 static int
    355 sysctl_machdep_boot(SYSCTLFN_ARGS)
    356 {
    357 	struct sysctlnode node = *rnode;
    358 	u_int chosen;
    359 	char bootargs[256];
    360 	const char *cp;
    361 
    362 	if ((chosen = OF_finddevice("/chosen")) == -1)
    363 		return (ENOENT);
    364 	if (node.sysctl_num == CPU_BOOTED_DEVICE)
    365 		cp = "bootpath";
    366 	else
    367 		cp = "bootargs";
    368 	if (OF_getprop(chosen, cp, bootargs, sizeof bootargs) < 0)
    369 		return (ENOENT);
    370 
    371 	switch (node.sysctl_num) {
    372 	case CPU_BOOTED_KERNEL:
    373 		cp = parse_bootfile(bootargs);
    374                 if (cp != NULL && cp[0] == '\0')
    375                         /* Unknown to firmware, return default name */
    376                         cp = "netbsd";
    377 		break;
    378 	case CPU_BOOT_ARGS:
    379 		cp = parse_bootargs(bootargs);
    380 		break;
    381 	case CPU_BOOTED_DEVICE:
    382 		cp = bootargs;
    383 		break;
    384 	}
    385 
    386 	if (cp == NULL || cp[0] == '\0')
    387 		return (ENOENT);
    388 
    389 	/*XXXUNCONST*/
    390 	node.sysctl_data = __UNCONST(cp);
    391 	node.sysctl_size = strlen(cp) + 1;
    392 	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
    393 }
    394 
    395 /*
    396  * figure out which VIS version the CPU supports
    397  * this assumes all CPUs in the system are the same
    398  */
    399 static int
    400 get_vis(void)
    401 {
    402 	int vis = 0;
    403 
    404 	if ( CPU_ISSUN4V ) {
    405 		/*
    406 		 * UA2005 and UA2007 supports VIS 1 and VIS 2.
    407 		 * Oracle SPARC Architecture 2011 supports VIS 3.
    408 		 *
    409 		 * XXX Settle with VIS 2 until we can determite the
    410 		 *     actual sun4v implementation.
    411 		 */
    412 		vis = 2;
    413 	} else {
    414 		if (GETVER_CPU_MANUF() == MANUF_FUJITSU) {
    415 			/* as far as I can tell SPARC64-III and up have VIS 1.0 */
    416 			if (GETVER_CPU_IMPL() >= IMPL_SPARC64_III) {
    417 				vis = 1;
    418 			}
    419 			/* XXX - which, if any, SPARC64 support VIS 2.0? */
    420 		} else {
    421 			/* this better be Sun */
    422 			vis = 1;	/* all UltraSPARCs support at least VIS 1.0 */
    423 			if (CPU_IS_USIII_UP()) {
    424 				vis = 2;
    425 			}
    426 		}
    427 	}
    428 	return vis;
    429 }
    430 
/*
 * Register the machdep sysctl subtree: the firmware boot strings
 * (kernel, args, device), the CPU architecture level, and the
 * supported VIS instruction-set version.
 */
SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	/* Parent node: machdep */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "machdep", NULL,
		       NULL, 0, NULL, 0,
		       CTL_MACHDEP, CTL_EOL);

	/* Strings served by sysctl_machdep_boot() from PROM properties. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "booted_kernel", NULL,
		       sysctl_machdep_boot, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "boot_args", NULL,
		       sysctl_machdep_boot, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOT_ARGS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "booted_device", NULL,
		       sysctl_machdep_boot, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL);
	/* machdep.cpu_arch: immediate constant 9 (SPARC V9). */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "cpu_arch", NULL,
		       NULL, 9, NULL, 0,
		       CTL_MACHDEP, CPU_ARCH, CTL_EOL);
	/* machdep.vis: probed once at setup time via get_vis(). */
	sysctl_createv(clog, 0, NULL, NULL,
	               CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
	               CTLTYPE_INT, "vis",
	               SYSCTL_DESCR("supported version of VIS instruction set"),
	               NULL, get_vis(), NULL, 0,
	               CTL_MACHDEP, CPU_VIS, CTL_EOL);
}
    467 
    468 void *
    469 getframe(struct lwp *l, int sig, int *onstack)
    470 {
    471 	struct proc *p = l->l_proc;
    472 	struct trapframe64 *tf = l->l_md.md_tf;
    473 
    474 	/*
    475 	 * Compute new user stack addresses, subtract off
    476 	 * one signal frame, and align.
    477 	 */
    478 	*onstack = (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0
    479 	    && (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;
    480 
    481 	if (*onstack)
    482 		return ((char *)l->l_sigstk.ss_sp + l->l_sigstk.ss_size);
    483 	else
    484 		return (void *)((uintptr_t)tf->tf_out[6] + STACK_OFFSET);
    485 }
    486 
/* Signal frame pushed onto the user stack by sendsig_siginfo(). */
struct sigframe_siginfo {
	siginfo_t	sf_si;		/* saved siginfo */
	ucontext_t	sf_uc;		/* saved ucontext */
};
    491 
/*
 * Deliver signal `sig' (described by ksi) to the current LWP:
 * push a sigframe_siginfo (siginfo + ucontext) onto the stack
 * chosen by getframe(), then point the trapframe at the handler
 * so it runs via the userland trampoline.  On copyout failure the
 * process is killed with SIGILL.
 */
void
sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct sigacts *ps = p->p_sigacts;
	int onstack, error;
	int sig = ksi->ksi_signo;
	ucontext_t uc;
	long ucsz;			/* number of uc bytes copied out */
	struct sigframe_siginfo *fp = getframe(l, sig, &onstack);
	sig_t catcher = SIGACTION(p, sig).sa_handler;
	struct trapframe64 *tf = l->l_md.md_tf;
	struct rwindow *newsp;
	/* Allocate an aligned sigframe */
	fp = (void *)((u_long)(fp - 1) & ~0x0f);

	uc.uc_flags = _UC_SIGMASK |
	    ((l->l_sigstk.ss_flags & SS_ONSTACK)
		? _UC_SETSTACK : _UC_CLRSTACK);
	uc.uc_sigmask = *mask;
	uc.uc_link = l->l_ctxlink;
	memset(&uc.uc_stack, 0, sizeof(uc.uc_stack));

	sendsig_reset(l, sig);
	/* Drop the proc lock while touching user memory below. */
	mutex_exit(p->p_lock);
	cpu_getmcontext(l, &uc.uc_mcontext, &uc.uc_flags);
	/* Copy out only up to (not including) the trailing pad. */
	ucsz = (char *)&uc.__uc_pad - (char *)&uc;

	/*
	 * Now copy the stack contents out to user space.
	 * We need to make sure that when we start the signal handler,
	 * its %i6 (%fp), which is loaded from the newly allocated stack area,
	 * joins seamlessly with the frame it was in when the signal occurred,
	 * so that the debugger and _longjmp code can back up through it.
	 * Since we're calling the handler directly, allocate a full size
	 * C stack frame.
	 */
	newsp = (struct rwindow *)((u_long)fp - CCFSZ);
	error = (copyout(&ksi->ksi_info, &fp->sf_si, sizeof(ksi->ksi_info)) != 0 ||
	    copyout(&uc, &fp->sf_uc, ucsz) != 0 ||
	    suword(&newsp->rw_in[6], (uintptr_t)tf->tf_out[6]) != 0);
	mutex_enter(p->p_lock);

	if (error) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	/* Handler entry point and its arguments (sig, siginfo, ucontext). */
	tf->tf_pc = (const vaddr_t)catcher;
	tf->tf_npc = (const vaddr_t)catcher + 4;
	tf->tf_out[0] = sig;
	tf->tf_out[1] = (vaddr_t)&fp->sf_si;
	tf->tf_out[2] = (vaddr_t)&fp->sf_uc;
	tf->tf_out[6] = (vaddr_t)newsp - STACK_OFFSET;
	tf->tf_out[7] = (vaddr_t)ps->sa_sigdesc[sig].sd_tramp - 8;

	/* Remember that we're now on the signal stack. */
	if (onstack)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
}
    557 
    558 struct pcb dumppcb;
    559 
    560 static void
    561 maybe_dump(int howto)
    562 {
    563 	int s;
    564 
    565 	/* Disable interrupts. */
    566 	s = splhigh();
    567 
    568 	/* Do a dump if requested. */
    569 	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
    570 		dumpsys();
    571 
    572 	splx(s);
    573 }
    574 
    575 void
    576 cpu_reboot(int howto, char *user_boot_string)
    577 {
    578 	static bool syncdone = false;
    579 	int i;
    580 	static char str[128];
    581 	struct lwp *l;
    582 
    583 	l = (curlwp == NULL) ? &lwp0 : curlwp;
    584 
    585 	if (cold) {
    586 		howto |= RB_HALT;
    587 		goto haltsys;
    588 	}
    589 
    590 #if NFB > 0
    591 	fb_unblank();
    592 #endif
    593 	boothowto = howto;
    594 
    595 	/* If rebooting and a dump is requested, do it.
    596 	 *
    597 	 * XXX used to dump after vfs_shutdown() and before
    598 	 * detaching devices / shutdown hooks / pmf_system_shutdown().
    599 	 */
    600 	maybe_dump(howto);
    601 
    602 	if ((howto & RB_NOSYNC) == 0 && !syncdone) {
    603 		if (!syncdone) {
    604 		syncdone = true;
    605 		vfs_shutdown();
    606 			/* XXX used to force unmount as well, here */
    607 			vfs_sync_all(l);
    608 			/*
    609 			 * If we've been adjusting the clock, the todr
    610 			 * will be out of synch; adjust it now.
    611 			 *
    612 			 * resettodr will only do this only if inittodr()
    613 			 * has already been called.
    614 			 *
    615 			 * XXX used to do this after unmounting all
    616 			 * filesystems with vfs_shutdown().
    617 			 */
    618 			resettodr();
    619 		}
    620 
    621 		while (vfs_unmountall1(l, false, false) ||
    622 		       config_detach_all(boothowto) ||
    623 		       vfs_unmount_forceone(l))
    624 			;	/* do nothing */
    625 	} else
    626 		suspendsched();
    627 
    628 	pmf_system_shutdown(boothowto);
    629 
    630 	splhigh();
    631 
    632 haltsys:
    633 
    634 #ifdef MULTIPROCESSOR
    635 	/* Stop all secondary cpus */
    636 	mp_halt_cpus();
    637 #endif
    638 
    639 	/* If powerdown was requested, do it. */
    640 	if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
    641 #ifdef MULTIPROCESSOR
    642 		printf("cpu%d: powered down\n\n", cpu_number());
    643 #else
    644 		printf("powered down\n\n");
    645 #endif
    646 		/* Let the OBP do the work. */
    647 		OF_poweroff();
    648 		printf("WARNING: powerdown failed!\n");
    649 		/*
    650 		 * RB_POWERDOWN implies RB_HALT... fall into it...
    651 		 */
    652 	}
    653 
    654 	if (howto & RB_HALT) {
    655 #ifdef MULTIPROCESSOR
    656 		printf("cpu%d: halted\n\n", cpu_number());
    657 #else
    658 		printf("halted\n\n");
    659 #endif
    660 		OF_exit();
    661 		panic("PROM exit failed");
    662 	}
    663 
    664 #ifdef MULTIPROCESSOR
    665 	printf("cpu%d: rebooting\n\n", cpu_number());
    666 #else
    667 	printf("rebooting\n\n");
    668 #endif
    669 	if (user_boot_string && *user_boot_string) {
    670 		i = strlen(user_boot_string);
    671 		if (i > sizeof(str))
    672 			OF_boot(user_boot_string);	/* XXX */
    673 		memcpy(str, user_boot_string, i);
    674 	} else {
    675 		i = 1;
    676 		str[0] = '\0';
    677 	}
    678 
    679 	if (howto & RB_SINGLE)
    680 		str[i++] = 's';
    681 	if (howto & RB_KDB)
    682 		str[i++] = 'd';
    683 	if (i > 1) {
    684 		if (str[0] == '\0')
    685 			str[0] = '-';
    686 		str[i] = 0;
    687 	} else
    688 		str[0] = 0;
    689 	OF_boot(str);
    690 	panic("cpu_reboot -- failed");
    691 	/*NOTREACHED*/
    692 }
    693 
    694 uint32_t dumpmag = 0x8fca0101;	/* magic number for savecore */
    695 int	dumpsize = 0;		/* also for savecore */
    696 long	dumplo = 0;
    697 
    698 void
    699 cpu_dumpconf(void)
    700 {
    701 	int nblks, dumpblks;
    702 
    703 	if (dumpdev == NODEV)
    704 		/* No usable dump device */
    705 		return;
    706 	nblks = bdev_size(dumpdev);
    707 
    708 	dumpblks = ctod(physmem) + pmap_dumpsize();
    709 	if (dumpblks > (nblks - ctod(1)))
    710 		/*
    711 		 * dump size is too big for the partition.
    712 		 * Note, we safeguard a click at the front for a
    713 		 * possible disk label.
    714 		 */
    715 		return;
    716 
    717 	/* Put the dump at the end of the partition */
    718 	dumplo = nblks - dumpblks;
    719 
    720 	/*
    721 	 * savecore(8) expects dumpsize to be the number of pages
    722 	 * of actual core dumped (i.e. excluding the MMU stuff).
    723 	 */
    724 	dumpsize = physmem;
    725 }
    726 
    727 #define	BYTES_PER_DUMP	MAXPHYS		/* must be a multiple of pagesize */
    728 static vaddr_t dumpspace;
    729 
    730 void *
    731 reserve_dumppages(void *p)
    732 {
    733 
    734 	dumpspace = (vaddr_t)p;
    735 	return (char *)p + BYTES_PER_DUMP;
    736 }
    737 
    738 /*
    739  * Write a crash dump.
    740  */
    741 void
    742 dumpsys(void)
    743 {
    744 	const struct bdevsw *bdev;
    745 	int psize;
    746 	daddr_t blkno;
    747 	int (*dump)(dev_t, daddr_t, void *, size_t);
    748 	int j, error = 0;
    749 	uint64_t todo;
    750 	struct mem_region *mp;
    751 
    752 	/* copy registers to dumppcb and flush windows */
    753 	memset(&dumppcb, 0, sizeof(struct pcb));
    754 	snapshot(&dumppcb);
    755 	stackdump();
    756 
    757 	if (dumpdev == NODEV)
    758 		return;
    759 	bdev = bdevsw_lookup(dumpdev);
    760 	if (bdev == NULL || bdev->d_psize == NULL)
    761 		return;
    762 
    763 	/*
    764 	 * For dumps during autoconfiguration,
    765 	 * if dump device has already configured...
    766 	 */
    767 	if (dumpsize == 0)
    768 		cpu_dumpconf();
    769 	if (!dumpspace) {
    770 		printf("\nno address space available, dump not possible\n");
    771 		return;
    772 	}
    773 	if (dumplo <= 0) {
    774 		printf("\ndump to dev %" PRId32 ",%" PRId32 " not possible ("
    775 		    "partition too small?)\n", major(dumpdev), minor(dumpdev));
    776 		return;
    777 	}
    778 	printf("\ndumping to dev %" PRId32 ",%" PRId32 " offset %ld\n",
    779 	    major(dumpdev), minor(dumpdev), dumplo);
    780 
    781 	psize = bdev_size(dumpdev);
    782 	if (psize == -1) {
    783 		printf("dump area unavailable\n");
    784 		return;
    785 	}
    786 	blkno = dumplo;
    787 	dump = bdev->d_dump;
    788 
    789 	error = pmap_dumpmmu(dump, blkno);
    790 	blkno += pmap_dumpsize();
    791 
    792 	/* calculate total size of dump */
    793 	for (todo = 0, j = 0; j < phys_installed_size; j++)
    794 		todo += phys_installed[j].size;
    795 
    796 	for (mp = &phys_installed[0], j = 0; j < phys_installed_size;
    797 			j++, mp = &phys_installed[j]) {
    798 		uint64_t i = 0, n, off;
    799 		paddr_t maddr = mp->start;
    800 
    801 		for (; i < mp->size; i += n) {
    802 			n = mp->size - i;
    803 			if (n > BYTES_PER_DUMP)
    804 				 n = BYTES_PER_DUMP;
    805 
    806 			/* print out how many MBs we still have to dump */
    807 			if ((todo % (1024*1024)) == 0)
    808 				printf_nolog("\r%6" PRIu64 " M ",
    809 				    todo / (1024*1024));
    810 			for (off = 0; off < n; off += PAGE_SIZE)
    811 				pmap_kenter_pa(dumpspace+off, maddr+off,
    812 				    VM_PROT_READ, 0);
    813 			error = (*dump)(dumpdev, blkno,
    814 					(void *)dumpspace, (size_t)n);
    815 			pmap_kremove(dumpspace, n);
    816 			if (error)
    817 				break;
    818 			maddr += n;
    819 			todo -= n;
    820 			blkno += btodb(n);
    821 		}
    822 	}
    823 
    824 	switch (error) {
    825 
    826 	case ENXIO:
    827 		printf("- device bad\n");
    828 		break;
    829 
    830 	case EFAULT:
    831 		printf("- device not ready\n");
    832 		break;
    833 
    834 	case EINVAL:
    835 		printf("- area improper\n");
    836 		break;
    837 
    838 	case EIO:
    839 		printf("- i/o error\n");
    840 		break;
    841 
    842 	case 0:
    843 		printf("\rdump succeeded\n");
    844 		break;
    845 
    846 	default:
    847 		printf("- error %d\n", error);
    848 		break;
    849 	}
    850 }
    851 
void trapdump(struct trapframe64*);
/*
 * dump out a trapframe.
 * Prints the trap state (%tstate, %pc, %npc, %y) followed by the
 * global and out registers of the given trapframe.
 */
void
trapdump(struct trapframe64* tf)
{
	printf("TRAPFRAME: tstate=%llx pc=%llx npc=%llx y=%x\n",
	       (unsigned long long)tf->tf_tstate, (unsigned long long)tf->tf_pc,
	       (unsigned long long)tf->tf_npc, (unsigned)tf->tf_y);
	printf("%%g1-7: %llx %llx %llx %llx %llx %llx %llx\n",
	       (unsigned long long)tf->tf_global[1],
	       (unsigned long long)tf->tf_global[2],
	       (unsigned long long)tf->tf_global[3],
	       (unsigned long long)tf->tf_global[4],
	       (unsigned long long)tf->tf_global[5],
	       (unsigned long long)tf->tf_global[6],
	       (unsigned long long)tf->tf_global[7]);
	printf("%%o0-7: %llx %llx %llx %llx\n %llx %llx %llx %llx\n",
	       (unsigned long long)tf->tf_out[0],
	       (unsigned long long)tf->tf_out[1],
	       (unsigned long long)tf->tf_out[2],
	       (unsigned long long)tf->tf_out[3],
	       (unsigned long long)tf->tf_out[4],
	       (unsigned long long)tf->tf_out[5],
	       (unsigned long long)tf->tf_out[6],
	       (unsigned long long)tf->tf_out[7]);
}
    880 
/*
 * Resolve `pc' to a module name, symbol name and offset for stack
 * traces.  Falls back to "netbsd" and a hex rendition of the PC
 * when ksyms cannot resolve it (or ksyms support is compiled out).
 * NOTE(review): the static symbuf makes this non-reentrant; it is
 * only called from the stackdump() diagnostic path here.
 */
static void
get_symbol_and_offset(const char **mod, const char **sym, vaddr_t *offset, vaddr_t pc)
{
	static char symbuf[256];
	unsigned long symaddr;

#if NKSYMS || defined(DDB) || defined(MODULAR)
	if (ksyms_getname(mod, sym, pc,
			  KSYMS_CLOSEST|KSYMS_PROC|KSYMS_ANY) == 0) {
		if (ksyms_getval(*mod, *sym, &symaddr,
				 KSYMS_CLOSEST|KSYMS_PROC|KSYMS_ANY) != 0)
			goto failed;

		*offset = (vaddr_t)(pc - symaddr);
		return;
	}
#endif
 failed:
	snprintf(symbuf, sizeof symbuf, "%llx", (unsigned long long)pc);
	*mod = "netbsd";
	*sym = symbuf;
	*offset = 0;
}
    904 
    905 /*
    906  * get the fp and dump the stack as best we can.  don't leave the
    907  * current stack page
    908  */
    909 void
    910 stackdump(void)
    911 {
    912 	struct frame32 *fp = (struct frame32 *)getfp(), *sfp;
    913 	struct frame64 *fp64;
    914 	const char *mod, *sym;
    915 	vaddr_t offset;
    916 
    917 	sfp = fp;
    918 	printf("Frame pointer is at %p\n", fp);
    919 	printf("Call traceback:\n");
    920 	while (fp && ((u_long)fp >> PGSHIFT) == ((u_long)sfp >> PGSHIFT)) {
    921 		if( ((long)fp) & 1 ) {
    922 			fp64 = (struct frame64*)(((char*)fp)+BIAS);
    923 			/* 64-bit frame */
    924 			get_symbol_and_offset(&mod, &sym, &offset, fp64->fr_pc);
    925 			printf(" %s:%s+%#llx(%llx, %llx, %llx, %llx, %llx, %llx) fp = %llx\n",
    926 			       mod, sym,
    927 			       (unsigned long long)offset,
    928 			       (unsigned long long)fp64->fr_arg[0],
    929 			       (unsigned long long)fp64->fr_arg[1],
    930 			       (unsigned long long)fp64->fr_arg[2],
    931 			       (unsigned long long)fp64->fr_arg[3],
    932 			       (unsigned long long)fp64->fr_arg[4],
    933 			       (unsigned long long)fp64->fr_arg[5],
    934 			       (unsigned long long)fp64->fr_fp);
    935 			fp = (struct frame32 *)(u_long)fp64->fr_fp;
    936 		} else {
    937 			/* 32-bit frame */
    938 			get_symbol_and_offset(&mod, &sym, &offset, fp->fr_pc);
    939 			printf(" %s:%s+%#lx(%x, %x, %x, %x, %x, %x) fp = %x\n",
    940 			       mod, sym,
    941 			       (unsigned long)offset,
    942 			       fp->fr_arg[0],
    943 			       fp->fr_arg[1],
    944 			       fp->fr_arg[2],
    945 			       fp->fr_arg[3],
    946 			       fp->fr_arg[4],
    947 			       fp->fr_arg[5],
    948 			       fp->fr_fp);
    949 			fp = (struct frame32*)(u_long)fp->fr_fp;
    950 		}
    951 	}
    952 }
    953 
    954 
/*
 * a.out executables are not supported on this architecture:
 * unconditionally refuse to build exec commands for them.
 */
int
cpu_exec_aout_makecmds(struct lwp *l, struct exec_package *epp)
{
	return ENOEXEC;
}
    960 
    961 /*
    962  * Common function for DMA map creation.  May be called by bus-specific
    963  * DMA map creation functions.
    964  */
    965 int
    966 _bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    967 	bus_size_t maxsegsz, bus_size_t boundary, int flags,
    968 	bus_dmamap_t *dmamp)
    969 {
    970 	struct sparc_bus_dmamap *map;
    971 	void *mapstore;
    972 	size_t mapsize;
    973 
    974 	/*
    975 	 * Allocate and initialize the DMA map.  The end of the map
    976 	 * is a variable-sized array of segments, so we allocate enough
    977 	 * room for them in one shot.
    978 	 *
    979 	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
    980 	 * of ALLOCNOW notifies others that we've reserved these resources,
    981 	 * and they are not to be freed.
    982 	 *
    983 	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
    984 	 * the (nsegments - 1).
    985 	 */
    986 	mapsize = sizeof(struct sparc_bus_dmamap) +
    987 	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
    988 	if ((mapstore = malloc(mapsize, M_DMAMAP,
    989 	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
    990 		return (ENOMEM);
    991 
    992 	memset(mapstore, 0, mapsize);
    993 	map = (struct sparc_bus_dmamap *)mapstore;
    994 	map->_dm_size = size;
    995 	map->_dm_segcnt = nsegments;
    996 	map->_dm_maxmaxsegsz = maxsegsz;
    997 	map->_dm_boundary = boundary;
    998 	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT|BUS_DMA_COHERENT|
    999 				   BUS_DMA_NOWRITE|BUS_DMA_NOCACHE);
   1000 	map->dm_maxsegsz = maxsegsz;
   1001 	map->dm_mapsize = 0;		/* no valid mappings */
   1002 	map->dm_nsegs = 0;
   1003 
   1004 	*dmamp = map;
   1005 	return (0);
   1006 }
   1007 
   1008 /*
   1009  * Common function for DMA map destruction.  May be called by bus-specific
   1010  * DMA map destruction functions.
   1011  */
   1012 void
   1013 _bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
   1014 {
   1015 	if (map->dm_nsegs)
   1016 		bus_dmamap_unload(t, map);
   1017 	free(map, M_DMAMAP);
   1018 }
   1019 
   1020 /*
   1021  * Common function for loading a DMA map with a linear buffer.  May
   1022  * be called by bus-specific DMA map load functions.
   1023  *
   1024  * Most SPARCs have IOMMUs in the bus controllers.  In those cases
   1025  * they only need one segment and will use virtual addresses for DVMA.
   1026  * Those bus controllers should intercept these vectors and should
   1027  * *NEVER* call _bus_dmamap_load() which is used only by devices that
   1028  * bypass DVMA.
   1029  */
   1030 int
   1031 _bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *sbuf,
   1032 	bus_size_t buflen, struct proc *p, int flags)
   1033 {
   1034 	bus_size_t sgsize;
   1035 	vaddr_t vaddr = (vaddr_t)sbuf;
   1036 	long incr;
   1037 	int i;
   1038 
   1039 	/*
   1040 	 * Make sure that on error condition we return "no valid mappings".
   1041 	 */
   1042 	map->dm_nsegs = 0;
   1043 	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
   1044 
   1045 	if (buflen > map->_dm_size)
   1046 	{
   1047 #ifdef DEBUG
   1048 		printf("_bus_dmamap_load(): error %lu > %lu -- map size exceeded!\n",
   1049 		    (unsigned long)buflen, (unsigned long)map->_dm_size);
   1050 #ifdef DDB
   1051 		Debugger();
   1052 #endif
   1053 #endif
   1054 		return (EINVAL);
   1055 	}
   1056 
   1057 	sgsize = round_page(buflen + ((int)vaddr & PGOFSET));
   1058 
   1059 	/*
   1060 	 * We always use just one segment.
   1061 	 */
   1062 	i = 0;
   1063 	map->dm_segs[i].ds_addr = 0UL;
   1064 	map->dm_segs[i].ds_len = 0;
   1065 
   1066 	incr = PAGE_SIZE - (vaddr & PGOFSET);
   1067 	while (sgsize > 0) {
   1068 		paddr_t pa;
   1069 
   1070 		incr = min(sgsize, incr);
   1071 
   1072 		(void) pmap_extract(pmap_kernel(), vaddr, &pa);
   1073 		if (map->dm_segs[i].ds_len == 0)
   1074 			map->dm_segs[i].ds_addr = pa;
   1075 		if (pa == (map->dm_segs[i].ds_addr + map->dm_segs[i].ds_len)
   1076 		    && ((map->dm_segs[i].ds_len + incr) <= map->dm_maxsegsz)) {
   1077 			/* Hey, waddyaknow, they're contiguous */
   1078 			map->dm_segs[i].ds_len += incr;
   1079 		} else {
   1080 			if (++i >= map->_dm_segcnt)
   1081 				return (EFBIG);
   1082 			map->dm_segs[i].ds_addr = pa;
   1083 			map->dm_segs[i].ds_len = incr;
   1084 		}
   1085 		sgsize -= incr;
   1086 		vaddr += incr;
   1087 		incr = PAGE_SIZE;
   1088 	}
   1089 	map->dm_nsegs = i + 1;
   1090 	map->dm_mapsize = buflen;
   1091 	/* Mapping is bus dependent */
   1092 	return (0);
   1093 }
   1094 
   1095 /*
   1096  * Like _bus_dmamap_load(), but for mbufs.
   1097  */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m,
	int flags)
{
	bus_dma_segment_t segs[MAX_DMA_SEGS];
	int i;
	size_t len;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (m->m_pkthdr.len > map->_dm_size)
		return EINVAL;

	/* Record mbuf for *_unload */
	map->_dm_type = _DM_TYPE_MBUF;
	map->_dm_source = (void *)m;

	i = 0;
	len = 0;
	/* Walk the mbuf chain, building a physical segment list in segs[]. */
	while (m) {
		vaddr_t vaddr = mtod(m, vaddr_t);
		long buflen = (long)m->m_len;

		len += buflen;
		/* Translate this mbuf's data at most one page per step. */
		while (buflen > 0 && i < MAX_DMA_SEGS) {
			paddr_t pa;
			long incr;

			/* Never cross a page boundary in a single step. */
			incr = PAGE_SIZE - (vaddr & PGOFSET);
			incr = min(buflen, incr);

			if (pmap_extract(pmap_kernel(), vaddr, &pa) == FALSE) {
#ifdef DIAGNOSTIC
				printf("_bus_dmamap_load_mbuf: pmap_extract failed %lx\n",
				       vaddr);
#endif
				/* Undo the *_unload bookkeeping set above. */
				map->_dm_type = 0;
				map->_dm_source = NULL;
				return EINVAL;
			}

			buflen -= incr;
			vaddr += incr;

			if (i > 0 &&
				pa == (segs[i-1].ds_addr + segs[i-1].ds_len) &&
				((segs[i-1].ds_len + incr) <=
					map->dm_maxsegsz)) {
				/* Hey, waddyaknow, they're contiguous */
				segs[i-1].ds_len += incr;
				continue;
			}
			segs[i].ds_addr = pa;
			segs[i].ds_len = incr;
			segs[i]._ds_boundary = 0;
			segs[i]._ds_align = 0;
			segs[i]._ds_mlist = NULL;
			i++;
		}
		m = m->m_next;
		if (m && i >= MAX_DMA_S EGS) {
			/* Exceeded the size of our dmamap */
			map->_dm_type = 0;
			map->_dm_source = NULL;
			return EFBIG;
		}
	}

#ifdef DEBUG
	{
		/*
		 * Sanity-check that the segment list covers exactly the
		 * bytes in the mbuf chain, both before and after handing
		 * it to bus_dmamap_load_raw().
		 */
		size_t mbuflen, sglen;
		int j;
		int retval;

		mbuflen = 0;
		for (m = (struct mbuf *)map->_dm_source; m; m = m->m_next)
			mbuflen += (long)m->m_len;
		sglen = 0;
		for (j = 0; j < i; j++)
			sglen += segs[j].ds_len;
		if (sglen != mbuflen)
			panic("load_mbuf: sglen %ld != mbuflen %lx\n",
				sglen, mbuflen);
		if (sglen != len)
			panic("load_mbuf: sglen %ld != len %lx\n",
				sglen, len);
		retval = bus_dmamap_load_raw(t, map, segs, i,
			(bus_size_t)len, flags);
		if (retval == 0) {
			if (map->dm_mapsize != len)
				panic("load_mbuf: mapsize %ld != len %lx\n",
					(long)map->dm_mapsize, len);
			sglen = 0;
			for (j = 0; j < map->dm_nsegs; j++)
				sglen += map->dm_segs[j].ds_len;
			if (sglen != len)
				panic("load_mbuf: dmamap sglen %ld != len %lx\n",
					sglen, len);
		}
		return (retval);
	}
#endif
	/* The bus back-end performs the actual mapping of the segments. */
	return (bus_dmamap_load_raw(t, map, segs, i, (bus_size_t)len, flags));
}
   1206 
   1207 /*
   1208  * Like _bus_dmamap_load(), but for uios.
   1209  */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
	int flags)
{
/*
 * XXXXXXX The problem with this routine is that it needs to
 * lock the user address space that is being loaded, but there
 * is no real way for us to unlock it during the unload process.
 */
/*
 * NOTE(review): the entire implementation below is disabled and the
 * function always returns success without loading anything.  If it is
 * ever re-enabled, note that the uvm_vsunlock() call references `bp'
 * and `todo', which are not defined in this function -- it would not
 * compile as-is.
 */
#if 0
	bus_dma_segment_t segs[MAX_DMA_SEGS];
	int i, j;
	size_t len;
	struct proc *p = uio->uio_lwp->l_proc;
	struct pmap *pm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (uio->uio_segflg == UIO_USERSPACE) {
		pm = p->p_vmspace->vm_map.pmap;
	} else
		pm = pmap_kernel();

	i = 0;
	len = 0;
	for (j = 0; j < uio->uio_iovcnt; j++) {
		struct iovec *iov = &uio->uio_iov[j];
		vaddr_t vaddr = (vaddr_t)iov->iov_base;
		bus_size_t buflen = iov->iov_len;

		/*
		 * Lock the part of the user address space involved
		 *    in the transfer.
		 */
		if (__predict_false(uvm_vslock(p->p_vmspace, vaddr, buflen,
			    (uio->uio_rw == UIO_WRITE) ?
			    VM_PROT_WRITE : VM_PROT_READ) != 0)) {
				goto after_vsunlock;
			}

		len += buflen;
		while (buflen > 0 && i < MAX_DMA_SEGS) {
			paddr_t pa;
			long incr;

			incr = min(buflen, PAGE_SIZE);
			(void) pmap_extract(pm, vaddr, &pa);
			buflen -= incr;
			vaddr += incr;
			if (segs[i].ds_len == 0)
				segs[i].ds_addr = pa;


			if (i > 0 && pa == (segs[i-1].ds_addr + segs[i-1].ds_len)
			    && ((segs[i-1].ds_len + incr) <= map->dm_maxsegsz)) {
				/* Hey, waddyaknow, they're contiguous */
				segs[i-1].ds_len += incr;
				continue;
			}
			segs[i].ds_addr = pa;
			segs[i].ds_len = incr;
			segs[i]._ds_boundary = 0;
			segs[i]._ds_align = 0;
			segs[i]._ds_mlist = NULL;
			i++;
		}
		uvm_vsunlock(p->p_vmspace, bp->b_data, todo);
 		if (buflen > 0 && i >= MAX_DMA_SEGS)
			/* Exceeded the size of our dmamap */
			return EFBIG;
	}
	map->_dm_type = DM_TYPE_UIO;
	map->_dm_source = (void *)uio;
	return (bus_dmamap_load_raw(t, map, segs, i,
				    (bus_size_t)len, flags));
#endif
	return 0;
}
   1292 
   1293 /*
   1294  * Like _bus_dmamap_load(), but for raw memory allocated with
   1295  * bus_dmamem_alloc().
   1296  */
/*
 * Stub: raw loads must be supplied by the bus-specific back-end;
 * there is no generic implementation at this layer.
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
	int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}
   1304 
   1305 /*
   1306  * Common function for unloading a DMA map.  May be called by
   1307  * bus-specific DMA map unload functions.
   1308  */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	int i;
	struct vm_page *pg;
	struct pglist *pglist;
	paddr_t pa;

	/* Flush the D-cache for every page backing the map's segments. */
	for (i = 0; i < map->dm_nsegs; i++) {
		if ((pglist = map->dm_segs[i]._ds_mlist) == NULL) {

			/*
			 * We were asked to load random VAs and lost the
			 * PA info so just blow the entire cache away.
			 */
			blast_dcache();
			break;
		}
		TAILQ_FOREACH(pg, pglist, pageq.queue) {
			pa = VM_PAGE_TO_PHYS(pg);

			/*
			 * We should be flushing a subrange, but we
			 * don't know where the segments starts.
			 */
			dcache_flush_page_all(pa);
		}
	}

	/* Mark the mappings as invalid. */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;	/* restore caller's cap */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

}
   1344 
   1345 /*
   1346  * Common function for DMA map synchronization.  May be called
   1347  * by bus-specific DMA map synchronization functions.
   1348  */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
	bus_size_t len, int ops)
{
	int i;
	struct vm_page *pg;
	struct pglist *pglist;

	/*
	 * We sync out our caches, but the bus must do the same.
	 *
	 * Actually a #Sync is expensive.  We should optimize.
	 */
	if ((ops & BUS_DMASYNC_PREREAD) || (ops & BUS_DMASYNC_PREWRITE)) {

		/*
		 * Don't really need to do anything, but flush any pending
		 * writes anyway.
		 */
		membar_Sync();
	}
	if (ops & BUS_DMASYNC_POSTREAD) {
		/* Invalidate the vcache */
		for (i = 0; i < map->dm_nsegs; i++) {
			if ((pglist = map->dm_segs[i]._ds_mlist) == NULL)
				/* Should not really happen. */
				continue;
			/*
			 * Walk the page list, skipping whole pages until
			 * `offset' falls inside one, then flush from there
			 * until `len' bytes have been covered.
			 */
			TAILQ_FOREACH(pg, pglist, pageq.queue) {
				paddr_t start;
				psize_t size = PAGE_SIZE;

				if (offset < PAGE_SIZE) {
					/* Range starts within this page. */
					start = VM_PAGE_TO_PHYS(pg) + offset;
					size -= offset;
					if (size > len)
						size = len;
					cache_flush_phys(start, size, 0);
					len -= size;
					if (len == 0)
						goto done;
					/* Later pages flush from their start. */
					offset = 0;
					continue;
				}
				/* Range starts beyond this page; skip it. */
				offset -= size;
			}
		}
	}
 done:
	if (ops & BUS_DMASYNC_POSTWRITE) {
		/* Nothing to do.  Handled by the bus controller. */
	}
}
   1401 
/* Managed physical memory range -- presumably set up during pmap
 * bootstrap; defined elsewhere.  TODO(review): confirm the definer. */
extern paddr_t   vm_first_phys, vm_num_phys;
   1403 /*
   1404  * Common function for DMA-safe memory allocation.  May be called
   1405  * by bus-specific DMA memory allocation functions.
   1406  */
   1407 int
   1408 _bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
   1409 	bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
   1410 	int flags)
   1411 {
   1412 	vaddr_t low, high;
   1413 	struct pglist *pglist;
   1414 	int error;
   1415 
   1416 	/* Always round the size. */
   1417 	size = round_page(size);
   1418 	low = vm_first_phys;
   1419 	high = vm_first_phys + vm_num_phys - PAGE_SIZE;
   1420 
   1421 	if ((pglist = malloc(sizeof(*pglist), M_DEVBUF,
   1422 	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
   1423 		return (ENOMEM);
   1424 
   1425 	/*
   1426 	 * If the bus uses DVMA then ignore boundary and alignment.
   1427 	 */
   1428 	segs[0]._ds_boundary = boundary;
   1429 	segs[0]._ds_align = alignment;
   1430 	if (flags & BUS_DMA_DVMA) {
   1431 		boundary = 0;
   1432 		alignment = 0;
   1433 	}
   1434 
   1435 	/*
   1436 	 * Allocate pages from the VM system.
   1437 	 */
   1438 	error = uvm_pglistalloc(size, low, high,
   1439 	    alignment, boundary, pglist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
   1440 	if (error)
   1441 		return (error);
   1442 
   1443 	/*
   1444 	 * Compute the location, size, and number of segments actually
   1445 	 * returned by the VM code.
   1446 	 */
   1447 	segs[0].ds_addr = 0UL; /* UPA does not map things */
   1448 	segs[0].ds_len = size;
   1449 	*rsegs = 1;
   1450 
   1451 	/*
   1452 	 * Simply keep a pointer around to the linked list, so
   1453 	 * bus_dmamap_free() can return it.
   1454 	 *
   1455 	 * NOBODY SHOULD TOUCH THE pageq.queue FIELDS WHILE THESE PAGES
   1456 	 * ARE IN OUR CUSTODY.
   1457 	 */
   1458 	segs[0]._ds_mlist = pglist;
   1459 
   1460 	/* The bus driver should do the actual mapping */
   1461 	return (0);
   1462 }
   1463 
   1464 /*
   1465  * Common function for freeing DMA-safe memory.  May be called by
   1466  * bus-specific DMA memory free functions.
   1467  */
   1468 void
   1469 _bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
   1470 {
   1471 
   1472 	if (nsegs != 1)
   1473 		panic("bus_dmamem_free: nsegs = %d", nsegs);
   1474 
   1475 	/*
   1476 	 * Return the list of pages back to the VM system.
   1477 	 */
   1478 	uvm_pglistfree(segs[0]._ds_mlist);
   1479 	free(segs[0]._ds_mlist, M_DEVBUF);
   1480 }
   1481 
   1482 /*
   1483  * Common function for mapping DMA-safe memory.  May be called by
   1484  * bus-specific DMA memory map functions.
   1485  */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
	size_t size, void **kvap, int flags)
{
	vaddr_t va, sva;
	int r;
	size_t oversize;
	u_long align;

	if (nsegs != 1)
		panic("_bus_dmamem_map: nsegs = %d", nsegs);

	align = PAGE_SIZE;

	size = round_page(size);

	/*
	 * Find a region of kernel virtual addresses that can accommodate
	 * our alignment requirements: over-allocate by (align - PAGE_SIZE)
	 * so an aligned sub-range is guaranteed to exist, then trim the
	 * excess below.
	 */
	oversize = size + align - PAGE_SIZE;
	r = uvm_map(kernel_map, &sva, oversize, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
	    UVM_ADV_NORMAL, 0));
	if (r != 0)
		return (ENOMEM);

	/* Compute start of aligned region (cache-color-matched to the
	 * segment's address modulo `align'). */
	va = sva;
	va += ((segs[0].ds_addr & (align - 1)) + align - va) & (align - 1);

	/* Return excess virtual addresses */
	if (va != sva)
		uvm_unmap(kernel_map, sva, va);
	if (va + size != sva + oversize)
		uvm_unmap(kernel_map, va + size, sva + oversize);

	/* NOTE(review): no page mappings are entered here -- presumably
	 * the bus driver maps the pages; confirm against callers. */
	*kvap = (void *)va;
	return (0);
}
   1526 
   1527 /*
   1528  * Common function for unmapping DMA-safe memory.  May be called by
   1529  * bus-specific DMA memory unmapping functions.
   1530  */
   1531 void
   1532 _bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
   1533 {
   1534 
   1535 #ifdef DIAGNOSTIC
   1536 	if ((u_long)kva & PGOFSET)
   1537 		panic("_bus_dmamem_unmap");
   1538 #endif
   1539 
   1540 	size = round_page(size);
   1541 	uvm_unmap(kernel_map, (vaddr_t)kva, (vaddr_t)kva + size);
   1542 }
   1543 
   1544 /*
   1545  * Common functin for mmap(2)'ing DMA-safe memory.  May be called by
   1546  * bus-specific DMA mmap(2)'ing functions.
   1547  */
   1548 paddr_t
   1549 _bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, off_t off,
   1550 	int prot, int flags)
   1551 {
   1552 	int i;
   1553 
   1554 	for (i = 0; i < nsegs; i++) {
   1555 #ifdef DIAGNOSTIC
   1556 		if (off & PGOFSET)
   1557 			panic("_bus_dmamem_mmap: offset unaligned");
   1558 		if (segs[i].ds_addr & PGOFSET)
   1559 			panic("_bus_dmamem_mmap: segment unaligned");
   1560 		if (segs[i].ds_len & PGOFSET)
   1561 			panic("_bus_dmamem_mmap: segment size not multiple"
   1562 			    " of page size");
   1563 #endif
   1564 		if (off >= segs[i].ds_len) {
   1565 			off -= segs[i].ds_len;
   1566 			continue;
   1567 		}
   1568 
   1569 		return (atop(segs[i].ds_addr + off));
   1570 	}
   1571 
   1572 	/* Page not found. */
   1573 	return (-1);
   1574 }
   1575 
   1576 
/*
 * Default DMA tag for mainbus devices: the generic _bus_dma*
 * implementations above for every operation.
 * NOTE(review): the two leading NULLs are assumed to be cookie/parent
 * fields -- confirm against the struct sparc_bus_dma_tag definition.
 */
struct sparc_bus_dma_tag mainbus_dma_tag = {
	NULL,
	NULL,
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	_bus_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	_bus_dmamap_unload,
	_bus_dmamap_sync,

	_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
   1595 
   1596 
   1597 /*
   1598  * Base bus space handlers.
   1599  */
/* Default implementations reached through the _BS_CALL wrappers below. */
static int	sparc_bus_map(bus_space_tag_t, bus_addr_t, bus_size_t, int,
	vaddr_t, bus_space_handle_t *);
static int	sparc_bus_unmap(bus_space_tag_t, bus_space_handle_t, bus_size_t);
static int	sparc_bus_subregion(bus_space_tag_t, bus_space_handle_t, bus_size_t,
	bus_size_t, bus_space_handle_t *);
static paddr_t	sparc_bus_mmap(bus_space_tag_t, bus_addr_t, off_t, int, int);
static void	*sparc_mainbus_intr_establish(bus_space_tag_t, int, int,
	int (*)(void *), void *, void (*)(void));
static int	sparc_bus_alloc(bus_space_tag_t, bus_addr_t, bus_addr_t, bus_size_t,
	bus_size_t, bus_size_t, int, bus_addr_t *, bus_space_handle_t *);
static void	sparc_bus_free(bus_space_tag_t, bus_space_handle_t, bus_size_t);

/* I/O space extent map; NULL until initialized elsewhere --
 * NOTE(review): confirm where it is created. */
struct extent *io_space = NULL;
   1613 
/*
 * Allocate bus space, dispatching through the tag's sparc_bus_alloc
 * method.  NOTE(review): _BS_CALL appears to supply the `return'
 * statement -- confirm against the macro definition.
 */
int
bus_space_alloc(bus_space_tag_t t, bus_addr_t rs, bus_addr_t re, bus_size_t s,
	bus_size_t a, bus_size_t b, int f, bus_addr_t *ap,
	bus_space_handle_t *hp)
{
	_BS_CALL(t, sparc_bus_alloc)(t, rs, re, s, a, b, f, ap, hp);
}
   1621 
/* Release bus space, dispatching through the tag's sparc_bus_free method. */
void
bus_space_free(bus_space_tag_t t, bus_space_handle_t h, bus_size_t s)
{
	_BS_CALL(t, sparc_bus_free)(t, h, s);
}
   1627 
/* Map bus space via the tag's sparc_bus_map method (no fixed VA: 0). */
int
bus_space_map(bus_space_tag_t t, bus_addr_t a, bus_size_t s, int f,
	bus_space_handle_t *hp)
{
	_BS_CALL(t, sparc_bus_map)(t, a, s, f, 0, hp);
}
   1634 
/* Unmap bus space via the tag's sparc_bus_unmap method (result discarded). */
void
bus_space_unmap(bus_space_tag_t t, bus_space_handle_t h, bus_size_t s)
{
	_BS_VOID_CALL(t, sparc_bus_unmap)(t, h, s);
}
   1640 
/* Create a sub-handle via the tag's sparc_bus_subregion method. */
int
bus_space_subregion(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
	bus_size_t s, bus_space_handle_t *hp)
{
	_BS_CALL(t, sparc_bus_subregion)(t, h, o, s, hp);
}
   1647 
/* mmap(2) support, dispatched through the tag's sparc_bus_mmap method. */
paddr_t
bus_space_mmap(bus_space_tag_t t, bus_addr_t a, off_t o, int p, int f)
{
	_BS_CALL(t, sparc_bus_mmap)(t, a, o, p, f);
}
   1653 
   1654 /*
   1655  *	void bus_space_read_multi_N(bus_space_tag_t tag,
   1656  *	    bus_space_handle_t bsh, bus_size_t offset,
   1657  *	    uintN_t *addr, bus_size_t count);
   1658  *
   1659  * Read `count' 1, 2, 4, or 8 byte quantities from bus space
   1660  * described by tag/handle/offset and copy into buffer provided.
   1661  */
   1662 void
   1663 bus_space_read_multi_1(bus_space_tag_t t, bus_space_handle_t h,
   1664 	bus_size_t o, uint8_t * a, bus_size_t c)
   1665 {
   1666 	while (c-- > 0)
   1667 		*a++ = bus_space_read_1(t, h, o);
   1668 }
   1669 
   1670 void
   1671 bus_space_read_multi_2(bus_space_tag_t t, bus_space_handle_t h,
   1672 	bus_size_t o, uint16_t * a, bus_size_t c)
   1673 {
   1674 	while (c-- > 0)
   1675 		*a++ = bus_space_read_2(t, h, o);
   1676 }
   1677 
   1678 void
   1679 bus_space_read_multi_4(bus_space_tag_t t, bus_space_handle_t h,
   1680 	bus_size_t o, uint32_t * a, bus_size_t c)
   1681 {
   1682 	while (c-- > 0)
   1683 		*a++ = bus_space_read_4(t, h, o);
   1684 }
   1685 
   1686 void
   1687 bus_space_read_multi_8(bus_space_tag_t t, bus_space_handle_t h,
   1688 	bus_size_t o, uint64_t * a, bus_size_t c)
   1689 {
   1690 	while (c-- > 0)
   1691 		*a++ = bus_space_read_8(t, h, o);
   1692 }
   1693 
   1694 /*
   1695  *	void bus_space_write_multi_N(bus_space_tag_t tag,
   1696  *	    bus_space_handle_t bsh, bus_size_t offset,
   1697  *	    const uintN_t *addr, bus_size_t count);
   1698  *
   1699  * Write `count' 1, 2, 4, or 8 byte quantities from the buffer
   1700  * provided to bus space described by tag/handle/offset.
   1701  */
   1702 void
   1703 bus_space_write_multi_1(bus_space_tag_t t,
   1704 	bus_space_handle_t h, bus_size_t o,
   1705 	const uint8_t *a, bus_size_t c)
   1706 {
   1707 	while (c-- > 0)
   1708 		bus_space_write_1(t, h, o, *a++);
   1709 }
   1710 
   1711 void
   1712 bus_space_write_multi_2(bus_space_tag_t t,
   1713 	bus_space_handle_t h, bus_size_t o,
   1714 	const uint16_t *a, bus_size_t c)
   1715 {
   1716 	while (c-- > 0)
   1717 		bus_space_write_2(t, h, o, *a++);
   1718 }
   1719 
   1720 void
   1721 bus_space_write_multi_4(bus_space_tag_t t,
   1722 	bus_space_handle_t h, bus_size_t o,
   1723 	const uint32_t *a, bus_size_t c)
   1724 {
   1725 	while (c-- > 0)
   1726 		bus_space_write_4(t, h, o, *a++);
   1727 }
   1728 
   1729 void
   1730 bus_space_write_multi_8(bus_space_tag_t t,
   1731 	bus_space_handle_t h, bus_size_t o,
   1732 	const uint64_t *a, bus_size_t c)
   1733 {
   1734 	while (c-- > 0)
   1735 		bus_space_write_8(t, h, o, *a++);
   1736 }
   1737 
   1738 /*
   1739  *	void bus_space_set_multi_stream_N(bus_space_tag_t tag,
   1740  *	    bus_space_handle_t bsh, bus_size_t offset, uintN_t val,
   1741  *	    bus_size_t count);
   1742  *
   1743  * Write the 1, 2, 4, or 8 byte value `val' to bus space described
   1744  * by tag/handle/offset `count' times.
   1745  */
   1746 void
   1747 bus_space_set_multi_stream_1(bus_space_tag_t t,
   1748 	bus_space_handle_t h, bus_size_t o, uint8_t v,
   1749 	bus_size_t c)
   1750 {
   1751 	while (c-- > 0)
   1752 		bus_space_write_stream_1(t, h, o, v);
   1753 }
   1754 
   1755 void
   1756 bus_space_set_multi_stream_2(bus_space_tag_t t,
   1757 	bus_space_handle_t h, bus_size_t o, uint16_t v,
   1758 	bus_size_t c)
   1759 {
   1760 	while (c-- > 0)
   1761 		bus_space_write_stream_2(t, h, o, v);
   1762 }
   1763 
   1764 void
   1765 bus_space_set_multi_stream_4(bus_space_tag_t t,
   1766 	bus_space_handle_t h, bus_size_t o, uint32_t v,
   1767 	bus_size_t c)
   1768 {
   1769 	while (c-- > 0)
   1770 		bus_space_write_stream_4(t, h, o, v);
   1771 }
   1772 
   1773 void
   1774 bus_space_set_multi_stream_8(bus_space_tag_t t,
   1775 	bus_space_handle_t h, bus_size_t o, uint64_t v,
   1776 	bus_size_t c)
   1777 {
   1778 	while (c-- > 0)
   1779 		bus_space_write_stream_8(t, h, o, v);
   1780 }
   1781 
   1782 /*
   1783  *	void bus_space_copy_region_stream_N(bus_space_tag_t tag,
   1784  *	    bus_space_handle_t bsh1, bus_size_t off1,
   1785  *	    bus_space_handle_t bsh2, bus_size_t off2,
   1786  *	    bus_size_t count);
   1787  *
   1788  * Copy `count' 1, 2, 4, or 8 byte values from bus space starting
   1789  * at tag/bsh1/off1 to bus space starting at tag/bsh2/off2.
   1790  */
   1791 void
   1792 bus_space_copy_region_stream_1(bus_space_tag_t t, bus_space_handle_t h1,
   1793 	bus_size_t o1, bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
   1794 {
   1795 	for (; c; c--, o1++, o2++)
   1796 	    bus_space_write_stream_1(t, h1, o1, bus_space_read_stream_1(t, h2, o2));
   1797 }
   1798 
   1799 void
   1800 bus_space_copy_region_stream_2(bus_space_tag_t t, bus_space_handle_t h1,
   1801 	bus_size_t o1, bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
   1802 {
   1803 	for (; c; c--, o1+=2, o2+=2)
   1804 	    bus_space_write_stream_2(t, h1, o1, bus_space_read_stream_2(t, h2, o2));
   1805 }
   1806 
   1807 void
   1808 bus_space_copy_region_stream_4(bus_space_tag_t t, bus_space_handle_t h1,
   1809 	bus_size_t o1, bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
   1810 {
   1811 	for (; c; c--, o1+=4, o2+=4)
   1812 	    bus_space_write_stream_4(t, h1, o1, bus_space_read_stream_4(t, h2, o2));
   1813 }
   1814 
   1815 void
   1816 bus_space_copy_region_stream_8(bus_space_tag_t t, bus_space_handle_t h1,
   1817 	bus_size_t o1, bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
   1818 {
   1819 	for (; c; c--, o1+=8, o2+=8)
   1820 	    bus_space_write_stream_8(t, h1, o1, bus_space_read_8(t, h2, o2));
   1821 }
   1822 
   1823 /*
   1824  *	void bus_space_set_region_stream_N(bus_space_tag_t tag,
   1825  *	    bus_space_handle_t bsh, bus_size_t off,
   1826  *	    uintN_t *addr, bus_size_t count);
   1827  *
   1828  */
   1829 void
   1830 bus_space_set_region_stream_1(bus_space_tag_t t, bus_space_handle_t h,
   1831 	bus_size_t o, const uint8_t v, bus_size_t c)
   1832 {
   1833 	for (; c; c--, o++)
   1834 		bus_space_write_stream_1(t, h, o, v);
   1835 }
   1836 
   1837 void
   1838 bus_space_set_region_stream_2(bus_space_tag_t t, bus_space_handle_t h,
   1839 	bus_size_t o, const uint16_t v, bus_size_t c)
   1840 {
   1841 	for (; c; c--, o+=2)
   1842 		bus_space_write_stream_2(t, h, o, v);
   1843 }
   1844 
   1845 void
   1846 bus_space_set_region_stream_4(bus_space_tag_t t, bus_space_handle_t h,
   1847 	bus_size_t o, const uint32_t v, bus_size_t c)
   1848 {
   1849 	for (; c; c--, o+=4)
   1850 		bus_space_write_stream_4(t, h, o, v);
   1851 }
   1852 
   1853 void
   1854 bus_space_set_region_stream_8(bus_space_tag_t t, bus_space_handle_t h,
   1855 	bus_size_t o, const uint64_t v, bus_size_t c)
   1856 {
   1857 	for (; c; c--, o+=8)
   1858 		bus_space_write_stream_8(t, h, o, v);
   1859 }
   1860 
   1861 
   1862 /*
   1863  *	void bus_space_read_multi_stream_N(bus_space_tag_t tag,
   1864  *	    bus_space_handle_t bsh, bus_size_t offset,
   1865  *	    uintN_t *addr, bus_size_t count);
   1866  *
   1867  * Read `count' 1, 2, 4, or 8 byte quantities from bus space
   1868  * described by tag/handle/offset and copy into buffer provided.
   1869  */
   1870 void
   1871 bus_space_read_multi_stream_1(bus_space_tag_t t,
   1872 	bus_space_handle_t h, bus_size_t o,
   1873 	uint8_t *a, bus_size_t c)
   1874 {
   1875 	while (c-- > 0)
   1876 		*a++ = bus_space_read_stream_1(t, h, o);
   1877 }
   1878 
   1879 void
   1880 bus_space_read_multi_stream_2(bus_space_tag_t t,
   1881 	bus_space_handle_t h, bus_size_t o,
   1882 	uint16_t *a, bus_size_t c)
   1883 {
   1884 	while (c-- > 0)
   1885 		*a++ = bus_space_read_stream_2(t, h, o);
   1886 }
   1887 
   1888 void
   1889 bus_space_read_multi_stream_4(bus_space_tag_t t,
   1890 	bus_space_handle_t h, bus_size_t o,
   1891 	uint32_t *a, bus_size_t c)
   1892 {
   1893 	while (c-- > 0)
   1894 		*a++ = bus_space_read_stream_4(t, h, o);
   1895 }
   1896 
   1897 void
   1898 bus_space_read_multi_stream_8(bus_space_tag_t t,
   1899 	bus_space_handle_t h, bus_size_t o,
   1900 	uint64_t *a, bus_size_t c)
   1901 {
   1902 	while (c-- > 0)
   1903 		*a++ = bus_space_read_stream_8(t, h, o);
   1904 }
   1905 
   1906 /*
   1907  *	void bus_space_read_region_stream_N(bus_space_tag_t tag,
   1908  *	    bus_space_handle_t bsh, bus_size_t off,
   1909  *	    uintN_t *addr, bus_size_t count);
   1910  *
   1911  */
   1912 void
   1913 bus_space_read_region_stream_1(bus_space_tag_t t, bus_space_handle_t h,
   1914 	bus_size_t o, uint8_t *a, bus_size_t c)
   1915 {
   1916 	for (; c; a++, c--, o++)
   1917 		*a = bus_space_read_stream_1(t, h, o);
   1918 }
   1919 void
   1920 bus_space_read_region_stream_2(bus_space_tag_t t, bus_space_handle_t h,
   1921 	bus_size_t o, uint16_t *a, bus_size_t c)
   1922 {
   1923 	for (; c; a++, c--, o+=2)
   1924 		*a = bus_space_read_stream_2(t, h, o);
   1925  }
   1926 void
   1927 bus_space_read_region_stream_4(bus_space_tag_t t, bus_space_handle_t h,
   1928 	bus_size_t o, uint32_t *a, bus_size_t c)
   1929 {
   1930 	for (; c; a++, c--, o+=4)
   1931 		*a = bus_space_read_stream_4(t, h, o);
   1932 }
   1933 void
   1934 bus_space_read_region_stream_8(bus_space_tag_t t, bus_space_handle_t h,
   1935 	bus_size_t o, uint64_t *a, bus_size_t c)
   1936 {
   1937 	for (; c; a++, c--, o+=8)
   1938 		*a = bus_space_read_stream_8(t, h, o);
   1939 }
   1940 
   1941 /*
   1942  *	void bus_space_write_multi_stream_N(bus_space_tag_t tag,
   1943  *	    bus_space_handle_t bsh, bus_size_t offset,
   1944  *	    const uintN_t *addr, bus_size_t count);
   1945  *
   1946  * Write `count' 1, 2, 4, or 8 byte quantities from the buffer
   1947  * provided to bus space described by tag/handle/offset.
   1948  */
   1949 void
   1950 bus_space_write_multi_stream_1(bus_space_tag_t t,
   1951 	bus_space_handle_t h, bus_size_t o,
   1952 	const uint8_t *a, bus_size_t c)
   1953 {
   1954 	while (c-- > 0)
   1955 		bus_space_write_stream_1(t, h, o, *a++);
   1956 }
   1957 
   1958 void
   1959 bus_space_write_multi_stream_2(bus_space_tag_t t,
   1960 	bus_space_handle_t h, bus_size_t o,
   1961 	const uint16_t *a, bus_size_t c)
   1962 {
   1963 	while (c-- > 0)
   1964 		bus_space_write_stream_2(t, h, o, *a++);
   1965 }
   1966 
   1967 void
   1968 bus_space_write_multi_stream_4(bus_space_tag_t t,
   1969 	bus_space_handle_t h, bus_size_t o,
   1970 	const uint32_t *a, bus_size_t c)
   1971 {
   1972 	while (c-- > 0)
   1973 		bus_space_write_stream_4(t, h, o, *a++);
   1974 }
   1975 
   1976 void
   1977 bus_space_write_multi_stream_8(bus_space_tag_t t,
   1978 	bus_space_handle_t h, bus_size_t o,
   1979 	const uint64_t *a, bus_size_t c)
   1980 {
   1981 	while (c-- > 0)
   1982 		bus_space_write_stream_8(t, h, o, *a++);
   1983 }
   1984 
   1985 /*
   1986  *	void bus_space_copy_region_N(bus_space_tag_t tag,
   1987  *	    bus_space_handle_t bsh1, bus_size_t off1,
   1988  *	    bus_space_handle_t bsh2, bus_size_t off2,
   1989  *	    bus_size_t count);
   1990  *
   1991  * Copy `count' 1, 2, 4, or 8 byte values from bus space starting
   1992  * at tag/bsh1/off1 to bus space starting at tag/bsh2/off2.
   1993  */
/*
 * NOTE(review): bus_space(9) and the comment above document copy_region
 * as copying FROM bsh1/off1 TO bsh2/off2, but this writes to h1/o1 from
 * h2/o2 (the reverse direction) -- confirm against callers before
 * relying on the documented direction.
 */
void
bus_space_copy_region_1(bus_space_tag_t t, bus_space_handle_t h1, bus_size_t o1,
	bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
{
	for (; c; c--, o1++, o2++)
	    bus_space_write_1(t, h1, o1, bus_space_read_1(t, h2, o2));
}
   2001 
/*
 * NOTE(review): copy direction is h2/o2 -> h1/o1, the reverse of the
 * bus_space(9) documentation -- confirm against callers (see _1 above
 * for the same pattern).
 */
void
bus_space_copy_region_2(bus_space_tag_t t, bus_space_handle_t h1, bus_size_t o1,
	bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
{
	for (; c; c--, o1+=2, o2+=2)
	    bus_space_write_2(t, h1, o1, bus_space_read_2(t, h2, o2));
}
   2009 
/*
 * NOTE(review): copy direction is h2/o2 -> h1/o1, the reverse of the
 * bus_space(9) documentation -- confirm against callers.
 */
void
bus_space_copy_region_4(bus_space_tag_t t, bus_space_handle_t h1, bus_size_t o1,
	bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
{
	for (; c; c--, o1+=4, o2+=4)
	    bus_space_write_4(t, h1, o1, bus_space_read_4(t, h2, o2));
}
   2017 
/*
 * NOTE(review): copy direction is h2/o2 -> h1/o1, the reverse of the
 * bus_space(9) documentation -- confirm against callers.
 */
void
bus_space_copy_region_8(bus_space_tag_t t, bus_space_handle_t h1, bus_size_t o1,
	bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
{
	for (; c; c--, o1+=8, o2+=8)
	    bus_space_write_8(t, h1, o1, bus_space_read_8(t, h2, o2));
}
   2025 
   2026 /*
   2027  *	void bus_space_set_region_N(bus_space_tag_t tag,
   2028  *	    bus_space_handle_t bsh, bus_size_t off,
   2029  *	    uintN_t *addr, bus_size_t count);
   2030  *
   2031  */
   2032 void
   2033 bus_space_set_region_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
   2034 	const uint8_t v, bus_size_t c)
   2035 {
   2036 	for (; c; c--, o++)
   2037 		bus_space_write_1(t, h, o, v);
   2038 }
   2039 
   2040 void
   2041 bus_space_set_region_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
   2042 	const uint16_t v, bus_size_t c)
   2043 {
   2044 	for (; c; c--, o+=2)
   2045 		bus_space_write_2(t, h, o, v);
   2046 }
   2047 
   2048 void
   2049 bus_space_set_region_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
   2050 	const uint32_t v, bus_size_t c)
   2051 {
   2052 	for (; c; c--, o+=4)
   2053 		bus_space_write_4(t, h, o, v);
   2054 }
   2055 
   2056 void
   2057 bus_space_set_region_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
   2058 	const uint64_t v, bus_size_t c)
   2059 {
   2060 	for (; c; c--, o+=8)
   2061 		bus_space_write_8(t, h, o, v);
   2062 }
   2063 
   2064 
   2065 /*
   2066  *	void bus_space_set_multi_N(bus_space_tag_t tag,
   2067  *	    bus_space_handle_t bsh, bus_size_t offset, uintN_t val,
   2068  *	    bus_size_t count);
   2069  *
   2070  * Write the 1, 2, 4, or 8 byte value `val' to bus space described
   2071  * by tag/handle/offset `count' times.
   2072  */
   2073 void
   2074 bus_space_set_multi_1(bus_space_tag_t t,
   2075 	bus_space_handle_t h, bus_size_t o, uint8_t v,
   2076 	bus_size_t c)
   2077 {
   2078 	while (c-- > 0)
   2079 		bus_space_write_1(t, h, o, v);
   2080 }
   2081 
   2082 void
   2083 bus_space_set_multi_2(bus_space_tag_t t,
   2084 	bus_space_handle_t h, bus_size_t o, uint16_t v,
   2085 	bus_size_t c)
   2086 {
   2087 	while (c-- > 0)
   2088 		bus_space_write_2(t, h, o, v);
   2089 }
   2090 
   2091 void
   2092 bus_space_set_multi_4(bus_space_tag_t t,
   2093 	bus_space_handle_t h, bus_size_t o, uint32_t v,
   2094 	bus_size_t c)
   2095 {
   2096 	while (c-- > 0)
   2097 		bus_space_write_4(t, h, o, v);
   2098 }
   2099 
   2100 void
   2101 bus_space_set_multi_8(bus_space_tag_t t,
   2102 	bus_space_handle_t h, bus_size_t o, uint64_t v,
   2103 	bus_size_t c)
   2104 {
   2105 	while (c-- > 0)
   2106 		bus_space_write_8(t, h, o, v);
   2107 }
   2108 
   2109 /*
   2110  *	void bus_space_write_region_N(bus_space_tag_t tag,
   2111  *	    bus_space_handle_t bsh, bus_size_t off,
   2112  *	    uintN_t *addr, bus_size_t count);
   2113  *
   2114  */
   2115 void
   2116 bus_space_write_region_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
   2117 	const uint8_t *a, bus_size_t c)
   2118 {
   2119 	for (; c; a++, c--, o++)
   2120 		bus_space_write_1(t, h, o, *a);
   2121 }
   2122 
   2123 void
   2124 bus_space_write_region_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
   2125 	const uint16_t *a, bus_size_t c)
   2126 {
   2127 	for (; c; a++, c--, o+=2)
   2128 		bus_space_write_2(t, h, o, *a);
   2129 }
   2130 
   2131 void
   2132 bus_space_write_region_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
   2133 	const uint32_t *a, bus_size_t c)
   2134 {
   2135 	for (; c; a++, c--, o+=4)
   2136 		bus_space_write_4(t, h, o, *a);
   2137 }
   2138 
   2139 void
   2140 bus_space_write_region_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
   2141 	const uint64_t *a, bus_size_t c)
   2142 {
   2143 	for (; c; a++, c--, o+=8)
   2144 		bus_space_write_8(t, h, o, *a);
   2145 }
   2146 
   2147 
   2148 /*
   2149  *	void bus_space_read_region_N(bus_space_tag_t tag,
   2150  *	    bus_space_handle_t bsh, bus_size_t off,
   2151  *	    uintN_t *addr, bus_size_t count);
   2152  *
   2153  */
   2154 void
   2155 bus_space_read_region_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
   2156 	uint8_t *a, bus_size_t c)
   2157 {
   2158 	for (; c; a++, c--, o++)
   2159 		*a = bus_space_read_1(t, h, o);
   2160 }
   2161 void
   2162 bus_space_read_region_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
   2163 	uint16_t *a, bus_size_t c)
   2164 {
   2165 	for (; c; a++, c--, o+=2)
   2166 		*a = bus_space_read_2(t, h, o);
   2167  }
   2168 void
   2169 bus_space_read_region_4(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
   2170 	uint32_t *a, bus_size_t c)
   2171 {
   2172 	for (; c; a++, c--, o+=4)
   2173 		*a = bus_space_read_4(t, h, o);
   2174 }
   2175 void
   2176 bus_space_read_region_8(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o,
   2177 	uint64_t *a, bus_size_t c)
   2178 {
   2179 	for (; c; a++, c--, o+=8)
   2180 		*a = bus_space_read_8(t, h, o);
   2181 }
   2182 
   2183 /*
   2184  *	void bus_space_write_region_stream_N(bus_space_tag_t tag,
   2185  *	    bus_space_handle_t bsh, bus_size_t off,
   2186  *	    uintN_t *addr, bus_size_t count);
   2187  *
   2188  */
   2189 void
   2190 bus_space_write_region_stream_1(bus_space_tag_t t, bus_space_handle_t h,
   2191 	bus_size_t o, const uint8_t *a, bus_size_t c)
   2192 {
   2193 	for (; c; a++, c--, o++)
   2194 		bus_space_write_stream_1(t, h, o, *a);
   2195 }
   2196 
   2197 void
   2198 bus_space_write_region_stream_2(bus_space_tag_t t, bus_space_handle_t h,
   2199 	bus_size_t o, const uint16_t *a, bus_size_t c)
   2200 {
   2201 	for (; c; a++, c--, o+=2)
   2202 		bus_space_write_stream_2(t, h, o, *a);
   2203 }
   2204 
   2205 void
   2206 bus_space_write_region_stream_4(bus_space_tag_t t, bus_space_handle_t h,
   2207 	bus_size_t o, const uint32_t *a, bus_size_t c)
   2208 {
   2209 	for (; c; a++, c--, o+=4)
   2210 		bus_space_write_stream_4(t, h, o, *a);
   2211 }
   2212 
   2213 void
   2214 bus_space_write_region_stream_8(bus_space_tag_t t, bus_space_handle_t h,
   2215 	bus_size_t o, const uint64_t *a, bus_size_t c)
   2216 {
   2217 	for (; c; a++, c--, o+=8)
   2218 		bus_space_write_stream_8(t, h, o, *a);
   2219 }
   2220 
   2221 /*
   2222  * Allocate a new bus tag and have it inherit the methods of the
   2223  * given parent.
   2224  */
   2225 bus_space_tag_t
   2226 bus_space_tag_alloc(bus_space_tag_t parent, void *cookie)
   2227 {
   2228 	struct sparc_bus_space_tag *sbt;
   2229 
   2230 	sbt = malloc(sizeof(struct sparc_bus_space_tag),
   2231 		     M_DEVBUF, M_NOWAIT|M_ZERO);
   2232 	if (sbt == NULL)
   2233 		return (NULL);
   2234 
   2235 	if (parent) {
   2236 		memcpy(sbt, parent, sizeof(*sbt));
   2237 		sbt->parent = parent;
   2238 		sbt->ranges = NULL;
   2239 		sbt->nranges = 0;
   2240 	}
   2241 
   2242 	sbt->cookie = cookie;
   2243 	return (sbt);
   2244 }
   2245 
   2246 /*
   2247  * Generic routine to translate an address using OpenPROM `ranges'.
   2248  */
   2249 int
   2250 bus_space_translate_address_generic(struct openprom_range *ranges, int nranges,
   2251     bus_addr_t *bap)
   2252 {
   2253 	int i, space = BUS_ADDR_IOSPACE(*bap);
   2254 
   2255 	for (i = 0; i < nranges; i++) {
   2256 		struct openprom_range *rp = &ranges[i];
   2257 
   2258 		if (rp->or_child_space != space)
   2259 			continue;
   2260 
   2261 		/* We've found the connection to the parent bus. */
   2262 		*bap = BUS_ADDR(rp->or_parent_space,
   2263 		    rp->or_parent_base + BUS_ADDR_PADDR(*bap));
   2264 		return (0);
   2265 	}
   2266 
   2267 	return (EINVAL);
   2268 }
   2269 
/*
 * Map bus space at `addr'/`size' and return a handle in `hp'.
 *
 * PCI config space is never mapped in; it is accessed via MMU-bypass
 * ASIs.  On _LP64 kernels, non-LINEAR requests likewise use bypass
 * ASIs and need no mapping.  Otherwise a virtual range is allocated
 * from the IODEV extent and entered into the kernel pmap page by page.
 *
 * `unused' is ignored (historical virtual-address hint).
 */
int
sparc_bus_map(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
	int flags, vaddr_t unused, bus_space_handle_t *hp)
{
	vaddr_t v;
	uint64_t pa;
	paddr_t	pm_flags = 0;
	vm_prot_t pm_prot = VM_PROT_READ;
	int err, map_little = 0;

	/* Lazily create the IOSPACE extent on first use. */
	if (io_space == NULL)
		/*
		 * And set up IOSPACE extents.
		 */
		io_space = extent_create("IOSPACE",
					 (u_long)IODEV_BASE, (u_long)IODEV_END,
					 0, 0, EX_NOWAIT);


	size = round_page(size);
	if (size == 0) {
		printf("sparc_bus_map: zero size\n");
		return (EINVAL);
	}
	/* PCI IO/memory spaces are little-endian; others keep big-endian. */
	switch (t->type) {
	case PCI_CONFIG_BUS_SPACE:
		/*
		 * PCI config space is special.
		 *
		 * It's really big and seldom used.  In order not to run
		 * out of IO mappings, config space will not be mapped in,
		 * rather it will be accessed through MMU bypass ASI accesses.
		 */
		if (flags & BUS_SPACE_MAP_LINEAR)
			return (-1);
		hp->_ptr = addr;
		hp->_asi = ASI_PHYS_NON_CACHED_LITTLE;
		hp->_sasi = ASI_PHYS_NON_CACHED;
		DPRINTF(BSDB_MAP, ("\n%s: config type %x flags %x "
			"addr %016llx size %016llx virt %llx\n", __func__,
			(int)t->type, (int) flags, (unsigned long long)addr,
			(unsigned long long)size,
			(unsigned long long)hp->_ptr));
		return (0);
	case PCI_IO_BUS_SPACE:
		map_little = 1;
		break;
	case PCI_MEMORY_BUS_SPACE:
		map_little = 1;
		break;
	default:
		map_little = 0;
		break;
	}

#ifdef _LP64
	/* If it's not LINEAR don't bother to map it.  Use phys accesses. */
	if ((flags & BUS_SPACE_MAP_LINEAR) == 0) {
		hp->_ptr = addr;
		if (map_little)
			hp->_asi = ASI_PHYS_NON_CACHED_LITTLE;
		else
			hp->_asi = ASI_PHYS_NON_CACHED;
		hp->_sasi = ASI_PHYS_NON_CACHED;
		return (0);
	}
#endif

	if (!(flags & BUS_SPACE_MAP_CACHEABLE))
		pm_flags |= PMAP_NC;

	/* Carve a page-aligned virtual range out of the IO extent. */
	if ((err = extent_alloc(io_space, size, PAGE_SIZE,
		0, EX_NOWAIT|EX_BOUNDZERO, (u_long *)&v)))
			panic("sparc_bus_map: cannot allocate io_space: %d", err);

	/* note: preserve page offset */
	hp->_ptr = (v | ((u_long)addr & PGOFSET));
	hp->_sasi = ASI_PRIMARY;
	if (map_little)
		hp->_asi = ASI_PRIMARY_LITTLE;
	else
		hp->_asi = ASI_PRIMARY;

	pa = trunc_page(addr);
	if (!(flags&BUS_SPACE_MAP_READONLY))
		pm_prot |= VM_PROT_WRITE;

	DPRINTF(BSDB_MAP, ("\n%s: type %x flags %x addr %016llx prot %02x "
		"pm_flags %x size %016llx virt %llx paddr %016llx\n", __func__,
		(int)t->type, (int)flags, (unsigned long long)addr, pm_prot,
		(int)pm_flags, (unsigned long long)size,
		(unsigned long long)hp->_ptr, (unsigned long long)pa));

	/* Enter each page of the region into the kernel pmap. */
	do {
		DPRINTF(BSDB_MAP, ("%s: phys %llx virt %p hp %llx\n",
			__func__,
			(unsigned long long)pa, (char *)v,
			(unsigned long long)hp->_ptr));
		pmap_kenter_pa(v, pa | pm_flags, pm_prot, 0);
		v += PAGE_SIZE;
		pa += PAGE_SIZE;
	} while ((size -= PAGE_SIZE) > 0);
	return (0);
}
   2374 
   2375 int
   2376 sparc_bus_subregion(bus_space_tag_t tag, bus_space_handle_t handle,
   2377 	bus_size_t offset, bus_size_t size, bus_space_handle_t *nhandlep)
   2378 {
   2379 	nhandlep->_ptr = handle._ptr + offset;
   2380 	nhandlep->_asi = handle._asi;
   2381 	nhandlep->_sasi = handle._sasi;
   2382 	return (0);
   2383 }
   2384 
/*
 * Undo sparc_bus_map().  Handles that use physical (bypass) ASIs were
 * never mapped, so there is nothing to tear down for them.
 *
 * NOTE(review): sparc_bus_map() allocated round_page(size) from the
 * extent, but the raw `size' is freed here -- confirm callers always
 * pass a page-rounded size, otherwise extent_free() can fail.
 */
int
sparc_bus_unmap(bus_space_tag_t t, bus_space_handle_t bh, bus_size_t size)
{
	vaddr_t va = trunc_page((vaddr_t)bh._ptr);
	vaddr_t endva = va + round_page(size);
	int error = 0;

	if (PHYS_ASI(bh._asi)) return (0);

	error = extent_free(io_space, va, size, EX_NOWAIT);
	if (error) printf("sparc_bus_unmap: extent_free returned %d\n", error);

	pmap_remove(pmap_kernel(), va, endva);
	return (0);
}
   2400 
   2401 paddr_t
   2402 sparc_bus_mmap(bus_space_tag_t t, bus_addr_t paddr, off_t off, int prot,
   2403 	int flags)
   2404 {
   2405 	/* Devices are un-cached... although the driver should do that */
   2406 	return ((paddr+off)|PMAP_NC);
   2407 }
   2408 
   2409 
   2410 void *
   2411 sparc_mainbus_intr_establish(bus_space_tag_t t, int pil, int level,
   2412 	int	(*handler)(void *), void *arg, void	(*fastvec)(void) /* ignored */)
   2413 {
   2414 	struct intrhand *ih;
   2415 
   2416 	ih = (struct intrhand *)
   2417 		malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
   2418 	if (ih == NULL)
   2419 		return (NULL);
   2420 
   2421 	ih->ih_fun = handler;
   2422 	ih->ih_arg = arg;
   2423 	intr_establish(pil, level != IPL_VM, ih);
   2424 	return (ih);
   2425 }
   2426 
   2427 int
   2428 sparc_bus_alloc(bus_space_tag_t t, bus_addr_t rs, bus_addr_t re, bus_size_t s,
   2429 	bus_size_t a, bus_size_t b, int f, bus_addr_t *ap, bus_space_handle_t *hp)
   2430 {
   2431 	return (ENOTTY);
   2432 }
   2433 
   2434 void
   2435 sparc_bus_free(bus_space_tag_t t, bus_space_handle_t h, bus_size_t s)
   2436 {
   2437 	return;
   2438 }
   2439 
/* Root (UPA) bus space tag; children inherit from this via tag_alloc. */
struct sparc_bus_space_tag mainbus_space_tag = {
	NULL,				/* cookie */
	NULL,				/* parent bus tag */
	NULL,				/* ranges */
	0,				/* nranges */
	UPA_BUS_SPACE,			/* type */
	sparc_bus_alloc,		/* bus_space_alloc */
	sparc_bus_free,			/* bus_space_free */
	sparc_bus_map,			/* bus_space_map */
	sparc_bus_unmap,		/* bus_space_unmap */
	sparc_bus_subregion,		/* bus_space_subregion */
	sparc_bus_mmap,			/* bus_space_mmap */
	sparc_mainbus_intr_establish	/* bus_intr_establish */
};
   2454 
   2455 
/*
 * Capture the machine context of lwp `l' into `mcp': general registers
 * from the trapframe and, when present, a snapshot of the FPU state.
 * Sets _UC_CPU|_UC_TLSBASE in *flags, plus _UC_FPU when FP state is
 * saved.
 */
void
cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
{
	__greg_t *gr = mcp->__gregs;
	__greg_t ras_pc;
	const struct trapframe64 *tf = l->l_md.md_tf;

	/* First ensure consistent stack state (see sendsig). */ /* XXX? */
	write_user_windows();
	if (rwindow_save(l)) {
		/* Window spill to the user stack failed: kill the lwp. */
		mutex_enter(l->l_proc->p_lock);
		sigexit(l, SIGILL);
	}

	/* For now: Erase any random indicators for optional state. */
	(void)memset(mcp, 0, sizeof (*mcp));

	/* Save general register context. */
#ifdef __arch64__
	gr[_REG_CCR] = (tf->tf_tstate & TSTATE_CCR) >> TSTATE_CCR_SHIFT;
#else
	gr[_REG_PSR] = TSTATECCR_TO_PSR(tf->tf_tstate);
#endif
	gr[_REG_PC]  = tf->tf_pc;
	gr[_REG_nPC] = tf->tf_npc;
	gr[_REG_Y]   = tf->tf_y;
	gr[_REG_G1]  = tf->tf_global[1];
	gr[_REG_G2]  = tf->tf_global[2];
	gr[_REG_G3]  = tf->tf_global[3];
	gr[_REG_G4]  = tf->tf_global[4];
	gr[_REG_G5]  = tf->tf_global[5];
	gr[_REG_G6]  = tf->tf_global[6];
	gr[_REG_G7]  = tf->tf_global[7];
	gr[_REG_O0]  = tf->tf_out[0];
	gr[_REG_O1]  = tf->tf_out[1];
	gr[_REG_O2]  = tf->tf_out[2];
	gr[_REG_O3]  = tf->tf_out[3];
	gr[_REG_O4]  = tf->tf_out[4];
	gr[_REG_O5]  = tf->tf_out[5];
	gr[_REG_O6]  = tf->tf_out[6];
	gr[_REG_O7]  = tf->tf_out[7];
#ifdef __arch64__
	gr[_REG_ASI] = (tf->tf_tstate & TSTATE_ASI) >> TSTATE_ASI_SHIFT;
#if 0 /* not yet supported */
	gr[_REG_FPRS] = ;
#endif
#endif /* __arch64__ */

	/* If inside a restartable atomic sequence, restart it. */
	if ((ras_pc = (__greg_t)ras_lookup(l->l_proc,
	    (void *) gr[_REG_PC])) != -1) {
		gr[_REG_PC] = ras_pc;
		gr[_REG_nPC] = ras_pc + 4;
	}

	*flags |= (_UC_CPU|_UC_TLSBASE);

	mcp->__gwins = NULL;


	/* Save FP register context, if any. */
	if (l->l_md.md_fpstate != NULL) {
		struct fpstate64 *fsp;
		__fpregset_t *fpr = &mcp->__fpregs;

		/*
		 * If our FP context is currently held in the FPU, take a
		 * private snapshot - lazy FPU context switching can deal
		 * with it later when it becomes necessary.
		 * Otherwise, get it from the process's save area.
		 */
		fpusave_lwp(l, true);
		fsp = l->l_md.md_fpstate;
		memcpy(&fpr->__fpu_fr, fsp->fs_regs, sizeof (fpr->__fpu_fr));
		mcp->__fpregs.__fpu_q = NULL;	/* `Need more info.' */
		mcp->__fpregs.__fpu_fsr = fsp->fs_fsr;
		mcp->__fpregs.__fpu_qcnt = 0 /*fs.fs_qsize*/; /* See above */
		mcp->__fpregs.__fpu_q_entrysize =
		    (unsigned char) sizeof (*mcp->__fpregs.__fpu_q);
		mcp->__fpregs.__fpu_en = 1;
		*flags |= _UC_FPU;
	} else {
		mcp->__fpregs.__fpu_en = 0;
	}

	mcp->__xrs.__xrs_id = 0;	/* Solaris extension? */
}
   2542 
   2543 int
   2544 cpu_mcontext_validate(struct lwp *l, const mcontext_t *mc)
   2545 {
   2546 	const __greg_t *gr = mc->__gregs;
   2547 
   2548 	/*
   2549  	 * Only the icc bits in the psr are used, so it need not be
   2550  	 * verified.  pc and npc must be multiples of 4.  This is all
   2551  	 * that is required; if it holds, just do it.
   2552 	 */
   2553 	if (((gr[_REG_PC] | gr[_REG_nPC]) & 3) != 0 ||
   2554 	    gr[_REG_PC] == 0 || gr[_REG_nPC] == 0)
   2555 		return EINVAL;
   2556 
   2557 	return 0;
   2558 }
   2559 
/*
 * Install the machine context `mcp' into lwp `l' for setcontext(2) and
 * signal return.  _UC_CPU restores the general registers (after
 * validation), _UC_TLSBASE sets %g7 via lwp_setprivate(), _UC_FPU
 * restores FP state, and _UC_SETSTACK/_UC_CLRSTACK adjust the signal
 * stack flag.  Returns 0 or EINVAL from validation.
 */
int
cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
{
	const __greg_t *gr = mcp->__gregs;
	struct trapframe64 *tf = l->l_md.md_tf;
	struct proc *p = l->l_proc;
	int error;

	/* First ensure consistent stack state (see sendsig). */
	write_user_windows();
	if (rwindow_save(l)) {
		/* Window spill to the user stack failed: kill the lwp. */
		mutex_enter(p->p_lock);
		sigexit(l, SIGILL);
	}

	if ((flags & _UC_CPU) != 0) {
		error = cpu_mcontext_validate(l, mcp);
		if (error)
			return error;

		/* Restore general register context. */
		/* take only tstate CCR (and ASI) fields */
#ifdef __arch64__
		tf->tf_tstate = (tf->tf_tstate & ~(TSTATE_CCR | TSTATE_ASI)) |
		    ((gr[_REG_CCR] << TSTATE_CCR_SHIFT) & TSTATE_CCR) |
		    ((gr[_REG_ASI] << TSTATE_ASI_SHIFT) & TSTATE_ASI);
#else
		tf->tf_tstate = (tf->tf_tstate & ~TSTATE_CCR) |
		    PSRCC_TO_TSTATE(gr[_REG_PSR]);
#endif
		tf->tf_pc        = (uint64_t)gr[_REG_PC];
		tf->tf_npc       = (uint64_t)gr[_REG_nPC];
		tf->tf_y         = (uint64_t)gr[_REG_Y];
		tf->tf_global[1] = (uint64_t)gr[_REG_G1];
		tf->tf_global[2] = (uint64_t)gr[_REG_G2];
		tf->tf_global[3] = (uint64_t)gr[_REG_G3];
		tf->tf_global[4] = (uint64_t)gr[_REG_G4];
		tf->tf_global[5] = (uint64_t)gr[_REG_G5];
		tf->tf_global[6] = (uint64_t)gr[_REG_G6];
		/* done in lwp_setprivate */
		/* tf->tf_global[7] = (uint64_t)gr[_REG_G7]; */
		tf->tf_out[0]    = (uint64_t)gr[_REG_O0];
		tf->tf_out[1]    = (uint64_t)gr[_REG_O1];
		tf->tf_out[2]    = (uint64_t)gr[_REG_O2];
		tf->tf_out[3]    = (uint64_t)gr[_REG_O3];
		tf->tf_out[4]    = (uint64_t)gr[_REG_O4];
		tf->tf_out[5]    = (uint64_t)gr[_REG_O5];
		tf->tf_out[6]    = (uint64_t)gr[_REG_O6];
		tf->tf_out[7]    = (uint64_t)gr[_REG_O7];
		/* %asi restored above; %fprs not yet supported. */

		/* XXX mcp->__gwins */

		if (flags & _UC_TLSBASE)
			lwp_setprivate(l, (void *)(uintptr_t)gr[_REG_G7]);
	}

	/* Restore FP register context, if any. */
	if ((flags & _UC_FPU) != 0 && mcp->__fpregs.__fpu_en != 0) {
		struct fpstate64 *fsp;
		const __fpregset_t *fpr = &mcp->__fpregs;

		/*
		 * If we're the current FPU owner, simply reload it from
		 * the supplied context.  Otherwise, store it into the
		 * process' FPU save area (which is used to restore from
		 * by lazy FPU context switching); allocate it if necessary.
		 */
		if ((fsp = l->l_md.md_fpstate) == NULL) {
			fsp = pool_cache_get(fpstate_cache, PR_WAITOK);
			l->l_md.md_fpstate = fsp;
		} else {
			/* Drop the live context on the floor. */
			fpusave_lwp(l, false);
		}
		/* Note: sizeof fpr->__fpu_fr <= sizeof fsp->fs_regs. */
		memcpy(fsp->fs_regs, &fpr->__fpu_fr, sizeof (fpr->__fpu_fr));
		fsp->fs_fsr = mcp->__fpregs.__fpu_fsr;
		fsp->fs_qsize = 0;

#if 0
		/* Need more info! */
		mcp->__fpregs.__fpu_q = NULL;	/* `Need more info.' */
		mcp->__fpregs.__fpu_qcnt = 0 /*fs.fs_qsize*/; /* See above */
#endif
	}

	/* XXX mcp->__xrs */
	/* XXX mcp->__asrs */

	mutex_enter(p->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(p->p_lock);

	return 0;
}
   2659 
   2660 /*
   2661  * Preempt the current process if in interrupt from user mode,
   2662  * or after the current trap/syscall if in system mode.
   2663  */
void
cpu_need_resched(struct cpu_info *ci, int flags)
{

	/* Flag the CPU and request an AST so the flag gets noticed. */
	ci->ci_want_resched = 1;
	ci->ci_want_ast = 1;

#ifdef MULTIPROCESSOR
	if (ci == curcpu())
		return;
	/* Just interrupt the target CPU, so it can notice its AST */
	if ((flags & RESCHED_IMMED) != 0 &&
	    ci->ci_data.cpu_onproc != ci->ci_data.cpu_idlelwp)
		sparc64_send_ipi(ci->ci_cpuid, sparc64_ipi_nop, 0, 0);
#endif
}
   2680 
   2681 /*
   2682  * Notify an LWP that it has a signal pending, process as soon as possible.
   2683  */
void
cpu_signotify(struct lwp *l)
{
	struct cpu_info *ci = l->l_cpu;

	/* Request an AST on the lwp's CPU; IPI it if it is not us. */
	ci->ci_want_ast = 1;
#ifdef MULTIPROCESSOR
	if (ci != curcpu())
		sparc64_send_ipi(ci->ci_cpuid, sparc64_ipi_nop, 0, 0);
#endif
}
   2695 
   2696 bool
   2697 cpu_intr_p(void)
   2698 {
   2699 
   2700 	return curcpu()->ci_idepth >= 0;
   2701 }
   2702 
#ifdef MODULAR
/* Machine-dependent module initialization: nothing to do on sparc64. */
void
module_init_md(void)
{
}
#endif
   2709 
   2710 int
   2711 mm_md_physacc(paddr_t pa, vm_prot_t prot)
   2712 {
   2713 
   2714 	return pmap_pa_exists(pa) ? 0 : EFAULT;
   2715 }
   2716 
/*
 * Decide whether /dev/kmem access to `ptr' is handled specially:
 * the message buffer is always accessible, and the PROM range is
 * readable but not writable.
 */
int
mm_md_kernacc(void *ptr, vm_prot_t prot, bool *handled)
{
	/* XXX: Don't know where PROMs are on Ultras.  Think it's at f000000 */
	/*
	 * NOTE(review): prom_vstart (0x0f000000) and prom_vend (0xf0100000)
	 * differ by an order of magnitude -- one of them looks mistyped
	 * (vstart 0xf0000000 would match vend); confirm the real PROM range.
	 */
	const vaddr_t prom_vstart = 0xf000000, prom_vend = 0xf0100000;
	const vaddr_t msgbufpv = (vaddr_t)msgbufp, v = (vaddr_t)ptr;
	const size_t msgbufsz = msgbufp->msg_bufs +
	    offsetof(struct kern_msgbuf, msg_bufc);

	*handled = (v >= msgbufpv && v < msgbufpv + msgbufsz) ||
	    (v >= prom_vstart && v < prom_vend && (prot & VM_PROT_WRITE) == 0);
	return 0;
}
   2730 
   2731 int
   2732 mm_md_readwrite(dev_t dev, struct uio *uio)
   2733 {
   2734 
   2735 	return ENXIO;
   2736 }
   2737 
   2738 #ifdef __arch64__
   2739 void
   2740 sparc64_elf_mcmodel_check(struct exec_package *epp, const char *model,
   2741     size_t len)
   2742 {
   2743 	/* no model specific execution for 32bit processes */
   2744 	if (epp->ep_flags & EXEC_32)
   2745 		return;
   2746 
   2747 #ifdef __USE_TOPDOWN_VM
   2748 	/*
   2749 	 * we allow TOPDOWN_VM for all processes where the binary is compiled
   2750 	 * with the medany or medmid code model.
   2751 	 */
   2752 	if (strncmp(model, "medany", len) == 0 ||
   2753 	    strncmp(model, "medmid", len) == 0)
   2754 		epp->ep_flags |= EXEC_TOPDOWN_VM;
   2755 #endif
   2756 }
   2757 #endif
   2758