      1 /* $NetBSD: machdep.c,v 1.201 2025/03/09 18:27:39 hans Exp $	 */
      2 
      3 /*
      4  * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
      5  * All rights reserved.
      6  *
      7  * Changed for the VAX port (and for readability) /IC
      8  *
      9  * This code is derived from software contributed to Berkeley by the Systems
     10  * Programming Group of the University of Utah Computer Science Department.
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  * 3. Neither the name of the University nor the names of its contributors
     21  *    may be used to endorse or promote products derived from this software
     22  *    without specific prior written permission.
     23  *
     24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     34  * SUCH DAMAGE.
     35  *
     36  * from: Utah Hdr: machdep.c 1.63 91/04/24
     37  *
     38  * @(#)machdep.c	7.16 (Berkeley) 6/3/91
     39  */
     40 
     41 /*
     42  * Copyright (c) 2002, Hugh Graham.
      43  * Copyright (c) 1994, 1998 Ludd, University of Luleå, Sweden.
     44  * Copyright (c) 1993 Adam Glass
     45  * Copyright (c) 1988 University of Utah.
     46  *
     47  * Changed for the VAX port (and for readability) /IC
     48  *
     49  * This code is derived from software contributed to Berkeley by the Systems
     50  * Programming Group of the University of Utah Computer Science Department.
     51  *
     52  * Redistribution and use in source and binary forms, with or without
     53  * modification, are permitted provided that the following conditions
     54  * are met:
     55  * 1. Redistributions of source code must retain the above copyright
     56  *    notice, this list of conditions and the following disclaimer.
     57  * 2. Redistributions in binary form must reproduce the above copyright
     58  *    notice, this list of conditions and the following disclaimer in the
     59  *    documentation and/or other materials provided with the distribution.
     60  * 3. All advertising materials mentioning features or use of this software
     61  *    must display the following acknowledgement:
     62  *	This product includes software developed by the University of
     63  *	California, Berkeley and its contributors.
     64  * 4. Neither the name of the University nor the names of its contributors
     65  *    may be used to endorse or promote products derived from this software
     66  *    without specific prior written permission.
     67  *
     68  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     69  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     70  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     71  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     72  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     73  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     74  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     75  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     76  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     77  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     78  * SUCH DAMAGE.
     79  *
     80  * from: Utah Hdr: machdep.c 1.63 91/04/24
     81  *
     82  * @(#)machdep.c	7.16 (Berkeley) 6/3/91
     83  */
     84 
     85 #include <sys/cdefs.h>
     86 __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.201 2025/03/09 18:27:39 hans Exp $");
     87 
     88 #include "opt_ddb.h"
     89 #include "opt_compat_netbsd.h"
     90 #include "opt_compat_ultrix.h"
     91 #include "opt_modular.h"
     92 #include "opt_multiprocessor.h"
     93 #include "opt_lockdebug.h"
     94 
     95 #include <sys/param.h>
     96 #include <sys/systm.h>
     97 #include <sys/buf.h>
     98 #include <sys/conf.h>
     99 #include <sys/cpu.h>
    100 #include <sys/device.h>
    101 #include <sys/extent.h>
    102 #include <sys/kernel.h>
    103 #include <sys/ksyms.h>
    104 #include <sys/mount.h>
    105 #include <sys/msgbuf.h>
    106 #include <sys/mbuf.h>
    107 #include <sys/proc.h>
    108 #include <sys/ptrace.h>
    109 #include <sys/reboot.h>
    110 #include <sys/kauth.h>
    111 #include <sys/sysctl.h>
    112 #include <sys/time.h>
    113 
    114 #include <dev/cons.h>
    115 #include <dev/mm.h>
    116 
    117 #include <uvm/uvm_extern.h>
    118 
    119 #include <machine/sid.h>
    120 #include <machine/macros.h>
    121 #include <machine/nexus.h>
    122 #include <machine/reg.h>
    123 #include <machine/scb.h>
    124 #include <machine/leds.h>
    125 #include <vax/vax/gencons.h>
    126 
    127 #ifdef DDB
    128 #include <machine/db_machdep.h>
    129 #include <ddb/db_sym.h>
    130 #include <ddb/db_extern.h>
    131 #endif
    132 
    133 #include "leds.h"
    134 #include "smg.h"
    135 #include "ksyms.h"
    136 
    137 #define DEV_LEDS	13	/* minor device 13 is leds */
    138 
    139 extern vaddr_t virtual_avail, virtual_end;
    140 extern paddr_t avail_end;
    141 
    142 /*
     143  * We do these external declarations here; maybe they should be done
     144  * somewhere else...
    145  */
    146 char		machine[] = MACHINE;		/* from <machine/param.h> */
    147 char		machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */
    148 void *		msgbufaddr;
    149 int		*symtab_start;
    150 int		*symtab_end;
    151 int		symtab_nsyms;
    152 struct cpmbx	*cpmbx;		/* Console program mailbox address */
    153 
    154 /*
    155  * Extent map to manage I/O register space.  We allocate storage for
    156  * 32 regions in the map.  iomap_ex_malloc_safe will indicate that it's
    157  * safe to use malloc() to dynamically allocate region descriptors in
    158  * case we run out.
    159  */
    160 static long iomap_ex_storage[EXTENT_FIXED_STORAGE_SIZE(32) / sizeof(long)];
    161 static struct extent *iomap_ex;
    162 static int iomap_ex_malloc_safe;
    163 
    164 struct vm_map *phys_map = NULL;
    165 
    166 #ifdef DEBUG
    167 int iospace_inited = 0;
    168 #endif
    169 
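         /*
          * Machine-dependent startup: announce the system, run any
          * CPU-specific configuration hook, report memory, and create
          * the physio submap on machines that need one.
          */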
    170 void
    171 cpu_startup(void)
    172 {
    173 #if VAX46 || VAX48 || VAX49 || VAX53 || VAXANY
    174 	vaddr_t		minaddr, maxaddr;
    175 #endif
    176 	char pbuf[9];
    177 
    178 	/*
    179 	 * Initialize error message buffer.
    180 	 */
    181 	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));
    182 
    183 	/*
    184 	 * Good {morning,afternoon,evening,night}.
    185 	 * Also call CPU init on systems that need that.
    186 	 */
    187 	printf("%s%s", copyright, version);
    188 	printf("%s\n", cpu_getmodel());
     189 	if (dep_call->cpu_conf)
     190 		(*dep_call->cpu_conf)();
    191 
    192 	format_bytes(pbuf, sizeof(pbuf), avail_end);
    193 	printf("total memory = %s\n", pbuf);
    194 	panicstr = NULL;
    195 	mtpr(AST_NO, PR_ASTLVL);
    196 	spl0();
    197 
    198 #if VAX46 || VAX48 || VAX49 || VAX53 || VAXANY
    199 	minaddr = 0;
    200 
    201 	/*
    202 	 * Allocate a submap for physio.  This map effectively limits the
    203 	 * number of processes doing physio at any one time.
    204 	 */
    205 	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
    206 				   VM_PHYS_SIZE, 0, false, NULL);
    207 #endif
    208 
    209 	format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false)));
    210 	printf("avail memory = %s\n", pbuf);
    211 
    212 #ifdef DDB
    213 	if (boothowto & RB_KDB)
    214 		Debugger();
    215 #endif
    216 
    217 	iomap_ex_malloc_safe = 1;
    218 }
    219 
    220 uint32_t dumpmag = 0x8fca0101;
    221 int	dumpsize = 0;
    222 long	dumplo = 0;
    223 
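         /*
          * Figure out where on the dump device a crash dump should go
          * (dumplo) and clamp dumpsize to what fits on the device.
          */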
    224 void
    225 cpu_dumpconf(void)
    226 {
    227 	int	nblks;
    228 
    229 	/*
    230 	 * XXX include the final RAM page which is not included in physmem.
    231 	 */
    232 	if (dumpdev == NODEV)
    233 		return;
    234 	nblks = bdev_size(dumpdev);
    235 	if (nblks > 0) {
    236 		if (dumpsize > btoc(dbtob(nblks - dumplo)))
    237 			dumpsize = btoc(dbtob(nblks - dumplo));
    238 		else if (dumplo == 0)
    239 			dumplo = nblks - btodb(ctob(dumpsize));
    240 	}
    241 	/*
    242 	 * Don't dump on the first PAGE_SIZE (why PAGE_SIZE?) in case the dump
    243 	 * device includes a disk label.
    244 	 */
    245 	if (dumplo < btodb(PAGE_SIZE))
    246 		dumplo = btodb(PAGE_SIZE);
    247 
    248 	/*
    249 	 * If we have nothing to dump (XXX implement crash dumps),
    250 	 * make it clear for savecore that there is no dump.
    251 	 */
    252 	if (dumpsize <= 0)
    253 		dumplo = 0;
    254 }
    255 
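         /*
          * Helper for the machdep.booted_device sysctl node: report the
          * autoconf name of the device the system was booted from.
          */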
    256 static int
    257 sysctl_machdep_booted_device(SYSCTLFN_ARGS)
    258 {
    259 	struct sysctlnode node = *rnode;
    260 
    261 	if (booted_device == NULL)
    262 		return (EOPNOTSUPP);
    263 	node.sysctl_data = __UNCONST(device_xname(booted_device));
    264 	node.sysctl_size = strlen(device_xname(booted_device)) + 1;
    265 	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
    266 }
    267 
    268 SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
    269 {
    270 
    271 	sysctl_createv(clog, 0, NULL, NULL,
    272 		       CTLFLAG_PERMANENT,
    273 		       CTLTYPE_NODE, "machdep", NULL,
    274 		       NULL, 0, NULL, 0,
    275 		       CTL_MACHDEP, CTL_EOL);
    276 
    277 	sysctl_createv(clog, 0, NULL, NULL,
    278 		       CTLFLAG_PERMANENT,
    279 		       CTLTYPE_INT, "printfataltraps", NULL,
    280 		       NULL, 0, &cpu_printfataltraps, 0,
    281 		       CTL_MACHDEP, CPU_PRINTFATALTRAPS, CTL_EOL);
    282 	sysctl_createv(clog, 0, NULL, NULL,
    283 		       CTLFLAG_PERMANENT,
    284 		       CTLTYPE_STRUCT, "console_device", NULL,
    285 		       sysctl_consdev, 0, NULL, sizeof(dev_t),
    286 		       CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
    287 	sysctl_createv(clog, 0, NULL, NULL,
    288 		       CTLFLAG_PERMANENT,
    289 		       CTLTYPE_STRUCT, "booted_device", NULL,
    290 		       sysctl_machdep_booted_device, 0, NULL, 0,
    291 		       CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL);
    292 	/*
    293 	 * I don't think CPU_BOOTED_KERNEL is available to the kernel.
    294 	 */
    295 }
    296 
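         /*
          * Nothing to do here: the statistics clock is driven by the
          * fixed-rate interval timer.
          */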
    297 void
    298 setstatclockrate(int hzrate)
    299 {
    300 }
    301 
    302 void
    303 consinit(void)
    304 {
    305 	extern vaddr_t iospace;
    306 
    307 	/*
    308 	 * Init I/O memory extent map. Must be done before cninit()
    309 	 * is called; we may want to use iospace in the console routines.
    310 	 *
    311 	 * NOTE: We need to reserve the first vax-page of iospace
    312 	 * for the console routines.
    313 	 */
    314 	KASSERT(iospace != 0);
    315 	iomap_ex = extent_create("iomap", iospace + VAX_NBPG,
    316 	    iospace + ((IOSPSZ * VAX_NBPG) - 1),
    317 	    (void *) iomap_ex_storage, sizeof(iomap_ex_storage),
    318 	    EX_NOCOALESCE|EX_NOWAIT);
    319 #ifdef DEBUG
    320 	iospace_inited = 1;
    321 #endif
    322 	config_init();
    323 	cninit();
    324 #if NKSYMS || defined(DDB) || defined(MODULAR)
    325 	if (symtab_start != NULL && symtab_nsyms != 0 && symtab_end != NULL) {
    326 		ksyms_addsyms_elf(symtab_nsyms, symtab_start, symtab_end);
    327 	}
    328 #endif
    329 #ifdef DEBUG
    330 	if (sizeof(struct pcb) > REDZONEADDR)
    331 		panic("struct pcb inside red zone");
    332 #endif
    333 }
    334 
    335 int	waittime = -1;
    336 static	volatile int showto; /* Must be volatile to survive MM on -> MM off */
    337 
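         /*
          * Machine-dependent reboot/halt: sync file systems, run shutdown
          * hooks, optionally dump memory, then hand control to the
          * CPU-specific reboot hook or fall back to the console program.
          */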
    338 void
    339 cpu_reboot(int howto, char *b)
    340 {
    341 	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
    342 		waittime = 0;
    343 		vfs_shutdown();
    344 	}
    345 	splhigh();		/* extreme priority */
    346 	if (howto & RB_HALT) {
    347 		doshutdownhooks();
    348 		pmf_system_shutdown(boothowto);
    349 		if (dep_call->cpu_halt)
    350 			(*dep_call->cpu_halt) ();
    351 		printf("halting (in tight loop); hit\n\t^P\n\tHALT\n\n");
    352 		for (;;)
    353 			;
    354 	} else {
    355 		showto = howto;
    356 #ifdef notyet
    357 		/*
    358 		 * If we are provided with a bootstring, parse it and send
    359 		 * it to the boot program.
    360 		 */
    361 		if (b)
    362 			while (*b) {
    363 				showto |= (*b == 'a' ? RB_ASKBOOT : (*b == 'd' ?
    364 				    RB_DEBUG : (*b == 's' ? RB_SINGLE : 0)));
    365 				b++;
    366 			}
    367 #endif
     368 		/*
     369 		 * Now it's time to:
     370 		 *  0. Save some registers that are needed in the new world.
     371 		 *  1. Change the stack to somewhere that will survive MM off
     372 		 *     (the RPB page is a good place to save things).
     373 		 *  2. Actually turn MM off.
     374 		 *  3. Dump memory to disk, if asked.
     375 		 *  4. Reboot as asked.
     376 		 * The RPB page is _always_ the first page in memory, so we
     377 		 * can rely on that.
     378 		 */
    379 #ifdef notyet
    380 		__asm(	"\tmovl	%sp, (0x80000200)\n"
    381 			"\tmovl	0x80000200, %sp\n"
    382 			"\tmfpr	$0x10, -(%sp)\n"	/* PR_PCBB */
    383 			"\tmfpr	$0x11, -(%sp)\n"	/* PR_SCBB */
    384 			"\tmfpr	$0xc, -(%sp)\n"		/* PR_SBR */
    385 			"\tmfpr	$0xd, -(%sp)\n"		/* PR_SLR */
    386 			"\tmtpr	$0, $0x38\n"		/* PR_MAPEN */
    387 		);
    388 #endif
    389 
    390 		if (showto & RB_DUMP)
    391 			dumpsys();
    392 		if (dep_call->cpu_reboot)
    393 			(*dep_call->cpu_reboot)(showto);
    394 
     395 		/* CPUs that don't handle reboots themselves get the standard reboot. */
    396 		while ((mfpr(PR_TXCS) & GC_RDY) == 0)
    397 			;
    398 
    399 		mtpr(GC_CONS|GC_BTFL, PR_TXDB);
    400 	}
    401 	__asm("movl %0, %%r5":: "g" (showto)); /* How to boot */
    402 	__asm("movl %0, %%r11":: "r"(showto)); /* ??? */
    403 	__asm("halt");
     404 	panic("Halt sket sej");		/* Swedish: "the halt went wrong" */
    405 }
    406 
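         /*
          * Write a crash dump to the configured dump device and report
          * how the driver's dump routine fared.
          */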
    407 void
    408 dumpsys(void)
    409 {
    410 	const struct bdevsw *bdev;
    411 
    412 	if (dumpdev == NODEV)
    413 		return;
    414 	bdev = bdevsw_lookup(dumpdev);
    415 	if (bdev == NULL)
    416 		return;
     417 	/*
     418 	 * For dumps during autoconfiguration: if the dump device has
     419 	 * already been configured, compute the dump layout now.
     420 	 */
    421 	if (dumpsize == 0)
    422 		cpu_dumpconf();
    423 	if (dumplo <= 0) {
    424 		printf("\ndump to dev %u,%u not possible\n",
    425 		    major(dumpdev), minor(dumpdev));
    426 		return;
    427 	}
    428 	printf("\ndumping to dev %u,%u offset %ld\n",
    429 	    major(dumpdev), minor(dumpdev), dumplo);
    430 	printf("dump ");
    431 	switch ((*bdev->d_dump) (dumpdev, 0, 0, 0)) {
    432 
    433 	case ENXIO:
    434 		printf("device bad\n");
    435 		break;
    436 
    437 	case EFAULT:
    438 		printf("device not ready\n");
    439 		break;
    440 
    441 	case EINVAL:
    442 		printf("area improper\n");
    443 		break;
    444 
    445 	case EIO:
    446 		printf("i/o error\n");
    447 		break;
    448 
    449 	default:
    450 		printf("succeeded\n");
    451 		break;
    452 	}
    453 }
    454 
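         /*
          * ptrace(2) support: copy the user register set out of the
          * saved trap frame.
          */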
    455 int
    456 process_read_regs(struct lwp *l, struct reg *regs)
    457 {
    458 	struct trapframe * const tf = l->l_md.md_utf;
    459 
    460 	memcpy(&regs->r0, &tf->tf_r0, 12 * sizeof(int));
    461 	regs->ap = tf->tf_ap;
    462 	regs->fp = tf->tf_fp;
    463 	regs->sp = tf->tf_sp;
    464 	regs->pc = tf->tf_pc;
    465 	regs->psl = tf->tf_psl;
    466 	return 0;
    467 }
    468 
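         /*
          * ptrace(2) support: install a new user register set, forcing a
          * sane PSL (user mode, no elevated IPL).
          */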
    469 int
    470 process_write_regs(struct lwp *l, const struct reg *regs)
    471 {
    472 	struct trapframe * const tf = l->l_md.md_utf;
    473 
    474 	memcpy(&tf->tf_r0, &regs->r0, 12 * sizeof(int));
    475 	tf->tf_ap = regs->ap;
    476 	tf->tf_fp = regs->fp;
    477 	tf->tf_sp = regs->sp;
    478 	tf->tf_pc = regs->pc;
    479 	tf->tf_psl = (regs->psl|PSL_U|PSL_PREVU) &
    480 	    ~(PSL_MBZ|PSL_IS|PSL_IPL1F|PSL_CM); /* Allow compat mode? */
    481 	return 0;
    482 }
    483 
    484 int
    485 process_set_pc(struct lwp *l, void *addr)
    486 {
    487 	l->l_md.md_utf->tf_pc = (uintptr_t) addr;
    488 
    489 	return (0);
    490 }
    491 
    492 int
    493 process_sstep(struct lwp *l, int sstep)
    494 {
    495 	struct trapframe * const tf = l->l_md.md_utf;
    496 
    497 	if (sstep)
    498 		tf->tf_psl |= PSL_T;
    499 	else
    500 		tf->tf_psl &= ~PSL_T;
    501 
    502 	return (0);
    503 }
    504 
    505 #undef PHYSMEMDEBUG
    506 /*
    507  * Allocates a virtual range suitable for mapping in physical memory.
    508  * This differs from the bus_space routines in that it allocates on
    509  * physical page sizes instead of logical sizes. This implementation
    510  * uses resource maps when allocating space, which is allocated from
    511  * the IOMAP submap. The implementation is similar to the uba resource
    512  * map handling. Size is given in pages.
     513  * If the requested size is a whole logical page or more, space is
     514  * allocated from the kernel map instead.
     515  *
     516  * It is known that the first page in the iospace area is unused; it may
     517  * be used by console device drivers (before the map system is inited).
    518  */
    519 vaddr_t
    520 vax_map_physmem(paddr_t phys, size_t size)
    521 {
    522 	vaddr_t addr;
    523 	int error;
    524 	static int warned = 0;
    525 
    526 #ifdef DEBUG
    527 	if (!iospace_inited)
    528 		panic("vax_map_physmem: called before rminit()?!?");
    529 #endif
    530 	if (size >= LTOHPN) {
    531 		addr = uvm_km_alloc(kernel_map, size * VAX_NBPG, 0,
    532 		    UVM_KMF_VAONLY);
    533 		if (addr == 0)
    534 			panic("vax_map_physmem: kernel map full");
    535 	} else {
    536 		error = extent_alloc(iomap_ex, size * VAX_NBPG, VAX_NBPG, 0,
    537 		    EX_FAST | EX_NOWAIT |
    538 		    (iomap_ex_malloc_safe ? EX_MALLOCOK : 0), &addr);
    539 		if (error) {
    540 			if (warned++ == 0) /* Warn only once */
     541 				printf("vax_map_physmem: iomap too small\n");
    542 			return 0;
    543 		}
    544 	}
    545 	ioaccess(addr, phys, size);
    546 #ifdef PHYSMEMDEBUG
    547 	printf("vax_map_physmem: alloc'ed %d pages for paddr %lx, at %lx\n",
    548 	    size, phys, addr);
    549 #endif
    550 	return addr | (phys & VAX_PGOFSET);
    551 }
    552 
    553 /*
     554  * Unmaps the previously mapped (addr, size) pair.
    555  */
    556 void
    557 vax_unmap_physmem(vaddr_t addr, size_t size)
    558 {
    559 #ifdef PHYSMEMDEBUG
    560 	printf("vax_unmap_physmem: unmapping %zu pages at addr %lx\n",
    561 	    size, addr);
    562 #endif
    563 	addr &= ~VAX_PGOFSET;
    564 	iounaccess(addr, size);
    565 	if (size >= LTOHPN)
    566 		uvm_km_free(kernel_map, addr, size * VAX_NBPG, UVM_KMF_VAONLY);
    567 	else if (extent_free(iomap_ex, addr, size * VAX_NBPG,
    568 			     EX_NOWAIT |
    569 			     (iomap_ex_malloc_safe ? EX_MALLOCOK : 0)))
    570 		printf("vax_unmap_physmem: addr 0x%lx size %zu vpg: "
    571 		    "can't free region\n", addr, size);
    572 }
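         /*
          * Minimal, hypothetical usage sketch from a driver attach routine,
          * assuming "pa" holds the physical address of a device register
          * block that fits in one VAX page:
          *
          *	vaddr_t va = vax_map_physmem(pa, 1);
          *	if (va == 0)
          *		return;			(iomap exhausted)
          *	... access the registers through va ...
          *	vax_unmap_physmem(va, 1);
          */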
    573 
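         /*
          * Pack the IPL for each software interrupt level into 5-bit
          * fields indexed by SOFTINT_*, so softint_init_md() can extract
          * it with a shift and a mask.
          */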
    574 #define	SOFTINT_IPLS	((IPL_SOFTCLOCK << (SOFTINT_CLOCK * 5))		\
    575 			 | (IPL_SOFTBIO << (SOFTINT_BIO * 5))		\
    576 			 | (IPL_SOFTNET << (SOFTINT_NET * 5))		\
    577 			 | (IPL_SOFTSERIAL << (SOFTINT_SERIAL * 5)))
    578 
    579 void
    580 softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
    581 {
    582 	const int ipl = (SOFTINT_IPLS >> (5 * level)) & 0x1F;
    583 	l->l_cpu->ci_softlwps[level] = l;
    584 
    585 	*machdep = ipl;
    586 }
    587 
    588 #include <dev/bi/bivar.h>
    589 /*
    590  * This should be somewhere else.
    591  */
    592 void
    593 bi_intr_establish(void *icookie, int vec, void (*func)(void *), void *arg,
    594 	struct evcnt *ev)
    595 {
    596 	scb_vecalloc(vec, func, arg, SCB_ISTACK, ev);
    597 }
    598 
    599 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
    600 /*
    601  * Called from locore.
    602  */
    603 void	krnlock(void);
    604 void	krnunlock(void);
    605 
    606 void
    607 krnlock(void)
    608 {
    609 	KERNEL_LOCK(1, NULL);
    610 }
    611 
    612 void
    613 krnunlock(void)
    614 {
    615 	KERNEL_UNLOCK_ONE(NULL);
    616 }
    617 #endif
    618 
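         /*
          * Fill in the CPU part of an mcontext (general registers and PSL)
          * from the lwp's trap frame, for getcontext(2) and signal delivery.
          */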
    619 void
    620 cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
    621 {
    622 	const struct trapframe * const tf = l->l_md.md_utf;
    623 	__greg_t *gr = mcp->__gregs;
    624 
    625 	gr[_REG_R0] = tf->tf_r0;
    626 	gr[_REG_R1] = tf->tf_r1;
    627 	gr[_REG_R2] = tf->tf_r2;
    628 	gr[_REG_R3] = tf->tf_r3;
    629 	gr[_REG_R4] = tf->tf_r4;
    630 	gr[_REG_R5] = tf->tf_r5;
    631 	gr[_REG_R6] = tf->tf_r6;
    632 	gr[_REG_R7] = tf->tf_r7;
    633 	gr[_REG_R8] = tf->tf_r8;
    634 	gr[_REG_R9] = tf->tf_r9;
    635 	gr[_REG_R10] = tf->tf_r10;
    636 	gr[_REG_R11] = tf->tf_r11;
    637 	gr[_REG_AP] = tf->tf_ap;
    638 	gr[_REG_FP] = tf->tf_fp;
    639 	gr[_REG_SP] = tf->tf_sp;
    640 	gr[_REG_PC] = tf->tf_pc;
    641 	gr[_REG_PSL] = tf->tf_psl;
    642 	*flags |= _UC_CPU;
    643 }
    644 
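         /*
          * Reject a PSL that would raise IPL, select the interrupt stack,
          * leave user mode, or enable PDP-11 compatibility mode.
          */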
    645 int
    646 cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
    647 {
    648 	const __greg_t *gr = mcp->__gregs;
    649 
    650 	if ((gr[_REG_PSL] & (PSL_IPL | PSL_IS)) ||
    651 	    ((gr[_REG_PSL] & (PSL_U | PSL_PREVU)) != (PSL_U | PSL_PREVU)) ||
    652 	    (gr[_REG_PSL] & PSL_CM))
    653 		return EINVAL;
    654 
    655 	return 0;
    656 }
    657 
    658 int
    659 cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
    660 {
    661 	struct trapframe * const tf = l->l_md.md_utf;
    662 	const __greg_t *gr = mcp->__gregs;
    663 	int error;
    664 
    665 	if ((flags & _UC_CPU) == 0)
    666 		return 0;
    667 
    668 	error = cpu_mcontext_validate(l, mcp);
    669 	if (error)
    670 		return error;
    671 
    672 	tf->tf_r0 = gr[_REG_R0];
    673 	tf->tf_r1 = gr[_REG_R1];
    674 	tf->tf_r2 = gr[_REG_R2];
    675 	tf->tf_r3 = gr[_REG_R3];
    676 	tf->tf_r4 = gr[_REG_R4];
    677 	tf->tf_r5 = gr[_REG_R5];
    678 	tf->tf_r6 = gr[_REG_R6];
    679 	tf->tf_r7 = gr[_REG_R7];
    680 	tf->tf_r8 = gr[_REG_R8];
    681 	tf->tf_r9 = gr[_REG_R9];
    682 	tf->tf_r10 = gr[_REG_R10];
    683 	tf->tf_r11 = gr[_REG_R11];
    684 	tf->tf_ap = gr[_REG_AP];
    685 	tf->tf_fp = gr[_REG_FP];
    686 	tf->tf_sp = gr[_REG_SP];
    687 	tf->tf_pc = gr[_REG_PC];
    688 	tf->tf_psl = gr[_REG_PSL];
    689 
    690 	if (flags & _UC_TLSBASE) {
    691 		void *tlsbase;
    692 
    693 		error = copyin((void *)tf->tf_sp, &tlsbase, sizeof(tlsbase));
    694 		if (error) {
    695 			return error;
    696 		}
    697 		lwp_setprivate(l, tlsbase);
    698 		tf->tf_sp += sizeof(tlsbase);
    699 	}
    700 
    701 	mutex_enter(l->l_proc->p_lock);
    702 	if (flags & _UC_SETSTACK)
    703 		l->l_sigstk.ss_flags |= SS_ONSTACK;
    704 	if (flags & _UC_CLRSTACK)
    705 		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
    706 	mutex_exit(l->l_proc->p_lock);
    707 
    708 	return 0;
    709 }
    710 
    711 /*
    712  * Generic routines for machines with "console program mailbox".
    713  */
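         /*
          * In both routines below: if the operator has selected a
          * non-default halt action, let the console's own setting stand;
          * otherwise request the action we want (halt or reboot) before
          * executing the HALT instruction.
          */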
    714 void
    715 generic_halt(void)
    716 {
    717 	if (cpmbx == NULL)  /* Too late to complain here, but avoid panic */
    718 		__asm("halt");
    719 
    720 	if (cpmbx->user_halt != UHALT_DEFAULT) {
    721 		if (cpmbx->mbox_halt != 0)
    722 			cpmbx->mbox_halt = 0;   /* let console override */
    723 	} else if (cpmbx->mbox_halt != MHALT_HALT)
    724 		cpmbx->mbox_halt = MHALT_HALT;  /* the os decides */
    725 
    726 	__asm("halt");
    727 }
    728 
    729 void
    730 generic_reboot(int arg)
    731 {
    732 	if (cpmbx == NULL)  /* Too late to complain here, but avoid panic */
    733 		__asm("halt");
    734 
    735 	if (cpmbx->user_halt != UHALT_DEFAULT) {
    736 		if (cpmbx->mbox_halt != 0)
    737 			cpmbx->mbox_halt = 0;
    738 	} else if (cpmbx->mbox_halt != MHALT_REBOOT)
    739 		cpmbx->mbox_halt = MHALT_REBOOT;
    740 
    741 	__asm("halt");
    742 }
    743 
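         /*
          * /dev/mem support: all of physical memory is direct-mapped at
          * KERNBASE, so the translation is a simple offset.
          */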
    744 bool
    745 mm_md_direct_mapped_phys(paddr_t paddr, vaddr_t *vaddr)
    746 {
    747 
    748 	*vaddr = paddr + KERNBASE;
    749 	return true;
    750 }
    751 
    752 int
    753 mm_md_physacc(paddr_t pa, vm_prot_t prot)
    754 {
    755 
    756 	return (pa < avail_end) ? 0 : EFAULT;
    757 }
    758 
    759 int
    760 mm_md_readwrite(dev_t dev, struct uio *uio)
    761 {
    762 
    763 	switch (minor(dev)) {
    764 #if NLEDS
    765 	case DEV_LEDS:
    766 		return leds_uio(uio);
    767 #endif
    768 	default:
    769 		return ENXIO;
    770 	}
    771 }
    772 
    773 /*
    774  * Set max virtual size a process may allocate.
    775  * This could be tuned based on amount of physical memory.
    776  */
    777 void
    778 machdep_init(void)
    779 {
    780 	proc0.p_rlimit[RLIMIT_AS].rlim_cur = MAXDSIZ;
    781 	proc0.p_rlimit[RLIMIT_AS].rlim_max = MAXDSIZ;
    782 }
    783