Home | History | Annotate | Line # | Download | only in vax
      1 /* $NetBSD: machdep.c,v 1.202 2025/11/30 01:31:35 thorpej Exp $	 */
      2 
      3 /*
      4  * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
      5  * All rights reserved.
      6  *
      7  * Changed for the VAX port (and for readability) /IC
      8  *
      9  * This code is derived from software contributed to Berkeley by the Systems
     10  * Programming Group of the University of Utah Computer Science Department.
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  * 3. Neither the name of the University nor the names of its contributors
     21  *    may be used to endorse or promote products derived from this software
     22  *    without specific prior written permission.
     23  *
     24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     34  * SUCH DAMAGE.
     35  *
     36  * from: Utah Hdr: machdep.c 1.63 91/04/24
     37  *
     38  * @(#)machdep.c	7.16 (Berkeley) 6/3/91
     39  */
     40 
     41 /*
     42  * Copyright (c) 2002, Hugh Graham.
     43  * Copyright (c) 1994, 1998 Ludd, University of Lule}, Sweden.
     44  * Copyright (c) 1993 Adam Glass
     45  * Copyright (c) 1988 University of Utah.
     46  *
     47  * Changed for the VAX port (and for readability) /IC
     48  *
     49  * This code is derived from software contributed to Berkeley by the Systems
     50  * Programming Group of the University of Utah Computer Science Department.
     51  *
     52  * Redistribution and use in source and binary forms, with or without
     53  * modification, are permitted provided that the following conditions
     54  * are met:
     55  * 1. Redistributions of source code must retain the above copyright
     56  *    notice, this list of conditions and the following disclaimer.
     57  * 2. Redistributions in binary form must reproduce the above copyright
     58  *    notice, this list of conditions and the following disclaimer in the
     59  *    documentation and/or other materials provided with the distribution.
     60  * 3. All advertising materials mentioning features or use of this software
     61  *    must display the following acknowledgement:
     62  *	This product includes software developed by the University of
     63  *	California, Berkeley and its contributors.
     64  * 4. Neither the name of the University nor the names of its contributors
     65  *    may be used to endorse or promote products derived from this software
     66  *    without specific prior written permission.
     67  *
     68  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     69  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     70  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     71  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     72  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     73  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     74  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     75  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     76  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     77  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     78  * SUCH DAMAGE.
     79  *
     80  * from: Utah Hdr: machdep.c 1.63 91/04/24
     81  *
     82  * @(#)machdep.c	7.16 (Berkeley) 6/3/91
     83  */
     84 
     85 #include <sys/cdefs.h>
     86 __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.202 2025/11/30 01:31:35 thorpej Exp $");
     87 
     88 #include "opt_ddb.h"
     89 #include "opt_compat_netbsd.h"
     90 #include "opt_compat_ultrix.h"
     91 #include "opt_modular.h"
     92 #include "opt_multiprocessor.h"
     93 #include "opt_lockdebug.h"
     94 
     95 #include <sys/param.h>
     96 #include <sys/systm.h>
     97 #include <sys/buf.h>
     98 #include <sys/conf.h>
     99 #include <sys/cpu.h>
    100 #include <sys/device.h>
    101 #include <sys/kernel.h>
    102 #include <sys/ksyms.h>
    103 #include <sys/mount.h>
    104 #include <sys/msgbuf.h>
    105 #include <sys/mbuf.h>
    106 #include <sys/proc.h>
    107 #include <sys/ptrace.h>
    108 #include <sys/reboot.h>
    109 #include <sys/kauth.h>
    110 #include <sys/sysctl.h>
    111 #include <sys/time.h>
    112 #include <sys/vmem.h>
    113 #include <sys/vmem_impl.h>
    114 
    115 #include <dev/cons.h>
    116 #include <dev/mm.h>
    117 
    118 #include <uvm/uvm_extern.h>
    119 
    120 #include <machine/sid.h>
    121 #include <machine/macros.h>
    122 #include <machine/nexus.h>
    123 #include <machine/reg.h>
    124 #include <machine/scb.h>
    125 #include <machine/leds.h>
    126 #include <vax/vax/gencons.h>
    127 
    128 #ifdef DDB
    129 #include <machine/db_machdep.h>
    130 #include <ddb/db_sym.h>
    131 #include <ddb/db_extern.h>
    132 #endif
    133 
    134 #include "leds.h"
    135 #include "smg.h"
    136 #include "ksyms.h"
    137 
    138 #define DEV_LEDS	13	/* minor device 13 is leds */
    139 
    140 extern vaddr_t virtual_avail, virtual_end;
    141 extern paddr_t avail_end;
    142 
    143 /*
    144  * We do these external declarations here, maybe they should be done
    145  * somewhere else...
    146  */
    147 char		machine[] = MACHINE;		/* from <machine/param.h> */
    148 char		machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */
    149 void *		msgbufaddr;
    150 int		*symtab_start;
    151 int		*symtab_end;
    152 int		symtab_nsyms;
    153 struct cpmbx	*cpmbx;		/* Console program mailbox address */
    154 
    155 /*
    156  * Vmem arena to manage I/O register space.  We allocate storage for
    157  * 32 regions in the map.
    158  */
    159 #define	IOMAP_BTAG_COUNT	VMEM_EST_BTCOUNT(1, 32)
    160 static struct vmem iomap_arena_store;
    161 static struct vmem_btag iomap_btag_store[IOMAP_BTAG_COUNT];
    162 static vmem_t *iomap_arena;
    163 
    164 struct vm_map *phys_map = NULL;
    165 
    166 #ifdef DEBUG
    167 int iospace_inited = 0;
    168 #endif
    169 
    170 void
    171 cpu_startup(void)
    172 {
    173 #if VAX46 || VAX48 || VAX49 || VAX53 || VAXANY
    174 	vaddr_t		minaddr, maxaddr;
    175 #endif
    176 	char pbuf[9];
    177 
    178 	/*
    179 	 * Initialize error message buffer.
    180 	 */
    181 	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));
    182 
    183 	/*
    184 	 * Good {morning,afternoon,evening,night}.
    185 	 * Also call CPU init on systems that need that.
    186 	 */
    187 	printf("%s%s", copyright, version);
    188 	printf("%s\n", cpu_getmodel());
    189         if (dep_call->cpu_conf)
    190                 (*dep_call->cpu_conf)();
    191 
    192 	format_bytes(pbuf, sizeof(pbuf), avail_end);
    193 	printf("total memory = %s\n", pbuf);
    194 	panicstr = NULL;
    195 	mtpr(AST_NO, PR_ASTLVL);
    196 	spl0();
    197 
    198 #if VAX46 || VAX48 || VAX49 || VAX53 || VAXANY
    199 	minaddr = 0;
    200 
    201 	/*
    202 	 * Allocate a submap for physio.  This map effectively limits the
    203 	 * number of processes doing physio at any one time.
    204 	 */
    205 	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
    206 				   VM_PHYS_SIZE, 0, false, NULL);
    207 #endif
    208 
    209 	format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false)));
    210 	printf("avail memory = %s\n", pbuf);
    211 
    212 #ifdef DDB
    213 	if (boothowto & RB_KDB)
    214 		Debugger();
    215 #endif
    216 }
    217 
    218 uint32_t dumpmag = 0x8fca0101;
    219 int	dumpsize = 0;
    220 long	dumplo = 0;
    221 
    222 void
    223 cpu_dumpconf(void)
    224 {
    225 	int	nblks;
    226 
    227 	/*
    228 	 * XXX include the final RAM page which is not included in physmem.
    229 	 */
    230 	if (dumpdev == NODEV)
    231 		return;
    232 	nblks = bdev_size(dumpdev);
    233 	if (nblks > 0) {
    234 		if (dumpsize > btoc(dbtob(nblks - dumplo)))
    235 			dumpsize = btoc(dbtob(nblks - dumplo));
    236 		else if (dumplo == 0)
    237 			dumplo = nblks - btodb(ctob(dumpsize));
    238 	}
    239 	/*
    240 	 * Don't dump on the first PAGE_SIZE (why PAGE_SIZE?) in case the dump
    241 	 * device includes a disk label.
    242 	 */
    243 	if (dumplo < btodb(PAGE_SIZE))
    244 		dumplo = btodb(PAGE_SIZE);
    245 
    246 	/*
    247 	 * If we have nothing to dump (XXX implement crash dumps),
    248 	 * make it clear for savecore that there is no dump.
    249 	 */
    250 	if (dumpsize <= 0)
    251 		dumplo = 0;
    252 }
    253 
    254 static int
    255 sysctl_machdep_booted_device(SYSCTLFN_ARGS)
    256 {
    257 	struct sysctlnode node = *rnode;
    258 
    259 	if (booted_device == NULL)
    260 		return (EOPNOTSUPP);
    261 	node.sysctl_data = __UNCONST(device_xname(booted_device));
    262 	node.sysctl_size = strlen(device_xname(booted_device)) + 1;
    263 	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
    264 }
    265 
/*
 * Create the "machdep" sysctl subtree for the VAX port:
 * printfataltraps, console_device and booted_device.
 */
SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	/* Top-level machdep node. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "machdep", NULL,
		       NULL, 0, NULL, 0,
		       CTL_MACHDEP, CTL_EOL);

	/* Non-zero => report fatal user traps on the console. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "printfataltraps", NULL,
		       NULL, 0, &cpu_printfataltraps, 0,
		       CTL_MACHDEP, CPU_PRINTFATALTRAPS, CTL_EOL);
	/* dev_t of the console device (handled by sysctl_consdev). */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "console_device", NULL,
		       sysctl_consdev, 0, NULL, sizeof(dev_t),
		       CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
	/* Name of the device the system was booted from. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "booted_device", NULL,
		       sysctl_machdep_booted_device, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL);
	/*
	 * I don't think CPU_BOOTED_KERNEL is available to the kernel.
	 */
}
    294 
/*
 * Change the statistics clock rate.  Intentionally a no-op here:
 * this port does not drive a separate statistics clock.
 */
void
setstatclockrate(int hzrate)
{
}
    299 
    300 void
    301 consinit(void)
    302 {
    303 	extern vaddr_t iospace;
    304 
    305 	/*
    306 	 * Init I/O memory vmem arena. Must be done before cninit()
    307 	 * is called; we may want to use iospace in the console routines.
    308 	 *
    309 	 * NOTE: We need to reserve the first vax-page of iospace
    310 	 * for the console routines.
    311 	 */
    312 	KASSERT(iospace != 0);
    313 	iomap_arena = vmem_init(&iomap_arena_store,
    314 				"iomap",		/* name */
    315 				0,			/* addr */
    316 				0,			/* size */
    317 				VAX_NBPG,		/* quantum */
    318 				NULL,			/* importfn */
    319 				NULL,			/* releasefn */
    320 				NULL,			/* source */
    321 				0,			/* qcache_max */
    322 				VM_NOSLEEP | VM_PRIVTAGS,
    323 				IPL_NONE);
    324 	KASSERT(iomap_arena != NULL);
    325 
    326 	vmem_add_bts(iomap_arena, iomap_btag_store, IOMAP_BTAG_COUNT);
    327 	int error = vmem_add(iomap_arena, iospace, IOSPSZ * VAX_NBPG,
    328 	    VM_NOSLEEP);
    329 	KASSERT(error == 0);
    330 #ifdef DEBUG
    331 	iospace_inited = 1;
    332 #endif
    333 	config_init();
    334 	cninit();
    335 #if NKSYMS || defined(DDB) || defined(MODULAR)
    336 	if (symtab_start != NULL && symtab_nsyms != 0 && symtab_end != NULL) {
    337 		ksyms_addsyms_elf(symtab_nsyms, symtab_start, symtab_end);
    338 	}
    339 #endif
    340 #ifdef DEBUG
    341 	if (sizeof(struct pcb) > REDZONEADDR)
    342 		panic("struct pcb inside red zone");
    343 #endif
    344 }
    345 
    346 int	waittime = -1;
    347 static	volatile int showto; /* Must be volatile to survive MM on -> MM off */
    348 
/*
 * cpu_reboot:
 *
 *	Halt or reboot the machine.  "howto" carries the RB_* flags;
 *	"b" is an optional boot string (parsing is currently under
 *	"notyet").  Does not return.
 */
void
cpu_reboot(int howto, char *b)
{
	/* Sync file systems unless told not to, and only once. */
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		waittime = 0;
		vfs_shutdown();
	}
	splhigh();		/* extreme priority */
	if (howto & RB_HALT) {
		doshutdownhooks();
		pmf_system_shutdown(boothowto);
		/* Prefer the CPU-specific halt routine when one exists. */
		if (dep_call->cpu_halt)
			(*dep_call->cpu_halt) ();
		printf("halting (in tight loop); hit\n\t^P\n\tHALT\n\n");
		for (;;)
			;
	} else {
		/* showto is volatile so the value survives MM off. */
		showto = howto;
#ifdef notyet
		/*
		 * If we are provided with a bootstring, parse it and send
		 * it to the boot program.
		 */
		if (b)
			while (*b) {
				showto |= (*b == 'a' ? RB_ASKBOOT : (*b == 'd' ?
				    RB_DEBUG : (*b == 's' ? RB_SINGLE : 0)));
				b++;
			}
#endif
		/*
		 * Now it's time to:
		 *  0. Save some registers that are needed in new world.
		 *  1. Change stack to somewhere that will survive MM off.
		 * (RPB page is good page to save things in).
		 *  2. Actually turn MM off.
		 *  3. Dump away memory to disk, if asked.
		 *  4. Reboot as asked.
		 * The RPB page is _always_ first page in memory, we can
		 * rely on that.
		 */
#ifdef notyet
		__asm(	"\tmovl	%sp, (0x80000200)\n"
			"\tmovl	0x80000200, %sp\n"
			"\tmfpr	$0x10, -(%sp)\n"	/* PR_PCBB */
			"\tmfpr	$0x11, -(%sp)\n"	/* PR_SCBB */
			"\tmfpr	$0xc, -(%sp)\n"		/* PR_SBR */
			"\tmfpr	$0xd, -(%sp)\n"		/* PR_SLR */
			"\tmtpr	$0, $0x38\n"		/* PR_MAPEN */
		);
#endif

		if (showto & RB_DUMP)
			dumpsys();
		/* CPU-specific reboot, when the model provides one. */
		if (dep_call->cpu_reboot)
			(*dep_call->cpu_reboot)(showto);

		/* cpus that don't handle reboots get the standard reboot. */
		while ((mfpr(PR_TXCS) & GC_RDY) == 0)
			;

		mtpr(GC_CONS|GC_BTFL, PR_TXDB);
	}
	/* Pass the boot flags to the boot program in r5/r11 and stop. */
	__asm("movl %0, %%r5":: "g" (showto)); /* How to boot */
	__asm("movl %0, %%r11":: "r"(showto)); /* ??? */
	__asm("halt");
	panic("Halt sket sej");
}
    417 
    418 void
    419 dumpsys(void)
    420 {
    421 	const struct bdevsw *bdev;
    422 
    423 	if (dumpdev == NODEV)
    424 		return;
    425 	bdev = bdevsw_lookup(dumpdev);
    426 	if (bdev == NULL)
    427 		return;
    428 	/*
    429 	 * For dumps during autoconfiguration, if dump device has already
    430 	 * configured...
    431 	 */
    432 	if (dumpsize == 0)
    433 		cpu_dumpconf();
    434 	if (dumplo <= 0) {
    435 		printf("\ndump to dev %u,%u not possible\n",
    436 		    major(dumpdev), minor(dumpdev));
    437 		return;
    438 	}
    439 	printf("\ndumping to dev %u,%u offset %ld\n",
    440 	    major(dumpdev), minor(dumpdev), dumplo);
    441 	printf("dump ");
    442 	switch ((*bdev->d_dump) (dumpdev, 0, 0, 0)) {
    443 
    444 	case ENXIO:
    445 		printf("device bad\n");
    446 		break;
    447 
    448 	case EFAULT:
    449 		printf("device not ready\n");
    450 		break;
    451 
    452 	case EINVAL:
    453 		printf("area improper\n");
    454 		break;
    455 
    456 	case EIO:
    457 		printf("i/o error\n");
    458 		break;
    459 
    460 	default:
    461 		printf("succeeded\n");
    462 		break;
    463 	}
    464 }
    465 
    466 int
    467 process_read_regs(struct lwp *l, struct reg *regs)
    468 {
    469 	struct trapframe * const tf = l->l_md.md_utf;
    470 
    471 	memcpy(&regs->r0, &tf->tf_r0, 12 * sizeof(int));
    472 	regs->ap = tf->tf_ap;
    473 	regs->fp = tf->tf_fp;
    474 	regs->sp = tf->tf_sp;
    475 	regs->pc = tf->tf_pc;
    476 	regs->psl = tf->tf_psl;
    477 	return 0;
    478 }
    479 
    480 int
    481 process_write_regs(struct lwp *l, const struct reg *regs)
    482 {
    483 	struct trapframe * const tf = l->l_md.md_utf;
    484 
    485 	memcpy(&tf->tf_r0, &regs->r0, 12 * sizeof(int));
    486 	tf->tf_ap = regs->ap;
    487 	tf->tf_fp = regs->fp;
    488 	tf->tf_sp = regs->sp;
    489 	tf->tf_pc = regs->pc;
    490 	tf->tf_psl = (regs->psl|PSL_U|PSL_PREVU) &
    491 	    ~(PSL_MBZ|PSL_IS|PSL_IPL1F|PSL_CM); /* Allow compat mode? */
    492 	return 0;
    493 }
    494 
    495 int
    496 process_set_pc(struct lwp *l, void *addr)
    497 {
    498 	l->l_md.md_utf->tf_pc = (uintptr_t) addr;
    499 
    500 	return (0);
    501 }
    502 
    503 int
    504 process_sstep(struct lwp *l, int sstep)
    505 {
    506 	struct trapframe * const tf = l->l_md.md_utf;
    507 
    508 	if (sstep)
    509 		tf->tf_psl |= PSL_T;
    510 	else
    511 		tf->tf_psl &= ~PSL_T;
    512 
    513 	return (0);
    514 }
    515 
    516 #undef PHYSMEMDEBUG
    517 /*
    518  * Allocates a virtual range suitable for mapping in physical memory.
    519  * This differs from the bus_space routines in that it allocates on
    520  * physical page sizes instead of logical sizes. This implementation
    521  * uses resource maps when allocating space, which is allocated from
    522  * the IOMAP submap. The implementation is similar to the uba resource
    523  * map handling. Size is given in pages.
    524  * If the page requested is bigger than a logical page, space is
    525  * allocated from the kernel map instead.
    526  *
    527  * It is known that the first page in the iospace area is unused; it may
    528  * be use by console device drivers (before the map system is inited).
    529  */
    530 vaddr_t
    531 vax_map_physmem(paddr_t phys, size_t size)
    532 {
    533 	vmem_addr_t addr;
    534 	int error;
    535 	static int warned = 0;
    536 
    537 	KDASSERT(iospace_inited);
    538 	if (size >= LTOHPN) {
    539 		addr = uvm_km_alloc(kernel_map, size * VAX_NBPG, 0,
    540 		    UVM_KMF_VAONLY);
    541 		if (addr == 0)
    542 			panic("vax_map_physmem: kernel map full");
    543 	} else {
    544 		error = vmem_alloc(iomap_arena, size * VAX_NBPG,
    545 		    VM_BESTFIT | VM_NOSLEEP, &addr);
    546 		if (error) {
    547 			if (warned++ == 0) /* Warn only once */
    548 				printf("vax_map_physmem: iomap too small");
    549 			return 0;
    550 		}
    551 	}
    552 	ioaccess(addr, phys, size);
    553 #ifdef PHYSMEMDEBUG
    554 	printf("vax_map_physmem: alloc'ed %d pages for paddr %lx, at %lx\n",
    555 	    size, phys, addr);
    556 #endif
    557 	return addr | (phys & VAX_PGOFSET);
    558 }
    559 
    560 /*
    561  * Unmaps the previous mapped (addr, size) pair.
    562  */
    563 void
    564 vax_unmap_physmem(vaddr_t addr, size_t size)
    565 {
    566 #ifdef PHYSMEMDEBUG
    567 	printf("vax_unmap_physmem: unmapping %zu pages at addr %lx\n",
    568 	    size, addr);
    569 #endif
    570 	addr &= ~VAX_PGOFSET;
    571 	iounaccess(addr, size);
    572 	if (size >= LTOHPN)
    573 		uvm_km_free(kernel_map, addr, size * VAX_NBPG, UVM_KMF_VAONLY);
    574 	else
    575 		vmem_free(iomap_arena, addr, size * VAX_NBPG);
    576 }
    577 
    578 #define	SOFTINT_IPLS	((IPL_SOFTCLOCK << (SOFTINT_CLOCK * 5))		\
    579 			 | (IPL_SOFTBIO << (SOFTINT_BIO * 5))		\
    580 			 | (IPL_SOFTNET << (SOFTINT_NET * 5))		\
    581 			 | (IPL_SOFTSERIAL << (SOFTINT_SERIAL * 5)))
    582 
    583 void
    584 softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
    585 {
    586 	const int ipl = (SOFTINT_IPLS >> (5 * level)) & 0x1F;
    587 	l->l_cpu->ci_softlwps[level] = l;
    588 
    589 	*machdep = ipl;
    590 }
    591 
    592 #include <dev/bi/bivar.h>
    593 /*
    594  * This should be somewhere else.
    595  */
/*
 * Establish a BI bus interrupt: wire "func(arg)" into SCB vector
 * "vec", running on the interrupt stack.  "icookie" is unused here.
 */
void
bi_intr_establish(void *icookie, int vec, void (*func)(void *), void *arg,
	struct evcnt *ev)
{
	scb_vecalloc(vec, func, arg, SCB_ISTACK, ev);
}
    602 
    603 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
    604 /*
    605  * Called from locore.
    606  */
    607 void	krnlock(void);
    608 void	krnunlock(void);
    609 
/* Take the big kernel lock on behalf of assembly code in locore. */
void
krnlock(void)
{
	KERNEL_LOCK(1, NULL);
}
    615 
/* Release one reference on the big kernel lock (locore counterpart). */
void
krnunlock(void)
{
	KERNEL_UNLOCK_ONE(NULL);
}
    621 #endif
    622 
/*
 * cpu_getmcontext:
 *
 *	Fill in the machine-dependent part of a ucontext from the
 *	lwp's user trapframe.  Only CPU registers are provided here;
 *	_UC_CPU is or'ed into *flags to say so.
 */
void
cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
{
	const struct trapframe * const tf = l->l_md.md_utf;
	__greg_t *gr = mcp->__gregs;

	gr[_REG_R0] = tf->tf_r0;
	gr[_REG_R1] = tf->tf_r1;
	gr[_REG_R2] = tf->tf_r2;
	gr[_REG_R3] = tf->tf_r3;
	gr[_REG_R4] = tf->tf_r4;
	gr[_REG_R5] = tf->tf_r5;
	gr[_REG_R6] = tf->tf_r6;
	gr[_REG_R7] = tf->tf_r7;
	gr[_REG_R8] = tf->tf_r8;
	gr[_REG_R9] = tf->tf_r9;
	gr[_REG_R10] = tf->tf_r10;
	gr[_REG_R11] = tf->tf_r11;
	gr[_REG_AP] = tf->tf_ap;
	gr[_REG_FP] = tf->tf_fp;
	gr[_REG_SP] = tf->tf_sp;
	gr[_REG_PC] = tf->tf_pc;
	gr[_REG_PSL] = tf->tf_psl;
	*flags |= _UC_CPU;
}
    648 
    649 int
    650 cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
    651 {
    652 	const __greg_t *gr = mcp->__gregs;
    653 
    654 	if ((gr[_REG_PSL] & (PSL_IPL | PSL_IS)) ||
    655 	    ((gr[_REG_PSL] & (PSL_U | PSL_PREVU)) != (PSL_U | PSL_PREVU)) ||
    656 	    (gr[_REG_PSL] & PSL_CM))
    657 		return EINVAL;
    658 
    659 	return 0;
    660 }
    661 
/*
 * cpu_setmcontext:
 *
 *	Install a machine context into the lwp's user trapframe.
 *	The PSL is validated first so user code cannot escalate its
 *	privilege.  Returns 0, or an error from validation/copyin.
 */
int
cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
{
	struct trapframe * const tf = l->l_md.md_utf;
	const __greg_t *gr = mcp->__gregs;
	int error;

	if ((flags & _UC_CPU) == 0)
		return 0;

	/* Refuse PSL values that would raise privilege. */
	error = cpu_mcontext_validate(l, mcp);
	if (error)
		return error;

	tf->tf_r0 = gr[_REG_R0];
	tf->tf_r1 = gr[_REG_R1];
	tf->tf_r2 = gr[_REG_R2];
	tf->tf_r3 = gr[_REG_R3];
	tf->tf_r4 = gr[_REG_R4];
	tf->tf_r5 = gr[_REG_R5];
	tf->tf_r6 = gr[_REG_R6];
	tf->tf_r7 = gr[_REG_R7];
	tf->tf_r8 = gr[_REG_R8];
	tf->tf_r9 = gr[_REG_R9];
	tf->tf_r10 = gr[_REG_R10];
	tf->tf_r11 = gr[_REG_R11];
	tf->tf_ap = gr[_REG_AP];
	tf->tf_fp = gr[_REG_FP];
	tf->tf_sp = gr[_REG_SP];
	tf->tf_pc = gr[_REG_PC];
	tf->tf_psl = gr[_REG_PSL];

	if (flags & _UC_TLSBASE) {
		void *tlsbase;

		/* The new TLS base is passed on the user stack; pop it. */
		error = copyin((void *)tf->tf_sp, &tlsbase, sizeof(tlsbase));
		if (error) {
			return error;
		}
		lwp_setprivate(l, tlsbase);
		tf->tf_sp += sizeof(tlsbase);
	}

	/* Signal-stack flags are protected by the proc lock. */
	mutex_enter(l->l_proc->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(l->l_proc->p_lock);

	return 0;
}
    714 
    715 /*
    716  * Generic routines for machines with "console program mailbox".
    717  */
/*
 * Halt via the console program mailbox: arrange for the console to
 * stay halted (unless the user override says otherwise), then HALT.
 *
 * NOTE(review): if execution were to continue past the first HALT
 * below (console "continue"), cpmbx would still be NULL and the
 * dereference that follows would fault — presumably the console
 * never continues here; confirm.
 */
void
generic_halt(void)
{
	if (cpmbx == NULL)  /* Too late to complain here, but avoid panic */
		__asm("halt");

	if (cpmbx->user_halt != UHALT_DEFAULT) {
		if (cpmbx->mbox_halt != 0)
			cpmbx->mbox_halt = 0;   /* let console override */
	} else if (cpmbx->mbox_halt != MHALT_HALT)
		cpmbx->mbox_halt = MHALT_HALT;  /* the os decides */

	__asm("halt");
}
    732 
/*
 * Reboot via the console program mailbox: ask the console to reboot
 * on halt (unless the user override says otherwise), then HALT.
 * "arg" (the howto flags) is unused by this generic version.
 *
 * NOTE(review): same caveat as generic_halt() — if execution resumed
 * after the first HALT, cpmbx would be dereferenced while NULL.
 */
void
generic_reboot(int arg)
{
	if (cpmbx == NULL)  /* Too late to complain here, but avoid panic */
		__asm("halt");

	if (cpmbx->user_halt != UHALT_DEFAULT) {
		if (cpmbx->mbox_halt != 0)
			cpmbx->mbox_halt = 0;
	} else if (cpmbx->mbox_halt != MHALT_REBOOT)
		cpmbx->mbox_halt = MHALT_REBOOT;

	__asm("halt");
}
    747 
    748 bool
    749 mm_md_direct_mapped_phys(paddr_t paddr, vaddr_t *vaddr)
    750 {
    751 
    752 	*vaddr = paddr + KERNBASE;
    753 	return true;
    754 }
    755 
    756 int
    757 mm_md_physacc(paddr_t pa, vm_prot_t prot)
    758 {
    759 
    760 	return (pa < avail_end) ? 0 : EFAULT;
    761 }
    762 
    763 int
    764 mm_md_readwrite(dev_t dev, struct uio *uio)
    765 {
    766 
    767 	switch (minor(dev)) {
    768 #if NLEDS
    769 	case DEV_LEDS:
    770 		return leds_uio(uio);
    771 #endif
    772 	default:
    773 		return ENXIO;
    774 	}
    775 }
    776 
    777 /*
    778  * Set max virtual size a process may allocate.
    779  * This could be tuned based on amount of physical memory.
    780  */
    781 void
    782 machdep_init(void)
    783 {
    784 	proc0.p_rlimit[RLIMIT_AS].rlim_cur = MAXDSIZ;
    785 	proc0.p_rlimit[RLIMIT_AS].rlim_max = MAXDSIZ;
    786 }
    787