/* Home | History | Annotate | Line # | Download | only in powerpc */
      1 /*	$NetBSD: powerpc_machdep.c,v 1.87 2026/01/09 22:54:34 jmcneill Exp $	*/
      2 
      3 /*
      4  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
      5  * Copyright (C) 1995, 1996 TooLs GmbH.
      6  * All rights reserved.
      7  *
      8  * Redistribution and use in source and binary forms, with or without
      9  * modification, are permitted provided that the following conditions
     10  * are met:
     11  * 1. Redistributions of source code must retain the above copyright
     12  *    notice, this list of conditions and the following disclaimer.
     13  * 2. Redistributions in binary form must reproduce the above copyright
     14  *    notice, this list of conditions and the following disclaimer in the
     15  *    documentation and/or other materials provided with the distribution.
     16  * 3. All advertising materials mentioning features or use of this software
     17  *    must display the following acknowledgement:
     18  *	This product includes software developed by TooLs GmbH.
     19  * 4. The name of TooLs GmbH may not be used to endorse or promote products
     20  *    derived from this software without specific prior written permission.
     21  *
     22  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
     23  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     25  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     26  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     27  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
     28  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
     29  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
     30  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
     31  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     32  */
     33 
     34 #include <sys/cdefs.h>
     35 __KERNEL_RCSID(0, "$NetBSD: powerpc_machdep.c,v 1.87 2026/01/09 22:54:34 jmcneill Exp $");
     36 
     37 #ifdef _KERNEL_OPT
     38 #include "opt_altivec.h"
     39 #include "opt_ddb.h"
     40 #include "opt_modular.h"
     41 #include "opt_multiprocessor.h"
     42 #include "opt_ppcarch.h"
     43 #include "opt_ppcopts.h"
     44 #endif
     45 
     46 #include <sys/param.h>
     47 #include <sys/conf.h>
     48 #include <sys/disklabel.h>
     49 #include <sys/exec.h>
     50 #include <sys/kauth.h>
     51 #include <sys/pool.h>
     52 #include <sys/proc.h>
     53 #include <sys/signal.h>
     54 #include <sys/sysctl.h>
     55 #include <sys/ucontext.h>
     56 #include <sys/cpu.h>
     57 #include <sys/module.h>
     58 #include <sys/device.h>
     59 #include <sys/pcu.h>
     60 #include <sys/atomic.h>
     61 #include <sys/kmem.h>
     62 #include <sys/xcall.h>
     63 #include <sys/ipi.h>
     64 
     65 #include <dev/mm.h>
     66 
     67 #include <powerpc/fpu.h>
     68 #include <powerpc/pcb.h>
     69 #include <powerpc/psl.h>
     70 #include <powerpc/userret.h>
     71 #if defined(ALTIVEC) || defined(PPC_HAVE_SPE)
     72 #include <powerpc/altivec.h>
     73 #endif
     74 
     75 #ifdef MULTIPROCESSOR
     76 #include <powerpc/pic/ipivar.h>
     77 #include <machine/cpu_counter.h>
     78 #endif
     79 
     80 #ifdef DDB
     81 #include <machine/db_machdep.h>
     82 #include <ddb/db_output.h>
     83 #endif
     84 
/* Timebase frequency, exported via sysctl machdep.timebase. */
int cpu_timebase;
/* When non-zero, log a diagnostic for fatal user traps. */
int cpu_printfataltraps = 1;
#if !defined(PPC_IBM4XX)
/* Power-save idle knob; a negative value means "unsupported" (see sysctl). */
extern int powersave;
#endif

/* exported variable to be filled in by the bootloaders */
char *booted_kernel;

/* Per-CPU-unit (pcu(9)) handlers for lazily-switched FPU/vector state. */
const pcu_ops_t * const pcu_ops_md_defs[PCU_UNIT_COUNT] = {
	[PCU_FPU] = &fpu_ops,
#if defined(ALTIVEC) || defined(PPC_HAVE_SPE)
	[PCU_VEC] = &vec_ops,
#endif
};

#ifdef MULTIPROCESSOR
/* Bookkeeping of hatched/running/paused/resumed/halted CPUs. */
struct cpuset_info cpuset_info;
#endif
    104 
    105 /*
    106  * Set set up registers on exec.
    107  */
    108 void
    109 setregs(struct lwp *l, struct exec_package *epp, vaddr_t stack)
    110 {
    111 	struct proc * const p = l->l_proc;
    112 	struct trapframe * const tf = l->l_md.md_utf;
    113 	struct pcb * const pcb = lwp_getpcb(l);
    114 	struct ps_strings arginfo;
    115 	vaddr_t func = epp->ep_entry;
    116 
    117 	memset(tf, 0, sizeof *tf);
    118 	tf->tf_fixreg[1] = -roundup(-stack + 8, 16);
    119 
    120 	/*
    121 	 * XXX Machine-independent code has already copied arguments and
    122 	 * XXX environment to userland.  Get them back here.
    123 	 */
    124 	(void)copyin_psstrings(p, &arginfo);
    125 
    126 	/*
    127 	 * Set up arguments for _start():
    128 	 *	_start(argc, argv, envp, obj, cleanup, ps_strings);
    129 	 *
    130 	 * Notes:
    131 	 *	- obj and cleanup are the auxiliary and termination
    132 	 *	  vectors.  They are fixed up by ld.elf_so.
    133 	 *	- ps_strings is a NetBSD extension, and will be
    134 	 * 	  ignored by executables which are strictly
    135 	 *	  compliant with the SVR4 ABI.
    136 	 *
    137 	 * XXX We have to set both regs and retval here due to different
    138 	 * XXX calling convention in trap.c and init_main.c.
    139 	 */
    140 	tf->tf_fixreg[3] = arginfo.ps_nargvstr;
    141 	tf->tf_fixreg[4] = (register_t)arginfo.ps_argvstr;
    142 	tf->tf_fixreg[5] = (register_t)arginfo.ps_envstr;
    143 	tf->tf_fixreg[6] = 0;			/* auxiliary vector */
    144 	tf->tf_fixreg[7] = 0;			/* termination vector */
    145 	tf->tf_fixreg[8] = p->p_psstrp;		/* NetBSD extension */
    146 
    147 #ifdef _LP64
    148 	/*
    149 	 * For native ELF64, entry point to the function
    150 	 * descriptor which contains the real function address
    151 	 * and its TOC base address.
    152 	 */
    153 	uintptr_t fdesc[3] = { [0] = func, [1] = 0, [2] = 0 };
    154 	copyin((void *)func, fdesc, sizeof(fdesc));
    155 	tf->tf_fixreg[2] = fdesc[1] + epp->ep_entryoffset;
    156 	func = fdesc[0] + epp->ep_entryoffset;
    157 #endif
    158 	tf->tf_srr0 = func;
    159 	tf->tf_srr1 = PSL_MBO | PSL_USERSET;
    160 #ifdef ALTIVEC
    161 	tf->tf_vrsave = 0;
    162 #endif
    163 	pcb->pcb_flags = PSL_FE_DFLT;
    164 
    165 #if defined(PPC_BOOKE) || defined(PPC_IBM4XX)
    166 	p->p_md.md_ss_addr[0] = p->p_md.md_ss_addr[1] = 0;
    167 	p->p_md.md_ss_insn[0] = p->p_md.md_ss_insn[1] = 0;
    168 #endif
    169 }
    170 
    171 /*
    172  * Machine dependent system variables.
    173  */
    174 static int
    175 sysctl_machdep_cacheinfo(SYSCTLFN_ARGS)
    176 {
    177 	struct sysctlnode node = *rnode;
    178 
    179 	node.sysctl_data = &curcpu()->ci_ci;
    180 	node.sysctl_size = sizeof(curcpu()->ci_ci);
    181 	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
    182 }
    183 
    184 #if !defined (PPC_IBM4XX)
    185 static int
    186 sysctl_machdep_powersave(SYSCTLFN_ARGS)
    187 {
    188 	struct sysctlnode node = *rnode;
    189 
    190 	if (powersave < 0)
    191 		node.sysctl_flags &= ~CTLFLAG_READWRITE;
    192 	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
    193 }
    194 #endif
    195 
    196 static int
    197 sysctl_machdep_booted_device(SYSCTLFN_ARGS)
    198 {
    199 	struct sysctlnode node;
    200 
    201 	if (booted_device == NULL)
    202 		return (EOPNOTSUPP);
    203 
    204 	const char * const xname = device_xname(booted_device);
    205 
    206 	node = *rnode;
    207 	node.sysctl_data = __UNCONST(xname);
    208 	node.sysctl_size = strlen(xname) + 1;
    209 	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
    210 }
    211 
    212 static int
    213 sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)
    214 {
    215 	struct sysctlnode node;
    216 
    217 	if (booted_kernel == NULL || booted_kernel[0] == '\0')
    218 		return (EOPNOTSUPP);
    219 
    220 	node = *rnode;
    221 	node.sysctl_data = booted_kernel;
    222 	node.sysctl_size = strlen(booted_kernel) + 1;
    223 	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
    224 }
    225 
/*
 * Create the machdep sysctl subtree for powerpc.  Nodes with
 * CTLFLAG_IMMEDIATE bake their value in at creation time; the rest
 * either point at a kernel variable or go through a helper function.
 */
SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	/* The machdep root node itself. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "machdep", NULL,
		       NULL, 0, NULL, 0,
		       CTL_MACHDEP, CTL_EOL);

	/* Deprecated */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "cachelinesize", NULL,
		       NULL, curcpu()->ci_ci.dcache_line_size, NULL, 0,
		       CTL_MACHDEP, CPU_CACHELINE, CTL_EOL);
	/* Timebase frequency (read-only view of cpu_timebase). */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "timebase", NULL,
		       NULL, 0, &cpu_timebase, 0,
		       CTL_MACHDEP, CPU_TIMEBASE, CTL_EOL);
	/* Writable toggle for fatal-trap diagnostics. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "printfataltraps", NULL,
		       NULL, 0, &cpu_printfataltraps, 0,
		       CTL_MACHDEP, CPU_PRINTFATALTRAPS, CTL_EOL);
	/* Use this instead of CPU_CACHELINE */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "cacheinfo", NULL,
		       sysctl_machdep_cacheinfo, 0, NULL, 0,
		       CTL_MACHDEP, CPU_CACHEINFO, CTL_EOL);
#if !defined (PPC_IBM4XX)
	/* Helper makes the node read-only when powersave < 0. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "powersave", NULL,
		       sysctl_machdep_powersave, 0, &powersave, 0,
		       CTL_MACHDEP, CPU_POWERSAVE, CTL_EOL);
#endif
#if defined(PPC_IBM4XX) || defined(PPC_BOOKE)
	/* 4xx/Book E cores have no AltiVec: report a constant 0. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "altivec", NULL,
		       NULL, 0, NULL, 0,
		       CTL_MACHDEP, CPU_ALTIVEC, CTL_EOL);
#else
	/* Report presence as detected at boot (cpu_altivec). */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "altivec", NULL,
		       NULL, cpu_altivec, NULL, 0,
		       CTL_MACHDEP, CPU_ALTIVEC, CTL_EOL);
#endif
#ifdef PPC_BOOKE
	/* Book E MMUs enforce execute protection: constant 1. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "execprot", NULL,
		       NULL, 1, NULL, 0,
		       CTL_MACHDEP, CPU_EXECPROT, CTL_EOL);
#endif
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "booted_device", NULL,
		       sysctl_machdep_booted_device, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "booted_kernel", NULL,
		       sysctl_machdep_booted_kernel, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
	/* FPU presence is a compile-time property of the port. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "fpu_present", NULL,
		       NULL,
#if defined(PPC_HAVE_FPU)
		       1,
#else
		       0,
#endif
		       NULL, 0,
		       CTL_MACHDEP, CPU_FPU, CTL_EOL);
	/* Whether unaligned accesses are unsupported on this port. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "no_unaligned", NULL,
		       NULL,
#if defined(PPC_NO_UNALIGNED)
		       1,
#else
		       0,
#endif
		       NULL, 0,
		       CTL_MACHDEP, CPU_NO_UNALIGNED, CTL_EOL);
	/* Processor Version Register of the boot CPU. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "pvr", NULL,
		       NULL,
		       mfpvr(),
		       NULL, 0,
		       CTL_MACHDEP, CPU_PVR, CTL_EOL);
}
    324 
    325 /*
    326  * Crash dump handling.
    327  */
    328 u_int32_t dumpmag = 0x8fca0101;		/* magic number */
    329 int dumpsize = 0;			/* size of dump in pages */
    330 long dumplo = -1;			/* blocks */
    331 
    332 /*
    333  * This is called by main to set dumplo and dumpsize.
    334  */
    335 void
    336 cpu_dumpconf(void)
    337 {
    338 	int nblks;		/* size of dump device */
    339 	int skip;
    340 
    341 	if (dumpdev == NODEV)
    342 		return;
    343 	nblks = bdev_size(dumpdev);
    344 	if (nblks <= ctod(1))
    345 		return;
    346 
    347 	dumpsize = physmem;
    348 
    349 	/* Skip enough blocks at start of disk to preserve an eventual disklabel. */
    350 	skip = LABELSECTOR + 1;
    351 	skip += ctod(1) - 1;
    352 	skip = ctod(dtoc(skip));
    353 	if (dumplo < skip)
    354 		dumplo = skip;
    355 
    356 	/* Put dump at end of partition */
    357 	if (dumpsize > dtoc(nblks - dumplo))
    358 		dumpsize = dtoc(nblks - dumplo);
    359 	if (dumplo < nblks - ctod(dumpsize))
    360 		dumplo = nblks - ctod(dumpsize);
    361 }
    362 
    363 /*
    364  * Start a new LWP
    365  */
    366 void
    367 startlwp(void *arg)
    368 {
    369 	ucontext_t * const uc = arg;
    370 	lwp_t * const l = curlwp;
    371 	struct trapframe * const tf = l->l_md.md_utf;
    372 	int error __diagused;
    373 
    374 	error = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
    375 	KASSERT(error == 0);
    376 
    377 	kmem_free(uc, sizeof(ucontext_t));
    378 	userret(l, tf);
    379 }
    380 
    381 /*
    382  * Process the tail end of a posix_spawn() for the child.
    383  */
    384 void
    385 cpu_spawn_return(struct lwp *l)
    386 {
    387 	struct trapframe * const tf = l->l_md.md_utf;
    388 
    389 	userret(l, tf);
    390 }
    391 
    392 bool
    393 cpu_intr_p(void)
    394 {
    395 
    396 	return curcpu()->ci_idepth >= 0;
    397 }
    398 
/*
 * Idle loop body: spin/sleep via the CPU-specific idle routine.
 * Must be entered with interrupts enabled and at IPL_NONE.
 */
void
cpu_idle(void)
{
	KASSERT(mfmsr() & PSL_EE);
	KASSERTMSG(curcpu()->ci_cpl == IPL_NONE,
	    "ci_cpl = %d", curcpu()->ci_cpl);
	(*curcpu()->ci_idlespin)();
}
    407 
    408 void
    409 cpu_ast(struct lwp *l, struct cpu_info *ci)
    410 {
    411 	l->l_md.md_astpending = 0;	/* we are about to do it */
    412 	if (l->l_pflag & LP_OWEUPC) {
    413 		l->l_pflag &= ~LP_OWEUPC;
    414 		ADDUPROF(l);
    415 	}
    416 }
    417 
/*
 * Arrange for a reschedule of lwp l on cpu ci.  Remote CPUs are poked
 * with an IPI; the local CPU either triggers a kernel-preemption
 * softint or posts an AST to take effect on return to userspace.
 */
void
cpu_need_resched(struct cpu_info *ci, struct lwp *l, int flags)
{
	KASSERT(kpreempt_disabled());

#ifdef __HAVE_PREEMPTION
	if ((flags & RESCHED_KPREEMPT) != 0) {
		if ((flags & RESCHED_REMOTE) != 0) {
			cpu_send_ipi(cpu_index(ci), IPI_KPREEMPT);
		} else {
			softint_trigger(SOFTINT_KPREEMPT);
		}
		return;
	}
#endif
	if ((flags & RESCHED_REMOTE) != 0) {
#if defined(MULTIPROCESSOR)
		cpu_send_ipi(cpu_index(ci), IPI_AST);
#endif
	} else {
		l->l_md.md_astpending = 1;	/* force call to cpu_ast() */
	}
}
    441 
/*
 * Note that lwp l owes a profiling tick; the AST handler (cpu_ast)
 * will charge it via ADDUPROF on the next return to userspace.
 */
void
cpu_need_proftick(lwp_t *l)
{
	l->l_pflag |= LP_OWEUPC;
	l->l_md.md_astpending = 1;
}
    448 
    449 void
    450 cpu_signotify(lwp_t *l)
    451 {
    452 	if (l->l_cpu != curcpu()) {
    453 #if defined(MULTIPROCESSOR)
    454 		cpu_send_ipi(cpu_index(l->l_cpu), IPI_AST);
    455 #endif
    456 	} else {
    457 		l->l_md.md_astpending = 1;
    458 	}
    459 }
    460 
    461 vaddr_t
    462 cpu_lwp_pc(lwp_t *l)
    463 {
    464 	return l->l_md.md_utf->tf_srr0;
    465 }
    466 
    467 bool
    468 cpu_clkf_usermode(const struct clockframe *cf)
    469 {
    470 	return (cf->cf_srr1 & PSL_PR) != 0;
    471 }
    472 
    473 vaddr_t
    474 cpu_clkf_pc(const struct clockframe *cf)
    475 {
    476 	return cf->cf_srr0;
    477 }
    478 
    479 bool
    480 cpu_clkf_intr(const struct clockframe *cf)
    481 {
    482 	return cf->cf_idepth > 0;
    483 }
    484 
    485 #ifdef MULTIPROCESSOR
    486 /*
    487  * MD support for xcall(9) interface.
    488  */
    489 
    490 void
    491 xc_send_ipi(struct cpu_info *ci)
    492 {
    493 	KASSERT(kpreempt_disabled());
    494 	KASSERT(curcpu() != ci);
    495 
    496 	cpuid_t target = (ci != NULL ? cpu_index(ci) : IPI_DST_NOTME);
    497 
    498 	/* Unicast: remote CPU. */
    499 	/* Broadcast: all, but local CPU (caller will handle it). */
    500 	cpu_send_ipi(target, IPI_XCALL);
    501 }
    502 
/*
 * MD backend for the generic ipi(9) interface; same dispatch rules as
 * xc_send_ipi() but delivers IPI_GENERIC.
 */
void
cpu_ipi(struct cpu_info *ci)
{
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	cpuid_t target = (ci != NULL ? cpu_index(ci) : IPI_DST_NOTME);

	/* Unicast: remote CPU. */
	/* Broadcast: all, but local CPU (caller will handle it). */
	cpu_send_ipi(target, IPI_GENERIC);
}
    515 
/* XXX kcpuset_create(9), kcpuset_clone(9) couldn't use interrupt context */
/*
 * Lightweight fixed-size CPU set used by the pause/halt/resume code
 * below: a plain 32-bit mask, one bit per cpu_index().  Mutating
 * macros use atomics so they are safe against concurrent updaters.
 */
typedef uint32_t __cpuset_t;
CTASSERT(MAXCPUS <= 32);

#define	CPUSET_SINGLE(cpu)		((__cpuset_t)1 << (cpu))

#define	CPUSET_ADD(set, cpu)		atomic_or_32(&(set), CPUSET_SINGLE(cpu))
#define	CPUSET_DEL(set, cpu)		atomic_and_32(&(set), ~CPUSET_SINGLE(cpu))
#define	CPUSET_SUB(set1, set2)		atomic_and_32(&(set1), ~(set2))

#define	CPUSET_EXCEPT(set, cpu)		((set) & ~CPUSET_SINGLE(cpu))

#define	CPUSET_HAS_P(set, cpu)		((set) & CPUSET_SINGLE(cpu))
#define	CPUSET_NEXT(set)		(ffs(set) - 1)

#define	CPUSET_EMPTY_P(set)		((set) == (__cpuset_t)0)
#define	CPUSET_EQUAL_P(set1, set2)	((set1) == (set2))
#define	CPUSET_CLEAR(set)		((set) = (__cpuset_t)0)
#define	CPUSET_ASSIGN(set1, set2)	((set1) = (set2))

/* Snapshot a kcpuset(9) into a __cpuset_t word. */
#define	CPUSET_EXPORT(kset, set)	kcpuset_export_u32((kset), &(set), sizeof(set))
    537 
    538 /*
    539  * Send an inter-processor interrupt to CPUs in cpuset (excludes curcpu())
    540  */
    541 static void
    542 cpu_multicast_ipi(__cpuset_t cpuset, uint32_t msg)
    543 {
    544 	CPU_INFO_ITERATOR cii;
    545 	struct cpu_info *ci;
    546 
    547 	CPUSET_DEL(cpuset, cpu_index(curcpu()));
    548 	if (CPUSET_EMPTY_P(cpuset))
    549 		return;
    550 
    551 	for (CPU_INFO_FOREACH(cii, ci)) {
    552 		const int index = cpu_index(ci);
    553 		if (CPUSET_HAS_P(cpuset, index)) {
    554 			CPUSET_DEL(cpuset, index);
    555 			cpu_send_ipi(index, msg);
    556 		}
    557 	}
    558 }
    559 
/*
 * Report CPUs that did not acknowledge an IPI operation: print every
 * CPU present in 'expected' but missing from the 'succeeded' set.
 */
static void
cpu_ipi_error(const char *s, kcpuset_t *succeeded, __cpuset_t expected)
{
	__cpuset_t cpuset;

	CPUSET_EXPORT(succeeded, cpuset);
	CPUSET_SUB(expected, cpuset);
	if (!CPUSET_EMPTY_P(expected)) {
		printf("Failed to %s:", s);
		do {
			const int index = CPUSET_NEXT(expected);
			CPUSET_DEL(expected, index);
			printf(" cpu%d", index);
		} while (!CPUSET_EMPTY_P(expected));
		printf("\n");
	}
}
    577 
/*
 * Busy-wait until the kcpuset 'watchset' matches 'mask' exactly.
 * The timeout is a crude iteration count scaled by the CPU clock
 * frequency, not a measured duration.  Returns 0 on success, 1 on
 * timeout.
 */
static int
cpu_ipi_wait(kcpuset_t *watchset, __cpuset_t mask)
{
	uint64_t tmout = curcpu()->ci_data.cpu_cc_freq; /* some finite amount of time */
	__cpuset_t cpuset;

	while (tmout--) {
		CPUSET_EXPORT(watchset, cpuset);
		if (cpuset == mask)
			return 0;		/* success */
	}
	return 1;				/* timed out */
}
    591 
    592 /*
    593  * Halt this cpu.
    594  */
    595 void
    596 cpu_halt(void)
    597 {
    598 	struct cpuset_info * const csi = &cpuset_info;
    599 	const cpuid_t index = cpu_index(curcpu());
    600 
    601 	printf("cpu%ld: shutting down\n", index);
    602 	kcpuset_set(csi->cpus_halted, index);
    603 	spl0();			/* allow interrupts e.g. further ipi ? */
    604 
    605 	/* spin */
    606 	for (;;)
    607 		continue;
    608 	/*NOTREACHED*/
    609 }
    610 
    611 /*
    612  * Halt all running cpus, excluding current cpu.
    613  */
    614 void
    615 cpu_halt_others(void)
    616 {
    617 	struct cpuset_info * const csi = &cpuset_info;
    618 	const cpuid_t index = cpu_index(curcpu());
    619 	__cpuset_t cpumask, cpuset, halted;
    620 
    621 	KASSERT(kpreempt_disabled());
    622 
    623 	CPUSET_EXPORT(csi->cpus_running, cpuset);
    624 	CPUSET_DEL(cpuset, index);
    625 	CPUSET_ASSIGN(cpumask, cpuset);
    626 	CPUSET_EXPORT(csi->cpus_halted, halted);
    627 	CPUSET_SUB(cpuset, halted);
    628 
    629 	if (CPUSET_EMPTY_P(cpuset))
    630 		return;
    631 
    632 	cpu_multicast_ipi(cpuset, IPI_HALT);
    633 	if (cpu_ipi_wait(csi->cpus_halted, cpumask))
    634 		cpu_ipi_error("halt", csi->cpus_halted, cpumask);
    635 
    636 	/*
    637 	 * TBD
    638 	 * Depending on available firmware methods, other cpus will
    639 	 * either shut down themselves, or spin and wait for us to
    640 	 * stop them.
    641 	 */
    642 }
    643 
    644 /*
    645  * Pause this cpu.
    646  */
    647 void
    648 cpu_pause(struct trapframe *tf)
    649 {
    650 	volatile struct cpuset_info * const csi = &cpuset_info;
    651 	int s = splhigh();
    652 	const cpuid_t index = cpu_index(curcpu());
    653 
    654 	for (;;) {
    655 		kcpuset_set(csi->cpus_paused, index);
    656 		while (kcpuset_isset(csi->cpus_paused, index))
    657 			docritpollhooks();
    658 		kcpuset_set(csi->cpus_resumed, index);
    659 #ifdef DDB
    660 		if (ddb_running_on_this_cpu_p())
    661 			cpu_Debugger();
    662 		if (ddb_running_on_any_cpu_p())
    663 			continue;
    664 #endif	/* DDB */
    665 		break;
    666 	}
    667 
    668 	splx(s);
    669 }
    670 
    671 /*
    672  * Pause all running cpus, excluding current cpu.
    673  */
    674 void
    675 cpu_pause_others(void)
    676 {
    677 	struct cpuset_info * const csi = &cpuset_info;
    678 	const cpuid_t index = cpu_index(curcpu());
    679 	__cpuset_t cpuset;
    680 
    681 	KASSERT(kpreempt_disabled());
    682 
    683 	CPUSET_EXPORT(csi->cpus_running, cpuset);
    684 	CPUSET_DEL(cpuset, index);
    685 
    686 	if (CPUSET_EMPTY_P(cpuset))
    687 		return;
    688 
    689 	cpu_multicast_ipi(cpuset, IPI_SUSPEND);
    690 	if (cpu_ipi_wait(csi->cpus_paused, cpuset))
    691 		cpu_ipi_error("pause", csi->cpus_paused, cpuset);
    692 }
    693 
    694 /*
    695  * Resume a single cpu.
    696  */
    697 void
    698 cpu_resume(cpuid_t index)
    699 {
    700 	struct cpuset_info * const csi = &cpuset_info;
    701 	__cpuset_t cpuset = CPUSET_SINGLE(index);
    702 
    703 	kcpuset_zero(csi->cpus_resumed);
    704 	kcpuset_clear(csi->cpus_paused, index);
    705 
    706 	if (cpu_ipi_wait(csi->cpus_paused, cpuset))
    707 		cpu_ipi_error("resume", csi->cpus_resumed, cpuset);
    708 }
    709 
    710 /*
    711  * Resume all paused cpus.
    712  */
    713 void
    714 cpu_resume_others(void)
    715 {
    716 	struct cpuset_info * const csi = &cpuset_info;
    717 	__cpuset_t cpuset;
    718 
    719 	kcpuset_zero(csi->cpus_resumed);
    720 	CPUSET_EXPORT(csi->cpus_paused, cpuset);
    721 	kcpuset_zero(csi->cpus_paused);
    722 
    723 	if (cpu_ipi_wait(csi->cpus_resumed, cpuset))
    724 		cpu_ipi_error("resume", csi->cpus_resumed, cpuset);
    725 }
    726 
    727 int
    728 cpu_is_paused(int index)
    729 {
    730 	struct cpuset_info * const csi = &cpuset_info;
    731 
    732 	return kcpuset_isset(csi->cpus_paused, index);
    733 }
    734 
#ifdef DDB
/*
 * Print a one-line-per-CPU status table for DDB: hatched/running/
 * paused/resumed/halted flags plus IPL, interrupt depth, mutex count
 * and pending IPI mask.
 */
void
cpu_debug_dump(void)
{
	struct cpuset_info * const csi = &cpuset_info;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	char running, hatched, paused, resumed, halted;

	/* Header width depends on pointer size (CPUINFO column). */
#ifdef _LP64
	db_printf("CPU CPUID STATE CPUINFO          CPL INT MTX IPIS\n");
#else
	db_printf("CPU CPUID STATE CPUINFO  CPL INT MTX IPIS\n");
#endif
	for (CPU_INFO_FOREACH(cii, ci)) {
		const cpuid_t index = cpu_index(ci);
		hatched = (kcpuset_isset(csi->cpus_hatched, index) ? 'H' : '-');
		running = (kcpuset_isset(csi->cpus_running, index) ? 'R' : '-');
		paused  = (kcpuset_isset(csi->cpus_paused,  index) ? 'P' : '-');
		resumed = (kcpuset_isset(csi->cpus_resumed, index) ? 'r' : '-');
		halted  = (kcpuset_isset(csi->cpus_halted,  index) ? 'h' : '-');
		db_printf("%3ld 0x%03x %c%c%c%c%c %p %3d %3d %3d 0x%08x\n",
		    index, ci->ci_cpuid,
		    running, hatched, paused, resumed, halted,
		    ci, ci->ci_cpl, ci->ci_idepth, ci->ci_mtx_count,
		    ci->ci_pending_ipis);
	}
}
#endif	/* DDB */
    763 #endif	/* DDB */
    764 #endif /* MULTIPROCESSOR */
    765 
/*
 * Emulate the privileged mfmsr/mtmsr instructions on behalf of a
 * user-mode lwp that trapped executing them.  Returns 1 if the
 * instruction was recognized and emulated, 0 otherwise (the caller
 * should then deliver the fault).
 */
int
emulate_mxmsr(struct lwp *l, struct trapframe *tf, uint32_t opcode)
{

	/* Opcode match/extract helpers for mfmsr rD and mtmsr rS. */
#define	OPC_MFMSR_CODE		0x7c0000a6
#define	OPC_MFMSR_MASK		0xfc1fffff
#define	OPC_MFMSR_P(o)		(((o) & OPC_MFMSR_MASK) == OPC_MFMSR_CODE)

#define	OPC_MTMSR_CODE		0x7c000124
#define	OPC_MTMSR_MASK		0xfc1fffff
#define	OPC_MTMSR_P(o)		(((o) & OPC_MTMSR_MASK) == OPC_MTMSR_CODE)

#define	OPC_MXMSR_REG(o)	(((o) >> 21) & 0x1f)

	if (OPC_MFMSR_P(opcode)) {
		struct pcb * const pcb = lwp_getpcb(l);
		/* Start from the user-visible SRR1 bits... */
		register_t msr = tf->tf_srr1 & PSL_USERSRR1;

		/* ...and fold in lazily-switched unit availability. */
		if (fpu_used_p(l))
			msr |= PSL_FP;
#ifdef ALTIVEC
		if (vec_used_p(l))
			msr |= PSL_VEC;
#endif

		/* Reflect the thread's FP exception mode. */
		msr |= (pcb->pcb_flags & PSL_FE_PREC);
		tf->tf_fixreg[OPC_MXMSR_REG(opcode)] = msr;
		return 1;
	}

	if (OPC_MTMSR_P(opcode)) {
		struct pcb * const pcb = lwp_getpcb(l);
		register_t msr = tf->tf_fixreg[OPC_MXMSR_REG(opcode)];

		/*
		 * Ignore the FP enable bit in the requested MSR.
		 * It might be set in the thread's actual MSR but the
		 * user code isn't allowed to change it.
		 */
		msr &= ~PSL_FP;
#ifdef ALTIVEC
		msr &= ~PSL_VEC;
#endif

		/*
		 * Don't let the user muck with bits he's not allowed to.
		 */
#ifdef PPC_HAVE_FPU
		if (!PSL_USEROK_P(msr))
#else
		if (!PSL_USEROK_P(msr & ~PSL_FE_PREC))
#endif
			return 0;

		/*
		 * For now, only update the FP exception mode.
		 */
		pcb->pcb_flags &= ~PSL_FE_PREC;
		pcb->pcb_flags |= msr & PSL_FE_PREC;

#ifdef PPC_HAVE_FPU
		/*
		 * If we think we have the FPU, update SRR1 too.  If we're
		 * wrong userret() will take care of it.
		 */
		if (tf->tf_srr1 & PSL_FP) {
			tf->tf_srr1 &= ~(PSL_FE0|PSL_FE1);
			tf->tf_srr1 |= msr & (PSL_FE0|PSL_FE1);
		}
#endif
		return 1;
	}

	return 0;
}
    841 
    842 #if defined(MODULAR) && !defined(__PPC_HAVE_MODULE_INIT_MD)
    843 /*
    844  * Push any modules loaded by the boot loader.
    845  */
    846 void
    847 module_init_md(void)
    848 {
    849 }
    850 #endif
    851 
    852 bool
    853 mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap)
    854 {
    855 	if (atop(pa) < physmem) {
    856 		*vap = pa;
    857 		return true;
    858 	}
    859 
    860 	return false;
    861 }
    862 
    863 int
    864 mm_md_physacc(paddr_t pa, vm_prot_t prot)
    865 {
    866 
    867 	return (atop(pa) < physmem) ? 0 : EFAULT;
    868 }
    869 
    870 int
    871 mm_md_kernacc(void *va, vm_prot_t prot, bool *handled)
    872 {
    873 	if (atop((paddr_t)va) < physmem) {
    874 		*handled = true;
    875 		return 0;
    876 	}
    877 
    878 	if ((vaddr_t)va < VM_MIN_KERNEL_ADDRESS
    879 	    || (vaddr_t)va >= VM_MAX_KERNEL_ADDRESS)
    880 		return EFAULT;
    881 
    882 	*handled = false;
    883 	return 0;
    884 }
    885