/*	$NetBSD: oea_machdep.c,v 1.85 2024/01/20 00:18:19 jmcneill Exp $	*/

/*
 * Copyright (C) 2002 Matt Thomas
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: oea_machdep.c,v 1.85 2024/01/20 00:18:19 jmcneill Exp $");

#ifdef _KERNEL_OPT
#include "opt_altivec.h"
#include "opt_ddb.h"
#include "opt_kgdb.h"
#include "opt_multiprocessor.h"
#include "opt_ppcarch.h"
#endif

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/boot_flag.h>
#include <sys/exec.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/msgbuf.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/syscallargs.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#ifdef DDB
#include <powerpc/db_machdep.h>
#include <ddb/db_extern.h>
#endif

#ifdef KGDB
#include <sys/kgdb.h>
#endif

#include <machine/powerpc.h>

#include <powerpc/trap.h>
#include <powerpc/spr.h>
#include <powerpc/pte.h>
#include <powerpc/altivec.h>
#include <powerpc/pcb.h>

#include <powerpc/oea/bat.h>
#include <powerpc/oea/cpufeat.h>
#include <powerpc/oea/spr.h>
#include <powerpc/oea/sr_601.h>

char machine[] = MACHINE;		/* from <machine/param.h> */
char machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */

struct vm_map *phys_map = NULL;

/*
 * Global variables used here and there
 */
static void trap0(void *);

/* XXXSL: The battable is not initialized to non-zero for PPC_OEA64 and PPC_OEA64_BRIDGE */
struct bat battable[BAT_VA2IDX(0xffffffff)+1];

register_t iosrtable[16];	/* I/O segments, for kernel_pmap setup */
#ifndef MSGBUFADDR
paddr_t msgbuf_paddr;
#endif

extern int dsitrap_fix_dbat4[];
extern int dsitrap_fix_dbat5[];
extern int dsitrap_fix_dbat6[];
extern int dsitrap_fix_dbat7[];
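
/*
 * (The dsitrap_fix_dbat[4-7] symbols presumably label instructions in
 * the assembly DSI trap code; oea_batinit() below patches those
 * instructions at boot on CPUs that provide the high BAT pairs.)
 */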

/*
 * Load a pointer with 0 behind GCC's back, otherwise it will
 * emit a "trap" instruction instead.
 */
static __inline__ uintptr_t
zero_value(void)
{
	uintptr_t dont_tell_gcc;

	__asm volatile ("li %0, 0" : "=r"(dont_tell_gcc) :);
	return dont_tell_gcc;
}

void
oea_init(void (*handler)(void))
{
	extern int trapcode[], trapsize[];
	extern int sctrap[], scsize[];
	extern int alitrap[], alisize[];
	extern int dsitrap[], dsisize[];
	extern int trapstart[], trapend[];
#ifdef PPC_OEA601
	extern int dsi601trap[], dsi601size[];
#endif
	extern int decrint[], decrsize[];
	extern int tlbimiss[], tlbimsize[];
	extern int tlbdlmiss[], tlbdlmsize[];
	extern int tlbdsmiss[], tlbdsmsize[];
#if defined(DDB) || defined(KGDB)
	extern int ddblow[], ddbsize[];
#endif
#ifdef ALTIVEC
	register_t msr;
#endif
	uintptr_t exc, exc_base;
#if defined(ALTIVEC) || defined(PPC_OEA)
	register_t scratch;
#endif
	unsigned int cpuvers;
	size_t size;
	struct cpu_info * const ci = &cpu_info[0];

#ifdef PPC_HIGH_VEC
	exc_base = EXC_HIGHVEC;
#else
	exc_base = zero_value();
#endif
	KASSERT(mfspr(SPR_SPRG0) == (uintptr_t)ci);

#if defined (PPC_OEA64_BRIDGE) && defined (PPC_OEA)
	if (oeacpufeat & OEACPU_64_BRIDGE)
		pmap_setup64bridge();
	else
		pmap_setup32();
#endif

	cpuvers = mfpvr() >> 16;

	/*
	 * Initialize proc0 and current pcb and pmap pointers.
	 */
	(void) ci;
	KASSERT(ci != NULL);
	KASSERT(curcpu() == ci);
	KASSERT(lwp0.l_cpu == ci);

	curpcb = lwp_getpcb(&lwp0);
	memset(curpcb, 0, sizeof(struct pcb));

#ifdef ALTIVEC
	/*
	 * Initialize the vector registers with NaNs.
	 */
	for (scratch = 0; scratch < 32; scratch++) {
		curpcb->pcb_vr.vreg[scratch][0] = 0x7FFFDEAD;
		curpcb->pcb_vr.vreg[scratch][1] = 0x7FFFDEAD;
		curpcb->pcb_vr.vreg[scratch][2] = 0x7FFFDEAD;
		curpcb->pcb_vr.vreg[scratch][3] = 0x7FFFDEAD;
	}
#endif
	curpm = curpcb->pcb_pm = pmap_kernel();

	/*
	 * Cause a PGM trap if we branch to 0.
	 *
	 * XXX GCC 4.1 complains about memset on address zero, so
	 * don't use the builtin.
	 */
#undef memset
	memset(0, 0, 0x100);
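
	/*
	 * (An all-zero word is not a valid PowerPC instruction, so with
	 * page zero cleared a stray branch to address 0 takes a program
	 * check exception instead of executing leftover data.)
	 */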

	/*
	 * Set up trap vectors.  Don't assume vectors are on 0x100.
	 */
	for (exc = exc_base; exc <= exc_base + EXC_LAST; exc += 0x100) {
		switch (exc - exc_base) {
		default:
			size = (size_t)trapsize;
			memcpy((void *)exc, trapcode, size);
			break;
#if 0
		case EXC_EXI:
			/*
			 * This one is (potentially) installed during autoconf
			 */
			break;
#endif
		case EXC_SC:
			size = (size_t)scsize;
			memcpy((void *)exc, sctrap, size);
			break;
		case EXC_ALI:
			size = (size_t)alisize;
			memcpy((void *)exc, alitrap, size);
			break;
		case EXC_DSI:
#ifdef PPC_OEA601
			if (cpuvers == MPC601) {
				size = (size_t)dsi601size;
				memcpy((void *)exc, dsi601trap, size);
				break;
			} else
#endif /* PPC_OEA601 */
			if (oeacpufeat & OEACPU_NOBAT) {
				size = (size_t)alisize;
				memcpy((void *)exc, alitrap, size);
			} else {
				size = (size_t)dsisize;
				memcpy((void *)exc, dsitrap, size);
			}
			break;
		case EXC_DECR:
			size = (size_t)decrsize;
			memcpy((void *)exc, decrint, size);
			break;
		case EXC_IMISS:
			size = (size_t)tlbimsize;
			memcpy((void *)exc, tlbimiss, size);
			break;
		case EXC_DLMISS:
			size = (size_t)tlbdlmsize;
			memcpy((void *)exc, tlbdlmiss, size);
			break;
		case EXC_DSMISS:
			size = (size_t)tlbdsmsize;
			memcpy((void *)exc, tlbdsmiss, size);
			break;
		case EXC_PERF:
			size = (size_t)trapsize;
			memcpy((void *)exc, trapcode, size);
			memcpy((void *)(exc_base + EXC_VEC), trapcode, size);
			break;
#if defined(DDB) || defined(KGDB)
		case EXC_RUNMODETRC:
#ifdef PPC_OEA601
			if (cpuvers != MPC601)
#endif
			{
				size = (size_t)trapsize;
				memcpy((void *)exc, trapcode, size);
				break;
			}
			/* FALLTHROUGH */
		case EXC_PGM:
		case EXC_TRC:
		case EXC_BPT:
			size = (size_t)ddbsize;
			memcpy((void *)exc, ddblow, size);
			break;
#endif /* DDB || KGDB */
		}
#if 0
		exc += roundup(size, 32);
#endif
	}

	/*
	 * Install a branch absolute to trap0 to force a panic.
	 */
	if ((uintptr_t)trap0 < 0x2000000) {
		uint32_t *p = (uint32_t *)zero_value();

		p[0] = 0x7c6802a6;
		p[1] = 0x48000002 | (uintptr_t) trap0;
	}
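
	/*
	 * (The two words stored above decode as "mflr r3" and an
	 * absolute branch to trap0, so a call through a null function
	 * pointer lands in trap0() with the caller's return address as
	 * its argument.)
	 */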

	/*
	 * Get the cache sizes because install_extint calls __syncicache.
	 */
	cpu_probe_cache();

#define	MxSPR_MASK	0x7c1fffff
#define	MFSPR_MQ	0x7c0002a6
#define	MTSPR_MQ	0x7c0003a6
#define	MTSPR_IBAT0L	0x7c1183a6
#define	MTSPR_IBAT1L	0x7c1383a6
#define	NOP		0x60000000
#define	B		0x48000000
#define	TLBSYNC		0x7c00046c
#define	SYNC		0x7c0004ac
#ifdef PPC_OEA64_BRIDGE
#define	MFMSR_MASK	0xfc1fffff
#define	MFMSR		0x7c0000a6
#define	MTMSRD_MASK	0xfc1effff
#define	MTMSRD		0x7c000164
#define	RLDICL_MASK	0xfc00001c
#define	RLDICL		0x78000000
#define	RFID		0x4c000024
#define	RFI		0x4c000064
#endif

#ifdef ALTIVEC
#define	MFSPR_VRSAVE	0x7c0042a6
#define	MTSPR_VRSAVE	0x7c0043a6

	/*
	 * Try to set the VEC bit in the MSR.  If it doesn't get set, we
	 * are not on an AltiVec capable processor.
	 */
	__asm volatile (
	    "mfmsr %0; oris %1,%0,%2@h; mtmsr %1; isync; "
	    "mfmsr %1; mtmsr %0; isync"
	    : "=r"(msr), "=r"(scratch)
	    : "J"(PSL_VEC));

	/*
	 * If we aren't on an AltiVec capable processor, we need to turn
	 * the sequences that save/restore the VRSAVE SPR into NOPs.
	 */
	if (scratch & PSL_VEC) {
		cpu_altivec = 1;
	} else {
		for (int *ip = trapstart; ip < trapend; ip++) {
			if ((ip[0] & MxSPR_MASK) == MFSPR_VRSAVE) {
				ip[0] = NOP;	/* mfspr */
				ip[1] = NOP;	/* stw */
			} else if ((ip[0] & MxSPR_MASK) == MTSPR_VRSAVE) {
				ip[-1] = NOP;	/* lwz */
				ip[0] = NOP;	/* mtspr */
			}
		}
	}
#endif
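
	/*
	 * (MxSPR_MASK keeps the opcode, SPR and extended-opcode fields
	 * of an mfspr/mtspr encoding and masks off the five GPR bits,
	 * so these matches are independent of which GPR the trap code
	 * happens to use.)
	 */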

	/* XXX It would seem like this code could be elided ifndef 601, but
	 * doing so breaks my power3 machine.
	 */
	/*
	 * If we aren't on an MPC601 processor, we need to turn the
	 * sequences that save/restore the MQ SPR into NOPs, and skip
	 * over the sequences where we zap/restore the BAT registers on
	 * kernel exit/entry.
	 */
	if (cpuvers != MPC601) {
		for (int *ip = trapstart; ip < trapend; ip++) {
			if ((ip[0] & MxSPR_MASK) == MFSPR_MQ) {
				ip[0] = NOP;	/* mfspr */
				ip[1] = NOP;	/* stw */
			} else if ((ip[0] & MxSPR_MASK) == MTSPR_MQ) {
				ip[-1] = NOP;	/* lwz */
				ip[0] = NOP;	/* mtspr */
			} else if ((ip[0] & MxSPR_MASK) == MTSPR_IBAT0L) {
				if ((ip[1] & MxSPR_MASK) == MTSPR_IBAT1L)
					ip[-1] = B | 0x14;	/* li */
				else
					ip[-4] = B | 0x24;	/* lis */
			}
		}
	}

#ifdef PPC_OEA64_BRIDGE
	if ((oeacpufeat & OEACPU_64_BRIDGE) == 0) {
		/*
		 * Not a 64-bit bridge CPU: NOP out the 64-bit MSR
		 * manipulation sequences (mfmsr/rldicl/mtmsrd) in the
		 * installed vectors and replace each rfid with a plain
		 * rfi.
		 */
		for (int *ip = (int *)exc_base;
		     (uintptr_t)ip <= exc_base + EXC_LAST;
		     ip++) {
			if ((ip[0] & MFMSR_MASK) == MFMSR
			    && (ip[1] & RLDICL_MASK) == RLDICL
			    && (ip[2] & MTMSRD_MASK) == MTMSRD) {
				*ip++ = NOP;
				*ip++ = NOP;
				ip[0] = NOP;
			} else if (*ip == RFID) {
				*ip = RFI;
			}
		}

		/*
		 * Do the same for the trap handler code between
		 * trapstart and trapend.
		 */
		for (int *ip = trapstart; ip < trapend; ip++) {
			if ((ip[0] & MFMSR_MASK) == MFMSR
			    && (ip[1] & RLDICL_MASK) == RLDICL
			    && (ip[2] & MTMSRD_MASK) == MTMSRD) {
				*ip++ = NOP;
				*ip++ = NOP;
				ip[0] = NOP;
			} else if (*ip == RFID) {
				*ip = RFI;
			}
		}
	}
#endif /* PPC_OEA64_BRIDGE */

	/*
	 * Sync the changed instructions.
	 */
	__syncicache((void *) trapstart,
	    (uintptr_t) trapend - (uintptr_t) trapstart);
	__syncicache(dsitrap_fix_dbat4, 16);
	__syncicache(dsitrap_fix_dbat7, 8);

#ifdef PPC_OEA601
	/*
	 * If we are on an MPC601 processor, we need to zap any tlbsync
	 * instructions into sync.  This differs from the above in
	 * examining all kernel text, as opposed to just the exception
	 * handling.  We sync the icache on every instruction found
	 * since there are only very few of them.
	 */
	if (cpuvers == MPC601) {
		extern int kernel_text[], etext[];
		int *ip;

		for (ip = kernel_text; ip < etext; ip++) {
			if (*ip == TLBSYNC) {
				*ip = SYNC;
				__syncicache(ip, sizeof(*ip));
			}
		}
	}
#endif /* PPC_OEA601 */

	/*
	 * Configure a PSL user mask matching this processor.
	 * Don't allow PSL_FP/PSL_VEC to be set, since that would
	 * affect the PCU.
	 */
	cpu_psluserset = PSL_EE | PSL_PR | PSL_ME | PSL_IR | PSL_DR | PSL_RI;
	cpu_pslusermod = PSL_FE0 | PSL_FE1 | PSL_LE | PSL_SE | PSL_BE;
#ifdef PPC_OEA601
	if (cpuvers == MPC601) {
		cpu_psluserset &= PSL_601_MASK;
		cpu_pslusermod &= PSL_601_MASK;
	}
#endif
#ifdef PPC_HIGH_VEC
	cpu_psluserset |= PSL_IP;	/* XXX ok? */
#endif

	/*
	 * external interrupt handler install
	 */
	if (handler)
		oea_install_extint(handler);

	__syncicache((void *)exc_base, EXC_LAST + 0x100);

	/*
	 * Now enable translation (and machine checks/recoverable interrupts).
	 */
#ifdef PPC_OEA
	__asm volatile ("sync; mfmsr %0; ori %0,%0,%1; mtmsr %0; isync"
	    : "=r"(scratch)
	    : "K"(PSL_IR|PSL_DR|PSL_ME|PSL_RI));
#endif

	/*
	 * Let's take all the indirect calls via our stubs and patch
	 * them to be direct calls.
	 */
	cpu_fixup_stubs();

	KASSERT(curcpu() == ci);
}

#ifdef PPC_OEA601
static void
mpc601_ioseg_add(paddr_t pa, register_t len)
{
	const u_int i = pa >> ADDR_SR_SHFT;

	if (len != BAT_BL_256M)
		panic("mpc601_ioseg_add: len != 256M");

	/*
	 * Translate into an I/O segment, load it, and stash away for
	 * use in pmap_bootstrap().
	 */
	iosrtable[i] = SR601(SR601_Ks, SR601_BUID_MEMFORCED, 0, i);

	/*
	 * XXX Setting segment register 0xf on my powermac 7200 wedges
	 * the machine, so it is set later in pmap.c.
	 */
	/*
	__asm volatile ("mtsrin %0,%1"
	    :: "r"(iosrtable[i]),
	       "r"(pa));
	*/
}
#endif /* PPC_OEA601 */

#if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
#define	DBAT_SET(n, batl, batu)				\
	do {						\
		mtspr(SPR_DBAT##n##L, (batl));		\
		mtspr(SPR_DBAT##n##U, (batu));		\
	} while (/*CONSTCOND*/ 0)
#define	DBAT_RESET(n)	DBAT_SET(n, 0, 0)
#define	DBATU_GET(n)	mfspr(SPR_DBAT##n##U)
#define	IBAT_SET(n, batl, batu)				\
	do {						\
		mtspr(SPR_IBAT##n##L, (batl));		\
		mtspr(SPR_IBAT##n##U, (batu));		\
	} while (/*CONSTCOND*/ 0)
#define	IBAT_RESET(n)	IBAT_SET(n, 0, 0)
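
/*
 * (Each BAT is an SPR pair: the upper half holds the effective address,
 * block length, and Vs/Vp valid bits, the lower half the physical
 * address and WIMG/protection bits.  DBAT_SET() writes the lower half
 * first, so a mapping only becomes valid with the upper-half write.)
 */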

void
oea_iobat_add(paddr_t pa, register_t len)
{
	static int z = 1;
	const u_int n = BAT_BL_TO_SIZE(len) / BAT_BL_TO_SIZE(BAT_BL_8M);
	const u_int i = BAT_VA2IDX(pa) & -n;	/* in case pa was in the middle */
	const int after_bat3 = (oeacpufeat & OEACPU_HIGHBAT) ? 4 : 8;

	KASSERT(len >= BAT_BL_8M);

#ifdef PPC_OEA601
	if (mfpvr() >> 16 == MPC601) {
		/* Use I/O segments on the BAT-starved 601. */
		mpc601_ioseg_add(pa, len);
		return;
	}
#endif /* PPC_OEA601 */

	/*
	 * If the caller wanted a bigger BAT than the hardware supports,
	 * split it into smaller BATs.
	 */
	if (len > BAT_BL_256M && (oeacpufeat & OEACPU_XBSEN) == 0) {
		u_int xn = BAT_BL_TO_SIZE(len) >> 28;
		while (xn-- > 0) {
			oea_iobat_add(pa, BAT_BL_256M);
			pa += 0x10000000;
		}
		return;
	}

	const register_t batl = BATL(pa, BAT_I|BAT_G, BAT_PP_RW);
	const register_t batu = BATU(pa, len, BAT_Vs);

	for (u_int j = 0; j < n; j++) {
		battable[i + j].batl = batl;
		battable[i + j].batu = batu;
	}

	/*
	 * Let's start loading the BAT registers.
	 */
	switch (z) {
	case 1:
		DBAT_SET(1, batl, batu);
		z = 2;
		break;
	case 2:
		DBAT_SET(2, batl, batu);
		z = 3;
		break;
	case 3:
		DBAT_SET(3, batl, batu);
		z = after_bat3;	/* no highbat, skip to end */
		break;
	case 4:
		DBAT_SET(4, batl, batu);
		z = 5;
		break;
	case 5:
		DBAT_SET(5, batl, batu);
		z = 6;
		break;
	case 6:
		DBAT_SET(6, batl, batu);
		z = 7;
		break;
	case 7:
		DBAT_SET(7, batl, batu);
		z = 8;
		break;
	default:
		break;
	}
}
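
/*
 * (A port's oea_batinit() arguments arrive here one range at a time;
 * e.g., with a hypothetical address, oea_batinit(0xf0000000,
 * BAT_BL_256M, 0) maps 256MB of I/O space at 0xf0000000 through the
 * next free DBAT.  DBAT0 is never handed out here: it is reserved for
 * the low-memory mapping loaded at the end of oea_batinit().)
 */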

void
oea_iobat_remove(paddr_t pa)
{
	const u_int i = BAT_VA2IDX(pa);

	if (!BAT_VA_MATCH_P(battable[i].batu, pa) ||
	    !BAT_VALID_P(battable[i].batu, PSL_PR))
		return;
	const int n =
	    __SHIFTOUT(battable[i].batu, (BAT_XBL|BAT_BL) & ~BAT_BL_8M) + 1;
	KASSERT((n & (n-1)) == 0);	/* power of 2 */
	KASSERT((i & (n-1)) == 0);	/* multiple of n */

	memset(&battable[i], 0, n*sizeof(battable[0]));

	const int maxbat = oeacpufeat & OEACPU_HIGHBAT ? 8 : 4;
	for (u_int k = 1; k < maxbat; k++) {
		register_t batu;
		switch (k) {
		case 1:
			batu = DBATU_GET(1);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				DBAT_RESET(1);
			break;
		case 2:
			batu = DBATU_GET(2);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				DBAT_RESET(2);
			break;
		case 3:
			batu = DBATU_GET(3);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				DBAT_RESET(3);
			break;
		case 4:
			batu = DBATU_GET(4);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				DBAT_RESET(4);
			break;
		case 5:
			batu = DBATU_GET(5);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				DBAT_RESET(5);
			break;
		case 6:
			batu = DBATU_GET(6);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				DBAT_RESET(6);
			break;
		case 7:
			batu = DBATU_GET(7);
			if (BAT_VA_MATCH_P(batu, pa) &&
			    BAT_VALID_P(batu, PSL_PR))
				DBAT_RESET(7);
			break;
		default:
			break;
		}
	}
}

void
oea_batinit(paddr_t pa, ...)
{
	struct mem_region *allmem, *availmem, *mp;
	register_t msr = mfmsr();
	va_list ap;
#ifdef PPC_OEA601
	unsigned int cpuvers;

	cpuvers = mfpvr() >> 16;
#endif /* PPC_OEA601 */

	/*
	 * We need to call this before zapping the BATs so that OF calls
	 * still work.
	 */
	mem_regions(&allmem, &availmem);

	/*
	 * Initialize BAT registers to unmapped so as not to generate
	 * overlapping mappings below.
	 *
	 * The 601's implementation differs in the Valid bit being
	 * situated in the lower BAT register, and in being a unified
	 * BAT only whose four entries are accessed through the
	 * IBAT[0-3] SPRs.
	 *
	 * Also, while the 601 does distinguish between supervisor/user
	 * protection keys, it does _not_ distinguish between validity
	 * in supervisor/user mode.
	 */
	if ((msr & (PSL_IR|PSL_DR)) == 0) {
#ifdef PPC_OEA601
		if (cpuvers == MPC601) {
			__asm volatile ("mtibatl 0,%0" :: "r"(0));
			__asm volatile ("mtibatl 1,%0" :: "r"(0));
			__asm volatile ("mtibatl 2,%0" :: "r"(0));
			__asm volatile ("mtibatl 3,%0" :: "r"(0));
		} else
#endif /* PPC_OEA601 */
		{
			DBAT_RESET(0); IBAT_RESET(0);
			DBAT_RESET(1); IBAT_RESET(1);
			DBAT_RESET(2); IBAT_RESET(2);
			DBAT_RESET(3); IBAT_RESET(3);
			if (oeacpufeat & OEACPU_HIGHBAT) {
				DBAT_RESET(4); IBAT_RESET(4);
				DBAT_RESET(5); IBAT_RESET(5);
				DBAT_RESET(6); IBAT_RESET(6);
				DBAT_RESET(7); IBAT_RESET(7);

				/*
				 * Change the first instruction to branch
				 * to dsitrap_fix_dbat6 (rewrite the BD
				 * displacement field, mask 0xfffc, of
				 * the existing conditional branch).
				 */
				dsitrap_fix_dbat4[0] &= ~0xfffc;
				dsitrap_fix_dbat4[0]
				    += (uintptr_t)dsitrap_fix_dbat6
				    - (uintptr_t)&dsitrap_fix_dbat4[0];

				/*
				 * Change the second instruction to branch
				 * to dsitrap_fix_dbat5 if bit 30 (aka
				 * bit 1) is true (0x419e0000 is the
				 * "bc 12,30" encoding).
				 */
				dsitrap_fix_dbat4[1] = 0x419e0000
				    + (uintptr_t)dsitrap_fix_dbat5
				    - (uintptr_t)&dsitrap_fix_dbat4[1];

				/*
				 * Change it to load dbat4 instead of
				 * dbat2 (mtspr DBAT4U,r30 / DBAT4L,r31).
				 */
				dsitrap_fix_dbat4[2] = 0x7fd88ba6;
				dsitrap_fix_dbat4[3] = 0x7ff98ba6;

				/*
				 * Change it to load dbat7 instead of
				 * dbat3 (mtspr DBAT7U,r30 / DBAT7L,r31).
				 */
				dsitrap_fix_dbat7[0] = 0x7fde8ba6;
				dsitrap_fix_dbat7[1] = 0x7fff8ba6;
			}
		}
	}

	/*
	 * Set up BAT to map physical memory.
	 */
#ifdef PPC_OEA601
	if (cpuvers == MPC601) {
		int i;

		/*
		 * Set up battable to map the lowest 256 MB area.
		 * Map the lowest 32 MB area via BAT[0-3];
		 * BAT[01] are fixed, BAT[23] are floating.
		 */
		for (i = 0; i < 32; i++) {
			battable[i].batl = BATL601(i << 23,
			    BAT601_BSM_8M, BAT601_V);
			battable[i].batu = BATU601(i << 23,
			    BAT601_M, BAT601_Ku, BAT601_PP_NONE);
		}
		__asm volatile ("mtibatu 0,%1; mtibatl 0,%0"
		    :: "r"(battable[0x00000000 >> 23].batl),
		       "r"(battable[0x00000000 >> 23].batu));
		__asm volatile ("mtibatu 1,%1; mtibatl 1,%0"
		    :: "r"(battable[0x00800000 >> 23].batl),
		       "r"(battable[0x00800000 >> 23].batu));
		__asm volatile ("mtibatu 2,%1; mtibatl 2,%0"
		    :: "r"(battable[0x01000000 >> 23].batl),
		       "r"(battable[0x01000000 >> 23].batu));
		__asm volatile ("mtibatu 3,%1; mtibatl 3,%0"
		    :: "r"(battable[0x01800000 >> 23].batl),
		       "r"(battable[0x01800000 >> 23].batu));
	}
#endif /* PPC_OEA601 */

	/*
	 * Now set up the other fixed BAT registers.
	 *
	 * Note that we still run in real mode, and the BAT
	 * registers were cleared above.
	 */

	/*
	 * Add any I/O BATs specified.
	 */
	va_start(ap, pa);
	while (pa != 0) {
		register_t len = va_arg(ap, register_t);
		oea_iobat_add(pa, len);
		pa = va_arg(ap, paddr_t);
	}
	va_end(ap);

	/*
	 * Set up battable to map all RAM regions.
	 */
#ifdef PPC_OEA601
	if (cpuvers == MPC601) {
		for (mp = allmem; mp->size; mp++) {
			paddr_t paddr = mp->start & 0xff800000;
			paddr_t end = mp->start + mp->size;

			do {
				u_int ix = paddr >> 23;

				battable[ix].batl =
				    BATL601(paddr, BAT601_BSM_8M, BAT601_V);
				battable[ix].batu =
				    BATU601(paddr, BAT601_M, BAT601_Ku,
					BAT601_PP_NONE);
				paddr += (1 << 23);
			} while (paddr < end);
		}
	} else
#endif
	{
		const register_t bat_inc = BAT_IDX2VA(1);
		for (mp = allmem; mp->size; mp++) {
			paddr_t paddr = mp->start & -bat_inc;
			paddr_t end = roundup2(mp->start + mp->size, bat_inc);

			/*
			 * If the next entries are adjacent, merge them
			 * into this one.
			 */
			while (mp[1].size && end == (mp[1].start & -bat_inc)) {
				mp++;
				end = roundup2(mp->start + mp->size, bat_inc);
			}

			while (paddr < end) {
				register_t bl = (oeacpufeat & OEACPU_XBSEN
				    ? BAT_BL_2G
				    : BAT_BL_256M);
				psize_t size = BAT_BL_TO_SIZE(bl);
				u_int n = BAT_VA2IDX(size);
				u_int i = BAT_VA2IDX(paddr);

				/*
				 * Shrink the block until it is naturally
				 * aligned at paddr and no longer runs
				 * past the end of the region.
				 */
				while ((paddr & (size - 1))
				    || paddr + size > end) {
					size >>= 1;
					bl = (bl >> 1) & (BAT_XBL|BAT_BL);
					n >>= 1;
				}

				KASSERT(size >= bat_inc);
				KASSERT(n >= 1);
				KASSERT(bl >= BAT_BL_8M);

				register_t batl = BATL(paddr, BAT_M, BAT_PP_RW);
				register_t batu = BATU(paddr, bl, BAT_Vs);

				for (; n-- > 0; i++) {
					battable[i].batl = batl;
					battable[i].batu = batu;
				}
				paddr += size;
			}
		}

		/*
		 * Set up BAT0 to only map the lowest area.
		 */
		__asm volatile ("mtibatl 0,%0; mtibatu 0,%1;"
		    "mtdbatl 0,%0; mtdbatu 0,%1;"
		    :: "r"(battable[0].batl), "r"(battable[0].batu));
	}
}
#endif /* PPC_OEA || PPC_OEA64_BRIDGE */

void
oea_install_extint(void (*handler)(void))
{
	extern int extint[], extsize[];
	extern int extint_call[];
	uintptr_t offset = (uintptr_t)handler - (uintptr_t)extint_call;
#ifdef PPC_HIGH_VEC
	const uintptr_t exc_exi_base = EXC_HIGHVEC + EXC_EXI;
#else
	const uintptr_t exc_exi_base = EXC_EXI;
#endif
	int omsr, msr;

#ifdef DIAGNOSTIC
	if (offset > 0x1ffffff)
		panic("install_extint: %p too far away (%#lx)", handler,
		    (unsigned long) offset);
#endif
	__asm volatile ("mfmsr %0; andi. %1,%0,%2; mtmsr %1"
	    : "=r" (omsr), "=r" (msr)
	    : "K" ((u_short)~PSL_EE));
	/*
	 * Patch the branch in extint_call: keep its opcode and AA/LK
	 * bits and splice in the new displacement to the handler.
	 */
	extint_call[0] = (extint_call[0] & 0xfc000003) | offset;
	__syncicache((void *)extint_call, sizeof extint_call[0]);
	memcpy((void *)exc_exi_base, extint, (size_t)extsize);
#ifdef PPC_OEA64_BRIDGE
	if ((oeacpufeat & OEACPU_64_BRIDGE) == 0) {
		for (int *ip = (int *)exc_exi_base;
		     (uintptr_t)ip <= exc_exi_base + (size_t)extsize;
		     ip++) {
			if ((ip[0] & MFMSR_MASK) == MFMSR
			    && (ip[1] & RLDICL_MASK) == RLDICL
			    && (ip[2] & MTMSRD_MASK) == MTMSRD) {
				*ip++ = NOP;
				*ip++ = NOP;
				ip[0] = NOP;
			} else if (*ip == RFID) {
				*ip = RFI;
			}
		}
	}
#endif
	__syncicache((void *)exc_exi_base, (size_t)extsize);

	__asm volatile ("mtmsr %0" :: "r"(omsr));
}
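
/*
 * (The EXC_EXI vector initially holds the generic trap handler copied
 * by the vector loop in oea_init(); oea_install_extint() overwrites it
 * with the real external-interrupt stub, either from oea_init() or
 * later during autoconf.)
 */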

/*
 * Machine dependent startup code.
 */
void
oea_startup(const char *model)
{
	uintptr_t sz;
	void *v;
	vaddr_t minaddr, maxaddr;
	char pbuf[9], mstr[128];

	KASSERT(curcpu() != NULL);
	KASSERT(lwp0.l_cpu != NULL);
	KASSERT(curcpu()->ci_idepth == -1);

	sz = round_page(MSGBUFSIZE);
#ifdef MSGBUFADDR
	v = (void *) MSGBUFADDR;
#else
	/*
	 * If the msgbuf is not in segment 0, allocate KVA for it and
	 * access it via mapped pages.  [This prevents unneeded BAT
	 * switches.]
	 */
	v = (void *) msgbuf_paddr;
	if (msgbuf_paddr + sz > SEGMENT_LENGTH) {
		u_int i;

		minaddr = 0;
		if (uvm_map(kernel_map, &minaddr, sz,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
		    UVM_INH_NONE, UVM_ADV_NORMAL, 0)) != 0)
			panic("startup: cannot allocate VM for msgbuf");
		v = (void *)minaddr;
		for (i = 0; i < sz; i += PAGE_SIZE) {
			pmap_kenter_pa(minaddr + i, msgbuf_paddr + i,
			    VM_PROT_READ|VM_PROT_WRITE, 0);
		}
		pmap_update(pmap_kernel());
	}
#endif
	initmsgbuf(v, sz);

	printf("%s%s", copyright, version);
	if (model != NULL)
		printf("Model: %s\n", model);
	cpu_identify(mstr, sizeof(mstr));
	cpu_setmodel("%s", mstr);

	format_bytes(pbuf, sizeof(pbuf), ctob((u_int)physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * Allocate away the pages that map to 0xDEA[CDE]xxxx, so that
	 * dereferencing a 0xdeadbeef-style poison pointer faults.  Do
	 * this after the bufpages are allocated in case they overlap,
	 * since it's not fatal if we can't allocate these.
	 */
	if (KERNEL_SR == 13 || KERNEL2_SR == 14) {
		int error;
		minaddr = 0xDEAC0000;
		error = uvm_map(kernel_map, &minaddr, 0x30000,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
		    UVM_ADV_NORMAL, UVM_FLAG_FIXED));
		if (error != 0 || minaddr != 0xDEAC0000)
			printf("oea_startup: failed to allocate DEAD "
			    "ZONE: error=%d\n", error);
	}

	minaddr = 0;

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false)));
	printf("avail memory = %s\n", pbuf);

#ifdef MULTIPROCESSOR
	kcpuset_create(&cpuset_info.cpus_running, true);
	kcpuset_create(&cpuset_info.cpus_hatched, true);
	kcpuset_create(&cpuset_info.cpus_paused, true);
	kcpuset_create(&cpuset_info.cpus_resumed, true);
	kcpuset_create(&cpuset_info.cpus_halted, true);

	kcpuset_set(cpuset_info.cpus_running, cpu_number());
#endif
}

/*
 * Crash dump handling.
 */
void
oea_dumpsys(void)
{
	printf("dumpsys: TBD\n");
}

/*
 * Convert kernel VA to physical address
 */
paddr_t
kvtop(void *addr)
{
	vaddr_t va;
	paddr_t pa;
	uintptr_t off;
	extern char end[];

	if (addr < (void *)end)
		return (paddr_t)addr;

	va = trunc_page((vaddr_t)addr);
	off = (uintptr_t)addr - va;

	if (pmap_extract(pmap_kernel(), va, &pa) == false) {
		/*printf("kvtop: zero page frame (va=0x%x)\n", addr);*/
		return (paddr_t)addr;
	}

	return (pa + off);
}

/*
 * Allocate vm space and map in the I/O address
 */
void *
mapiodev(paddr_t pa, psize_t len, bool prefetchable)
{
	paddr_t faddr;
	vaddr_t taddr, va;
	int off;

	faddr = trunc_page(pa);
	off = pa - faddr;
	len = round_page(off + len);
	va = taddr = uvm_km_alloc(kernel_map, len, 0, UVM_KMF_VAONLY);

	if (va == 0)
		return NULL;

	for (; len > 0; len -= PAGE_SIZE) {
		pmap_kenter_pa(taddr, faddr, VM_PROT_READ | VM_PROT_WRITE,
		    PMAP_NOCACHE | (prefetchable ? PMAP_MD_PREFETCHABLE : 0));
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	return (void *)(va + off);
}

void
unmapiodev(vaddr_t va, vsize_t len)
{
	paddr_t faddr;

	if (!va)
		return;

	faddr = trunc_page(va);
	len = round_page(va - faddr + len);

	pmap_kremove(faddr, len);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, faddr, len, UVM_KMF_VAONLY);
}

void
trap0(void *lr)
{
	panic("call to null-ptr from %p", lr);
}