/*	$NetBSD: pmap_68k.h,v 1.11 2025/12/31 15:33:50 andvar Exp $	*/

/*-
 * Copyright (c) 2025 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)pmap.h      8.1 (Berkeley) 6/10/93
 */

#ifndef _M68K_PMAP_68K_H_
#define	_M68K_PMAP_68K_H_

#include <sys/rbtree.h>
#include <sys/queue.h>

#include <m68k/mmu_51.h>
#include <m68k/mmu_40.h>

#include <sys/kcore.h>
#include <m68k/kcore.h>

typedef unsigned int	pt_entry_t;

TAILQ_HEAD(pmap_ptpage_list, pmap_ptpage);
LIST_HEAD(pmap_pv_list, pv_entry);

struct pmap {
	struct pmap_table *pm_lev1map;	/* level 1 table */
	paddr_t            pm_lev1pa;	/* PA of level 1 table */
	unsigned int       pm_refcnt;	/* reference count */

	struct pmap_table *pm_pt_cache;	/* most recently used leaf table */

	/* Red-Black tree that contains the active tables. */
	struct rb_tree     pm_tables;	/* lev1map not in here */

	/* Page table pages for segment and leaf tables. */
	struct pmap_ptpage_list pm_ptpages[2];

	struct pmap_pv_list pm_pvlist;	/* all associated P->V entries */

	struct pmap_statistics pm_stats;/* statistics */
};

/*
 * One entry per P->V mapping of a managed page.
 *
 * N.B. We want this structure's size to be a multiple of 8 and its
 * instances aligned to 8 bytes, so that the lower 3 bits of the
 * pv_entry list head can be used for page attributes.
 */
struct pv_entry {
/* 0*/	struct pv_entry     *pv_next;	/* link on page list */
/* 4*/	LIST_ENTRY(pv_entry) pv_pmlist;	/* link on pmap list */
/*12*/	pmap_t               pv_pmap;	/* pmap that contains mapping */
/*16*/	vaddr_t              pv_vf;	/* virtual address + flags */
/*20*/	struct pmap_table   *pv_pt;	/* table that contains the PTE */
/*24*/
};
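
/*
 * A compile-time check of the size assumption above could read
 * (a sketch; __CTASSERT() comes from <sys/cdefs.h>):
 *
 *	__CTASSERT((sizeof(struct pv_entry) & 7) == 0);
 */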

/* Upper bits of pv_vf contain the virtual address */
#define	PV_VA(pv)	((pv)->pv_vf & ~PAGE_MASK)

/* Lower bits of pv_vf contain flags */
#define	PV_F_CI_VAC	__BIT(0)	/* mapping CI due to VAC alias */
#define	PV_F_CI_USR	__BIT(1)	/* mapping CI due to user request */
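
/*
 * Example (assuming 4KB pages): a user-requested cache-inhibited
 * mapping of VA 0x00803000 stores pv_vf == 0x00803002; PV_VA()
 * recovers 0x00803000, and (pv->pv_vf & PV_F_CI_USR) is non-zero.
 */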

/*
 * This describes an individual table used by the MMU.  Depending on
 * the MMU configuration, there may be more than one table per physical
 * page.
 *
 * For leaf (page) and inner segment tables, pt_st points to the
 * segment table one level up in the tree that maps it, and pt_stidx
 * is the index into that segment table.  pt_st also serves as a
 * proxy for whether or not the table has been inserted into the
 * table lookup tree.  For the level-1 table, pt_st is NULL and
 * that table is not inserted into the lookup tree.
 */
struct pmap_table {
	struct pmap_ptpage *pt_ptpage;
	pt_entry_t         *pt_entries;
	struct pmap_table  *pt_st;
	unsigned short      pt_holdcnt;
	unsigned short      pt_stidx;
	unsigned int        pt_key;
	union {
		LIST_ENTRY(pmap_table) pt_freelist;
		struct rb_node         pt_node;
	};
};
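
/*
 * Illustrative sketch (not part of this header's API): because pt_st
 * doubles as the "inserted into pm_tables" proxy, a membership test
 * reduces to a NULL check:
 *
 *	static inline bool
 *	pmap_table_in_tree_p(const struct pmap_table *pt)
 *	{
 *		return pt->pt_st != NULL;
 *	}
 */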

/*
 * This describes a page table page, which contains one or more MMU tables.
 * It's variable length, and the table descriptors are allocated along
 * with it.
 */
struct pmap_ptpage {
	TAILQ_ENTRY(pmap_ptpage)  ptp_list;
	LIST_HEAD(, pmap_table)   ptp_freelist;
	struct vm_page           *ptp_pg;
	unsigned int              ptp_vpagenum : 23,
	                          ptp_freecnt : 8,
	                          ptp_segtab : 1;
	struct pmap_table         ptp_tables[];
};
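
/*
 * Since ptp_tables[] is a flexible array member, a descriptor for a
 * page holding "n" MMU tables would be sized with offsetof(), e.g.
 * (a sketch, assuming allocation via <sys/kmem.h>):
 *
 *	ptp = kmem_zalloc(offsetof(struct pmap_ptpage, ptp_tables[n]),
 *	    KM_SLEEP);
 */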

/*
 * This structure describes regions to be statically allocated / mapped by
 * pmap_bootstrap1().  This is used for the fixed regions that everyone
 * gets (kernel text / data / bss / symbols, lwp0 u-area, msgbuf address,
 * etc.) as well as any additional static regions (device areas, etc.) that
 * need to be mapped (with KVA space) or reserved (because they're mapped
 * with TT registers).
 *
 * Some notes:
 *
 * - Virtual addresses are allocated and stored in the variable pointed
 *   to by pmbm_vaddr_ptr, **unless** the PMBM_F_KEEPOUT or PMBM_F_FIXEDVA
 *   flags are set, in which case the pmbm_vaddr field indicates a range
 *   that kernel virtual space should keep out of (PMBM_F_KEEPOUT - usually
 *   because it's mapped by Transparent Translation registers) or that is
 *   used for a fixed-VA special mapping (PMBM_F_FIXEDVA).
 *
 * - If the PMBM_F_VAONLY flag is set, only VA space will be allocated;
 *   no mapping will be entered for it.
 *
 * N.B. PMBM_F_KEEPOUT VA regions are assumed to lie beyond the normal
 * kernel virtual address space.  The maximum kernel virtual address will
 * be clamped to ensure that it never grows into the lowest of these regions.
 *
 * pmap_bootstrap1() makes no effort to ensure there are PTs backing any
 * PMBM_F_FIXEDVA range.  It is assumed that any fixed VA mapping will
 * occur within an already-provisioned VA range.
 *
 * All regions will be rounded / aligned to page boundaries.
 *
 * This list is terminated by placing a (vaddr_t)-1 in the pmbm_vaddr
 * field.
 *
 * N.B. IF YOU CHANGE THIS STRUCTURE, AUDIT ALL DECLS OF machine_bootmap[].
 */
struct pmap_bootmap {
	union {
		vaddr_t		pmbm_vaddr;
		vaddr_t *	pmbm_vaddr_ptr;
	};
	paddr_t			pmbm_paddr;
	size_t			pmbm_size;
	int			pmbm_flags;
};

#define	PMBM_F_VAONLY	__BIT(0)
#define	PMBM_F_FIXEDVA	__BIT(1)
#define	PMBM_F_KEEPOUT	__BIT(2)
#define	PMBM_F_CI	__BIT(3)	/* cache-inhibited mapping */
#define	PMBM_F_RO	__BIT(4)	/* read-only mapping */
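
/*
 * A hypothetical machine_bootmap[] fragment, for illustration only
 * (real entries, addresses, and sizes are machine-dependent):
 *
 *	vaddr_t intiobase;
 *
 *	struct pmap_bootmap machine_bootmap[] = {
 *		{ .pmbm_vaddr_ptr = &intiobase,
 *		  .pmbm_paddr = 0x00d00000,
 *		  .pmbm_size  = 0x00040000,
 *		  .pmbm_flags = PMBM_F_CI },
 *		{ .pmbm_vaddr = (vaddr_t)-1 },		<- terminator
 *	};
 */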

/*
 * Abstract definitions for PTE bits / fields.  C code will compile-time-
 * assert the equivalencies that we assume.
 *
 * N.B. assumes exclusive use of short descriptors on 68851.
 */
#define	PTE_VALID	PTE40_RESIDENT	/* == DT51_PAGE */
#define	PTE_WP		PTE40_W		/* == PTE51_WP */
#define	PTE_M		PTE40_M		/* == PTE51_M */
#define	PTE_U		PTE40_U		/* == PTE51_U */
#define	PTE_PVLIST	PTE40_G		/* unused on '51, don't use PFLUSHxN */
#define	PTE_WIRED	PTE40_UR	/* unused on '51 */

/*
 * PTE40_CM overlaps with PTE51_CI and PTE51_L (which we don't use).
 */
#define	PTE_CMASK	PTE40_CM

/*
 * Critical bits that, when changed (see pmap_changebit()), require
 * invalidation of the ATC.
 */
#define	PTE_CRIT_BITS	(PTE_WP | PTE_CMASK)

/*
 * Root Pointer attributes for Supervisor and User modes.
 *
 * Supervisor:
 * - No index limit (Lower limit == 0)
 * - Points to Short format descriptor table.
 * - Shared Globally
 *
 * User:
 * - No index limit (Lower limit == 0)
 * - Points to Short format descriptor table.
 */
#define	MMU51_SRP_BITS	(DTE51_LOWER | DTE51_SG | DT51_SHORT)
#define	MMU51_CRP_BITS	(DTE51_LOWER |            DT51_SHORT)

/*
 * Our abstract definition of a "segment" is "that which points to the
 * leaf tables".  On the 2-level configuration, that's the level 1 table,
 * and on the 3-level configuration, that's the level 2 table.
 *
 * This is the logical address layout:
 *
 * 2-level 4KB/page: l1,l2,page    == 10,10,12	(HP MMU compatible)
 * 2-level 8KB/page: l1,l2,page    ==  8,11,13
 * 3-level 4KB/page: l1,l2,l3,page == 7,7,6,12
 * 3-level 8KB/page: l1,l2,l3,page == 7,7,5,13
 *
 * The 2-level l2 size is chosen to match the number of PTEs that fit
 * in one page, so that each segment table entry maps exactly one page
 * of PTEs.
 *
 * The 3-level layout is defined by the 68040/68060 hardware, and is not
 * configurable (other than chosen page size).  If '851 / '030 chooses
 * to use the 3-level layout, it is specifically configured to be compatible
 * with the 68040.
 */
							/*  8KB /  4KB  */
#define	LA2L_L2_NBITS	(PGSHIFT - 2)			/*   11 /   10  */
#define	LA2L_L2_COUNT	__BIT(LA2L_L2_NBITS)		/* 2048 / 1024  */
#define	LA2L_L2_SHIFT	PGSHIFT				/*   13 /   12  */
#define	LA2L_L1_NBITS	(32 - LA2L_L2_NBITS - PGSHIFT)	/*    8 /   10  */
#define	LA2L_L1_COUNT	__BIT(LA2L_L1_NBITS)		/*  256 / 1024  */
#define	LA2L_L1_SHIFT	(LA2L_L2_NBITS + PGSHIFT)	/*   24 /   22  */

#define	LA2L_L1_MASK	(__BITS(0,(LA2L_L1_NBITS - 1)) << LA2L_L1_SHIFT)
#define	LA2L_L2_MASK	(__BITS(0,(LA2L_L2_NBITS - 1)) << LA2L_L2_SHIFT)

#define	LA2L_RI(va)	__SHIFTOUT((va), LA2L_L1_MASK)	/* root index */
#define	LA2L_PGI(va)	__SHIFTOUT((va), LA2L_L2_MASK)	/* page index */
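
/*
 * Worked example (4KB pages, 2-level, so L1 shift 22 / L2 shift 12):
 * for va == 0x00803000,
 *
 *	LA2L_RI(va)  == 0x00803000 >> 22            == 2
 *	LA2L_PGI(va) == (0x00803000 >> 12) & 0x3ff  == 3
 *
 * i.e. root table entry 2, page table entry 3, page offset 0.
 */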

#define	MMU51_TCR_BITS	(TCR51_E | TCR51_SRE |				\
			 __SHIFTIN(PGSHIFT, TCR51_PS) |			\
			 __SHIFTIN(LA2L_L1_NBITS, TCR51_TIA) |		\
			 __SHIFTIN(LA2L_L2_NBITS, TCR51_TIB))

#define	MMU51_3L_TCR_BITS (TCR51_E | TCR51_SRE |			\
			__SHIFTIN(PGSHIFT, TCR51_PS) |			\
			__SHIFTIN(LA40_L1_NBITS, TCR51_TIA) |		\
			__SHIFTIN(LA40_L2_NBITS, TCR51_TIB) |		\
			__SHIFTIN(LA40_L3_NBITS, TCR51_TIC))

#define	MMU40_TCR_BITS	(TCR40_E |					\
			 __SHIFTIN(PGSHIFT - 12, TCR40_P))

/* SEG1SHIFT3L is for the "upper" segment on the 3-level configuration */
#define	SEGSHIFT2L	(LA2L_L1_SHIFT)			/*   24 /   22  */
#define	SEGSHIFT3L	(LA40_L2_SHIFT)			/*   18 /   18  */
#define	SEG1SHIFT3L	(LA40_L1_SHIFT)			/*   25 /   25  */

/* NBSEG13L is for the "upper" segment on the 3-level configuration */
#define	NBSEG2L		__BIT(SEGSHIFT2L)
#define	NBSEG3L		__BIT(SEGSHIFT3L)
#define	NBSEG13L	__BIT(SEG1SHIFT3L)

#define	SEGOFSET2L	(NBSEG2L - 1)
#define	SEGOFSET3L	(NBSEG3L - 1)
#define	SEG1OFSET3L	(NBSEG13L - 1)

#define	pmap_trunc_seg_2L(va)	(((vaddr_t)(va)) & ~SEGOFSET2L)
#define	pmap_round_seg_2L(va)	(pmap_trunc_seg_2L((vaddr_t)(va) + SEGOFSET2L))
#define	pmap_seg_offset_2L(va)	(((vaddr_t)(va)) & SEGOFSET2L)

#define	pmap_trunc_seg_3L(va)	(((vaddr_t)(va)) & ~SEGOFSET3L)
#define	pmap_round_seg_3L(va)	(pmap_trunc_seg_3L((vaddr_t)(va) + SEGOFSET3L))
#define	pmap_seg_offset_3L(va)	(((vaddr_t)(va)) & SEGOFSET3L)

#define	pmap_trunc_seg1_3L(va)	(((vaddr_t)(va)) & ~SEG1OFSET3L)
#define	pmap_round_seg1_3L(va)	(pmap_trunc_seg1_3L((vaddr_t)(va) + SEG1OFSET3L))
#define	pmap_seg1_offset_3L(va)	(((vaddr_t)(va)) & SEG1OFSET3L)
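
/*
 * Worked example (4KB pages, 2-level, so NBSEG2L == 4MB):
 *
 *	pmap_trunc_seg_2L(0x00455000)  == 0x00400000
 *	pmap_round_seg_2L(0x00455000)  == 0x00800000
 *	pmap_seg_offset_2L(0x00455000) == 0x00055000
 */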

/*
 * pmap-specific data store in the vm_page structure.
 *
 * We keep the U/M attrs in the lower 2 bits of the list head
 * pointer.  This is possible because both the U and M bits are
 * adjacent; we just need to shift them down 3 bit positions.
 *
 * Assumes that PV entries will be 8-byte aligned (see the pv_entry
 * definition above); the allocator guarantees this for us.
 */
#define	__HAVE_VM_PAGE_MD
struct vm_page_md {
	uintptr_t pvh_listx;		/* pv_entry list + attrs */
};

#define	PVH_UM_SHIFT	3
#define	PVH_UM_MASK	__BITS(0,1)
#define	PVH_CI		__BIT(2)
#define	PVH_ATTR_MASK	(PVH_UM_MASK | PVH_CI)
#define	PVH_PV_MASK	(~PVH_ATTR_MASK)

#define	VM_MDPAGE_INIT(pg)					\
do {								\
	(pg)->mdpage.pvh_listx = 0;				\
} while (/*CONSTCOND*/0)

#define	VM_MDPAGE_PVS(pg)					\
	((struct pv_entry *)((pg)->mdpage.pvh_listx & (uintptr_t)PVH_PV_MASK))

#define	VM_MDPAGE_HEAD_PVP(pg)					\
	((struct pv_entry **)&(pg)->mdpage.pvh_listx)

#define	VM_MDPAGE_SETPVP(pvp, pv)				\
do {								\
	/*							\
	 * The page attributes are in the lower three bits	\
	 * of the first PV pointer.  Rather than comparing	\
	 * the address and branching, we just always preserve	\
	 * what might be there (either the attribute bits or	\
	 * zero bits).						\
	 */							\
	*(pvp) = (struct pv_entry *)				\
	    ((uintptr_t)(pv) |					\
	     (((uintptr_t)(*(pvp))) & (uintptr_t)PVH_ATTR_MASK));\
} while (/*CONSTCOND*/0)

#define	VM_MDPAGE_UM(pg)					\
	(((pg)->mdpage.pvh_listx & PVH_UM_MASK) << PVH_UM_SHIFT)

#define	VM_MDPAGE_ADD_UM(pg, a)					\
do {								\
	(pg)->mdpage.pvh_listx |=				\
	    ((a) >> PVH_UM_SHIFT) & PVH_UM_MASK;		\
} while (/*CONSTCOND*/0)

#define	VM_MDPAGE_SET_UM(pg, v)					\
do {								\
	(pg)->mdpage.pvh_listx =				\
	    ((pg)->mdpage.pvh_listx & ~PVH_UM_MASK) |		\
	    (((v) >> PVH_UM_SHIFT) & PVH_UM_MASK);		\
} while (/*CONSTCOND*/0)

#define	VM_MDPAGE_SET_CI(pg)					\
do {								\
	(pg)->mdpage.pvh_listx |= PVH_CI;			\
} while (/*CONSTCOND*/0)

#define	VM_MDPAGE_CLR_CI(pg)					\
do {								\
	(pg)->mdpage.pvh_listx &= ~PVH_CI;			\
} while (/*CONSTCOND*/0)

#define	VM_MDPAGE_CI_P(pg)					\
	((pg)->mdpage.pvh_listx & PVH_CI)
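
/*
 * Encoding sketch: with the first pv_entry at (a hypothetical,
 * 8-byte-aligned) 0x12345678 and the page's U attribute cached
 * (PTE_U is bit 3, hence PVH_UM_SHIFT):
 *
 *	pvh_listx         == 0x12345679
 *	VM_MDPAGE_PVS(pg) == (struct pv_entry *)0x12345678
 *	VM_MDPAGE_UM(pg)  == PTE_U
 */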

bool	pmap_testbit(struct vm_page *, pt_entry_t);
#define	pmap_is_referenced(pg)					\
	((VM_MDPAGE_UM(pg) & PTE_U) || pmap_testbit((pg), PTE_U))
#define	pmap_is_modified(pg)					\
	((VM_MDPAGE_UM(pg) & PTE_M) || pmap_testbit((pg), PTE_M))

bool	pmap_changebit(struct vm_page *, pt_entry_t, pt_entry_t);
#define	pmap_clear_reference(pg)				\
	pmap_changebit((pg), 0, (pt_entry_t)~PTE_U)
#define	pmap_clear_modify(pg)					\
	pmap_changebit((pg), 0, (pt_entry_t)~PTE_M)
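
/*
 * As the wrappers above imply, pmap_changebit(pg, set, mask)
 * recomputes each mapping's PTE as (pte & mask) | set; clearing a
 * bit therefore passes set == 0 and mask == ~bit.
 */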

#define	pmap_update(pmap)		__nothing
#define	pmap_copy(dp, sp, da, l, sa)	__nothing

#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	PMAP_GROWKERNEL			/* enable pmap_growkernel() */

void	pmap_procwr(struct proc *, vaddr_t, size_t);
#define	PMAP_NEED_PROCWR

/*
 * pmap_bootstrap1() is called before the MMU is turned on.
 * pmap_bootstrap2() is called after.
 */
paddr_t	pmap_bootstrap1(paddr_t/*nextpa*/, paddr_t/*reloff*/);
void *	pmap_bootstrap2(void);
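
/*
 * Call-sequence sketch (assumes, per the parameter naming, that
 * pmap_bootstrap1() returns the updated "nextpa"):
 *
 *	nextpa = pmap_bootstrap1(nextpa, reloff);
 *	... turn on the MMU ...
 *	(void) pmap_bootstrap2();
 */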

/*
 * Variant of pmap_extract() that returns additional information about
 * the mapping.  Used by bus_dma(9).
 */
bool	pmap_extract_info(pmap_t, vaddr_t, paddr_t *, int *);

/*
 * Functions exported for compatibility with the Hibler pmap, where
 * these are needed by other shared m68k code.
 *
 * XXX Clean this up eventually.
 */
pt_entry_t *		pmap_kernel_pte(vaddr_t);
#define	kvtopte(va)	pmap_kernel_pte(va)

paddr_t		vtophys(vaddr_t);

extern char *	vmmap;
extern void *	msgbufaddr;

/* Support functions for HP MMU. */
void	pmap_init_vac(size_t);
void	pmap_prefer(vaddr_t, vaddr_t *, int);
/* PMAP_PREFER() defined in <machine/pmap.h> on machines where it's needed. */

/* Kernel crash dump support. */
phys_ram_seg_t *	pmap_init_kcore_hdr(cpu_kcore_hdr_t *);

/*
 * pmap_bootstrap1() may need to relocate global references, and perform
 * VA <-> PA conversions.  These macros facilitate these conversions, and
 * can be overridden in <machine/pmap.h> before including <m68k/pmap_68k.h>
 * if needed.
 *
 * The first two macros are specifically for converting addresses within
 * the confines of pmap_bootstrap1().  We may be running with the MMU off
 * (and either VA==PA or VA!=PA) or with the MMU on with some mappings.
 * The default ones are suitable for the "MMU off" case with the relocation
 * offset passed in the "reloff" variable.
 *
 * - PMAP_BOOTSTRAP_RELOC_GLOB() -- relocate a global reference in order
 *   to access it during bootstrap.
 *
 * - PMAP_BOOTSTRAP_RELOC_PA() -- relocate a physical address in order to
 *   access it during bootstrap.
 *
 * The next two macros are intended to convert kernel virtual <-> physical
 * addresses that will be used in the context of the running kernel once
 * the MMU is enabled and running on the kernel's ultimate mappings:
 *
 * - PMAP_BOOTSTRAP_VA_TO_PA() -- convert a kernel virtual address to
 *   a physical address using linear relocation.
 *
 * - PMAP_BOOTSTRAP_PA_TO_VA() -- and vice versa.
 */
#ifndef PMAP_BOOTSTRAP_RELOC_GLOB
#define	PMAP_BOOTSTRAP_RELOC_GLOB(va)					\
	((((vaddr_t)(va)) - VM_MIN_KERNEL_ADDRESS) + reloff)
#endif

#ifndef PMAP_BOOTSTRAP_RELOC_PA
#define	PMAP_BOOTSTRAP_RELOC_PA(pa)					\
	((vaddr_t)(pa))
#endif

#ifndef PMAP_BOOTSTRAP_VA_TO_PA
#define	PMAP_BOOTSTRAP_VA_TO_PA(va)					\
	((((vaddr_t)(va)) - VM_MIN_KERNEL_ADDRESS) + reloff)
#endif

#ifndef PMAP_BOOTSTRAP_PA_TO_VA
#define	PMAP_BOOTSTRAP_PA_TO_VA(pa)					\
	(VM_MIN_KERNEL_ADDRESS + (((paddr_t)(pa)) - reloff))
#endif
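
/*
 * Usage sketch inside pmap_bootstrap1() (names are hypothetical):
 * update a kernel global while running with the MMU off, storing
 * the VA it will have once the MMU is on:
 *
 *	extern pt_entry_t *Sysmap;
 *	pt_entry_t **sysmap_p =
 *	    (pt_entry_t **)PMAP_BOOTSTRAP_RELOC_GLOB(&Sysmap);
 *	*sysmap_p = (pt_entry_t *)PMAP_BOOTSTRAP_PA_TO_VA(kptpa);
 */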

#endif /* _M68K_PMAP_68K_H_ */