/*	$NetBSD: pmap.h,v 1.39 2023/12/15 09:42:33 rin Exp $	*/

/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_POWERPC_OEA_PMAP_H_
#define	_POWERPC_OEA_PMAP_H_

#ifdef _LOCORE
#error use assym.h instead
#endif

#ifdef _MODULE
#error this file should not be included by loadable kernel modules
#endif

#ifdef _KERNEL_OPT
#include "opt_ppcarch.h"
#include "opt_modular.h"
#endif
#include <powerpc/oea/pte.h>

#define	__HAVE_PMAP_PV_TRACK
#include <uvm/pmap/pmap_pvt.h>

/*
 * Pmap stuff
 */
struct pmap {
#ifdef PPC_OEA64
	struct steg *pm_steg_table;		/* segment table pointer */
	/* XXX need way to track exec pages */
#endif

#if defined(PPC_OEA) || defined (PPC_OEA64_BRIDGE)
	register_t pm_sr[16];			/* segments used in this pmap */
	int pm_exec[16];			/* counts of exec mappings */
#endif
	register_t pm_vsid;			/* VSID bits */
	int pm_refs;				/* ref count */
	struct pmap_statistics pm_stats;	/* pmap statistics */
	unsigned int pm_evictions;		/* PVOs not in page table */

#ifdef PPC_OEA64
	unsigned int pm_ste_evictions;
#endif
};

struct pmap_ops {
	int (*pmapop_pte_spill)(struct pmap *, vaddr_t, bool);
	void (*pmapop_real_memory)(paddr_t *, psize_t *);
	void (*pmapop_init)(void);
	void (*pmapop_virtual_space)(vaddr_t *, vaddr_t *);
	pmap_t (*pmapop_create)(void);
	void (*pmapop_reference)(pmap_t);
	void (*pmapop_destroy)(pmap_t);
	void (*pmapop_copy)(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t);
	void (*pmapop_update)(pmap_t);
	int (*pmapop_enter)(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
	void (*pmapop_remove)(pmap_t, vaddr_t, vaddr_t);
	void (*pmapop_kenter_pa)(vaddr_t, paddr_t, vm_prot_t, u_int);
	void (*pmapop_kremove)(vaddr_t, vsize_t);
	bool (*pmapop_extract)(pmap_t, vaddr_t, paddr_t *);

	void (*pmapop_protect)(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
	void (*pmapop_unwire)(pmap_t, vaddr_t);
	void (*pmapop_page_protect)(struct vm_page *, vm_prot_t);
	void (*pmapop_pv_protect)(paddr_t, vm_prot_t);
	bool (*pmapop_query_bit)(struct vm_page *, int);
	bool (*pmapop_clear_bit)(struct vm_page *, int);

	void (*pmapop_activate)(struct lwp *);
	void (*pmapop_deactivate)(struct lwp *);

	void (*pmapop_pinit)(pmap_t);
	void (*pmapop_procwr)(struct proc *, vaddr_t, size_t);

	void (*pmapop_pte_print)(volatile struct pte *);
	void (*pmapop_pteg_check)(void);
	void (*pmapop_print_mmuregs)(void);
	void (*pmapop_print_pte)(pmap_t, vaddr_t);
	void (*pmapop_pteg_dist)(void);
	void (*pmapop_pvo_verify)(void);
	vaddr_t (*pmapop_steal_memory)(vsize_t, vaddr_t *, vaddr_t *);
	void (*pmapop_bootstrap)(paddr_t, paddr_t);
	void (*pmapop_bootstrap1)(paddr_t, paddr_t);
	void (*pmapop_bootstrap2)(void);
};

#ifdef	_KERNEL
#include <sys/cdefs.h>
__BEGIN_DECLS
#include <sys/param.h>
#include <sys/systm.h>

/*
 * For OEA and OEA64_BRIDGE, we guarantee that physical addresses below
 * USER_ADDR (== 3GB, which is below VM_MIN_KERNEL_ADDRESS) are
 * direct-mapped.
 */
#if defined(PPC_OEA) || defined(PPC_OEA64_BRIDGE)
#define	PMAP_DIRECT_MAPPED_SR	(USER_SR - 1)
#define	PMAP_DIRECT_MAPPED_LEN \
    ((vaddr_t)SEGMENT_LENGTH * (PMAP_DIRECT_MAPPED_SR + 1))
#endif
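
/*
 * Worked example (a sketch; the exact constants live in other headers of
 * this port): with the usual values USER_SR == 12 and SEGMENT_LENGTH ==
 * 0x10000000 (256MB per segment register), PMAP_DIRECT_MAPPED_SR is 11
 * and PMAP_DIRECT_MAPPED_LEN is 12 * 256MB == 0xC0000000 (3GB), matching
 * the comment above.
 */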

#if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
extern register_t iosrtable[];
#endif
extern int pmap_use_altivec;

#define pmap_clear_modify(pg)		(pmap_clear_bit((pg), PTE_CHG))
#define	pmap_clear_reference(pg)	(pmap_clear_bit((pg), PTE_REF))
#define	pmap_is_modified(pg)		(pmap_query_bit((pg), PTE_CHG))
#define	pmap_is_referenced(pg)		(pmap_query_bit((pg), PTE_REF))

#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

/* ARGSUSED */
static __inline bool
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
	return false;
}

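/*
 * If the kernel is configured for more than one (or none) of the OEA MMU
 * variants, the concrete pmap implementation cannot be chosen at compile
 * time and must be selected at run time through the pmap_ops switch below.
 */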
#if (defined(PPC_OEA) + defined(PPC_OEA64) + defined(PPC_OEA64_BRIDGE)) != 1
#define	PMAP_NEEDS_FIXUP
#endif

extern volatile struct pteg *pmap_pteg_table;
extern unsigned int pmap_pteg_cnt;
extern unsigned int pmap_pteg_mask;

void pmap_bootstrap(vaddr_t, vaddr_t);
void pmap_bootstrap1(vaddr_t, vaddr_t);
void pmap_bootstrap2(void);
bool pmap_extract(pmap_t, vaddr_t, paddr_t *);
bool pmap_query_bit(struct vm_page *, int);
bool pmap_clear_bit(struct vm_page *, int);
void pmap_real_memory(paddr_t *, psize_t *);
void pmap_procwr(struct proc *, vaddr_t, size_t);
int pmap_pte_spill(pmap_t, vaddr_t, bool);
int pmap_ste_spill(pmap_t, vaddr_t, bool);
void pmap_pinit(pmap_t);

#ifdef PPC_OEA601
bool	pmap_extract_ioseg601(vaddr_t, paddr_t *);
#endif /* PPC_OEA601 */
#ifdef PPC_OEA
bool	pmap_extract_battable(vaddr_t, paddr_t *);
#endif /* PPC_OEA */

u_int powerpc_mmap_flags(paddr_t);
#define POWERPC_MMAP_FLAG_MASK	0xf
#define POWERPC_MMAP_FLAG_PREFETCHABLE	0x1
#define POWERPC_MMAP_FLAG_CACHEABLE	0x2

#define pmap_phys_address(ppn)		(ppn & ~POWERPC_MMAP_FLAG_MASK)
#define pmap_mmap_flags(ppn)		powerpc_mmap_flags(ppn)
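
/*
 * Example (a sketch, not code from this file): the "ppn" cookie handed to
 * pmap_phys_address() and pmap_mmap_flags() is a page-aligned physical
 * address with MD flags OR'd into its low bits, which machine-dependent
 * mmap code might build like this (foo_pa is a hypothetical, page-aligned
 * physical address):
 *
 *	paddr_t cookie = foo_pa | POWERPC_MMAP_FLAG_PREFETCHABLE;
 *	paddr_t pa    = pmap_phys_address(cookie);	-- recovers foo_pa
 *	u_int   flags = pmap_mmap_flags(cookie);	-- recovers the flags
 */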

static __inline paddr_t vtophys (vaddr_t);

/*
 * Alternate mapping hooks for pool pages.  Avoids thrashing the TLB.
 *
 * Note: This won't work if we have more memory than can be direct-mapped
 * VA==PA all at once.  But pmap_copy_page() and pmap_zero_page() will have
 * this problem, too.
 */
#if !defined(PPC_OEA64)
#define	PMAP_MAP_POOLPAGE(pa)	(pa)
#define	PMAP_UNMAP_POOLPAGE(pa)	(pa)
#define POOL_VTOPHYS(va)	vtophys((vaddr_t) va)

#define	PMAP_ALLOC_POOLPAGE(flags)	pmap_alloc_poolpage(flags)
struct vm_page *pmap_alloc_poolpage(int);
#endif
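
/*
 * A minimal sketch of how callers can use these hooks when the direct map
 * applies (everything other than the macros above is illustrative):
 *
 *	struct vm_page *pg = PMAP_ALLOC_POOLPAGE(0);
 *	vaddr_t va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
 *
 * Because VA == PA here, no kernel_map entry or PTE needs to be set up,
 * and PMAP_UNMAP_POOLPAGE() simply hands the physical address back.
 */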

static __inline paddr_t
vtophys(vaddr_t va)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa))
		return pa;
	KASSERTMSG(0, "vtophys: pmap_extract of %#"PRIxVADDR" failed", va);
	return (paddr_t) -1;
}


#ifdef PMAP_NEEDS_FIXUP
extern const struct pmap_ops *pmapops;
extern const struct pmap_ops pmap32_ops;
extern const struct pmap_ops pmap64_ops;
extern const struct pmap_ops pmap64bridge_ops;

static __inline void
pmap_setup32(void)
{
	pmapops = &pmap32_ops;
}

static __inline void
pmap_setup64(void)
{
	pmapops = &pmap64_ops;
}

static __inline void
pmap_setup64bridge(void)
{
	pmapops = &pmap64bridge_ops;
}
#endif
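
/*
 * A minimal sketch (hypothetical MD bootstrap code, not part of this
 * header) of selecting the implementation when PMAP_NEEDS_FIXUP is
 * defined: early machine-dependent setup picks the ops table matching
 * the MMU before the first pmap call, e.g.
 *
 *	if (cpu_has_64bit_bridge_mmu)		-- hypothetical predicate
 *		pmap_setup64bridge();
 *	else
 *		pmap_setup32();
 */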

bool pmap_pageidlezero (paddr_t);
void pmap_syncicache (paddr_t, psize_t);
#ifdef PPC_OEA64
vaddr_t pmap_setusr (vaddr_t);
vaddr_t pmap_unsetusr (void);
#endif

#ifdef PPC_OEA64_BRIDGE
int pmap_setup_segment0_map(int use_large_pages, ...);
#endif

#define PMAP_MD_PREFETCHABLE		0x2000000
#define PMAP_STEAL_MEMORY
#define PMAP_NEED_PROCWR

void pmap_zero_page(paddr_t);
void pmap_copy_page(paddr_t, paddr_t);

LIST_HEAD(pvo_head, pvo_entry);

#define	__HAVE_VM_PAGE_MD

struct pmap_page {
	unsigned int pp_attrs;
	struct pvo_head pp_pvoh;
#ifdef MODULAR
	uintptr_t pp_dummy[3];
#endif
};

struct vm_page_md {
	struct pmap_page mdpg_pp;
#define	mdpg_attrs	mdpg_pp.pp_attrs
#define	mdpg_pvoh	mdpg_pp.pp_pvoh
#ifdef MODULAR
#define	mdpg_dummy	mdpg_pp.pp_dummy
#endif
};

#define	VM_MDPAGE_INIT(pg) do {			\
	(pg)->mdpage.mdpg_attrs = 0;		\
	LIST_INIT(&(pg)->mdpage.mdpg_pvoh);	\
} while (/*CONSTCOND*/0)

__END_DECLS
#endif	/* _KERNEL */

#endif	/* _POWERPC_OEA_PMAP_H_ */