/*	$NetBSD: pmap.h,v 1.76 2022/01/04 05:39:12 skrll Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/10/93
 */

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 *
 * This code is derived from software contributed to Berkeley by
 * Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/10/93
 */

#ifndef	_MIPS_PMAP_H_
#define	_MIPS_PMAP_H_

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#include "opt_uvmhist.h"
#include "opt_cputype.h"
#endif

#include <sys/evcnt.h>
#include <sys/kcpuset.h>
#include <sys/kernhist.h>

#ifndef __BSD_PTENTRY_T__
#define	__BSD_PTENTRY_T__
typedef uint32_t pt_entry_t;
#define	PRIxPTE		PRIx32
#endif /* __BSD_PTENTRY_T__ */

#define	KERNEL_PID			0

#if defined(__PMAP_PRIVATE)
struct vm_page_md;

#include <mips/locore.h>
#include <mips/cache.h>

#define	PMAP_VIRTUAL_CACHE_ALIASES
#define	PMAP_INVALID_SEGTAB_ADDRESS	((pmap_segtab_t *)NULL)
#define	PMAP_TLB_NEED_SHOOTDOWN		1
#define	PMAP_TLB_FLUSH_ASID_ON_RESET	false
#if UPAGES > 1
#define	PMAP_TLB_WIRED_UPAGES		MIPS3_TLB_WIRED_UPAGES
#endif
#define	pmap_md_tlb_asid_max()		(MIPS_TLB_NUM_PIDS - 1)
#ifdef MULTIPROCESSOR
#define	PMAP_NO_PV_UNCACHED
#endif

/*
 * We need the pmap_segtabs to be aligned on MIPS*R2 so that we can use the
 * EXT/INS instructions on their addresses.
 */
#if (MIPS32R2 + MIPS64R2 + MIPS64R2_RMIXL) > 0
#define	PMAP_SEGTAB_ALIGN __aligned(sizeof(void *)*NSEGPG) __section(".data1")
#endif
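
/*
 * Illustrative sketch, not part of the original header: with the alignment
 * above, the low log2(sizeof(void *) * NSEGPG) bits of a segtab base
 * pointer are always zero, so the address of entry "idx" can be formed
 * with a single MIPS*R2 INS (bit-field insert) instead of a shift-and-add.
 * The C equivalent is a plain OR:
 */
#if 0	/* example only; never compiled */
static inline void **
example_segtab_entry(void **segtab_base, unsigned int idx)
{
	/* valid because idx * sizeof(void *) never overlaps the base's bits */
	return (void **)((uintptr_t)segtab_base |
	    ((uintptr_t)idx * sizeof(void *)));
}
#endif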

#include <uvm/uvm_physseg.h>

void	pmap_md_init(void);
void	pmap_md_icache_sync_all(void);
void	pmap_md_icache_sync_range_index(vaddr_t, vsize_t);
void	pmap_md_page_syncicache(struct vm_page_md *, const kcpuset_t *);
bool	pmap_md_vca_add(struct vm_page_md *, vaddr_t, pt_entry_t *);
void	pmap_md_vca_clean(struct vm_page_md *, int);
void	pmap_md_vca_remove(struct vm_page *, vaddr_t, bool, bool);
bool	pmap_md_ok_to_steal_p(const uvm_physseg_t, size_t);
bool	pmap_md_tlb_check_entry(void *, vaddr_t, tlb_asid_t, pt_entry_t);

static inline bool
pmap_md_virtual_cache_aliasing_p(void)
{
	return MIPS_CACHE_VIRTUAL_ALIAS;
}

static inline vsize_t
pmap_md_cache_prefer_mask(void)
{
	return MIPS_HAS_R4K_MMU ? mips_cache_info.mci_cache_prefer_mask : 0;
}

static inline void
pmap_md_xtab_activate(struct pmap *pm, struct lwp *l)
{

	/* nothing */
}

static inline void
pmap_md_xtab_deactivate(struct pmap *pm)
{

	/* nothing */
}

#endif /* __PMAP_PRIVATE */

struct tlbmask {
	vaddr_t	tlb_hi;
#ifdef __mips_o32
	uint32_t tlb_lo0;
	uint32_t tlb_lo1;
#else
	uint64_t tlb_lo0;
	uint64_t tlb_lo1;
#endif
	uint32_t tlb_mask;
};

#ifdef _LP64
#define	PMAP_SEGTABSIZE		NSEGPG
#else
#define	PMAP_SEGTABSIZE		(1 << (31 - SEGSHIFT))
#endif

#include <uvm/uvm_pmap.h>
#include <uvm/pmap/vmpagemd.h>
#include <uvm/pmap/pmap.h>
#include <uvm/pmap/pmap_pvt.h>
#include <uvm/pmap/pmap_tlb.h>
#include <uvm/pmap/pmap_synci.h>

#ifdef _KERNEL
/*
 * Select CCA to use for unmanaged pages.
 */
#define	PMAP_CCA_FOR_PA(pa)	CCA_UNCACHED		/* uncached */

#if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64)
#define	PGC_NOCACHE	0x4000000000000000ULL
#define	PGC_PREFETCH	0x2000000000000000ULL
#endif

#if defined(__PMAP_PRIVATE)
#include <mips/pte.h>
#endif

/*
 * The user address space is 2GB (0x0 - 0x80000000).
 * User programs are laid out in memory as follows:
 *			address
 *	USRTEXT		0x00001000
 *	USRDATA		USRTEXT + text_size
 *	USRSTACK	0x7FFFFFFF
 *
 * The user address space is mapped using a two-level structure where
 * virtual address bits 30..22 are used to index into a segment table which
 * points to a page worth of PTEs (a 4096-byte page can hold 1024 PTEs).
 * Bits 21..12 are then used to index a PTE which describes a page within
 * a segment.
 *
 * The wired entries in the TLB will contain the following:
 *	0-1	(UPAGES)	for curproc user struct and kernel stack.
 *
 * Note: The kernel doesn't use the same data structures as user programs.
 * All the PTE entries are stored in a single array in Sysmap, which is
 * dynamically allocated at boot time.
 */
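
/*
 * Illustrative sketch, not part of the original header: how the two-level
 * structure described above splits a user virtual address, assuming 4 KB
 * pages and the 32-bit layout.  Bits 30..22 give a 9-bit segment-table
 * index, bits 21..12 a 10-bit PTE index, and the low 12 bits are the page
 * offset.
 */
#if 0	/* example only; never compiled */
static inline void
example_user_va_split(vaddr_t va, u_int *seg_idx, u_int *pte_idx)
{
	*seg_idx = (va >> 22) & 0x1ff;	/* bits 30..22: segment table index */
	*pte_idx = (va >> 12) & 0x3ff;	/* bits 21..12: PTE index in the page */
}
#endif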

#define	pmap_phys_address(x)	mips_ptob(x)

/*
 *	Bootstrap the system enough to run with virtual memory.
 */
void	pmap_bootstrap(void);
void	pmap_md_alloc_ephemeral_address_space(struct cpu_info *);
void	pmap_procwr(struct proc *, vaddr_t, size_t);
#define	PMAP_NEED_PROCWR

/*
 * pmap_prefer() helps reduce virtual-coherency exceptions in
 * the virtually-indexed cache on mips3 CPUs.
 */
#ifdef MIPS3_PLUS
#define	PMAP_PREFER(pa, va, sz, td)	pmap_prefer((pa), (va), (sz), (td))
void	pmap_prefer(vaddr_t, vaddr_t *, vsize_t, int);
#endif /* MIPS3_PLUS */
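
/*
 * Illustrative sketch, not the actual implementation: the idea behind
 * pmap_prefer() is to nudge the candidate mapping address so that it has
 * the same cache "color" as the object offset, i.e. the two agree in the
 * bits covered by pmap_md_cache_prefer_mask(); aliasing mappings then land
 * in the same cache sets.  Size handling is omitted here; "td" selects
 * top-down vs. bottom-up adjustment.
 */
#if 0	/* example only; never compiled */
static inline void
example_prefer(vaddr_t foff, vaddr_t *vap, int td)
{
	const vsize_t mask = pmap_md_cache_prefer_mask();
	vaddr_t va = *vap;
	vsize_t d = (foff - va) & mask;		/* distance to matching color */

	if (d != 0) {
		if (td)
			va -= (mask + 1) - d;	/* previous matching address */
		else
			va += d;		/* next matching address */
	}
	*vap = va;
}
#endif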

#define	PMAP_ENABLE_PMAP_KMPAGE	/* enable the PMAP_KMPAGE flag */

// these use register_t so we can pass XKPHYS addresses to them on N32
bool	pmap_md_direct_mapped_vaddr_p(register_t);
paddr_t	pmap_md_direct_mapped_vaddr_to_paddr(register_t);
bool	pmap_md_io_vaddr_p(vaddr_t);

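/*
 * Illustrative sketch, not part of the original header: on N32 the
 * kernel's vaddr_t is only 32 bits wide, but XKPHYS direct-map addresses
 * (the 64-bit segment starting at 0x8000000000000000) do not fit in it.
 * register_t is 64 bits even on N32, so the prototypes above can accept
 * such addresses.  Hypothetical usage:
 */
#if 0	/* example only; never compiled */
static inline paddr_t
example_direct_va_to_pa(register_t dva)
{
	KASSERT(pmap_md_direct_mapped_vaddr_p(dva));
	return pmap_md_direct_mapped_vaddr_to_paddr(dva);
}
#endif
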
/*
 * Alternate mapping hooks for pool pages.  Avoids thrashing the TLB.
 */
vaddr_t pmap_md_map_poolpage(paddr_t, size_t);
paddr_t pmap_md_unmap_poolpage(vaddr_t, size_t);
struct vm_page *pmap_md_alloc_poolpage(int);

/*
 * Other hooks for the pool allocator.
 */
paddr_t	pmap_md_pool_vtophys(vaddr_t);
vaddr_t	pmap_md_pool_phystov(paddr_t);
#define	POOL_VTOPHYS(va)	pmap_md_pool_vtophys((vaddr_t)va)
#define	POOL_PHYSTOV(pa)	pmap_md_pool_phystov((paddr_t)pa)
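
/*
 * Illustrative sketch, not the actual implementation: pool pages are
 * expected to come from a direct-mapped segment (KSEG0, or XKPHYS on
 * _LP64), so the VA<->PA conversion is plain arithmetic and consumes no
 * TLB entries, which is what "avoids thrashing the TLB" above refers to.
 */
#if 0	/* example only; never compiled */
static inline paddr_t
example_pool_vtophys(vaddr_t va)
{
	/* KSEG0 maps the first 512 MB of physical memory at 0x80000000. */
	KASSERT(MIPS_KSEG0_P(va));
	return MIPS_KSEG0_TO_PHYS(va);
}
#endif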

#ifdef MIPS64_SB1
/* uncached accesses are bad; all accesses should be cached (and coherent) */
#undef PMAP_PAGEIDLEZERO
#define	PMAP_PAGEIDLEZERO(pa)   (pmap_zero_page(pa), true)

int sbmips_cca_for_pa(paddr_t);

#undef PMAP_CCA_FOR_PA
#define	PMAP_CCA_FOR_PA(pa)	sbmips_cca_for_pa(pa)
#endif

#ifdef __HAVE_PMAP_PV_TRACK
struct pmap_page {
        struct vm_page_md       pp_md;
};

#define PMAP_PAGE_TO_MD(ppage)  (&((ppage)->pp_md))
#endif

#endif	/* _KERNEL */
#endif	/* _MIPS_PMAP_H_ */