Home | History | Annotate | Line # | Download | only in oea
      1  1.39       rin /*	$NetBSD: pmap.h,v 1.39 2023/12/15 09:42:33 rin Exp $	*/
      2   1.1      matt 
      3   1.1      matt /*-
      4   1.1      matt  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
      5   1.1      matt  * Copyright (C) 1995, 1996 TooLs GmbH.
      6   1.1      matt  * All rights reserved.
      7   1.1      matt  *
      8   1.1      matt  * Redistribution and use in source and binary forms, with or without
      9   1.1      matt  * modification, are permitted provided that the following conditions
     10   1.1      matt  * are met:
     11   1.1      matt  * 1. Redistributions of source code must retain the above copyright
     12   1.1      matt  *    notice, this list of conditions and the following disclaimer.
     13   1.1      matt  * 2. Redistributions in binary form must reproduce the above copyright
     14   1.1      matt  *    notice, this list of conditions and the following disclaimer in the
     15   1.1      matt  *    documentation and/or other materials provided with the distribution.
     16   1.1      matt  * 3. All advertising materials mentioning features or use of this software
     17   1.1      matt  *    must display the following acknowledgement:
     18   1.1      matt  *	This product includes software developed by TooLs GmbH.
     19   1.1      matt  * 4. The name of TooLs GmbH may not be used to endorse or promote products
     20   1.1      matt  *    derived from this software without specific prior written permission.
     21   1.1      matt  *
     22   1.1      matt  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
     23   1.1      matt  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     24   1.1      matt  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     25   1.1      matt  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     26   1.1      matt  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     27   1.1      matt  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
     28   1.1      matt  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
     29   1.1      matt  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
     30   1.1      matt  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
     31   1.1      matt  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     32   1.1      matt  */
     33   1.1      matt 
     34   1.1      matt #ifndef	_POWERPC_OEA_PMAP_H_
     35   1.1      matt #define	_POWERPC_OEA_PMAP_H_
     36   1.1      matt 
     37  1.38     skrll #ifdef _LOCORE
     38  1.24      matt #error use assym.h instead
     39  1.24      matt #endif
     40  1.24      matt 
     41  1.31       rin #ifdef _MODULE
     42  1.24      matt #error this file should not be included by loadable kernel modules
     43  1.24      matt #endif
     44  1.24      matt 
     45  1.15        he #ifdef _KERNEL_OPT
     46  1.12   garbled #include "opt_ppcarch.h"
     47  1.32       rin #include "opt_modular.h"
     48  1.15        he #endif
     49   1.1      matt #include <powerpc/oea/pte.h>
     50   1.1      matt 
     51  1.37       rin #define	__HAVE_PMAP_PV_TRACK
     52  1.37       rin #include <uvm/pmap/pmap_pvt.h>
     53  1.37       rin 
     54   1.1      matt /*
     55   1.1      matt  * Pmap stuff
     56   1.1      matt  */
struct pmap {
#ifdef PPC_OEA64
	struct steg *pm_steg_table;		/* segment table pointer */
	/* XXX need way to track exec pages */
#endif

#if defined(PPC_OEA) || defined (PPC_OEA64_BRIDGE)
	register_t pm_sr[16];			/* segments used in this pmap */
	int pm_exec[16];			/* counts of exec mappings */
#endif
	register_t pm_vsid;			/* VSID bits */
	int pm_refs;				/* ref count */
	struct pmap_statistics pm_stats;	/* pmap statistics */
	unsigned int pm_evictions;		/* pvo's not in page table */

#ifdef PPC_OEA64
	unsigned int pm_ste_evictions;		/* STEs evicted from segment
						   table (OEA64 only) */
#endif
};
     76   1.1      matt 
/*
 * Operations vector for the pmap implementation.  When more than one of
 * PPC_OEA / PPC_OEA64 / PPC_OEA64_BRIDGE is configured into the kernel
 * (PMAP_NEEDS_FIXUP), the global pmapops pointer is set to one of
 * pmap32_ops / pmap64_ops / pmap64bridge_ops via pmap_setup32() et al.
 */
struct pmap_ops {
	/* PTE/mapping management */
	int (*pmapop_pte_spill)(struct pmap *, vaddr_t, bool);
	void (*pmapop_real_memory)(paddr_t *, psize_t *);
	void (*pmapop_init)(void);
	void (*pmapop_virtual_space)(vaddr_t *, vaddr_t *);
	pmap_t (*pmapop_create)(void);
	void (*pmapop_reference)(pmap_t);
	void (*pmapop_destroy)(pmap_t);
	void (*pmapop_copy)(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t);
	void (*pmapop_update)(pmap_t);
	int (*pmapop_enter)(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
	void (*pmapop_remove)(pmap_t, vaddr_t, vaddr_t);
	void (*pmapop_kenter_pa)(vaddr_t, paddr_t, vm_prot_t, u_int);
	void (*pmapop_kremove)(vaddr_t, vsize_t);
	bool (*pmapop_extract)(pmap_t, vaddr_t, paddr_t *);

	/* protection and referenced/modified attribute handling */
	void (*pmapop_protect)(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
	void (*pmapop_unwire)(pmap_t, vaddr_t);
	void (*pmapop_page_protect)(struct vm_page *, vm_prot_t);
	void (*pmapop_pv_protect)(paddr_t, vm_prot_t);
	bool (*pmapop_query_bit)(struct vm_page *, int);
	bool (*pmapop_clear_bit)(struct vm_page *, int);

	/* context (de)activation */
	void (*pmapop_activate)(struct lwp *);
	void (*pmapop_deactivate)(struct lwp *);

	void (*pmapop_pinit)(pmap_t);
	void (*pmapop_procwr)(struct proc *, vaddr_t, size_t);

	/* debugging helpers */
	void (*pmapop_pte_print)(volatile struct pte *);
	void (*pmapop_pteg_check)(void);
	void (*pmapop_print_mmuregs)(void);
	void (*pmapop_print_pte)(pmap_t, vaddr_t);
	void (*pmapop_pteg_dist)(void);
	void (*pmapop_pvo_verify)(void);

	/* early-boot setup */
	vaddr_t (*pmapop_steal_memory)(vsize_t, vaddr_t *, vaddr_t *);
	void (*pmapop_bootstrap)(paddr_t, paddr_t);
	void (*pmapop_bootstrap1)(paddr_t, paddr_t);
	void (*pmapop_bootstrap2)(void);
};
    117  1.12   garbled 
    118   1.1      matt #ifdef	_KERNEL
    119  1.12   garbled #include <sys/cdefs.h>
    120  1.12   garbled __BEGIN_DECLS
    121   1.6      matt #include <sys/param.h>
    122   1.4      matt #include <sys/systm.h>
    123   1.4      matt 
    124  1.39       rin /*
    125  1.39       rin  * For OEA and OEA64_BRIDGE, we guarantee that pa below USER_ADDR
    126  1.39       rin  * (== 3GB < VM_MIN_KERNEL_ADDRESS) is direct-mapped.
    127  1.39       rin  */
    128  1.39       rin #if defined(PPC_OEA) || defined(PPC_OEA64_BRIDGE)
    129  1.39       rin #define	PMAP_DIRECT_MAPPED_SR	(USER_SR - 1)
    130  1.39       rin #define	PMAP_DIRECT_MAPPED_LEN \
    131  1.39       rin     ((vaddr_t)SEGMENT_LENGTH * (PMAP_DIRECT_MAPPED_SR + 1))
    132  1.39       rin #endif
    133  1.39       rin 
    134   1.9   sanjayl #if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
    135   1.2      matt extern register_t iosrtable[];
    136   1.6      matt #endif
    137   1.1      matt extern int pmap_use_altivec;
    138   1.1      matt 
/*
 * Referenced/modified page attributes are tracked with the PTE_REF and
 * PTE_CHG bits through pmap_query_bit()/pmap_clear_bit().
 */
#define pmap_clear_modify(pg)		(pmap_clear_bit((pg), PTE_CHG))
#define	pmap_clear_reference(pg)	(pmap_clear_bit((pg), PTE_REF))
#define	pmap_is_modified(pg)		(pmap_query_bit((pg), PTE_CHG))
#define	pmap_is_referenced(pg)		(pmap_query_bit((pg), PTE_REF))

/* Statistics accessors, backed by pm_stats in struct pmap. */
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
    146   1.1      matt 
/* ARGSUSED */
static __inline bool
pmap_remove_all(struct pmap *pmap)
{

	/*
	 * No bulk-removal work is performed for this pmap; returning
	 * false reports that no mappings were torn down here.
	 */
	return false;
}
    154   1.1      matt 
    155  1.13      matt #if (defined(PPC_OEA) + defined(PPC_OEA64) + defined(PPC_OEA64_BRIDGE)) != 1
    156  1.21      matt #define	PMAP_NEEDS_FIXUP
    157   1.6      matt #endif
    158   1.1      matt 
    159  1.27      matt extern volatile struct pteg *pmap_pteg_table;
    160  1.27      matt extern unsigned int pmap_pteg_cnt;
    161  1.27      matt extern unsigned int pmap_pteg_mask;
    162  1.27      matt 
    163  1.12   garbled void pmap_bootstrap(vaddr_t, vaddr_t);
    164  1.33   thorpej void pmap_bootstrap1(vaddr_t, vaddr_t);
    165  1.33   thorpej void pmap_bootstrap2(void);
    166  1.12   garbled bool pmap_extract(pmap_t, vaddr_t, paddr_t *);
    167  1.12   garbled bool pmap_query_bit(struct vm_page *, int);
    168  1.12   garbled bool pmap_clear_bit(struct vm_page *, int);
    169  1.12   garbled void pmap_real_memory(paddr_t *, psize_t *);
    170   1.5       chs void pmap_procwr(struct proc *, vaddr_t, size_t);
    171  1.12   garbled int pmap_pte_spill(pmap_t, vaddr_t, bool);
    172  1.28      matt int pmap_ste_spill(pmap_t, vaddr_t, bool);
    173  1.12   garbled void pmap_pinit(pmap_t);
    174   1.1      matt 
    175  1.35   thorpej #ifdef PPC_OEA601
    176  1.35   thorpej bool	pmap_extract_ioseg601(vaddr_t, paddr_t *);
    177  1.35   thorpej #endif /* PPC_OEA601 */
    178  1.35   thorpej #ifdef PPC_OEA
    179  1.35   thorpej bool	pmap_extract_battable(vaddr_t, paddr_t *);
    180  1.35   thorpej #endif /* PPC_OEA */
    181  1.35   thorpej 
    182  1.22  macallan u_int powerpc_mmap_flags(paddr_t);
    183  1.22  macallan #define POWERPC_MMAP_FLAG_MASK	0xf
    184  1.22  macallan #define POWERPC_MMAP_FLAG_PREFETCHABLE	0x1
    185  1.22  macallan #define POWERPC_MMAP_FLAG_CACHEABLE	0x2
    186  1.22  macallan 
    187  1.22  macallan #define pmap_phys_address(ppn)		(ppn & ~POWERPC_MMAP_FLAG_MASK)
    188  1.22  macallan #define pmap_mmap_flags(ppn)		powerpc_mmap_flags(ppn)
    189  1.22  macallan 
    190  1.29  christos static __inline paddr_t vtophys (vaddr_t);
    191   1.4      matt 
    192   1.1      matt /*
    193   1.1      matt  * Alternate mapping hooks for pool pages.  Avoids thrashing the TLB.
    194   1.1      matt  *
    195   1.1      matt  * Note: This won't work if we have more memory than can be direct-mapped
    196   1.1      matt  * VA==PA all at once.  But pmap_copy_page() and pmap_zero_page() will have
    197   1.1      matt  * this problem, too.
    198   1.1      matt  */
    199  1.39       rin #if !defined(PPC_OEA64)
    200   1.1      matt #define	PMAP_MAP_POOLPAGE(pa)	(pa)
    201   1.1      matt #define	PMAP_UNMAP_POOLPAGE(pa)	(pa)
    202   1.4      matt #define POOL_VTOPHYS(va)	vtophys((vaddr_t) va)
    203  1.39       rin 
    204  1.39       rin #define	PMAP_ALLOC_POOLPAGE(flags)	pmap_alloc_poolpage(flags)
    205  1.39       rin struct vm_page *pmap_alloc_poolpage(int);
    206   1.6      matt #endif
    207   1.1      matt 
    208  1.29  christos static __inline paddr_t
    209   1.1      matt vtophys(vaddr_t va)
    210   1.1      matt {
    211   1.1      matt 	paddr_t pa;
    212   1.1      matt 
    213   1.1      matt 	if (pmap_extract(pmap_kernel(), va, &pa))
    214   1.1      matt 		return pa;
    215  1.26       jym 	KASSERTMSG(0, "vtophys: pmap_extract of %#"PRIxVADDR" failed", va);
    216   1.4      matt 	return (paddr_t) -1;
    217   1.1      matt }
    218   1.1      matt 
    219  1.21      matt 
#ifdef PMAP_NEEDS_FIXUP
/*
 * More than one OEA pmap variant is compiled into this kernel; the
 * implementation in effect is selected at run time through the pmapops
 * operations vector.
 */
extern const struct pmap_ops *pmapops;
extern const struct pmap_ops pmap32_ops;
extern const struct pmap_ops pmap64_ops;
extern const struct pmap_ops pmap64bridge_ops;

/* Select the 32-bit OEA pmap implementation. */
static __inline void
pmap_setup32(void)
{
	pmapops = &pmap32_ops;
}

/* Select the 64-bit OEA pmap implementation. */
static __inline void
pmap_setup64(void)
{
	pmapops = &pmap64_ops;
}

/* Select the 64-bit "bridge" pmap implementation. */
static __inline void
pmap_setup64bridge(void)
{
	pmapops = &pmap64bridge_ops;
}
#endif
    244  1.12   garbled 
    245  1.12   garbled bool pmap_pageidlezero (paddr_t);
    246  1.12   garbled void pmap_syncicache (paddr_t, psize_t);
    247  1.12   garbled #ifdef PPC_OEA64
    248  1.12   garbled vaddr_t pmap_setusr (vaddr_t);
    249  1.12   garbled vaddr_t pmap_unsetusr (void);
    250  1.12   garbled #endif
    251  1.12   garbled 
    252  1.12   garbled #ifdef PPC_OEA64_BRIDGE
    253  1.12   garbled int pmap_setup_segment0_map(int use_large_pages, ...);
    254  1.12   garbled #endif
    255  1.12   garbled 
    256  1.22  macallan #define PMAP_MD_PREFETCHABLE		0x2000000
    257  1.12   garbled #define PMAP_STEAL_MEMORY
    258  1.12   garbled #define PMAP_NEED_PROCWR
    259  1.12   garbled 
    260  1.12   garbled void pmap_zero_page(paddr_t);
    261  1.12   garbled void pmap_copy_page(paddr_t, paddr_t);
    262  1.12   garbled 
    263  1.19  uebayasi LIST_HEAD(pvo_head, pvo_entry);
    264  1.19  uebayasi 
    265  1.19  uebayasi #define	__HAVE_VM_PAGE_MD
    266  1.19  uebayasi 
/* Per-physical-page pmap state (also used by __HAVE_PMAP_PV_TRACK). */
struct pmap_page {
	unsigned int pp_attrs;		/* page attribute bits */
	struct pvo_head pp_pvoh;	/* list of pvo_entry's for this page */
#ifdef MODULAR
	/*
	 * NOTE(review): padding compiled in only for MODULAR kernels,
	 * presumably to keep the structure size independent of the pmap
	 * configuration seen by modules -- confirm against module ABI.
	 */
	uintptr_t pp_dummy[3];
#endif
};
    274  1.36  riastrad 
/*
 * Machine-dependent per-vm_page data (__HAVE_VM_PAGE_MD): wraps a
 * struct pmap_page and provides the traditional mdpg_* field names.
 */
struct vm_page_md {
	struct pmap_page mdpg_pp;
#define	mdpg_attrs	mdpg_pp.pp_attrs
#define	mdpg_pvoh	mdpg_pp.pp_pvoh
#ifdef MODULAR
#define	mdpg_dummy	mdpg_pp.pp_dummy
#endif
};
    283  1.19  uebayasi 
/*
 * Initialize the machine-dependent part of a vm_page: clear the
 * attribute bits and empty the pvo list.  (pp_dummy, when present,
 * is not initialized here.)
 */
#define	VM_MDPAGE_INIT(pg) do {			\
	(pg)->mdpage.mdpg_attrs = 0;		\
	LIST_INIT(&(pg)->mdpage.mdpg_pvoh);	\
} while (/*CONSTCOND*/0)
    288  1.19  uebayasi 
    289  1.12   garbled __END_DECLS
    290   1.1      matt #endif	/* _KERNEL */
    291   1.1      matt 
    292   1.1      matt #endif	/* _POWERPC_OEA_PMAP_H_ */
    293