/*	$NetBSD: pmap.h,v 1.6 2003/11/21 22:57:14 matt Exp $	*/

/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_POWERPC_OEA_PMAP_H_
#define	_POWERPC_OEA_PMAP_H_

#include <powerpc/oea/pte.h>

#ifndef _LOCORE
/*
 * Pmap structure: per-address-space MMU state.  The 32-bit OEA MMU
 * keeps sixteen segment register images per pmap; the 64-bit OEA MMU
 * uses an in-memory segment table instead.
 */
struct pmap {
#ifdef PPC_OEA64
	struct steg *pm_steg_table;		/* segment table pointer */
	/* XXX need way to track exec pages */
#endif
#ifdef PPC_OEA
	register_t pm_sr[16];			/* segments used in this pmap */
	int pm_exec[16];			/* counts of exec mappings */
#endif
	register_t pm_vsid;			/* VSID bits */
	int pm_refs;				/* ref count */
	struct pmap_statistics pm_stats;	/* pmap statistics */
	unsigned int pm_evictions;		/* pvo's not in page table */
#ifdef PPC_OEA64
	unsigned int pm_ste_evictions;		/* STEs not in segment table */
#endif
};

typedef	struct pmap *pmap_t;

#ifdef	_KERNEL
#include <sys/param.h>
#include <sys/systm.h>

#ifdef PPC_OEA
extern register_t iosrtable[];
#endif
extern int pmap_use_altivec;
extern struct pmap kernel_pmap_;
#define	pmap_kernel()	(&kernel_pmap_)

#define	pmap_clear_modify(pg)		(pmap_clear_bit((pg), PTE_CHG))
#define	pmap_clear_reference(pg)	(pmap_clear_bit((pg), PTE_REF))
#define	pmap_is_modified(pg)		(pmap_query_bit((pg), PTE_CHG))
#define	pmap_is_referenced(pg)		(pmap_query_bit((pg), PTE_REF))
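
/*
 * Illustrative example (pg stands for any struct vm_page pointer): the
 * MI modified/referenced interface reduces to queries and clears of the
 * PTE change (PTE_CHG) and reference (PTE_REF) bits, e.g.
 *
 *	if (pmap_is_modified(pg))		queries PTE_CHG on pg
 *		(void)pmap_clear_modify(pg);	clears PTE_CHG on pg
 */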

#define	pmap_phys_address(x)		(x)

#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
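
/*
 * Example (sketch): mapping statistics live in pm_stats and are read
 * through the accessors above, e.g. for the kernel pmap:
 *
 *	struct pmap *pm = pmap_kernel();
 *	long resident = pmap_resident_count(pm);
 *	long wired = pmap_wired_count(pm);
 */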

/* ARGSUSED */
static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

void pmap_bootstrap (vaddr_t, vaddr_t);
boolean_t pmap_extract (struct pmap *, vaddr_t, paddr_t *);
boolean_t pmap_query_bit (struct vm_page *, int);
boolean_t pmap_clear_bit (struct vm_page *, int);
void pmap_real_memory (paddr_t *, psize_t *);
void pmap_pinit (struct pmap *);
boolean_t pmap_pageidlezero (paddr_t);
void pmap_syncicache (paddr_t, psize_t);
#ifdef PPC_OEA64
vaddr_t pmap_setusr (vaddr_t);
vaddr_t pmap_unsetusr (void);
#endif

#define PMAP_NEED_PROCWR
void pmap_procwr(struct proc *, vaddr_t, size_t);

int pmap_pte_spill(struct pmap *, vaddr_t, boolean_t);

#define PMAP_NC			0x1000

#define PMAP_STEAL_MEMORY
static __inline paddr_t vtophys (vaddr_t);

/*
 * Alternate mapping hooks for pool pages.  Avoids thrashing the TLB.
 *
 * Note: This won't work if we have more memory than can be direct-mapped
 * VA==PA all at once.  But pmap_copy_page() and pmap_zero_page() will have
 * this problem, too.
 */
#ifndef PPC_OEA64
#define	PMAP_MAP_POOLPAGE(pa)	(pa)
#define	PMAP_UNMAP_POOLPAGE(pa)	(pa)
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) va)
#endif
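
/*
 * Sketch of the direct-mapped (32-bit OEA) pool page path described in
 * the comment above: the hooks are identity transforms, so a pool page's
 * kernel VA equals its physical address, e.g.
 *
 *	vaddr_t va = PMAP_MAP_POOLPAGE(pa);	yields va == pa
 *	paddr_t pa2 = PMAP_UNMAP_POOLPAGE(va);	yields pa2 == pa
 */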

static __inline paddr_t
vtophys(vaddr_t va)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa))
		return pa;
	KASSERT(0);
	return (paddr_t) -1;
}
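
/*
 * Example (sketch, assuming buf is a kernel virtual address already
 * mapped in the kernel pmap):
 *
 *	paddr_t pa = vtophys((vaddr_t)buf);
 *
 * If no mapping exists, vtophys() fires KASSERT(0) on DIAGNOSTIC
 * kernels and otherwise returns (paddr_t)-1.
 */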

#endif	/* _KERNEL */
#endif	/* _LOCORE */

#endif	/* _POWERPC_OEA_PMAP_H_ */