/*	$NetBSD: pmap.h,v 1.4 2003/04/09 22:37:32 matt Exp $	*/

/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_POWERPC_OEA_PMAP_H_
#define	_POWERPC_OEA_PMAP_H_

#include <powerpc/oea/pte.h>

#ifndef _LOCORE
/*
 * Pmap stuff
 */
struct pmap {
	register_t pm_sr[16];		/* segment registers used in this pmap */
	int pm_refs;			/* reference count */
	struct pmap_statistics pm_stats; /* pmap statistics */
	unsigned int pm_evictions;	/* number of PVOs not in the page table */
};

typedef struct pmap *pmap_t;
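
/*
 * Illustrative sketch, not part of the original header: on 32-bit OEA
 * CPUs the 4GB effective address space is divided into sixteen 256MB
 * segments, and pm_sr[] holds the per-pmap value for each segment
 * register.  The hypothetical helper below (kept under #if 0, so it is
 * never compiled) shows how the top four address bits select an entry.
 */
#if 0
static __inline register_t
example_va_to_sr(const struct pmap *pm, vaddr_t va)
{
	/* The top four bits of the effective address index pm_sr[]. */
	return pm->pm_sr[va >> 28];
}
#endif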

#ifdef	_KERNEL
#include <sys/systm.h>

extern register_t iosrtable[];
extern int pmap_use_altivec;
extern struct pmap kernel_pmap_;
#define	pmap_kernel()	(&kernel_pmap_)

#define	pmap_clear_modify(pg)		(pmap_clear_bit((pg), PTE_CHG))
#define	pmap_clear_reference(pg)	(pmap_clear_bit((pg), PTE_REF))
#define	pmap_is_modified(pg)		(pmap_query_bit((pg), PTE_CHG))
#define	pmap_is_referenced(pg)		(pmap_query_bit((pg), PTE_REF))
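
/*
 * Illustrative sketch, not part of the original interface: the MI
 * modified/referenced hooks above are implemented by querying and
 * clearing the OEA PTE changed (PTE_CHG) and referenced (PTE_REF) bits
 * across all mappings of a page.  The hypothetical helper below (kept
 * under #if 0, never compiled) shows how a pageout-style caller might
 * consume them.
 */
#if 0
static __inline boolean_t
example_page_needs_cleaning(struct vm_page *pg)
{
	boolean_t dirty;

	dirty = pmap_is_modified(pg);	/* does any mapping have PTE_CHG set? */
	if (dirty)
		pmap_clear_modify(pg);	/* clear PTE_CHG so new writes are noticed */
	pmap_clear_reference(pg);	/* clear PTE_REF to re-age the page */
	return dirty;
}
#endif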

#define	pmap_phys_address(x)		(x)

#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

/* ARGSUSED */
static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

void pmap_bootstrap (vaddr_t kernelstart, vaddr_t kernelend);
boolean_t pmap_extract (struct pmap *, vaddr_t, paddr_t *);
boolean_t pmap_query_bit (struct vm_page *, int);
boolean_t pmap_clear_bit (struct vm_page *, int);
void pmap_real_memory (paddr_t *, psize_t *);
void pmap_pinit (struct pmap *);
boolean_t pmap_pageidlezero (paddr_t);
void pmap_syncicache (paddr_t, psize_t);

#define	PMAP_NEED_PROCWR
void pmap_procwr (struct proc *, vaddr_t, size_t);

int pmap_pte_spill(struct pmap *, vaddr_t);

#define	PMAP_NC		0x1000

#define	PMAP_STEAL_MEMORY
static __inline paddr_t vtophys (vaddr_t);

#if 1
/*
 * Alternate mapping hooks for pool pages.  Avoids thrashing the TLB.
 *
 * Note: This won't work if we have more memory than can be direct-mapped
 * VA==PA all at once.  But pmap_copy_page() and pmap_zero_page() will have
 * this problem, too.
 */
#define	PMAP_MAP_POOLPAGE(pa)	(pa)
#define	PMAP_UNMAP_POOLPAGE(pa)	(pa)
#endif
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t)(va))
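
/*
 * Illustrative sketch, not part of the original header: the pool page
 * hooks above are identity macros because pool pages are taken from the
 * direct-mapped (VA==PA) region, so "mapping" one costs no TLB entry
 * and no pmap_enter() call.  Hypothetical usage (kept under #if 0,
 * never compiled):
 */
#if 0
static __inline void *
example_map_pool_page(paddr_t pa)
{
	/* Direct map: the returned VA is numerically equal to the PA. */
	return (void *)(uintptr_t)PMAP_MAP_POOLPAGE(pa);
}

static __inline paddr_t
example_unmap_pool_page(void *va)
{
	/* The reverse translation is just as trivial. */
	return PMAP_UNMAP_POOLPAGE((paddr_t)(uintptr_t)va);
}
#endif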


static __inline paddr_t
vtophys(vaddr_t va)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa))
		return pa;
	KASSERT(0);
	return (paddr_t) -1;
}
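
/*
 * Illustrative sketch, not part of the original header: vtophys() is a
 * thin wrapper around pmap_extract() on the kernel pmap that asserts
 * the address is actually mapped.  A hypothetical caller needing the
 * physical address of a kernel buffer (e.g. to program a DMA engine)
 * could use it as below (kept under #if 0, never compiled).
 */
#if 0
static __inline paddr_t
example_kva_to_busaddr(void *kva)
{
	/* Valid only for addresses mapped in the kernel pmap. */
	return vtophys((vaddr_t)kva);
}
#endif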

#endif	/* _KERNEL */
#endif	/* _LOCORE */

#endif	/* _POWERPC_OEA_PMAP_H_ */