/*	$NetBSD: pmap.h,v 1.33.4.3 2002/06/20 03:37:56 nathanw Exp $	*/

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	7.6 (Berkeley) 5/10/91
 */
#ifndef	_MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

/*
 * Pmap stuff
 */
struct pmap {
	pt_entry_t		*pm_ptab;	/* KVA of page table */
	st_entry_t		*pm_stab;	/* KVA of segment table */
	int			pm_stfree;	/* 040: free lev2 blocks */
	u_int			*pm_stpa;	/* 040: ST phys addr */
	short			pm_sref;	/* segment table ref count */
	short			pm_count;	/* pmap reference count */
	long			pm_ptpages;	/* more stats: PT pages */
	struct simplelock	pm_lock;	/* lock on pmap */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
};

typedef struct pmap	*pmap_t;

/*
 * On the 040 we keep track of which level 2 blocks are already in use
 * with the pm_stfree mask.  Bits are arranged from LSB (block 0) to MSB
 * (block 31).  For convenience, the level 1 table is considered to be
 * block 0.
 *
 * MAXKL2SIZE and MAXUL2SIZE control how many pages of level 2 descriptors
 * are allowed for the kernel and user processes, respectively.  16 implies
 * only the initial "segment table" page is used.  WARNING: don't change
 * MAXUL2SIZE unless you can allocate physically contiguous pages for the
 * ST in pmap.c!
 */
#define	MAXKL2SIZE	32
#define	MAXUL2SIZE	16
#define	l2tobm(n)	(1 << (n))
#define	bmtol2(n)	(ffs(n) - 1)
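
/*
 * Illustrative sketch, not part of the original header: one way the
 * pm_stfree bitmap and the l2tobm()/bmtol2() helpers above might be used
 * to hand out a level 2 block on the 040.  It assumes, as bmtol2()'s use
 * of ffs() suggests, that a set bit marks a free block; the function name
 * pmap_alloc_l2_block() is hypothetical.
 */
#if 0	/* example only */
static int
pmap_alloc_l2_block(struct pmap *pmap)
{
	int ix;

	ix = bmtol2(pmap->pm_stfree);	/* lowest set bit = lowest free block */
	if (ix == -1)
		return (-1);		/* no free level 2 blocks left */
	pmap->pm_stfree &= ~l2tobm(ix);	/* mark block `ix' in use */
	return (ix);
}
#endif	/* example only */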

/*
 * Macros for speed
 */
#define	PMAP_ACTIVATE(pmap, loadhw)					\
{									\
	if ((loadhw))							\
		loadustp(m68k_btop((paddr_t)(pmap)->pm_stpa));		\
}
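
/*
 * Illustrative sketch, not part of the original header: PMAP_ACTIVATE()
 * is normally invoked when a pmap may become the active user context,
 * with `loadhw' true only if that pmap belongs to whatever is running on
 * the CPU right now, so the user segment table root is loaded into the
 * MMU only for the running context.  The function below is a sketch (the
 * exact pmap_activate() signature varies); `l' is assumed to be the lwp
 * being switched to, matching the curproc usage elsewhere in this file.
 */
#if 0	/* example only */
void
pmap_activate(struct lwp *l)
{
	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;

	/* Load the hardware root pointer only if `l' is running now. */
	PMAP_ACTIVATE(pmap, l == curproc);
}
#endif	/* example only */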

/*
 * For each struct vm_page, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vaddr_t		pv_va;		/* virtual address for mapping */
	u_int		*pv_ptste;	/* non-zero if VA maps a PT page */
	struct pmap	*pv_ptpmap;	/* if pv_ptste, pmap for PT page */
	int		pv_flags;	/* flags */
} *pv_entry_t;

#define	PV_CI		0x01	/* all entries must be cache inhibited */
#define	PV_PTPAGE	0x02	/* entry maps a page table page */
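
/*
 * Illustrative sketch, not part of the original header: the pv list for a
 * managed page is walked head-to-tail through pv_next.  pa_to_pvh() below
 * is hypothetical shorthand for however this port turns a physical address
 * into the head pv_entry of pv_table; the function name is hypothetical too.
 */
#if 0	/* example only */
static int
page_needs_cache_inhibit(paddr_t pa)
{
	struct pv_entry *pv;

	/* Scan every mapping of the page for the PV_CI attribute. */
	for (pv = pa_to_pvh(pa); pv != NULL; pv = pv->pv_next)
		if (pv->pv_flags & PV_CI)
			return (1);
	return (0);
}
#endif	/* example only */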

struct pv_page;

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pgi_list;
	struct pv_entry *pgi_freelist;
	int pgi_nfree;
};

/*
 * This is basically:
 *	((NBPG - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
 */
#define	NPVPPG	340

struct pv_page {
	struct pv_page_info pvp_pgi;
	struct pv_entry pvp_pv[NPVPPG];
};
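
/*
 * Worked example, not in the original header: with ILP32 m68k layouts,
 * sizeof(struct pv_entry) is five pointer-sized fields plus an int, i.e.
 * 24 bytes, and sizeof(struct pv_page_info) is a two-pointer TAILQ_ENTRY
 * plus a pointer and an int, i.e. 16 bytes.  Assuming NBPG is 8192 on
 * this port (the value that makes 340 come out), (8192 - 16) / 24 = 340
 * rounded down, so one struct pv_page just fits in a page.  A compile-time
 * check along these lines could catch a mismatch if the structures or the
 * page size ever change; the typedef name is hypothetical.
 */
#if 0	/* example only */
typedef char pv_page_fits_in_a_page[(sizeof(struct pv_page) <= NBPG) ? 1 : -1];
#endif	/* example only */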

#ifdef _KERNEL
extern u_int	*Sysmap;
extern caddr_t	vmmap;			/* map for mem, dumps, etc. */
extern struct pmap kernel_pmap_store;

#define	pmap_kernel()	(&kernel_pmap_store)

#define	active_pmap(pm) \
	((pm) == pmap_kernel() || \
	 (pm) == curproc->l_proc->p_vmspace->vm_map.pmap)
#define	active_user_pmap(pm) \
	(curproc && \
	 (pm) != pmap_kernel() && \
	 (pm) == curproc->l_proc->p_vmspace->vm_map.pmap)
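
/*
 * Illustrative sketch, not part of the original header: the usual reason
 * to ask whether a pmap is active is to decide whether the MMU's address
 * translation cache needs flushing after a mapping change.  TBIAS() and
 * TBIAU() are the customary m68k "flush supervisor/user ATC entries"
 * macros; the helper name here is hypothetical.
 */
#if 0	/* example only */
static void
pmap_flush_atc(struct pmap *pmap)
{
	if (!active_pmap(pmap))
		return;			/* not loaded in the MMU, nothing cached */
	if (pmap == pmap_kernel())
		TBIAS();		/* flush supervisor-side ATC entries */
	else
		TBIAU();		/* flush user-side ATC entries */
}
#endif	/* example only */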

#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_update(pmap)		/* nothing (yet) */

vaddr_t	pmap_map __P((vaddr_t, paddr_t, paddr_t, int));
void	pmap_procwr __P((struct proc *, vaddr_t, u_long));
#define	PMAP_NEED_PROCWR

#endif /* _KERNEL */

#endif /* !_MACHINE_PMAP_H_ */