/*	$NetBSD: pmap.h,v 1.27 1998/05/03 13:02:22 ragge Exp $ */

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * Changed for the VAX port. /IC
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	7.6 (Berkeley) 5/10/91
 */


#ifndef PMAP_H
#define PMAP_H

#include <machine/mtpr.h>

struct pte;

/*
 * Pmap structure
 *  pm_stack holds lowest allocated memory for the process stack.
 */

typedef struct pmap {
	vm_offset_t	 pm_stack;	/* Base of allocated P1 pte space */
	int		 ref_count;	/* reference count */
	struct pte	*pm_p0br;	/* page 0 base register */
	long		 pm_p0lr;	/* page 0 length register */
	struct pte	*pm_p1br;	/* page 1 base register */
	long		 pm_p1lr;	/* page 1 length register */
} *pmap_t;
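
/*
 * Illustrative sketch only (not compiled): how a P0-space virtual address
 * could be looked up through the fields above.  Assumes the VAX PGSHIFT of 9
 * (512-byte pages) and that pm_p0lr holds the number of valid P0 ptes; the
 * helper name is hypothetical.
 */
#if 0
static struct pte *
pmap_p0_pte(pmap_t pmap, vm_offset_t va)
{
	long idx = va >> PGSHIFT;	/* pte index within P0 space */

	if (idx >= pmap->pm_p0lr)	/* beyond the mapped length? */
		return NULL;
	return &pmap->pm_p0br[idx];	/* pte relative to the base register */
}
#endif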

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */

typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pte	*pv_pte;	/* pte for this physical page */
} *pv_entry_t;
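
/*
 * Illustrative sketch only (not compiled): walking the pv list of one
 * physical page, e.g. to count how many virtual mappings it currently has.
 * The helper name is hypothetical.
 */
#if 0
static int
pv_count_mappings(pv_entry_t pv)
{
	int n = 0;

	for (; pv != NULL; pv = pv->pv_next)	/* follow the chain */
		if (pv->pv_pte != NULL)		/* entry holds a pte */
			n++;
	return n;
}
#endif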

/* ROUND_PAGE used before vm system is initialized */
#define	ROUND_PAGE(x)	(((uint)(x) + CLOFSET) & ~CLOFSET)
#define	TRUNC_PAGE(x)	((uint)(x) & ~CLOFSET)
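
/*
 * Worked example (assuming CLBYTES is 1024, so CLOFSET is 0x3ff):
 *	ROUND_PAGE(0x2345) -> 0x2400	(round up to the next cluster)
 *	TRUNC_PAGE(0x2345) -> 0x2000	(truncate down to the cluster)
 */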

/* Mapping macros used when allocating SPT */
#define	MAPVIRT(ptr, count)					\
	(vm_offset_t)ptr = virtual_avail;			\
	virtual_avail += (count) * NBPG;

#define	MAPPHYS(ptr, count, perm)				\
	(vm_offset_t)ptr = avail_start + KERNBASE;		\
	avail_start += (count) * NBPG;
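
/*
 * Illustrative sketch only (not compiled): how the macros above might be
 * used during bootstrap.  The variable names and page counts here are
 * assumptions, not taken from the real pmap_bootstrap().
 */
#if 0
	pv_entry_t	pv_table;	/* needs backing memory right away */
	char		*scratch;	/* only needs virtual space */

	MAPPHYS(pv_table, 4, 0);	/* 4 physical pages, already mapped at KERNBASE */
	MAPVIRT(scratch, 8);		/* 8 pages of kernel virtual space, ptes filled in later */
#endif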

#ifdef	_KERNEL

extern	struct pmap kernel_pmap_store;

#define	pmap_kernel()			(&kernel_pmap_store)

#endif	/* _KERNEL */

/* Routines that are best to define as macros */
#define	pmap_phys_address(phys)		((u_int)(phys) << PAGE_SHIFT)
#define	pmap_pageable(a, b, c, d)	/* Don't do anything */
#define	pmap_change_wiring(pmap, v, w)	/* no need */
#define	pmap_copy(a, b, c, d, e)	/* Don't do anything */
#define	pmap_update()			mtpr(0, PR_TBIA) /* Flush translation buffer */
#define	pmap_collect(pmap)		/* No need so far */
#define	pmap_remove(pmap, start, slut)	pmap_protect(pmap, start, slut, 0)
#ifdef UVM
#define	pmap_reference(pmap)		(pmap)->ref_count++
#else
#define	pmap_reference(pmap)		((pmap) ? (pmap)->ref_count++ : 0)
#endif

/* These can be done as efficient inline macros */
#define	pmap_copy_page(src, dst)				\
	__asm__("addl3 $0x80000000,%0,r0;addl3 $0x80000000,%1,r1;	\
	movc3 $1024,(r0),(r1)"					\
	:: "r"(src),"r"(dst):"r0","r1","r2","r3","r4","r5");

#define	pmap_zero_page(phys)					\
	__asm__("addl3 $0x80000000,%0,r0;movc5 $0,(r0),$0,$1024,(r0)"	\
	:: "r"(phys): "r0","r1","r2","r3","r4","r5");
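
/*
 * Roughly what the two asm macros above do, expressed in C: the physical
 * address is turned into a kernel virtual address by adding KERNBASE
 * (0x80000000), and one 1024-byte page cluster is copied or cleared.
 * Sketch only, for illustration; these macro names are hypothetical.
 */
#if 0
#define	pmap_copy_page_c(src, dst) \
	bcopy((caddr_t)((src) + KERNBASE), (caddr_t)((dst) + KERNBASE), 1024)
#define	pmap_zero_page_c(phys) \
	bzero((caddr_t)((phys) + KERNBASE), 1024)
#endif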

/* Prototypes */
void	pmap_bootstrap __P((void));
vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
void	pmap_pinit __P((pmap_t));

#endif /* PMAP_H */