/*	$NetBSD: pmap3.h,v 1.34.4.2 2002/10/18 02:40:22 nathanw Exp $	*/

/*-
 * Copyright (c) 1996 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Adam Glass and Gordon W. Ross.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Physical map structures exported to the VM code.
 * XXX - Does user-level code really see this struct?
 */

44 1.34.4.2 nathanw struct pmap {
45 1.34.4.2 nathanw unsigned char *pm_segmap; /* soft copy of segmap */
46 1.34.4.2 nathanw int pm_ctxnum; /* MMU context number */
47 1.34.4.2 nathanw struct simplelock pm_lock; /* lock on pmap */
48 1.34.4.2 nathanw int pm_refcount; /* reference count */
49 1.34.4.2 nathanw int pm_version;
50 1.34.4.2 nathanw };
52 1.34.4.2 nathanw #ifdef _KERNEL
53 1.34.4.2 nathanw extern struct pmap kernel_pmap_store;
54 1.34.4.2 nathanw #define pmap_kernel() (&kernel_pmap_store)
55 1.34.4.2 nathanw
56 1.34.4.2 nathanw /*
57 1.34.4.2 nathanw * We give the pmap code a chance to resolve faults by
58 1.34.4.2 nathanw * reloading translations that it was forced to unload.
59 1.34.4.2 nathanw * This function does that, and calls vm_fault if it
60 1.34.4.2 nathanw * could not resolve the fault by reloading the MMU.
61 1.34.4.2 nathanw */
62 1.34.4.2 nathanw int _pmap_fault __P((struct vm_map *, vaddr_t, vm_prot_t));
63 1.34.4.2 nathanw
64 1.34.4.2 nathanw /* This lets us have some say in choosing VA locations. */
65 1.34.4.2 nathanw extern void pmap_prefer(vaddr_t, vaddr_t *);
66 1.34.4.2 nathanw #define PMAP_PREFER(fo, ap) pmap_prefer((fo), (ap))
67 1.34.4.2 nathanw
68 1.34.4.2 nathanw /* This needs to be a macro for kern_sysctl.c */
69 1.34.4.2 nathanw extern segsz_t pmap_resident_pages(pmap_t);
70 1.34.4.2 nathanw #define pmap_resident_count(pmap) (pmap_resident_pages(pmap))
71 1.34.4.2 nathanw
72 1.34.4.2 nathanw /* This needs to be a macro for vm_mmap.c */
73 1.34.4.2 nathanw extern segsz_t pmap_wired_pages(pmap_t);
74 1.34.4.2 nathanw #define pmap_wired_count(pmap) (pmap_wired_pages(pmap))
75 1.34.4.2 nathanw
76 1.34.4.2 nathanw /* We use the PA plus some low bits for device mmap. */
77 1.34.4.2 nathanw #define pmap_phys_address(addr) (addr)
78 1.34.4.2 nathanw
79 1.34.4.2 nathanw #define pmap_update(pmap) /* nothing (yet) */
80 1.34.4.2 nathanw
81 1.34.4.2 nathanw /* Map a given physical region to a virtual region */
82 1.34.4.2 nathanw extern vaddr_t pmap_map __P((vaddr_t, paddr_t, paddr_t, int));
/*
 * pmap_remove_all: this port has no fast "remove every mapping"
 * optimization, so the hook is an empty stub.
 */
static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing to do. */
}
89 1.34.4.2 nathanw
/*
 * Since PTEs also contain type bits, we have to have some way
 * to tell pmap_enter `this is an IO page' or `this is not to
 * be cached'.  Since physical addresses are always aligned, we
 * can do this with the low order bits.
 *
 * The values below must agree with pte.h such that:
 *	(PMAP_OBIO << PG_MOD_SHIFT) == PGT_OBIO
 */
#define	PMAP_OBIO	0x04	/* tells pmap_enter to use PG_OBIO */
#define	PMAP_VME16	0x08	/* etc */
#define	PMAP_VME32	0x0C	/* etc */
#define	PMAP_NC		0x10	/* tells pmap_enter to set PG_NC */
#define	PMAP_SPEC	0x1C	/* mask to get all above. */
#endif	/* _KERNEL */