/*	$NetBSD: pmap.h,v 1.9 2001/05/26 16:32:43 chs Exp $	*/

/*
 * This file was taken from mvme68k/include/pmap.h and
 * should probably be re-synced when needed.
 * Darrin B Jewell <jewell@mit.edu>  Fri Aug 28 03:22:07 1998
 * original cvs id: NetBSD: pmap.h,v 1.12 1998/08/22 10:55:34 scw Exp
 */

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/10/93
 */

50#ifndef	_MACHINE_PMAP_H_
51#define	_MACHINE_PMAP_H_
52
53#include <machine/pte.h>
54
55#if defined(M68040) && 0
56#define HP_SEG_SIZE	(mmutype == MMU_68040 ? 0x40000 : NBSEG)
57#else
58#define HP_SEG_SIZE	NBSEG
59#endif
60
61/*
62 * Pmap stuff
63 */
64struct pmap {
65	pt_entry_t		*pm_ptab;	/* KVA of page table */
66	st_entry_t		*pm_stab;	/* KVA of segment table */
67	int			pm_stfree;	/* 040: free lev2 blocks */
68	st_entry_t		*pm_stpa;	/* 040: ST phys addr */
69	short			pm_sref;	/* segment table ref count */
70	short			pm_count;	/* pmap reference count */
71	struct simplelock	pm_lock;	/* lock on pmap */
72	struct pmap_statistics	pm_stats;	/* pmap statistics */
73	long			pm_ptpages;	/* more stats: PT pages */
74};
75
76typedef struct pmap	*pmap_t;
77
/*
 * On the 040 we keep track of which level 2 blocks are already in use
 * with the pm_stfree mask.  Bits are arranged from LSB (block 0) to MSB
 * (block 31).  For convenience, the level 1 table is considered to be
 * block 0.
 *
 * MAX[KU]L2SIZE control how many pages of level 2 descriptors are allowed
 * for the kernel and users.  8 implies only the initial "segment table"
 * page is used.  WARNING: don't change MAXUL2SIZE unless you can allocate
 * physically contiguous pages for the ST in pmap.c!
 */
#define	MAXKL2SIZE	32
#define MAXUL2SIZE	8
#define l2tobm(n)	(1 << (n))		/* level 2 block # -> bitmap bit */
#define	bmtol2(n)	(ffs(n) - 1)		/* bitmap -> lowest free block # */
93
/*
 * Macros for speed.
 *
 * PMAP_ACTIVATE(): if `loadhw' is non-zero, load the physical address
 * of the pmap's segment table into the MMU via loadustp() (presumably
 * the user segment table root pointer -- see loadustp() in locore).
 * Wrapped in do/while(0) so the macro acts as a single statement; the
 * previous bare-brace form would mis-parse inside an if/else.
 */
#define	PMAP_ACTIVATE(pmap, loadhw)					\
do {									\
	if ((loadhw))							\
		loadustp(m68k_btop((paddr_t)(pmap)->pm_stpa));		\
} while (/* CONSTCOND */ 0)
102
103/*
104 * For each vm_page_t, there is a list of all currently valid virtual
105 * mappings of that page.  An entry is a pv_entry, the list is pv_table.
106 */
107struct pv_entry {
108	struct pv_entry	*pv_next;	/* next pv_entry */
109	struct pmap	*pv_pmap;	/* pmap where mapping lies */
110	vaddr_t		pv_va;		/* virtual address for mapping */
111	st_entry_t	*pv_ptste;	/* non-zero if VA maps a PT page */
112	struct pmap	*pv_ptpmap;	/* if pv_ptste, pmap for PT page */
113	int		pv_flags;	/* flags */
114};
115
116#define	PV_CI		0x01	/* header: all entries are cache inhibited */
117#define PV_PTPAGE	0x02	/* header: entry maps a page table page */
118
119struct pv_page;
120
121struct pv_page_info {
122	TAILQ_ENTRY(pv_page) pgi_list;
123	struct pv_entry *pgi_freelist;
124	int pgi_nfree;
125};
126
127/*
128 * This is basically:
129 * ((NBPG - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
130 */
131#define	NPVPPG	170
132
133struct pv_page {
134	struct pv_page_info pvp_pgi;
135	struct pv_entry pvp_pv[NPVPPG];
136};
137
138#ifdef	_KERNEL
139
140extern struct pmap	kernel_pmap_store;
141
142#define pmap_kernel()	(&kernel_pmap_store)
143#define	active_pmap(pm) \
144	((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap)
145#define	active_user_pmap(pm) \
146	(curproc && \
147	 (pm) != pmap_kernel() && (pm) == curproc->p_vmspace->vm_map.pmap)
148
149extern struct pv_entry	*pv_table;	/* array of entries, one per page */
150
151#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
152#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
153
154#define	pmap_update()			/* nothing (yet) */
155
156extern pt_entry_t	*Sysmap;
157extern char		*vmmap;		/* map for mem, dumps, etc. */
158
159vaddr_t	pmap_map __P((vaddr_t, paddr_t, paddr_t, int));
160void	pmap_procwr __P((struct proc *, vaddr_t, size_t));
161#define	PMAP_NEED_PROCWR
162
163#endif /* _KERNEL */
164
165#endif /* !_MACHINE_PMAP_H_ */
166