/*	$NetBSD: pmap_motorola.h,v 1.3 2003/04/02 07:36:00 thorpej Exp $	*/

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/10/93
 */

#ifndef	_M68K_PMAP_MOTOROLA_H_
#define	_M68K_PMAP_MOTOROLA_H_

#include <machine/cpu.h>
#include <machine/pte.h>

#define M68K_SEG_SIZE	NBSEG

/*
 * Pmap stuff
 */
struct pmap {
	pt_entry_t		*pm_ptab;	/* KVA of page table */
	st_entry_t		*pm_stab;	/* KVA of segment table */
	int			pm_stfree;	/* 040: free lev2 blocks */
	st_entry_t		*pm_stpa;	/* 040: ST phys addr */
	uint16_t		pm_sref;	/* segment table ref count */
	uint16_t		pm_count;	/* pmap reference count */
	struct simplelock	pm_lock;	/* lock on pmap */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	int			pm_ptpages;	/* more stats: PT pages */
};

typedef struct pmap	*pmap_t;

/*
 * On the 040, we keep track of which level 2 blocks are already in use
 * with the pm_stfree mask.  Bits are arranged from LSB (block 0) to MSB
 * (block 31).  For convenience, the level 1 table is considered to be
 * block 0.
 *
 * MAX[KU]L2SIZE control how many pages of level 2 descriptors are allowed
 * for the kernel and users.  8 implies only the initial "segment table"
 * page is used.  WARNING: don't change MAXUL2SIZE unless you can allocate
 * physically contiguous pages for the ST in pmap.c!
 */
#define MAXKL2SIZE	32
#define MAXUL2SIZE	8
#define l2tobm(n)	(1 << (n))
#define bmtol2(n)	(ffs(n) - 1)
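
/*
 * Illustrative sketch (not part of this header): one plausible way the
 * pm_stfree bitmap is consumed.  Set bits denote free level 2 blocks; the
 * lowest set bit is taken and cleared.  The helper name is hypothetical.
 */
#if 0
static __inline int
example_st_alloc_l2(struct pmap *pmap)
{
	int ix;

	ix = bmtol2(pmap->pm_stfree);	/* index of lowest free block */
	if (ix == -1)
		return (-1);		/* no level 2 blocks left */
	pmap->pm_stfree &= ~l2tobm(ix);	/* mark the block in use */
	return (ix);
}
#endif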

/*
 * Macros for speed
 */
#define	PMAP_ACTIVATE(pmap, loadhw)					\
{									\
	if ((loadhw))							\
		loadustp(m68k_btop((paddr_t)(pmap)->pm_stpa));		\
}
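
/*
 * Illustrative sketch (not part of this header): PMAP_ACTIVATE() is meant
 * to be invoked along these lines, reloading the user segment table root
 * pointer only when the pmap being activated belongs to the process that
 * is currently running.  The wrapper below is hypothetical.
 */
#if 0
static __inline void
example_activate(struct proc *p)
{
	struct pmap *pmap = p->p_vmspace->vm_map.pmap;

	PMAP_ACTIVATE(pmap, p == curproc);
}
#endif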

/*
 * For each struct vm_page, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry, the list is pv_table.
 */
struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vaddr_t		pv_va;		/* virtual address for mapping */
	st_entry_t	*pv_ptste;	/* non-zero if VA maps a PT page */
	struct pmap	*pv_ptpmap;	/* if pv_ptste, pmap for PT page */
	int		pv_flags;	/* flags */
};

#define PV_CI		0x01	/* header: all entries are cache inhibited */
#define PV_PTPAGE	0x02	/* header: entry maps a page table page */
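
/*
 * Illustrative sketch (not part of this header): walking the pv list of a
 * managed page.  pa_to_pvh() is an assumed lookup from a physical address
 * to the page's head pv_entry, and the head entry is assumed to have a
 * NULL pv_pmap when the page is unmapped.
 */
#if 0
static __inline int
example_count_mappings(paddr_t pa)
{
	struct pv_entry *pv;
	int n = 0;

	for (pv = pa_to_pvh(pa); pv != NULL && pv->pv_pmap != NULL;
	     pv = pv->pv_next)
		n++;		/* one (pmap, va) pair per entry */
	return (n);
}
#endif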

struct pv_page;

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pgi_list;
	struct pv_entry *pgi_freelist;
	int pgi_nfree;
};

/*
 * This is basically:
 * ((PAGE_SIZE - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
 */
#define	NPVPPG	170

struct pv_page {
	struct pv_page_info pvp_pgi;
	struct pv_entry pvp_pv[NPVPPG];
};
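
/*
 * Worked example of the NPVPPG arithmetic above, assuming 4 KB pages and
 * ILP32 m68k sizes: sizeof(struct pv_entry) is 24 bytes (six 32-bit
 * fields) and sizeof(struct pv_page_info) is 16 bytes, so
 * (4096 - 16) / 24 = 170.  A hypothetical build-time check could read:
 */
#if 0
typedef char example_pv_page_fits[
    sizeof(struct pv_page) <= 4096 ? 1 : -1];
#endif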

extern struct pmap	kernel_pmap_store;

#define pmap_kernel()	(&kernel_pmap_store)
#define	active_pmap(pm) \
	((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap)
#define	active_user_pmap(pm) \
	(curproc && \
	 (pm) != pmap_kernel() && (pm) == curproc->p_vmspace->vm_map.pmap)
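
/*
 * Illustrative fragment (not part of this header): active_user_pmap() is
 * the natural guard for work that only matters when the pmap being
 * modified is live in the MMU, e.g. flushing user ATC entries.  The
 * fragment below is hypothetical.
 */
#if 0
	if (active_user_pmap(pmap))
		TBIAU();	/* invalidate all user TLB/ATC entries */
#endif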

extern struct pv_entry	*pv_table;	/* array of entries, one per page */

#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_update(pmap)		/* nothing (yet) */

static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

extern pt_entry_t	*Sysmap;
extern char		*vmmap;		/* map for mem, dumps, etc. */

void	pmap_init_md(void);
vaddr_t	pmap_map(vaddr_t, paddr_t, paddr_t, int);    /* early boot: map phys range into KVA */
void	pmap_procwr(struct proc *, vaddr_t, size_t); /* sync caches after kernel writes proc text */
#define	PMAP_NEED_PROCWR

#ifdef CACHE_HAVE_VAC
void	pmap_prefer(vaddr_t, vaddr_t *);
#define	PMAP_PREFER(foff, vap)	pmap_prefer((foff), (vap))
#endif
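
/*
 * Illustrative sketch (not part of this header): on a configuration with
 * a virtually addressed cache, callers give the pmap a chance to slide a
 * candidate mapping address so it will not alias `foff'.  The wrapper
 * below is hypothetical.
 */
#if 0
static __inline vaddr_t
example_prefer(vaddr_t foff, vaddr_t va)
{
#ifdef PMAP_PREFER
	PMAP_PREFER(foff, &va);
#endif
	return (va);
}
#endif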

#ifdef mvme68k
void _pmap_set_page_cacheable(struct pmap *, vaddr_t);
void _pmap_set_page_cacheinhibit(struct pmap *, vaddr_t);
int _pmap_page_is_cacheable(struct pmap *, vaddr_t);
#endif

#endif /* !_M68K_PMAP_MOTOROLA_H_ */