Home | History | Annotate | Line # | Download | only in include
pmap.h revision 1.16.10.3
      1 /*	$NetBSD: pmap.h,v 1.16.10.3 2010/03/11 15:02:27 yamt Exp $	*/
      2 
      3 /*	$OpenBSD: pmap.h,v 1.35 2007/12/14 18:32:23 deraadt Exp $	*/
      4 
      5 /*
      6  * Copyright (c) 2002-2004 Michael Shalayeff
      7  * All rights reserved.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  *
     18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     21  * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
     22  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     24  * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
     26  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
     27  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
     28  * THE POSSIBILITY OF SUCH DAMAGE.
     29  */
     30 
     31 /*
     32  *	Pmap header for hppa.
     33  */
     34 
     35 #ifndef	_HPPA_PMAP_H_
     36 #define	_HPPA_PMAP_H_
     37 
     38 #ifdef _KERNEL_OPT
     39 #include "opt_cputype.h"
     40 #endif
     41 
     42 #include <sys/mutex.h>
     43 #include <machine/pte.h>
     44 #include <machine/cpufunc.h>
     45 
     46 #include <uvm/uvm_pglist.h>
     47 #include <uvm/uvm_object.h>
     48 
     49 #ifdef	_KERNEL
     50 
/*
 * Machine-dependent pmap: one per address space.  The embedded
 * uvm_object supplies the lock and holds the page-table pages.
 */
struct pmap {
	struct uvm_object pm_obj;	/* object (lck by object lock) */
#define	pm_lock	pm_obj.vmobjlock
	struct vm_page	*pm_ptphint;	/* PTP hint (presumably last-used; confirm in pmap.c) */
	struct vm_page	*pm_pdir_pg;	/* vm_page for pdir */
	volatile uint32_t *pm_pdir;	/* page dir (read-only after create) */
	pa_space_t	pm_space;	/* space id (read-only after create) */
	u_int		pm_pid;		/* prot id (read-only after create) */

	struct pmap_statistics	pm_stats;	/* resident/wired page counts */
};
     62 
     63 /*
     64  * Flags that indicate attributes of pages or mappings of pages.
     65  *
     66  * We need two flags for cacheability because pages/mappings can be marked
     67  * uncacheable for two reasons,
     68  *
 *	1) A page's contents may change under our feet and can never be
 *	   cacheable, e.g. I/O space, DMA buffers.
 *	2) A page has non-equivalent aliases and must be (temporarily)
 *	   marked uncacheable.
 *
 * A page that is marked PVF_NC can *never* be marked cacheable and will
 * have all mappings marked PVF_UNCACHEABLE.  A page is marked
 * PVF_UNCACHEABLE only because of non-equivalent aliases; the flag may be
 * removed once the non-equivalent aliases are removed.
     78  *
     79  */
     80 
/* Permanently uncacheable page (e.g. I/O space): may never be cached. */
#define	PVF_NC		0x2000			/* pg is never cacheable */

#define	PVF_MOD		PTE_PROT(TLB_DIRTY)	/* pg/mp is modified */
#define	PVF_REF		PTE_PROT(TLB_REFTRAP)	/* pg/mp (inv) is referenced */
#define	PVF_WRITE	PTE_PROT(TLB_WRITE)	/* pg/mp is writable */
#define	PVF_UNCACHEABLE	PTE_PROT(TLB_UNCACHEABLE)
						/* pg/mp is uncacheable */

/*
 * True iff pg is uncacheable because of aliasing (PVF_UNCACHEABLE set)
 * rather than permanently uncacheable (PVF_NC set).
 */
#define	pmap_is_aliased(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_NC) == 0 && \
	 ((pg)->mdpage.pvh_attrs & PVF_UNCACHEABLE) != 0)
     92 
#define	HPPA_MAX_PID	0xfffa		/* largest protection id */
#define	HPPA_SID_MAX	0x7ffd		/* largest space id */

/*
 * DON'T CHANGE THIS - this is assumed in lots of places.
 * Note: pmap_sid2pid(HPPA_SID_KERNEL) == HPPA_PID_KERNEL must hold.
 */
#define	HPPA_SID_KERNEL	0
#define	HPPA_PID_KERNEL	2
    101 
/*
 * One entry on a page's list of virtual mappings (P->V tracking).
 */
struct pv_entry {			/* locked by its list's pvh_lock */
	struct pv_entry	*pv_next;	/* next mapping of the same page */
	struct pmap	*pv_pmap;	/* the pmap */
	vaddr_t		pv_va;		/* the virtual address + flags */
#define	PV_VAMASK	(~(PAGE_SIZE - 1))	/* VA portion of pv_va */
#define	PV_KENTER	0x001	/* flag in pv_va; name suggests pmap_kenter_pa mapping — confirm in pmap.c */

	struct vm_page	*pv_ptp;	/* the vm_page of the PTP */
};
    111 
    112 extern int pmap_hptsize;
    113 extern struct pdc_hwtlb pdc_hwtlb;
    114 
    115 /*
    116  * pool quickmaps
    117  */
    118 static inline vaddr_t hppa_map_poolpage(paddr_t pa)
    119 {
    120 	return (vaddr_t)pa;
    121 }
    122 
/*
 * Unmap a pool page quickmap.  The VA equals the PA (see
 * hppa_map_poolpage), so no translation is torn down; the page is only
 * flushed from the caches/TLB before being released.
 */
static inline paddr_t hppa_unmap_poolpage(vaddr_t va)
{
	/* Purge the page from the data cache. */
	pdcache(HPPA_SID_KERNEL, va, PAGE_SIZE);
#if defined(HP8000_CPU) || defined(HP8200_CPU) || \
    defined(HP8500_CPU) || defined(HP8600_CPU)
	/*
	 * PA8x00 CPUs: also purge the D-TLB, flush the I-cache and purge
	 * the I-TLB for this page.  Ordering matters — do not reorder.
	 */
	pdtlb(HPPA_SID_KERNEL, va);
	ficache(HPPA_SID_KERNEL, va, PAGE_SIZE);
	pitlb(HPPA_SID_KERNEL, va);
#endif

	return (paddr_t)va;
}

#define	PMAP_MAP_POOLPAGE(pa)	hppa_map_poolpage(pa)
#define	PMAP_UNMAP_POOLPAGE(va)	hppa_unmap_poolpage(va)
    138 
/*
 * according to the parisc manual aliased va's should be
 * different by high 12 bits only.
 *
 * Adjust the hint *h so it has the same cache-alias offset as o,
 * rounding up by one alias boundary if that would move the hint
 * backwards.  The result is never smaller than the original *h.
 */
#define	PMAP_PREFER(o,h,s,td)	do {					\
	vaddr_t pmap_prefer_hint;					\
	pmap_prefer_hint = (*(h) & HPPA_PGAMASK) | ((o) & HPPA_PGAOFF);	\
	if (pmap_prefer_hint < *(h))					\
		pmap_prefer_hint += HPPA_PGALIAS;			\
	*(h) = pmap_prefer_hint;					\
} while(0)
    150 
/* Space id -> protection id; sid 0 (kernel) maps to pid 2 (kernel). */
#define	pmap_sid2pid(s)			(((s) + 1) << 1)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
#define	pmap_update(p)			/* no-op */

#define	pmap_copy(dpmap,spmap,da,len,sa)	/* no-op */

/*
 * Modified/referenced tracking uses the TLB dirty and reference-trap
 * bits.  PVF_REF is inverted ("(inv)" above), hence clear_reference
 * SETS TLB_REFTRAP.
 */
#define	pmap_clear_modify(pg)	pmap_changebit(pg, 0, PTE_PROT(TLB_DIRTY))
#define	pmap_clear_reference(pg) \
				pmap_changebit(pg, PTE_PROT(TLB_REFTRAP), 0)
#define	pmap_is_modified(pg)	pmap_testbit(pg, PTE_PROT(TLB_DIRTY))
#define	pmap_is_referenced(pg)	pmap_testbit(pg, PTE_PROT(TLB_REFTRAP))
#define	pmap_phys_address(ppn)	((ppn) << PAGE_SHIFT)
    164 
    165 void	pmap_activate(struct lwp *);
    166 
    167 void pmap_bootstrap(vaddr_t);
    168 bool pmap_changebit(struct vm_page *, u_int, u_int);
    169 bool pmap_testbit(struct vm_page *, u_int);
    170 void pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
    171 void pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva);
    172 void pmap_page_remove(struct vm_page *pg);
    173 
/*
 * Deactivate the address space of the given lwp.  Nothing to do on
 * hppa; defined inline so callers pay no cost.
 */
static inline void
pmap_deactivate(struct lwp *l)
{

	(void)l;	/* unused; silence -Wunused-parameter */
}
    179 
/*
 * Remove all mappings prior to pmap destruction.  Nothing to do on
 * hppa; defined inline so callers pay no cost.
 */
static inline void
pmap_remove_all(struct pmap *pmap)
{

	(void)pmap;	/* unused; silence -Wunused-parameter */
}
    185 
    186 static inline int
    187 pmap_prot(struct pmap *pmap, int prot)
    188 {
    189 	extern u_int hppa_prot[];
    190 	return (hppa_prot[prot] | (pmap == pmap_kernel()? 0 : TLB_USER));
    191 }
    192 
    193 static inline void
    194 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
    195 {
    196 	if ((prot & UVM_PROT_WRITE) == 0) {
    197 		if (prot & (UVM_PROT_RX))
    198 			pmap_changebit(pg, 0, PTE_PROT(TLB_WRITE));
    199 		else
    200 			pmap_page_remove(pg);
    201 	}
    202 }
    203 
    204 static inline void
    205 pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
    206 {
    207 	if ((prot & UVM_PROT_WRITE) == 0) {
    208 		if (prot & (UVM_PROT_RX))
    209 			pmap_write_protect(pmap, sva, eva, prot);
    210 		else
    211 			pmap_remove(pmap, sva, eva);
    212 	}
    213 }
    214 
/*
 * Space id for a VA in a pmap: the top quadrant (VA >= 0xc0000000) is
 * always kernel space; other addresses use the pmap's own space id.
 */
#define	pmap_sid(pmap, va) \
	((((va) & 0xc0000000) != 0xc0000000) ? \
	 (pmap)->pm_space : HPPA_SID_KERNEL)

/*
 * MD flags that we use for pmap_kenter_pa:
 */
#define	PMAP_NOCACHE	0x01000000	/* set the non-cacheable bit */
    223 
    224 #endif /* _KERNEL */
    225 
    226 #endif /* _HPPA_PMAP_H_ */
    227