Home | History | Annotate | Line # | Download | only in pmap
vmpagemd.h revision 1.15
      1 /*	$NetBSD: vmpagemd.h,v 1.15 2019/10/20 08:29:38 skrll Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2011 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
      9  * Agency and which was developed by Matt Thomas of 3am Software Foundry.
     10  *
     11  * This material is based upon work supported by the Defense Advanced Research
     12  * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
     13  * Contract No. N66001-09-C-2073.
     14  * Approved for Public Release, Distribution Unlimited
     15  *
     16  * Redistribution and use in source and binary forms, with or without
     17  * modification, are permitted provided that the following conditions
     18  * are met:
     19  * 1. Redistributions of source code must retain the above copyright
     20  *    notice, this list of conditions and the following disclaimer.
     21  * 2. Redistributions in binary form must reproduce the above copyright
     22  *    notice, this list of conditions and the following disclaimer in the
     23  *    documentation and/or other materials provided with the distribution.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 #ifndef _UVM_PMAP_VMPAGEMD_H_
     39 #define _UVM_PMAP_VMPAGEMD_H_
     40 
     41 #ifdef _LOCORE
     42 #error use assym.h instead
     43 #endif
     44 
     45 //#ifdef _MODULE
     46 //#error this file should not be included by loadable kernel modules
     47 //#endif
     48 
     49 #ifdef _KERNEL_OPT
     50 #include "opt_modular.h"
     51 #include "opt_multiprocessor.h"
     52 #endif
     53 
     54 #include <sys/atomic.h>
     55 #include <sys/mutex.h>
     56 
     57 #define	__HAVE_VM_PAGE_MD
     58 
/*
 * Entry on a physical page's pv (physical-to-virtual) list: one entry
 * per virtual mapping of the page.  The first entry is embedded in
 * struct vm_page_md (mdpg_first); additional mappings are chained
 * through pv_next.
 */
typedef struct pv_entry {
	struct pv_entry *pv_next;	/* next mapping of the same page */
	struct pmap *pv_pmap;		/* pmap owning this mapping */
	vaddr_t pv_va;			/* mapped VA; low bit(s) carry flags */
/* Flag stored in the low bits of pv_va; presumably marks mappings made
 * via pmap_kenter_pa() — confirm against the pmap implementation. */
#define	PV_KENTER		__BIT(0)
} *pv_entry_t;
     65 
/*
 * True if the PV_KENTER flag is set in this pv entry's pv_va.
 * The macro parameter is fully parenthesized (CERT PRE01-C): with the
 * previous expansion `pv->pv_va`, an argument such as `&e` or a
 * conditional expression would misparse, since `->` binds tighter
 * than unary `&` and `?:`.
 */
#define	PV_ISKENTER_P(pv)	((((pv)->pv_va) & PV_KENTER) != 0)
     67 
#ifndef _MODULE

/*
 * Page attribute bits kept in vm_page_md::mdpg_attrs.  The bits from
 * bit 16 upward hold the pv-list generation count (see
 * VM_PAGEMD_PVLIST_GEN), so attribute flags must stay below bit 16.
 */
#define	VM_PAGEMD_REFERENCED	__BIT(0)	/* page has been referenced */
#define	VM_PAGEMD_MODIFIED	__BIT(1)	/* page has been modified */
#define	VM_PAGEMD_POOLPAGE	__BIT(2)	/* page is used as a poolpage */
#define	VM_PAGEMD_EXECPAGE	__BIT(3)	/* page is exec mapped */
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
#define	VM_PAGEMD_UNCACHED	__BIT(4)	/* page is mapped uncached */
#endif

/* Predicates over the attribute bits above. */
#define	VM_PAGEMD_REFERENCED_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_REFERENCED) != 0)
#define	VM_PAGEMD_MODIFIED_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_MODIFIED) != 0)
#define	VM_PAGEMD_POOLPAGE_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_POOLPAGE) != 0)
#define	VM_PAGEMD_EXECPAGE_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_EXECPAGE) != 0)
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
/* Cached is simply the absence of the UNCACHED attribute. */
#define	VM_PAGEMD_CACHED_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_UNCACHED) == 0)
#define	VM_PAGEMD_UNCACHED_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_UNCACHED) != 0)
#endif

#endif /* !_MODULE */
     88 
/*
 * Machine-dependent per-page metadata (advertised via __HAVE_VM_PAGE_MD).
 * mdpg_attrs packs the VM_PAGEMD_* flags in its low bits and the
 * pv-list generation count from bit 16 upward.  The first pv entry is
 * embedded so a singly-mapped page needs no separate pv allocation.
 */
struct vm_page_md {
	volatile unsigned long mdpg_attrs;	/* page attributes */
	struct pv_entry mdpg_first;		/* pv_entry first */
#if defined(MULTIPROCESSOR) || defined(MODULAR) || defined(_MODULE)
	kmutex_t *mdpg_lock;			/* pv list lock */
#endif
};
     96 
     97 #ifndef _MODULE
/*
 * Initialize the per-page pv-list lock pointer.  On MULTIPROCESSOR or
 * MODULAR kernels each page carries its own lock pointer, started out
 * NULL (presumably assigned later via pmap_pvlist_lock_addr() — see
 * the pmap implementation); otherwise a single global mutex is shared
 * and there is nothing to initialize.
 */
#if defined(MULTIPROCESSOR) || defined(MODULAR)
#define	VM_PAGEMD_PVLIST_LOCK_INIT(mdpg) 	(mdpg)->mdpg_lock = NULL
#else
#define	VM_PAGEMD_PVLIST_LOCK_INIT(mdpg)	__nothing
#endif /* MULTIPROCESSOR || MODULAR */

/*
 * Lock/unlock a page's pv list.  A write lock (increment of 1) bumps
 * the generation count in mdpg_attrs; a read lock leaves it unchanged.
 * Both lock and unlock return the generation, which callers can compare
 * to detect that the list may have changed while the lock was dropped.
 */
#define	VM_PAGEMD_PVLIST_LOCK(mdpg)	pmap_pvlist_lock(mdpg, 1)
#define	VM_PAGEMD_PVLIST_READLOCK(mdpg)	pmap_pvlist_lock(mdpg, 0)
#define	VM_PAGEMD_PVLIST_UNLOCK(mdpg)	pmap_pvlist_unlock(mdpg)
#define	VM_PAGEMD_PVLIST_LOCKED_P(mdpg)	pmap_pvlist_locked_p(mdpg)
/* Generation count: mdpg_attrs shifted past the attribute-flag bits. */
#define	VM_PAGEMD_PVLIST_GEN(mdpg)	((mdpg)->mdpg_attrs >> 16)

/* True if the page currently has no pv mappings at all. */
#define	VM_PAGEMD_PVLIST_EMPTY_P(mdpg)	((mdpg)->mdpg_first.pv_pmap == NULL)
    111 
    112 #ifdef _KERNEL
#if defined(MULTIPROCESSOR) || defined(MODULAR)
/*
 * Return the mutex guarding this page's pv list; implemented by the
 * pmap code when per-page locks are in use.
 */
kmutex_t *pmap_pvlist_lock_addr(struct vm_page_md *);
#else
/*
 * Uniprocessor, non-modular kernel: a single global mutex protects
 * every pv list, so the page argument is ignored.
 */
extern kmutex_t pmap_pvlist_mutex;
static __inline kmutex_t *
pmap_pvlist_lock_addr(struct vm_page_md *mdpg)
{
	return &pmap_pvlist_mutex;
}
#endif
    123 
/*
 * Acquire the pv-list lock for mdpg and return the generation count
 * as of acquisition.  `increment` is 1 for a write lock and 0 for a
 * read lock; a write lock advances the generation so other callers
 * can detect the list changed.  Statement order matters: the
 * generation must be sampled after the mutex is held, and the bump
 * applied under the same hold.
 */
static __inline uintptr_t
pmap_pvlist_lock(struct vm_page_md *mdpg, uintptr_t increment)
{
	mutex_spin_enter(pmap_pvlist_lock_addr(mdpg));
	/* Sample under the lock, then advance for writers. */
	const uintptr_t gen = VM_PAGEMD_PVLIST_GEN(mdpg);
	mdpg->mdpg_attrs += increment << 16;
	return gen;
}
    132 
/*
 * Release the pv-list lock, returning the generation count in effect
 * at release.  The generation is read before the mutex is dropped so
 * the value is stable; do not reorder these statements.
 */
static __inline uintptr_t
pmap_pvlist_unlock(struct vm_page_md *mdpg)
{
	const uintptr_t gen = VM_PAGEMD_PVLIST_GEN(mdpg);
	mutex_spin_exit(pmap_pvlist_lock_addr(mdpg));
	return gen;
}
    140 
    141 static __inline bool
    142 pmap_pvlist_locked_p(struct vm_page_md *mdpg)
    143 {
    144 	return mutex_owned(pmap_pvlist_lock_addr(mdpg));
    145 }
    146 #endif /* _KERNEL */
    147 
/*
 * Initialize the machine-dependent fields of a vm_page: empty pv list
 * (pv_pmap == NULL, cf. VM_PAGEMD_PVLIST_EMPTY_P), all attribute bits
 * and the generation count cleared, and the lock pointer set up.
 * NOTE(review): pv_va is seeded with the page's physical address while
 * the page is unmapped — confirm how the pmap code relies on this.
 */
#define VM_MDPAGE_INIT(pg)						\
do {									\
	(pg)->mdpage.mdpg_first.pv_next = NULL;				\
	(pg)->mdpage.mdpg_first.pv_pmap = NULL;				\
	(pg)->mdpage.mdpg_first.pv_va = (pg)->phys_addr;		\
	(pg)->mdpage.mdpg_attrs = 0;					\
	VM_PAGEMD_PVLIST_LOCK_INIT(&(pg)->mdpage);			\
} while (/* CONSTCOND */ 0)
    156 
    157 #endif /* _MODULE */
    158 
    159 #endif /* _UVM_PMAP_VMPAGEMD_H_ */
    160