Home | History | Annotate | Line # | Download | only in pmap
      1  1.17     skrll /*	$NetBSD: vmpagemd.h,v 1.17 2020/12/20 16:38:26 skrll Exp $	*/
      2   1.1  christos 
      3   1.1  christos /*-
      4   1.1  christos  * Copyright (c) 2011 The NetBSD Foundation, Inc.
      5   1.1  christos  * All rights reserved.
      6   1.1  christos  *
      7   1.1  christos  * This code is derived from software contributed to The NetBSD Foundation
      8   1.1  christos  * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
      9   1.1  christos  * Agency and which was developed by Matt Thomas of 3am Software Foundry.
     10   1.1  christos  *
     11   1.1  christos  * This material is based upon work supported by the Defense Advanced Research
     12   1.1  christos  * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
     13   1.1  christos  * Contract No. N66001-09-C-2073.
     14   1.1  christos  * Approved for Public Release, Distribution Unlimited
     15   1.1  christos  *
     16   1.1  christos  * Redistribution and use in source and binary forms, with or without
     17   1.1  christos  * modification, are permitted provided that the following conditions
     18   1.1  christos  * are met:
     19   1.1  christos  * 1. Redistributions of source code must retain the above copyright
     20   1.1  christos  *    notice, this list of conditions and the following disclaimer.
     21   1.1  christos  * 2. Redistributions in binary form must reproduce the above copyright
     22   1.1  christos  *    notice, this list of conditions and the following disclaimer in the
     23   1.1  christos  *    documentation and/or other materials provided with the distribution.
     24   1.1  christos  *
     25   1.1  christos  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     26   1.1  christos  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27   1.1  christos  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28   1.1  christos  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     29   1.1  christos  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30   1.1  christos  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31   1.1  christos  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32   1.1  christos  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33   1.1  christos  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34   1.1  christos  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35   1.1  christos  * POSSIBILITY OF SUCH DAMAGE.
     36   1.1  christos  */
     37   1.1  christos 
     38   1.5     skrll #ifndef _UVM_PMAP_VMPAGEMD_H_
     39   1.5     skrll #define _UVM_PMAP_VMPAGEMD_H_
     40   1.1  christos 
     41   1.1  christos #ifdef _LOCORE
     42   1.1  christos #error use assym.h instead
     43   1.1  christos #endif
     44   1.1  christos 
     45   1.3      matt //#ifdef _MODULE
     46   1.3      matt //#error this file should not be included by loadable kernel modules
     47   1.3      matt //#endif
     48   1.1  christos 
     49   1.2      matt #ifdef _KERNEL_OPT
     50   1.1  christos #include "opt_modular.h"
     51   1.1  christos #include "opt_multiprocessor.h"
     52   1.2      matt #endif
     53   1.1  christos 
     54   1.3      matt #include <sys/atomic.h>
     55   1.1  christos #include <sys/mutex.h>
     56   1.1  christos 
     57   1.1  christos #define	__HAVE_VM_PAGE_MD
     58   1.1  christos 
     59   1.1  christos typedef struct pv_entry {
     60   1.1  christos 	struct pv_entry *pv_next;
     61   1.1  christos 	struct pmap *pv_pmap;
     62   1.1  christos 	vaddr_t pv_va;
     63   1.7     skrll #define	PV_KENTER		__BIT(0)
     64   1.1  christos } *pv_entry_t;
     65   1.1  christos 
     66  1.12     skrll #define	PV_ISKENTER_P(pv)	(((pv->pv_va) & PV_KENTER) != 0)
     67  1.12     skrll 
#ifndef _MODULE

/*
 * Page attribute flags kept in the low bits of mdpg_attrs.  Bits 16 and
 * up of mdpg_attrs hold the PV list generation count instead (see
 * VM_PAGEMD_PVLIST_GEN below), so new flags must stay below bit 16.
 */
#define	VM_PAGEMD_VMPAGE	__BIT(0)	/* page is vm managed */
#define	VM_PAGEMD_REFERENCED	__BIT(1)	/* page has been referenced */
#define	VM_PAGEMD_MODIFIED	__BIT(2)	/* page has been modified */
#define	VM_PAGEMD_POOLPAGE	__BIT(3)	/* page is used as a poolpage */
#define	VM_PAGEMD_EXECPAGE	__BIT(4)	/* page is exec mapped */
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
#define	VM_PAGEMD_UNCACHED	__BIT(5)	/* page is mapped uncached */
#endif

/* Predicates over the attribute flags of a struct vm_page_md. */
#define	VM_PAGEMD_VMPAGE_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_VMPAGE) != 0)
#define	VM_PAGEMD_REFERENCED_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_REFERENCED) != 0)
#define	VM_PAGEMD_MODIFIED_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_MODIFIED) != 0)
#define	VM_PAGEMD_POOLPAGE_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_POOLPAGE) != 0)
#define	VM_PAGEMD_EXECPAGE_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_EXECPAGE) != 0)
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
#define	VM_PAGEMD_CACHED_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_UNCACHED) == 0)
#define	VM_PAGEMD_UNCACHED_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_UNCACHED) != 0)
#endif

#endif /* !_MODULE */
     90   1.3      matt 
/*
 * Machine-dependent data attached to each struct vm_page: the attribute
 * flags / PV generation word and the head of the page's PV list.
 */
struct vm_page_md {
	volatile unsigned long mdpg_attrs;	/* page attributes */
	struct pv_entry mdpg_first;		/* pv_entry first */
#if defined(MULTIPROCESSOR) || defined(MODULAR) || defined(_MODULE)
	kmutex_t *mdpg_lock;			/* pv list lock */
#endif
};
     98   1.3      matt 
#ifndef _MODULE
#if defined(MULTIPROCESSOR) || defined(MODULAR)
/* Per-page lock pointer starts out unset. */
#define	VM_PAGEMD_PVLIST_LOCK_INIT(mdpg) 	(mdpg)->mdpg_lock = NULL
#else
/* A single global mutex is used (see below); no per-page init needed. */
#define	VM_PAGEMD_PVLIST_LOCK_INIT(mdpg)	__nothing
#endif /* MULTIPROCESSOR || MODULAR */

/*
 * PV list locking.  Taking the lock for writing (second argument 1)
 * advances the generation count stored in bits 16+ of mdpg_attrs; both
 * lock and unlock return the generation value.
 */
#define	VM_PAGEMD_PVLIST_LOCK(mdpg)	pmap_pvlist_lock(mdpg, 1)
#define	VM_PAGEMD_PVLIST_READLOCK(mdpg)	pmap_pvlist_lock(mdpg, 0)
#define	VM_PAGEMD_PVLIST_UNLOCK(mdpg)	pmap_pvlist_unlock(mdpg)
#define	VM_PAGEMD_PVLIST_LOCKED_P(mdpg)	pmap_pvlist_locked_p(mdpg)
#define	VM_PAGEMD_PVLIST_GEN(mdpg)	((mdpg)->mdpg_attrs >> 16)

/* The PV list is empty iff its first entry has no owning pmap. */
#define	VM_PAGEMD_PVLIST_EMPTY_P(mdpg)	((mdpg)->mdpg_first.pv_pmap == NULL)
    113  1.15     skrll 
#ifdef _KERNEL
#if defined(MULTIPROCESSOR) || defined(MODULAR)
/*
 * MP/modular kernels: the pv list mutex for a page is looked up by the
 * pmap implementation (out-of-line).
 */
kmutex_t *pmap_pvlist_lock_addr(struct vm_page_md *);
#else
/* UP, non-modular kernels: one global mutex covers every pv list. */
extern kmutex_t pmap_pvlist_mutex;
static __inline kmutex_t *
pmap_pvlist_lock_addr(struct vm_page_md *mdpg)
{
	return &pmap_pvlist_mutex;
}
#endif
    125   1.3      matt 
    126   1.8  christos static __inline uintptr_t
    127   1.3      matt pmap_pvlist_lock(struct vm_page_md *mdpg, uintptr_t increment)
    128   1.3      matt {
    129   1.3      matt 	mutex_spin_enter(pmap_pvlist_lock_addr(mdpg));
    130   1.3      matt 	const uintptr_t gen = VM_PAGEMD_PVLIST_GEN(mdpg);
    131   1.3      matt 	mdpg->mdpg_attrs += increment << 16;
    132   1.3      matt 	return gen;
    133   1.3      matt }
    134   1.3      matt 
    135   1.8  christos static __inline uintptr_t
    136   1.3      matt pmap_pvlist_unlock(struct vm_page_md *mdpg)
    137   1.3      matt {
    138   1.3      matt 	const uintptr_t gen = VM_PAGEMD_PVLIST_GEN(mdpg);
    139   1.3      matt 	mutex_spin_exit(pmap_pvlist_lock_addr(mdpg));
    140   1.3      matt 	return gen;
    141   1.3      matt }
    142   1.3      matt 
    143   1.8  christos static __inline bool
    144   1.3      matt pmap_pvlist_locked_p(struct vm_page_md *mdpg)
    145   1.3      matt {
    146  1.17     skrll 
    147   1.3      matt 	return mutex_owned(pmap_pvlist_lock_addr(mdpg));
    148   1.3      matt }
    149   1.3      matt #endif /* _KERNEL */
    150   1.1  christos 
/*
 * Initialize the machine-dependent part of a vm_page: empty pv list
 * (pv_pmap == NULL), pv_va primed with the page's physical address,
 * and attributes marked as a managed vm page.
 */
#define VM_MDPAGE_INIT(pg)						\
do {									\
	(pg)->mdpage.mdpg_first.pv_next = NULL;				\
	(pg)->mdpage.mdpg_first.pv_pmap = NULL;				\
	(pg)->mdpage.mdpg_first.pv_va = VM_PAGE_TO_PHYS(pg);		\
	(pg)->mdpage.mdpg_attrs = VM_PAGEMD_VMPAGE;			\
	VM_PAGEMD_PVLIST_LOCK_INIT(&(pg)->mdpage);			\
} while (/* CONSTCOND */ 0)
    159   1.1  christos 
    160   1.3      matt #endif /* _MODULE */
    161   1.3      matt 
    162   1.5     skrll #endif /* _UVM_PMAP_VMPAGEMD_H_ */
    163