/*	$NetBSD: uvm_amap.h,v 1.6.2.1 1998/07/30 14:04:08 eeh Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *	   >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_amap.h,v 1.1.2.7 1998/01/05 18:12:56 chuck Exp
 */

#ifndef _UVM_UVM_AMAP_H_
#define _UVM_UVM_AMAP_H_

/*
 * uvm_amap.h
 */

/*
 * defines for handling of large sparse amaps.
 *
 * currently the kernel likes to allocate large chunks of VM to reserve
 * them for possible use.   for example, it allocates (reserves) a large
 * chunk of user VM for possible stack growth.   most of the time only
 * a page or two of this VM is actually used.   since the stack is anonymous
 * memory it makes sense for it to live in an amap, but if we allocated
 * an amap for the entire stack range we could end up wasting a large
 * amount of malloc'd KVM.
 *
 * for example, on the i386 at boot time we allocate two amaps for the stack
 * of /sbin/init:
 *  1. a 7680 slot amap at protection 0 (reserve space for stack)
 *  2. a 512 slot amap at protection 7 (top of stack)
 *
 * most of that VM is never mapped or used.
 *
 * to avoid allocating amap resources for the whole range we have the
 * VM system look for maps that are larger than UVM_AMAP_LARGE slots
 * (note that 1 slot = 1 vm_page).   for maps that are large, we attempt
 * to break them up into UVM_AMAP_CHUNK slot sized amaps.
 *
 * so, in the i386 example, the 7680 slot area is never referenced so
 * nothing gets allocated.   the 512 slot area is referenced, and it
 * gets divided into 16 slot chunks (hopefully with one 16 slot chunk
 * being enough to handle the whole stack...).   a worked example of
 * this chunking follows the defines below.
 */

#define UVM_AMAP_LARGE		256	/* # of slots in "large" amap */
#define UVM_AMAP_CHUNK		16	/* # of slots to chunk large amaps in */
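
/*
 * illustrative sketch (not part of the original header): with the defines
 * above, an untouched reservation costs no amap resources at all, and a
 * touched one only gets amap state for the UVM_AMAP_CHUNK sized piece
 * around the referenced page.   assuming a hypothetical 512 slot stack
 * area and 4KB pages:
 *
 *	512 slots > UVM_AMAP_LARGE (256)   => the area counts as "large"
 *	amap built per referenced chunk    => UVM_AMAP_CHUNK = 16 slots (64KB)
 *	chunks if every page were touched  => 512 / 16 = 32 chunk-sized amaps
 *
 * so amap resources are paid for in 16 page pieces rather than for all
 * 512 pages up front.
 */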
     79      1.1    mrg 
     80      1.1    mrg 
     81      1.1    mrg #ifdef DIAGNOSTIC
     82      1.1    mrg #define AMAP_B2SLOT(S,B) { \
     83      1.1    mrg 	if ((B) % PAGE_SIZE) \
     84      1.1    mrg 		panic("AMAP_B2SLOT: invalid byte count"); \
     85      1.1    mrg 	(S) = (B) / PAGE_SIZE; \
     86      1.1    mrg }
     87      1.1    mrg #else
     88      1.1    mrg #define AMAP_B2SLOT(S,B) (S) = (B) / PAGE_SIZE
     89      1.1    mrg #endif
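
/*
 * usage sketch (illustrative, not from the original source): callers are
 * expected to hand AMAP_B2SLOT a page-aligned byte count; a misaligned
 * count panics under DIAGNOSTIC and silently truncates otherwise.   e.g.,
 * with a 4KB PAGE_SIZE:
 *
 *	int slots;
 *	AMAP_B2SLOT(slots, 8192);	slots becomes 2
 *	AMAP_B2SLOT(slots, 8193);	panic under DIAGNOSTIC, 2 otherwise
 */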

#ifdef VM_AMAP_PPREF
#define PPREF_NONE ((int *) -1)	/* sentinel: per-page refs not in use */
#endif

/*
 * handle inline options
 */

#ifdef UVM_AMAP_INLINE
#define AMAP_INLINE static __inline
#else
#define AMAP_INLINE /* nothing */
#endif /* UVM_AMAP_INLINE */

/*
 * prototypes: the following prototypes define the interface to amaps
 */

AMAP_INLINE vaddr_t amap_add __P((struct vm_aref *, vaddr_t,
				      struct vm_anon *, int));
struct vm_amap *amap_alloc __P((vaddr_t, vaddr_t, int));
void amap_copy __P((vm_map_t, vm_map_entry_t, int, boolean_t,
			vaddr_t, vaddr_t));
void amap_cow_now __P((vm_map_t, vm_map_entry_t));
void amap_extend __P((vm_map_entry_t, vsize_t));
void amap_free __P((struct vm_amap *));
AMAP_INLINE struct vm_anon *amap_lookup __P((struct vm_aref *, vaddr_t));
AMAP_INLINE void amap_lookups __P((struct vm_aref *, vaddr_t,
				   struct vm_anon **, int));
#ifdef VM_AMAP_PPREF
void amap_pp_adjref __P((struct vm_amap *, int, vsize_t, int));
void amap_pp_establish __P((struct vm_amap *));
#endif
AMAP_INLINE void amap_ref __P((vm_map_entry_t, int));
void amap_share_protect __P((vm_map_entry_t, vm_prot_t));
void amap_splitref __P((struct vm_aref *, struct vm_aref *, vaddr_t));
AMAP_INLINE void amap_unadd __P((struct vm_amap *, vaddr_t));
AMAP_INLINE void amap_unref __P((vm_map_entry_t, int));
void amap_wipeout __P((struct vm_amap *));
#ifdef VM_AMAP_PPREF
void amap_wiperange __P((struct vm_amap *, int, int));
#endif

struct vm_anon *uvm_analloc __P((void));
void uvm_anfree __P((struct vm_anon *));
void uvm_anon_init __P((void));
void uvm_anon_add __P((int));
struct vm_page *uvm_anon_lockloanpg __P((struct vm_anon *));
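
/*
 * usage sketch (illustrative only, not part of the original header): the
 * fault path is the typical consumer of this interface.   roughly, with
 * the amap locked and "entry" a hypothetical vm_map_entry covering the
 * faulting address "va":
 *
 *	struct vm_anon *anon;
 *
 *	anon = amap_lookup(&entry->aref, va - entry->start);
 *	if (anon == NULL) {
 *		anon = uvm_analloc();
 *		if (anon != NULL)
 *			amap_add(&entry->aref, va - entry->start, anon, 0);
 *	}
 *
 * the real locking and error handling live in uvm_fault.c and are
 * omitted here.
 */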

#endif /* _UVM_UVM_AMAP_H_ */