uvm_pager.h revision 1.12
/*	$NetBSD: uvm_pager.h,v 1.12 2000/03/26 20:54:47 kleink Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.h,v 1.1.2.14 1998/01/13 19:00:50 chuck Exp
 */

#ifndef _UVM_UVM_PAGER_H_
#define _UVM_UVM_PAGER_H_

/*
 * uvm_pager.h
 */

/*
 * async pager i/o descriptor structure
 */

TAILQ_HEAD(uvm_aiohead, uvm_aiodesc);

struct uvm_aiodesc {
	void (*aiodone) __P((struct uvm_aiodesc *));
						/* aio done function */
	vaddr_t kva;				/* KVA of mapped page(s) */
	int npages;				/* # of pages in I/O req */
	void *pd_ptr;				/* pager-dependent pointer */
	TAILQ_ENTRY(uvm_aiodesc) aioq;		/* linked list of aio's */
};
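
/*
 * Illustrative sketch (not part of the original header): one plausible
 * shape for an async pager write path built around the descriptor above,
 * together with its done hook.  The "foopager" routine names, the softc
 * pointer and the wait flag handed to uvm_pagermapin() are assumptions
 * made up for this example; judging from its prototype further down,
 * uvm_pagermapin() can hand back a freshly allocated descriptor through
 * its struct uvm_aiodesc ** argument.
 *
 *	static void foopager_aiodone __P((struct uvm_aiodesc *));
 *
 *	static void
 *	foopager_start_async(struct vm_page **pps, int npages, void *softc)
 *	{
 *		struct uvm_aiodesc *aio;
 *		vaddr_t kva;
 *
 *		kva = uvm_pagermapin(pps, npages, &aio, M_WAITOK);
 *		aio->aiodone = foopager_aiodone;
 *		aio->kva = kva;
 *		aio->npages = npages;
 *		aio->pd_ptr = softc;
 *		... start the I/O; on completion the interrupt side queues
 *		... "aio" on a uvm_aiohead and the done hook gets called
 *	}
 *
 *	static void
 *	foopager_aiodone(struct uvm_aiodesc *aio)
 *	{
 *		uvm_pagermapout(aio->kva, aio->npages);
 *		... clean/unbusy the pages and free the descriptor
 *	}
 */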

/*
 * pager ops
 */

struct uvm_pagerops {
	void			(*pgo_init)	/* init pager */
			 __P((void));
	void			(*pgo_reference)	/* add reference to obj */
			 __P((struct uvm_object *));
	void			(*pgo_detach)	/* drop reference to obj */
			 __P((struct uvm_object *));
	int			(*pgo_fault)	/* special nonstd fault fn */
			 __P((struct uvm_faultinfo *, vaddr_t,
				 vm_page_t *, int, int, vm_fault_t,
				 vm_prot_t, int));
	boolean_t		(*pgo_flush)	/* flush pages out of obj */
			 __P((struct uvm_object *, voff_t, voff_t, int));
	int			(*pgo_get)	/* get/read page */
			 __P((struct uvm_object *, voff_t,
				 vm_page_t *, int *, int, vm_prot_t, int, int));
	int			(*pgo_asyncget)	/* start async get */
			 __P((struct uvm_object *, voff_t, int));
	int			(*pgo_put)	/* put/write page */
			 __P((struct uvm_object *, vm_page_t *,
				 int, boolean_t));
	void			(*pgo_cluster)	/* return range of cluster */
			 __P((struct uvm_object *, voff_t, voff_t *,
				 voff_t *));
	struct vm_page **	(*pgo_mk_pcluster)	/* make "put" cluster */
			 __P((struct uvm_object *, struct vm_page **,
				 int *, struct vm_page *, int, voff_t,
				 voff_t));
	void			(*pgo_shareprot)	/* share protect */
			 __P((vm_map_entry_t, vm_prot_t));
	void			(*pgo_aiodone)		/* async iodone */
			 __P((struct uvm_aiodesc *));
	boolean_t		(*pgo_releasepg)	/* release page */
			 __P((struct vm_page *, struct vm_page **));
};
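
/*
 * Illustrative sketch (not part of the original header): a pager
 * advertises itself by filling in one of these tables, in the field
 * order declared above, with pointers to its own routines; slots it does
 * not provide are left NULL and callers are expected to check for that
 * before jumping through them.  The "foopager" names are made up for the
 * example (the routines are assumed to be declared earlier with the
 * matching prototypes); the NULL slots below stand for pgo_fault,
 * pgo_asyncget, pgo_shareprot and pgo_aiodone.  The real tables in this
 * tree are the likes of uvm_vnodeops, uvm_deviceops and aobj_pager, and
 * the generic uvm_mk_pcluster() declared below is commonly plugged into
 * the pgo_mk_pcluster slot.
 *
 *	struct uvm_pagerops foopager_ops = {
 *		foopager_init,
 *		foopager_reference,
 *		foopager_detach,
 *		NULL,
 *		foopager_flush,
 *		foopager_get,
 *		NULL,
 *		foopager_put,
 *		foopager_cluster,
 *		uvm_mk_pcluster,
 *		NULL,
 *		NULL,
 *		foopager_releasepg,
 *	};
 */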

/* pager flags [mostly for flush] */

#define PGO_CLEANIT	0x001	/* write dirty pages to backing store */
#define PGO_SYNCIO	0x002	/* if PGO_CLEANIT: use synchronous I/O */
/*
 * obviously if neither PGO_DEACTIVATE nor PGO_FREE is set then the pages
 * stay where they are.
 */
#define PGO_DEACTIVATE	0x004	/* deactivate flushed pages */
#define PGO_FREE	0x008	/* free flushed pages */

#define PGO_ALLPAGES	0x010	/* flush whole object/get all pages */
#define PGO_DOACTCLUST	0x020	/* flag to mk_pcluster to include active pages */
#define PGO_LOCKED	0x040	/* fault data structures are locked [get] */
#define PGO_PDFREECLUST	0x080	/* daemon's free cluster flag [uvm_pager_put] */
#define PGO_REALLOCSWAP	0x100	/* reallocate swap area [pager_dropcluster] */

/* page we are not interested in getting */
#define PGO_DONTCARE ((struct vm_page *) -1)	/* [get only] */
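
/*
 * Illustrative sketch (not part of the original header): how the flush
 * flags are typically combined by a caller of pgo_flush().  To clean and
 * then discard a byte range of an object, PGO_CLEANIT and PGO_FREE are
 * or'd together (here with PGO_SYNCIO to wait for the writes); the
 * object is assumed to be locked around the call, and "foo_flush_range"
 * is a made-up wrapper for the example.
 *
 *	boolean_t
 *	foo_flush_range(struct uvm_object *uobj, voff_t start, voff_t stop)
 *	{
 *		return (uobj->pgops->pgo_flush(uobj, start, stop,
 *		    PGO_CLEANIT | PGO_FREE | PGO_SYNCIO));
 *	}
 */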

#ifdef _KERNEL

/*
 * handle inline options
 */

#ifdef UVM_PAGER_INLINE
#define PAGER_INLINE static __inline
#else
#define PAGER_INLINE /* nothing */
#endif /* UVM_PAGER_INLINE */

/*
 * prototypes
 */

void		uvm_pager_dropcluster __P((struct uvm_object *,
					struct vm_page *, struct vm_page **,
					int *, int));
void		uvm_pager_init __P((void));
int		uvm_pager_put __P((struct uvm_object *, struct vm_page *,
				   struct vm_page ***, int *, int,
				   voff_t, voff_t));

PAGER_INLINE struct vm_page *uvm_pageratop __P((vaddr_t));

vaddr_t		uvm_pagermapin __P((struct vm_page **, int,
				    struct uvm_aiodesc **, int));
void		uvm_pagermapout __P((vaddr_t, int));
struct vm_page **uvm_mk_pcluster __P((struct uvm_object *, struct vm_page **,
				       int *, struct vm_page *, int,
				       voff_t, voff_t));
void		uvm_shareprot __P((vm_map_entry_t, vm_prot_t));
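
/*
 * Illustrative sketch (not part of the original header): a done hook can
 * recover the vm_page structures behind a descriptor's KVA mapping with
 * uvm_pageratop() before tearing the mapping down.  PAGE_SHIFT comes
 * from the machine headers; the per-page completion handling itself is
 * elided.
 *
 *	struct vm_page *pg;
 *	int i;
 *
 *	for (i = 0; i < aio->npages; i++) {
 *		pg = uvm_pageratop(aio->kva + (i << PAGE_SHIFT));
 *		... per-page completion handling of "pg" goes here
 *	}
 *	uvm_pagermapout(aio->kva, aio->npages);
 */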

#endif /* _KERNEL */

#endif /* _UVM_UVM_PAGER_H_ */