/*	$NetBSD: uvm_pager.h,v 1.4 1998/02/10 02:34:56 perry Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *	   >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.h,v 1.1.2.14 1998/01/13 19:00:50 chuck Exp
 */

#ifndef _UVM_UVM_PAGER_H_
#define _UVM_UVM_PAGER_H_

/*
 * uvm_pager.h
 */

UVMHIST_DECL(maphist);


/*
 * async pager i/o descriptor structure
 */

TAILQ_HEAD(uvm_aiohead, uvm_aiodesc);

struct uvm_aiodesc {
  void (*aiodone) __P((struct uvm_aiodesc *));
					/* aio done function */
  vm_offset_t kva;			/* KVA of mapped page(s) */
  int npages;				/* # of pages in I/O req */
  void *pd_ptr;				/* pager-dependent pointer */
  TAILQ_ENTRY(uvm_aiodesc) aioq;	/* linked list of aio's */
};
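
/*
 * Example (illustrative sketch only, not part of this interface): a pager
 * that starts asynchronous I/O hands UVM a uvm_aiodesc and points aiodone
 * at its completion handler, stashing its own per-request state behind
 * pd_ptr.  The names "foo_aiodone" and "struct foo_req" are hypothetical.
 *
 *	static void
 *	foo_aiodone(aio)
 *		struct uvm_aiodesc *aio;
 *	{
 *		struct foo_req *req = aio->pd_ptr;
 *
 *		uvm_pagermapout(aio->kva, aio->npages);    unmap the pages
 *		... mark req's pages clean and un-busy, then free req ...
 *	}
 *
 * Note that uvm_pagermapin() (prototyped below) takes a struct
 * uvm_aiodesc ** argument, which suggests the descriptor can be handed
 * back when the pages are mapped in; treat that detail as an assumption.
 */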

/*
 * pager ops
 */

struct uvm_pagerops {
  void			(*pgo_init) __P((void));	/* init pager */
  struct uvm_object *	(*pgo_attach)		/* get uvm_object */
			    __P((void *, vm_prot_t));
  void			(*pgo_reference)	/* add reference to obj */
			    __P((struct uvm_object *));
  void			(*pgo_detach)		/* drop reference to obj */
			    __P((struct uvm_object *));
  int			(*pgo_fault)		/* special nonstd fault fn */
			    __P((struct uvm_faultinfo *, vm_offset_t,
				 vm_page_t *, int, int, vm_fault_t,
				 vm_prot_t, int));
  boolean_t		(*pgo_flush)		/* flush pages out of obj */
			    __P((struct uvm_object *, vm_offset_t,
				 vm_offset_t, int));
  int			(*pgo_get)		/* get/read page */
			    __P((struct uvm_object *, vm_offset_t,
				 vm_page_t *, int *, int, vm_prot_t, int, int));
  int			(*pgo_asyncget)		/* start async get */
			    __P((struct uvm_object *, vm_offset_t, int));
  int			(*pgo_put)		/* put/write page */
			    __P((struct uvm_object *, vm_page_t *,
				 int, boolean_t));
  void			(*pgo_cluster)		/* return range of cluster */
			    __P((struct uvm_object *, vm_offset_t,
				 vm_offset_t *, vm_offset_t *));
  struct vm_page **	(*pgo_mk_pcluster)	/* make "put" cluster */
			    __P((struct uvm_object *, struct vm_page **,
				 int *, struct vm_page *, int, vm_offset_t,
				 vm_offset_t));
  void			(*pgo_shareprot)	/* share protect */
			    __P((vm_map_entry_t, vm_prot_t));
  void			(*pgo_aiodone)		/* async iodone */
			    __P((struct uvm_aiodesc *));
  boolean_t		(*pgo_releasepg)	/* release page */
			    __P((struct vm_page *, struct vm_page **));
};
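
/*
 * Example (illustrative sketch only): a pager advertises itself through a
 * statically initialized uvm_pagerops structure, leaving entries it does
 * not implement NULL.  The "foopager" names are hypothetical; the generic
 * uvm_mk_pcluster() and uvm_shareprot() helpers declared later in this
 * file have matching signatures and can be plugged in directly.
 *
 *	struct uvm_pagerops foopagerops = {
 *		foopager_init,			pgo_init
 *		foopager_attach,		pgo_attach
 *		foopager_reference,		pgo_reference
 *		foopager_detach,		pgo_detach
 *		NULL,				pgo_fault (use normal fault path)
 *		foopager_flush,			pgo_flush
 *		foopager_get,			pgo_get
 *		NULL,				pgo_asyncget
 *		foopager_put,			pgo_put
 *		foopager_cluster,		pgo_cluster
 *		uvm_mk_pcluster,		pgo_mk_pcluster
 *		uvm_shareprot,			pgo_shareprot
 *		NULL,				pgo_aiodone
 *		foopager_releasepg,		pgo_releasepg
 *	};
 */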

/* pager flags [mostly for flush] */

#define PGO_CLEANIT	0x001	/* write dirty pages to backing store */
#define PGO_SYNCIO	0x002	/* if PGO_CLEANIT: use sync I/O? */
/*
 * obviously, if neither PGO_INVALIDATE nor PGO_FREE is set, then the pages
 * stay where they are.
 */
#define PGO_DEACTIVATE	0x004	/* deactivate flushed pages */
#define PGO_FREE	0x008	/* free flushed pages */

#define PGO_ALLPAGES	0x010	/* flush whole object/get all pages */
#define PGO_DOACTCLUST	0x020	/* flag to mk_pcluster to include active */
#define PGO_LOCKED	0x040	/* fault data structures are locked [get] */
#define PGO_PDFREECLUST	0x080	/* daemon's free cluster flag [uvm_pager_put] */
#define PGO_REALLOCSWAP	0x100	/* reallocate swap area [pager_dropcluster] */

/* page we are not interested in getting */
#define PGO_DONTCARE ((struct vm_page *) -1)	/* [get only] */
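
/*
 * Example (illustrative sketch only): a caller that wants an entire object
 * cleaned synchronously and the pages freed afterwards might combine the
 * flags above as
 *
 *	rv = (*uobj->pgops->pgo_flush)(uobj, 0, 0,
 *	    PGO_CLEANIT | PGO_SYNCIO | PGO_FREE | PGO_ALLPAGES);
 *
 * (assuming the object's pager implements pgo_flush).  With PGO_ALLPAGES
 * the whole object is flushed; using PGO_DEACTIVATE in place of PGO_FREE
 * would leave the flushed pages resident but deactivated.
 */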

/*
 * handle inline options
 */

#ifdef UVM_PAGER_INLINE
#define PAGER_INLINE static __inline
#else
#define PAGER_INLINE /* nothing */
#endif /* UVM_PAGER_INLINE */

/*
 * prototypes
 */

void		uvm_pager_dropcluster __P((struct uvm_object *,
					struct vm_page *, struct vm_page **,
					int *, int, int));
void		uvm_pager_init __P((void));
int		uvm_pager_put __P((struct uvm_object *, struct vm_page *,
				   struct vm_page ***, int *, int,
				   vm_offset_t, vm_offset_t));

PAGER_INLINE struct vm_page *uvm_pageratop __P((vm_offset_t));

vm_offset_t	uvm_pagermapin __P((struct vm_page **, int,
				    struct uvm_aiodesc **, int));
void		uvm_pagermapout __P((vm_offset_t, int));
struct vm_page **uvm_mk_pcluster __P((struct uvm_object *, struct vm_page **,
				       int *, struct vm_page *, int,
				       vm_offset_t, vm_offset_t));
void		uvm_shareprot __P((vm_map_entry_t, vm_prot_t));
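
/*
 * Example (illustrative sketch only): uvm_pagermapin() and
 * uvm_pagermapout() bracket pager I/O by mapping the pages into kernel
 * virtual space and unmapping them again.  A simplified synchronous put
 * path in a hypothetical pager might look like the following; the
 * "foopager_io" helper and the exact meaning of the final "waitok"
 * argument are assumptions, not taken from this header:
 *
 *	kva = uvm_pagermapin(pps, npages, NULL, waitok);
 *	error = foopager_io(kva, npages << PAGE_SHIFT, ...);   do the write
 *	uvm_pagermapout(kva, npages);
 */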

#endif /* _UVM_UVM_PAGER_H_ */