/*	$NetBSD: uvm_pager.h,v 1.10 1999/06/21 17:25:12 thorpej Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.h,v 1.1.2.14 1998/01/13 19:00:50 chuck Exp
 */

#ifndef _UVM_UVM_PAGER_H_
#define _UVM_UVM_PAGER_H_

/*
 * uvm_pager.h
 */

/*
 * async pager i/o descriptor structure
 */

TAILQ_HEAD(uvm_aiohead, uvm_aiodesc);

struct uvm_aiodesc {
	void		(*aiodone) __P((struct uvm_aiodesc *));
					/* aio done function */
	vaddr_t		kva;		/* KVA of mapped page(s) */
	int		npages;		/* # of pages in I/O req */
	void		*pd_ptr;	/* pager-dependent pointer */
	TAILQ_ENTRY(uvm_aiodesc) aioq;	/* linked list of aio's */
};
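
/*
 * Illustrative sketch, not part of this header: a pager starting async
 * I/O describes it with a uvm_aiodesc roughly as below.  "my_aiodone" and
 * "my_softc" are hypothetical pager-private names, "aio" points at a
 * descriptor obtained in a pager-dependent way, and "kva" would come from
 * uvm_pagermapin() (declared later in this file).  The aiodone hook is
 * called with the descriptor once the I/O completes.
 *
 *	struct uvm_aiodesc *aio;
 *
 *	aio->aiodone = my_aiodone;		completion callback
 *	aio->kva     = kva;			kernel mapping of the pages
 *	aio->npages  = npages;			pages under that mapping
 *	aio->pd_ptr  = my_softc;		pager-private state
 */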

/*
 * pager ops
 */

struct uvm_pagerops {
	void			(*pgo_init) __P((void));/* init pager */
	void			(*pgo_reference)	/* add reference to obj */
				__P((struct uvm_object *));
	void			(*pgo_detach)		/* drop reference to obj */
				__P((struct uvm_object *));
	int			(*pgo_fault)		/* special nonstd fault fn */
				__P((struct uvm_faultinfo *, vaddr_t,
				     vm_page_t *, int, int, vm_fault_t,
				     vm_prot_t, int));
	boolean_t		(*pgo_flush)		/* flush pages out of obj */
				__P((struct uvm_object *, vaddr_t,
				     vaddr_t, int));
	int			(*pgo_get)		/* get/read page */
				__P((struct uvm_object *, vaddr_t,
				     vm_page_t *, int *, int, vm_prot_t, int, int));
	int			(*pgo_asyncget)		/* start async get */
				__P((struct uvm_object *, vaddr_t, int));
	int			(*pgo_put)		/* put/write page */
				__P((struct uvm_object *, vm_page_t *,
				     int, boolean_t));
	void			(*pgo_cluster)		/* return range of cluster */
				__P((struct uvm_object *, vaddr_t, vaddr_t *,
				     vaddr_t *));
	struct vm_page **	(*pgo_mk_pcluster)	/* make "put" cluster */
				__P((struct uvm_object *, struct vm_page **,
				     int *, struct vm_page *, int, vaddr_t,
				     vaddr_t));
	void			(*pgo_shareprot)	/* share protect */
				__P((vm_map_entry_t, vm_prot_t));
	void			(*pgo_aiodone)		/* async iodone */
				__P((struct uvm_aiodesc *));
	boolean_t		(*pgo_releasepg)	/* release page */
				__P((struct vm_page *, struct vm_page **));
};
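
/*
 * Illustrative sketch, not part of this header: each pager exports one of
 * these tables and points its objects' pgops at it.  The "example_*" names
 * below are made up; entries a pager does not implement are simply left
 * NULL (the fault and shareprot hooks here, for instance):
 *
 *	struct uvm_pagerops example_pagerops = {
 *		example_init,
 *		example_reference,
 *		example_detach,
 *		NULL,			no special fault routine
 *		example_flush,
 *		example_get,
 *		example_asyncget,
 *		example_put,
 *		example_cluster,
 *		uvm_mk_pcluster,	generic cluster builder (see below)
 *		NULL,			no shareprot hook
 *		example_aiodone,
 *		example_releasepg
 *	};
 */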

/* pager flags [mostly for flush] */

#define PGO_CLEANIT	0x001	/* write dirty pages to backing store */
#define PGO_SYNCIO	0x002	/* if PGO_CLEANIT: use sync I/O? */
/*
 * obviously if neither PGO_DEACTIVATE nor PGO_FREE is set then the pages
 * stay where they are.
 */
#define PGO_DEACTIVATE	0x004	/* deactivate flushed pages */
#define PGO_FREE	0x008	/* free flushed pages */

#define PGO_ALLPAGES	0x010	/* flush whole object/get all pages */
#define PGO_DOACTCLUST	0x020	/* flag to mk_pcluster to include active */
#define PGO_LOCKED	0x040	/* fault data structures are locked [get] */
#define PGO_PDFREECLUST	0x080	/* daemon's free cluster flag [uvm_pager_put] */
#define PGO_REALLOCSWAP	0x100	/* reallocate swap area [pager_dropcluster] */

/* page we are not interested in getting */
#define PGO_DONTCARE	((struct vm_page *) -1)	/* [get only] */
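
/*
 * Illustrative sketch, not part of this header: the flush flags combine.
 * For example, "synchronously clean all dirty pages in [start, stop) and
 * then free them" would be requested from an object's pager roughly as
 * shown below ("uobj", "start", and "stop" are hypothetical locals):
 *
 *	uobj->pgops->pgo_flush(uobj, start, stop,
 *	    PGO_CLEANIT | PGO_SYNCIO | PGO_FREE);
 *
 * PGO_DONTCARE, by contrast, is not a flag: it is the marker a pgo_get
 * caller stores in slots of its page array that it does not want filled.
 */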

#ifdef _KERNEL

/*
 * handle inline options
 */

#ifdef UVM_PAGER_INLINE
#define PAGER_INLINE static __inline
#else
#define PAGER_INLINE /* nothing */
#endif /* UVM_PAGER_INLINE */

/*
 * prototypes
 */

void		uvm_pager_dropcluster __P((struct uvm_object *,
			struct vm_page *, struct vm_page **,
			int *, int, int));
void		uvm_pager_init __P((void));
int		uvm_pager_put __P((struct uvm_object *, struct vm_page *,
			struct vm_page ***, int *, int,
			vaddr_t, vaddr_t));

PAGER_INLINE struct vm_page *uvm_pageratop __P((vaddr_t));

vaddr_t		uvm_pagermapin __P((struct vm_page **, int,
			struct uvm_aiodesc **, int));
void		uvm_pagermapout __P((vaddr_t, int));
struct vm_page	**uvm_mk_pcluster __P((struct uvm_object *, struct vm_page **,
			int *, struct vm_page *, int,
			vaddr_t, vaddr_t));
void		uvm_shareprot __P((vm_map_entry_t, vm_prot_t));
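
/*
 * Illustrative sketch, not part of this header: a pager typically brackets
 * the actual device I/O with uvm_pagermapin()/uvm_pagermapout(), which map
 * the page array into kernel virtual space for the transfer.  "pps" and
 * "npages" are hypothetical locals; passing NULL for the aiodesc pointer is
 * assumed here to mean a plain synchronous mapping, and the final argument
 * is assumed to be a "may wait for KVA" flag.
 *
 *	vaddr_t kva;
 *
 *	kva = uvm_pagermapin(pps, npages, NULL, TRUE);
 *	... perform the I/O on [kva, kva + npages * PAGE_SIZE) ...
 *	uvm_pagermapout(kva, npages);
 */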

#endif /* _KERNEL */

#endif /* _UVM_UVM_PAGER_H_ */