/*	$NetBSD: uvm_page_array.c,v 1.8 2020/05/25 22:01:26 ad Exp $	*/

/*-
 * Copyright (c)2011 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page_array.c,v 1.8 2020/05/25 22:01:26 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_page_array.h>

/*
 * uvm_page_array_init: initialize the array.
 */

void
uvm_page_array_init(struct uvm_page_array *ar, struct uvm_object *uobj,
    unsigned int flags)
{

	ar->ar_idx = 0;
	ar->ar_npages = 0;
	ar->ar_uobj = uobj;
	ar->ar_flags = flags;
}

/*
 * uvm_page_array_fini: clean up the array.
 */

void
uvm_page_array_fini(struct uvm_page_array *ar)
{

	/*
	 * currently nothing to do.
	 */
#if defined(DIAGNOSTIC)
	/*
	 * poison the array to trigger the assertion in uvm_page_array_peek
	 * and detect usage errors.
	 */
	ar->ar_npages = 1;
	ar->ar_idx = 1000;
#endif /* defined(DIAGNOSTIC) */
}

/*
 * uvm_page_array_clear: forget the cached pages and initialize the array.
 */

void
uvm_page_array_clear(struct uvm_page_array *ar)
{

	KASSERT(ar->ar_idx <= ar->ar_npages);
	ar->ar_idx = 0;
	ar->ar_npages = 0;
}

/*
 * uvm_page_array_peek: return the next cached page.
 */

struct vm_page *
uvm_page_array_peek(struct uvm_page_array *ar)
{

	KASSERT(ar->ar_idx <= ar->ar_npages);
	if (ar->ar_idx == ar->ar_npages) {
		return NULL;
	}
	return ar->ar_pages[ar->ar_idx];
}

/*
 * uvm_page_array_advance: advance the array to the next cached page.
 */

void
uvm_page_array_advance(struct uvm_page_array *ar)
{

	KASSERT(ar->ar_idx <= ar->ar_npages);
	ar->ar_idx++;
	KASSERT(ar->ar_idx <= ar->ar_npages);
}
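
/*
 * Usage note (illustrative sketch, not part of the original code): peek
 * does not consume the cached page; the same page is returned until
 * uvm_page_array_advance is called, and NULL is returned once the cache
 * is exhausted.  "a" below is a hypothetical, already-filled array.
 *
 *	struct vm_page *pg;
 *
 *	pg = uvm_page_array_peek(&a);
 *	if (pg != NULL) {
 *		(use pg)
 *		uvm_page_array_advance(&a);
 *	}
 *	(a later peek returns the next cached page, or NULL once the
 *	 cache is exhausted)
 */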

/*
 * uvm_page_array_fill: look up pages and keep them cached.
 *
 * return 0 on success.  in that case the results are cached in the array
 * so that they will be returned by later uvm_page_array_peek calls.
 *
 * nwant is the number of pages to fetch.  the caller should consider it
 * a hint only; nwant == 0 means the caller has no particular preference.
 *
 * return ENOENT if no pages are found.
 *
 * called with the object lock held.
 */

int
uvm_page_array_fill(struct uvm_page_array *ar, voff_t off, unsigned int nwant)
{
	unsigned int npages;
#if defined(DEBUG)
	unsigned int i;
#endif /* defined(DEBUG) */
	unsigned int maxpages = __arraycount(ar->ar_pages);
	struct uvm_object *uobj = ar->ar_uobj;
	const int flags = ar->ar_flags;
	const bool dense = (flags & UVM_PAGE_ARRAY_FILL_DENSE) != 0;
	const bool backward = (flags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0;

	if (nwant != 0 && nwant < maxpages) {
		maxpages = nwant;
	}
#if 0 /* called from DDB for "show obj/f" without lock */
	KASSERT(rw_lock_held(uobj->vmobjlock));
#endif
	KASSERT(uvm_page_array_peek(ar) == NULL);
	if ((flags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0) {
		unsigned int tagmask = UVM_PAGE_DIRTY_TAG;

		if ((flags & UVM_PAGE_ARRAY_FILL_WRITEBACK) != 0) {
			tagmask |= UVM_PAGE_WRITEBACK_TAG;
		}
		npages =
		    (backward ? radix_tree_gang_lookup_tagged_node_reverse :
		    radix_tree_gang_lookup_tagged_node)(
		    &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages,
		    maxpages, dense, tagmask);
	} else {
		npages =
		    (backward ? radix_tree_gang_lookup_node_reverse :
		    radix_tree_gang_lookup_node)(
		    &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages,
		    maxpages, dense);
	}
	if (npages == 0) {
		if (flags != 0) {
			/*
			 * if dense or looking for tagged entries (or
			 * working backwards), fail right away.
			 */
			uvm_page_array_clear(ar);
			return ENOENT;
		} else {
			/*
			 * there's nothing else to be found with the current
			 * set of arguments, in the current version of the
			 * tree.
			 *
			 * minimize repeated tree lookups by "finding" a
			 * null pointer, in case the caller keeps looping (a
			 * common use case).
			 */
			npages = 1;
			ar->ar_pages[0] = NULL;
		}
	}
	KASSERT(npages <= maxpages);
	ar->ar_npages = npages;
	ar->ar_idx = 0;
#if defined(DEBUG)
	for (i = 0; i < ar->ar_npages; i++) {
		struct vm_page * const pg = ar->ar_pages[i];

		if (pg == NULL) {
			continue;
		}
		KDASSERT(pg->uobject == uobj);
		if (backward) {
			KDASSERT(pg->offset <= off);
			KDASSERT(i == 0 ||
			    pg->offset < ar->ar_pages[i - 1]->offset);
		} else {
			KDASSERT(pg->offset >= off);
			KDASSERT(i == 0 ||
			    pg->offset > ar->ar_pages[i - 1]->offset);
		}
	}
#endif /* defined(DEBUG) */
	return 0;
}
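
/*
 * Usage sketch (illustrative only, not part of the original code): filling
 * the array directly and handling the 0/ENOENT contract.  "uobj" and "off"
 * are hypothetical, the caller is assumed to hold uobj->vmobjlock, and only
 * a single batch of cached pages is consumed; real callers typically loop
 * with uvm_page_array_fill_and_peek instead (see below).
 *
 *	struct uvm_page_array a;
 *	struct vm_page *pg;
 *	int error;
 *
 *	uvm_page_array_init(&a, uobj, UVM_PAGE_ARRAY_FILL_DIRTY);
 *	error = uvm_page_array_fill(&a, off, 0);
 *	if (error == ENOENT) {
 *		(no dirty pages found at or after off)
 *	} else {
 *		while ((pg = uvm_page_array_peek(&a)) != NULL) {
 *			(process one cached dirty page)
 *			uvm_page_array_advance(&a);
 *		}
 *	}
 *	uvm_page_array_fini(&a);
 */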

/*
 * uvm_page_array_fill_and_peek:
 * same as uvm_page_array_peek, except that if the array is empty it is
 * filled first.
 */

struct vm_page *
uvm_page_array_fill_and_peek(struct uvm_page_array *ar, voff_t off,
    unsigned int nwant)
{
	int error;

	if (ar->ar_idx != ar->ar_npages) {
		return ar->ar_pages[ar->ar_idx];
	}
	error = uvm_page_array_fill(ar, off, nwant);
	if (error != 0) {
		return NULL;
	}
	return uvm_page_array_peek(ar);
}
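
/*
 * Typical usage (illustrative sketch, not part of the original code): walk
 * an object's pages in ascending offset order.  "start" and "end" are
 * hypothetical bounds, and the caller is assumed to hold uobj->vmobjlock
 * across the loop.
 *
 *	struct uvm_page_array a;
 *	struct vm_page *pg;
 *	voff_t off;
 *
 *	uvm_page_array_init(&a, uobj, 0);
 *	off = start;
 *	while ((pg = uvm_page_array_fill_and_peek(&a, off, 0)) != NULL) {
 *		if (pg->offset >= end) {
 *			break;
 *		}
 *		(process pg)
 *		off = pg->offset + PAGE_SIZE;
 *		uvm_page_array_advance(&a);
 *	}
 *	uvm_page_array_fini(&a);
 */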