/*	$NetBSD: uvm_page_array.c,v 1.5 2020/03/17 00:30:17 ad Exp $	*/

/*-
 * Copyright (c)2011 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page_array.c,v 1.5 2020/03/17 00:30:17 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_page_array.h>

/*
 * uvm_page_array_init: initialize the array.
 */

void
uvm_page_array_init(struct uvm_page_array *ar)
{

	ar->ar_idx = ar->ar_npages = 0;
}

/*
 * uvm_page_array_fini: clean up the array.
 */

void
uvm_page_array_fini(struct uvm_page_array *ar)
{

	/*
	 * currently nothing to do.
	 */
#if defined(DIAGNOSTIC)
	/*
	 * poison the array to trigger the assertion in
	 * uvm_page_array_peek and detect usage errors:
	 * ar_idx > ar_npages violates the invariant checked there.
	 */
	ar->ar_npages = 1;
	ar->ar_idx = 1000;
#endif /* defined(DIAGNOSTIC) */
}

/*
 * uvm_page_array_clear: forget the cached pages and initialize the array.
 */

void
uvm_page_array_clear(struct uvm_page_array *ar)
{

	KASSERT(ar->ar_idx <= ar->ar_npages);
	uvm_page_array_init(ar);
}

/*
 * uvm_page_array_peek: return the next cached page.
 */

struct vm_page *
uvm_page_array_peek(struct uvm_page_array *ar)
{

	KASSERT(ar->ar_idx <= ar->ar_npages);
	if (ar->ar_idx == ar->ar_npages) {
		return NULL;
	}
	return ar->ar_pages[ar->ar_idx];
}

/*
 * uvm_page_array_advance: advance the array to the next cached page.
 */

void
uvm_page_array_advance(struct uvm_page_array *ar)
{

	KASSERT(ar->ar_idx <= ar->ar_npages);
	ar->ar_idx++;
	KASSERT(ar->ar_idx <= ar->ar_npages);
}
/*
 * uvm_page_array_fill: lookup pages and keep them cached.
 *
 * return 0 on success.  in that case, the result is cached in the array
 * so that the pages will be picked up by later uvm_page_array_peek calls.
 *
 * nwant is the number of pages to fetch.  a caller should consider it
 * a hint; nwant == 0 means the caller has no particular number in mind.
 *
 * return ENOENT if no pages are found.
 *
 * called with the object lock held.
 */

int
uvm_page_array_fill(struct uvm_page_array *ar, struct uvm_object *uobj,
    voff_t off, unsigned int nwant, unsigned int flags)
{
	unsigned int npages;
#if defined(DEBUG)
	unsigned int i;
#endif /* defined(DEBUG) */
	unsigned int maxpages = __arraycount(ar->ar_pages);
	const bool dense = (flags & UVM_PAGE_ARRAY_FILL_DENSE) != 0;
	const bool backward = (flags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0;

	if (nwant != 0 && nwant < maxpages) {
		maxpages = nwant;
	}
#if 0 /* called from DDB for "show obj/f" without lock */
	KASSERT(rw_lock_held(uobj->vmobjlock));
#endif
	KASSERT(uvm_page_array_peek(ar) == NULL);
	if ((flags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0) {
		unsigned int tagmask = UVM_PAGE_DIRTY_TAG;

		if ((flags & UVM_PAGE_ARRAY_FILL_WRITEBACK) != 0) {
			tagmask |= UVM_PAGE_WRITEBACK_TAG;
		}
		npages =
		    (backward ? radix_tree_gang_lookup_tagged_node_reverse :
		    radix_tree_gang_lookup_tagged_node)(
		    &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages,
		    maxpages, dense, tagmask);
	} else {
		npages =
		    (backward ? radix_tree_gang_lookup_node_reverse :
		    radix_tree_gang_lookup_node)(
		    &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages,
		    maxpages, dense);
	}
	if (npages == 0) {
		uvm_page_array_clear(ar);
		return ENOENT;
	}
	KASSERT(npages <= maxpages);
	ar->ar_npages = npages;
	ar->ar_idx = 0;
#if defined(DEBUG)
	for (i = 0; i < ar->ar_npages; i++) {
		struct vm_page * const pg = ar->ar_pages[i];

		KDASSERT(pg != NULL);
		KDASSERT(pg->uobject == uobj);
		if (backward) {
			KDASSERT(pg->offset <= off);
			KDASSERT(i == 0 ||
			    pg->offset < ar->ar_pages[i - 1]->offset);
		} else {
			KDASSERT(pg->offset >= off);
			KDASSERT(i == 0 ||
			    pg->offset > ar->ar_pages[i - 1]->offset);
		}
	}
#endif /* defined(DEBUG) */
	return 0;
}
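
/*
 * Example (illustrative sketch, not part of this file's code): a direct
 * uvm_page_array_fill call as a page-out style scan might use it.
 * "uobj" and "off" stand for caller state, and the caller is assumed
 * to hold uobj->vmobjlock.  The cached pages would then be consumed
 * with peek/advance; see the sketch at the end of this file.
 */
#if 0
	struct uvm_page_array a;
	int error;

	uvm_page_array_init(&a);
	/* hint that up to 16 pages are wanted, restricted to dirty ones */
	error = uvm_page_array_fill(&a, uobj, off, 16,
	    UVM_PAGE_ARRAY_FILL_DIRTY);
	if (error == ENOENT) {
		/* no dirty pages at or after "off" in this object */
	}
	uvm_page_array_fini(&a);
#endif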

/*
 * uvm_page_array_fill_and_peek:
 * same as uvm_page_array_peek except that, if the array is empty, try to fill
 * it first.
 */

struct vm_page *
uvm_page_array_fill_and_peek(struct uvm_page_array *a, struct uvm_object *uobj,
    voff_t off, unsigned int nwant, unsigned int flags)
{
	struct vm_page *pg;
	int error;

	pg = uvm_page_array_peek(a);
	if (pg != NULL) {
		return pg;
	}
	error = uvm_page_array_fill(a, uobj, off, nwant, flags);
	if (error != 0) {
		return NULL;
	}
	pg = uvm_page_array_peek(a);
	KASSERT(pg != NULL);
	return pg;
}
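
/*
 * Example (illustrative sketch, not part of this file's code): the
 * typical loop combining these functions to visit an object's pages in
 * ascending offset order.  "uobj" stands for a uvm_object whose
 * vmobjlock the caller is assumed to hold.  While cached pages remain,
 * fill_and_peek returns them without consulting "off"; when the cache
 * empties, it refills starting from the updated offset.
 */
#if 0
	struct uvm_page_array a;
	struct vm_page *pg;
	voff_t off;

	uvm_page_array_init(&a);
	off = 0;
	while ((pg = uvm_page_array_fill_and_peek(&a, uobj, off, 0, 0)) !=
	    NULL) {
		/* ... operate on pg here ... */
		off = pg->offset + PAGE_SIZE;
		uvm_page_array_advance(&a);
	}
	uvm_page_array_fini(&a);
#endif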