uvm_page_array.c revision 1.5.2.2 1 1.5.2.2 martin /* $NetBSD: uvm_page_array.c,v 1.5.2.2 2020/04/08 14:09:04 martin Exp $ */
2 1.5.2.2 martin
3 1.5.2.2 martin /*-
4 1.5.2.2 martin * Copyright (c)2011 YAMAMOTO Takashi,
5 1.5.2.2 martin * All rights reserved.
6 1.5.2.2 martin *
7 1.5.2.2 martin * Redistribution and use in source and binary forms, with or without
8 1.5.2.2 martin * modification, are permitted provided that the following conditions
9 1.5.2.2 martin * are met:
10 1.5.2.2 martin * 1. Redistributions of source code must retain the above copyright
11 1.5.2.2 martin * notice, this list of conditions and the following disclaimer.
12 1.5.2.2 martin * 2. Redistributions in binary form must reproduce the above copyright
13 1.5.2.2 martin * notice, this list of conditions and the following disclaimer in the
14 1.5.2.2 martin * documentation and/or other materials provided with the distribution.
15 1.5.2.2 martin *
16 1.5.2.2 martin * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 1.5.2.2 martin * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 1.5.2.2 martin * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 1.5.2.2 martin * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 1.5.2.2 martin * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 1.5.2.2 martin * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 1.5.2.2 martin * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 1.5.2.2 martin * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 1.5.2.2 martin * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 1.5.2.2 martin * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 1.5.2.2 martin * SUCH DAMAGE.
27 1.5.2.2 martin */
28 1.5.2.2 martin
29 1.5.2.2 martin #include <sys/cdefs.h>
30 1.5.2.2 martin __KERNEL_RCSID(0, "$NetBSD: uvm_page_array.c,v 1.5.2.2 2020/04/08 14:09:04 martin Exp $");
31 1.5.2.2 martin
32 1.5.2.2 martin #include <sys/param.h>
33 1.5.2.2 martin #include <sys/systm.h>
34 1.5.2.2 martin
35 1.5.2.2 martin #include <uvm/uvm_extern.h>
36 1.5.2.2 martin #include <uvm/uvm_object.h>
37 1.5.2.2 martin #include <uvm/uvm_page.h>
38 1.5.2.2 martin #include <uvm/uvm_page_array.h>
39 1.5.2.2 martin
40 1.5.2.2 martin /*
41 1.5.2.2 martin * uvm_page_array_init: initialize the array.
42 1.5.2.2 martin */
43 1.5.2.2 martin
44 1.5.2.2 martin void
45 1.5.2.2 martin uvm_page_array_init(struct uvm_page_array *ar)
46 1.5.2.2 martin {
47 1.5.2.2 martin
48 1.5.2.2 martin ar->ar_idx = ar->ar_npages = 0;
49 1.5.2.2 martin }
50 1.5.2.2 martin
/*
 * uvm_page_array_fini: tear down the array.
 */

void
uvm_page_array_fini(struct uvm_page_array *ar)
{

	/*
	 * there are no resources to release at the moment.
	 */
#if defined(DIAGNOSTIC)
	/*
	 * scribble an out-of-range index/count pair so that any use of
	 * the array after fini trips the KASSERT in uvm_page_array_peek.
	 */
	ar->ar_idx = 1000;
	ar->ar_npages = 1;
#endif /* defined(DIAGNOSTIC) */
}
71 1.5.2.2 martin
72 1.5.2.2 martin /*
73 1.5.2.2 martin * uvm_page_array_clear: forget the cached pages and initialize the array.
74 1.5.2.2 martin */
75 1.5.2.2 martin
76 1.5.2.2 martin void
77 1.5.2.2 martin uvm_page_array_clear(struct uvm_page_array *ar)
78 1.5.2.2 martin {
79 1.5.2.2 martin
80 1.5.2.2 martin KASSERT(ar->ar_idx <= ar->ar_npages);
81 1.5.2.2 martin uvm_page_array_init(ar);
82 1.5.2.2 martin }
83 1.5.2.2 martin
84 1.5.2.2 martin /*
85 1.5.2.2 martin * uvm_page_array_peek: return the next cached page.
86 1.5.2.2 martin */
87 1.5.2.2 martin
88 1.5.2.2 martin struct vm_page *
89 1.5.2.2 martin uvm_page_array_peek(struct uvm_page_array *ar)
90 1.5.2.2 martin {
91 1.5.2.2 martin
92 1.5.2.2 martin KASSERT(ar->ar_idx <= ar->ar_npages);
93 1.5.2.2 martin if (ar->ar_idx == ar->ar_npages) {
94 1.5.2.2 martin return NULL;
95 1.5.2.2 martin }
96 1.5.2.2 martin return ar->ar_pages[ar->ar_idx];
97 1.5.2.2 martin }
98 1.5.2.2 martin
99 1.5.2.2 martin /*
100 1.5.2.2 martin * uvm_page_array_advance: advance the array to the next cached page
101 1.5.2.2 martin */
102 1.5.2.2 martin
103 1.5.2.2 martin void
104 1.5.2.2 martin uvm_page_array_advance(struct uvm_page_array *ar)
105 1.5.2.2 martin {
106 1.5.2.2 martin
107 1.5.2.2 martin KASSERT(ar->ar_idx <= ar->ar_npages);
108 1.5.2.2 martin ar->ar_idx++;
109 1.5.2.2 martin KASSERT(ar->ar_idx <= ar->ar_npages);
110 1.5.2.2 martin }
111 1.5.2.2 martin
/*
 * uvm_page_array_fill: lookup pages and keep them cached.
 *
 * return 0 on success.  in that case, cache the result in the array
 * so that they will be picked by later uvm_page_array_peek.
 *
 * nwant is a number of pages to fetch.  a caller should consider it a hint.
 * nwant == 0 means a caller have no specific idea.
 *
 * return ENOENT if no pages are found.
 *
 * flags (see uvm_page_array.h):
 *	UVM_PAGE_ARRAY_FILL_DIRTY	- only pages tagged dirty
 *	UVM_PAGE_ARRAY_FILL_WRITEBACK	- also accept writeback-tagged pages
 *					  (only meaningful with _DIRTY)
 *	UVM_PAGE_ARRAY_FILL_DENSE	- require a contiguous run of pages
 *	UVM_PAGE_ARRAY_FILL_BACKWARD	- scan toward lower offsets
 *
 * called with object lock held.
 */

int
uvm_page_array_fill(struct uvm_page_array *ar, struct uvm_object *uobj,
    voff_t off, unsigned int nwant, unsigned int flags)
{
	unsigned int npages;
#if defined(DEBUG)
	unsigned int i;
#endif /* defined(DEBUG) */
	/* default to fetching as many pages as the array can hold. */
	unsigned int maxpages = __arraycount(ar->ar_pages);
	const bool dense = (flags & UVM_PAGE_ARRAY_FILL_DENSE) != 0;
	const bool backward = (flags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0;

	/* nwant, when non-zero, further caps the batch size. */
	if (nwant != 0 && nwant < maxpages) {
		maxpages = nwant;
	}
#if 0 /* called from DDB for "show obj/f" without lock */
	KASSERT(rw_lock_held(uobj->vmobjlock));
#endif
	/* the caller must have consumed (or cleared) any previous batch. */
	KASSERT(uvm_page_array_peek(ar) == NULL);
	if ((flags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0) {
		/* tagged lookup: dirty pages, optionally writeback too. */
		unsigned int tagmask = UVM_PAGE_DIRTY_TAG;

		if ((flags & UVM_PAGE_ARRAY_FILL_WRITEBACK) != 0) {
			tagmask |= UVM_PAGE_WRITEBACK_TAG;
		}
		npages =
		    (backward ? radix_tree_gang_lookup_tagged_node_reverse :
		    radix_tree_gang_lookup_tagged_node)(
		    &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages,
		    maxpages, dense, tagmask);
	} else {
		/* untagged lookup: any page in the object's radix tree. */
		npages =
		    (backward ? radix_tree_gang_lookup_node_reverse :
		    radix_tree_gang_lookup_node)(
		    &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages,
		    maxpages, dense);
	}
	if (npages == 0) {
		/* nothing found; leave the array empty for the caller. */
		uvm_page_array_clear(ar);
		return ENOENT;
	}
	KASSERT(npages <= maxpages);
	ar->ar_npages = npages;
	ar->ar_idx = 0;
#if defined(DEBUG)
	/*
	 * sanity-check the batch: every page belongs to uobj, lies on the
	 * requested side of 'off', and the offsets are strictly ordered
	 * in the scan direction.
	 */
	for (i = 0; i < ar->ar_npages; i++) {
		struct vm_page * const pg = ar->ar_pages[i];

		KDASSERT(pg != NULL);
		KDASSERT(pg->uobject == uobj);
		if (backward) {
			KDASSERT(pg->offset <= off);
			KDASSERT(i == 0 ||
			    pg->offset < ar->ar_pages[i - 1]->offset);
		} else {
			KDASSERT(pg->offset >= off);
			KDASSERT(i == 0 ||
			    pg->offset > ar->ar_pages[i - 1]->offset);
		}
	}
#endif /* defined(DEBUG) */
	return 0;
}
189 1.5.2.2 martin
190 1.5.2.2 martin /*
191 1.5.2.2 martin * uvm_page_array_fill_and_peek:
192 1.5.2.2 martin * same as uvm_page_array_peek except that, if the array is empty, try to fill
193 1.5.2.2 martin * it first.
194 1.5.2.2 martin */
195 1.5.2.2 martin
196 1.5.2.2 martin struct vm_page *
197 1.5.2.2 martin uvm_page_array_fill_and_peek(struct uvm_page_array *a, struct uvm_object *uobj,
198 1.5.2.2 martin voff_t off, unsigned int nwant, unsigned int flags)
199 1.5.2.2 martin {
200 1.5.2.2 martin struct vm_page *pg;
201 1.5.2.2 martin int error;
202 1.5.2.2 martin
203 1.5.2.2 martin pg = uvm_page_array_peek(a);
204 1.5.2.2 martin if (pg != NULL) {
205 1.5.2.2 martin return pg;
206 1.5.2.2 martin }
207 1.5.2.2 martin error = uvm_page_array_fill(a, uobj, off, nwant, flags);
208 1.5.2.2 martin if (error != 0) {
209 1.5.2.2 martin return NULL;
210 1.5.2.2 martin }
211 1.5.2.2 martin pg = uvm_page_array_peek(a);
212 1.5.2.2 martin KASSERT(pg != NULL);
213 1.5.2.2 martin return pg;
214 1.5.2.2 martin }
215