/*	$NetBSD: uvm_page_array.c,v 1.1.2.1 2011/11/02 21:55:39 yamt Exp $	*/

/*-
 * Copyright (c)2011 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page_array.c,v 1.1.2.1 2011/11/02 21:55:39 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_page_array.h>

/*
 * uvm_page_array_init: initialize the array.
 */

void
uvm_page_array_init(struct uvm_page_array *ar)
{

	ar->ar_idx = ar->ar_npages = 0;
}

/*
 * uvm_page_array_fini: clean up the array.
 */

void
uvm_page_array_fini(struct uvm_page_array *ar)
{

	/*
	 * currently nothing to do.
	 */
#if defined(DIAGNOSTIC)
	/*
	 * poison the array to trigger the assertion in uvm_page_array_peek
	 * so that usage errors can be detected.
	 */
	ar->ar_npages = 1;
	ar->ar_idx = 1000;
#endif /* defined(DIAGNOSTIC) */
}

/*
 * uvm_page_array_clear: forget the cached pages and initialize the array.
 */

void
uvm_page_array_clear(struct uvm_page_array *ar)
{

	KASSERT(ar->ar_idx <= ar->ar_npages);
	uvm_page_array_init(ar);
}

/*
 * uvm_page_array_peek: return the next cached page.
 */

struct vm_page *
uvm_page_array_peek(struct uvm_page_array *ar)
{

	KASSERT(ar->ar_idx <= ar->ar_npages);
	if (ar->ar_idx == ar->ar_npages) {
		return NULL;
	}
	return ar->ar_pages[ar->ar_idx];
}

/*
 * uvm_page_array_advance: advance the array to the next cached page.
 */

void
uvm_page_array_advance(struct uvm_page_array *ar)
{

	KASSERT(ar->ar_idx <= ar->ar_npages);
	ar->ar_idx++;
	KASSERT(ar->ar_idx <= ar->ar_npages);
}

/*
 * uvm_page_array_fill: look up pages and keep them cached.
 *
 * return 0 on success.  in that case, the results are cached in the array
 * and will be returned by later uvm_page_array_peek calls.
 *
 * return ENOENT if no pages are found.
 *
 * called with the object lock held.
 */

int
uvm_page_array_fill(struct uvm_page_array *ar, struct uvm_object *uobj,
    voff_t off, bool dirtyonly)
{
	unsigned int npages;
#if defined(DEBUG)
	unsigned int i;
#endif /* defined(DEBUG) */
	const unsigned int maxpages = __arraycount(ar->ar_pages);

	KASSERT(mutex_owned(uobj->vmobjlock));
	KASSERT(uvm_page_array_peek(ar) == NULL);
	if (dirtyonly) {
		npages = radix_tree_gang_lookup_tagged_node(
		    &uobj->uo_pages, off >> PAGE_SHIFT,
		    (void **)ar->ar_pages, maxpages,
		    UVM_PAGE_DIRTY_TAG);
	} else {
		npages = radix_tree_gang_lookup_node(
		    &uobj->uo_pages, off >> PAGE_SHIFT,
		    (void **)ar->ar_pages, maxpages);
	}
	if (npages == 0) {
		uvm_page_array_clear(ar);
		return ENOENT;
	}
	KASSERT(npages <= maxpages);
	ar->ar_npages = npages;
	ar->ar_idx = 0;
#if defined(DEBUG)
	for (i = 0; i < ar->ar_npages; i++) {
		struct vm_page * const pg = ar->ar_pages[i];

		KASSERT(pg != NULL);
		KASSERT(pg->uobject == uobj);
		KASSERT(pg->offset >= off);
		KASSERT(i == 0 || pg->offset > ar->ar_pages[i - 1]->offset);
	}
#endif /* defined(DEBUG) */
	return 0;
}
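
/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * consumer fills the array, then alternates uvm_page_array_peek and
 * uvm_page_array_advance, refilling from just past the last page seen
 * once the cache runs dry.  The guard macro UVM_PAGE_ARRAY_EXAMPLE and
 * the process_page callback are hypothetical names used only here.
 */
#if defined(UVM_PAGE_ARRAY_EXAMPLE)
static void
uvm_page_array_example(struct uvm_object *uobj, voff_t off)
{
	struct uvm_page_array a;

	uvm_page_array_init(&a);
	KASSERT(mutex_owned(uobj->vmobjlock));
	for (;;) {
		struct vm_page *pg;

		pg = uvm_page_array_peek(&a);
		if (pg == NULL) {
			/*
			 * the cache is empty; look up the next batch of
			 * pages.  ENOENT means we have visited them all.
			 */
			if (uvm_page_array_fill(&a, uobj, off, false) != 0) {
				break;
			}
			pg = uvm_page_array_peek(&a);
			KASSERT(pg != NULL);
		}
		/* process_page(pg); */
		off = pg->offset + PAGE_SIZE;	/* resume after this page */
		uvm_page_array_advance(&a);
	}
	uvm_page_array_fini(&a);
}
#endif /* defined(UVM_PAGE_ARRAY_EXAMPLE) */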