/*	$NetBSD: uvm_bio.c,v 1.126 2021/04/01 06:26:26 simonb Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */
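
/*
 * Overview: UBC maintains ubc_nwins fixed-size kernel virtual windows
 * ("ubc_map"s), each of which can be mapped over part of a uvm_object
 * (typically a vnode).  Windows are looked up via a hash on
 * (object, offset); windows with no references sit on inactive queues
 * awaiting reuse.  Data moves through a window with uiomove(), and the
 * backing pages are faulted in on demand by ubc_fault() below.  File
 * systems normally use the ubc_uiomove() and ubc_zerorange() wrappers
 * rather than calling ubc_alloc()/ubc_release() directly.
 */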

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.126 2021/04/01 06:26:26 simonb Exp $");

#include "opt_uvmhist.h"
#include "opt_ubc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/bitops.h>		/* for ilog2() */

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>

#ifdef PMAP_DIRECT
#  define UBC_USE_PMAP_DIRECT
#endif

/*
 * local functions
 */

static int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
			  int, int, vm_prot_t, int);
static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);
static int	ubchash_stats(struct hashstat_sysctl *hs, bool fill);
#ifdef UBC_USE_PMAP_DIRECT
static int __noinline ubc_uiomove_direct(struct uvm_object *, struct uio *, vsize_t,
			  int, int);
static void __noinline ubc_zerorange_direct(struct uvm_object *, off_t, size_t, int);

/* XXX disabled by default until the kinks are worked out. */
bool ubc_direct = false;
#endif

/*
 * local data structures
 */
78 1.2 chs
79 1.18 chs #define UBC_HASH(uobj, offset) \
80 1.18 chs (((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
81 1.2 chs ubc_object.hashmask)
82 1.2 chs
83 1.18 chs #define UBC_QUEUE(offset) \
84 1.18 chs (&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) & \
85 1.18 chs (UBC_NQUEUES - 1)])
86 1.18 chs
87 1.18 chs #define UBC_UMAP_ADDR(u) \
88 1.18 chs (vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))
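
/*
 * For example, on a port where ubc_winshift is 13 (an 8 KB window), a
 * file offset of 0x5a123 lies in the window whose umap->offset is
 * 0x5a000, at slot offset 0x123 within that window.  UBC_HASH() mixes
 * the object pointer and the page number to pick a hash bucket,
 * UBC_QUEUE() spreads windows across the inactive queues by window
 * number, and UBC_UMAP_ADDR() recovers a window's kernel VA from its
 * index in the umap array.
 */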


#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map {
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */
	int			advice;

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
	LIST_ENTRY(ubc_map)	list;		/* per-object list */
};

TAILQ_HEAD(ubc_inactive_head, ubc_map);
static struct ubc_object {
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	struct ubc_inactive_head *inactive;
					/* inactive queues for ubc_map's */
} ubc_object;

const struct uvm_pagerops ubc_pager = {
	.pgo_fault = ubc_fault,
	/* ... rest are NULL */
};

/* Use value at least as big as maximum page size supported by architecture */
#define UBC_MAX_WINSHIFT	\
    ((1 << UBC_WINSHIFT) > MAX_PAGE_SIZE ? UBC_WINSHIFT : ilog2(MAX_PAGE_SIZE))
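
/*
 * For instance, if UBC_WINSHIFT were 13 (an 8 KB window) on an
 * architecture whose MAX_PAGE_SIZE is 16 KB, then 1 << 13 == 8192 is
 * smaller than the maximum page size, so the window is widened to
 * ilog2(16384) == 14, i.e. one 16 KB page per window; otherwise
 * UBC_WINSHIFT is used unchanged.
 */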

int ubc_nwins = UBC_NWINS;
const int ubc_winshift = UBC_MAX_WINSHIFT;
const int ubc_winsize = 1 << UBC_MAX_WINSHIFT;
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif

#if defined(UBC_STATS)

#define	UBC_EVCNT_DEFINE(name) \
struct evcnt ubc_evcnt_##name = \
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "ubc", #name); \
EVCNT_ATTACH_STATIC(ubc_evcnt_##name);
#define	UBC_EVCNT_INCR(name) ubc_evcnt_##name.ev_count++

#else /* defined(UBC_STATS) */

#define	UBC_EVCNT_DEFINE(name)	/* nothing */
#define	UBC_EVCNT_INCR(name)	/* nothing */

#endif /* defined(UBC_STATS) */

UBC_EVCNT_DEFINE(wincachehit)
UBC_EVCNT_DEFINE(wincachemiss)
UBC_EVCNT_DEFINE(faultbusy)

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	/*
	 * Make sure ubc_winshift is sane.
	 */
	KASSERT(ubc_winshift >= PAGE_SHIFT);

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	uvm_obj_init(&ubc_object.uobj, &ubc_pager, true, UVM_OBJ_KERN);

	ubc_object.umap = kmem_zalloc(ubc_nwins * sizeof(struct ubc_map),
	    KM_SLEEP);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");

	vaddr_t va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_object.inactive = kmem_alloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), KM_SLEEP);
	for (int i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (int i = 0; i < ubc_nwins; i++) {
		struct ubc_map *umap;
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
		    umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, true,
	    &ubc_object.hashmask);
	for (int i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
	    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}

	hashstat_register("ubchash", ubchash_stats);
}

void
ubchist_init(void)
{

	UVMHIST_INIT(ubchist, 300);
}

/*
 * ubc_fault_page: helper of ubc_fault to handle a single page.
 *
 * => Caller has UVM object locked.
 * => Caller will perform pmap_update().
 */

static inline int
ubc_fault_page(const struct uvm_faultinfo *ufi, const struct ubc_map *umap,
    struct vm_page *pg, vm_prot_t prot, vm_prot_t access_type, vaddr_t va)
{
	vm_prot_t mask;
	int error;
	bool rdonly;

	KASSERT(rw_write_held(pg->uobject->vmobjlock));

	KASSERT((pg->flags & PG_FAKE) == 0);
	if (pg->flags & PG_RELEASED) {
		uvm_pagefree(pg);
		return 0;
	}
	if (pg->loan_count != 0) {

		/*
		 * Avoid unneeded loan break, if possible.
		 */

		if ((access_type & VM_PROT_WRITE) == 0) {
			prot &= ~VM_PROT_WRITE;
		}
		if (prot & VM_PROT_WRITE) {
			struct vm_page *newpg;

			newpg = uvm_loanbreak(pg);
			if (newpg == NULL) {
				uvm_page_unbusy(&pg, 1);
				return ENOMEM;
			}
			pg = newpg;
		}
	}

	/*
	 * Note that a page whose backing store is partially allocated
	 * is marked as PG_RDONLY.
	 *
	 * It is the responsibility of ubc_alloc's caller to allocate
	 * backing blocks before writing to the window.
	 */

	KASSERT((pg->flags & PG_RDONLY) == 0 ||
	    (access_type & VM_PROT_WRITE) == 0 ||
	    pg->offset < umap->writeoff ||
	    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);

	rdonly = uvm_pagereadonly_p(pg);
	mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;

	error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
	    prot & mask, PMAP_CANFAIL | (access_type & mask));

	uvm_pagelock(pg);
	uvm_pageactivate(pg);
	uvm_pagewakeup(pg);
	uvm_pageunlock(pg);
	pg->flags &= ~PG_BUSY;
	UVM_PAGE_OWN(pg, NULL);

	return error;
}

/*
 * ubc_fault: fault routine for ubc mapping
 */

static int
ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
    int ign3, int ign4, vm_prot_t access_type, int flags)
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];
	int i, error, npages;
	vm_prot_t prot;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;
	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/*
	 * some platforms cannot write to individual bytes atomically, so
	 * software has to do read/modify/write of larger quantities instead.
	 * this means that the access_type for "write" operations
	 * can be VM_PROT_READ, which confuses us mightily.
	 *
	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
	 */

	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
	UVMHIST_LOG(ubchist, "va %#jx ubc_offset %#jx access_type %jd",
	    va, ubc_offset, access_type, 0);

	if ((access_type & VM_PROT_WRITE) != 0) {
#ifndef PRIxOFF		/* XXX */
#define PRIxOFF "jx"	/* XXX */
#endif			/* XXX */
		KASSERTMSG((trunc_page(umap->writeoff) <= slot_offset),
		    "out of range write: slot=%#"PRIxVSIZE" off=%#"PRIxOFF,
		    slot_offset, (intmax_t)umap->writeoff);
		KASSERTMSG((slot_offset < umap->writeoff + umap->writelen),
		    "out of range write: slot=%#"PRIxVADDR
		    " off=%#"PRIxOFF" len=%#"PRIxVSIZE,
		    slot_offset, (intmax_t)umap->writeoff, umap->writelen);
	}

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;

	if ((access_type & VM_PROT_WRITE) == 0) {
		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
	} else {
		npages = (round_page(umap->offset + umap->writeoff +
		    umap->writelen) - (umap->offset + slot_offset))
		    >> PAGE_SHIFT;
		flags |= PGO_PASTEOF;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	rw_enter(uobj->vmobjlock, RW_WRITER);

	UVMHIST_LOG(ubchist, "slot_offset %#jx writeoff %#jx writelen %#jx ",
	    slot_offset, umap->writeoff, umap->writelen, 0);
	UVMHIST_LOG(ubchist, "getpages uobj %#jx offset %#jx npages %jd",
	    (uintptr_t)uobj, umap->offset + slot_offset, npages, 0);

	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
	    &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP);
	UVMHIST_LOG(ubchist, "getpages error %jd npages %jd", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		kpause("ubc_fault", false, hz >> 2, NULL);
		goto again;
	}
	if (error) {
		return error;
	}

	/*
	 * For virtually-indexed, virtually-tagged caches we should avoid
	 * creating writable mappings when we do not absolutely need them,
	 * since the "compatible alias" trick does not work on such caches.
	 * Otherwise, we can always map the pages writable.
	 */

#ifdef PMAP_CACHE_VIVT
	prot = VM_PROT_READ | access_type;
#else
	prot = VM_PROT_READ | VM_PROT_WRITE;
#endif

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va %#jx eva %#jx", va, eva, 0, 0);

	/*
	 * Note: normally all returned pages would have the same UVM object.
	 * However, layered file systems such as tmpfs may return pages
	 * which belong to the underlying UVM object; in that case the lock
	 * is shared amongst the objects.
	 */
	rw_enter(uobj->vmobjlock, RW_WRITER);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		struct vm_page *pg;

		UVMHIST_LOG(ubchist, "pgs[%jd] = %#jx", i, (uintptr_t)pgs[i],
		    0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
		error = ubc_fault_page(ufi, umap, pg, prot, access_type, va);
		if (error) {
			/*
			 * Flush (there might be pages entered), drop the lock,
			 * and perform uvm_wait().  Note: page will re-fault.
			 */
			pmap_update(ufi->orig_map->pmap);
			rw_exit(uobj->vmobjlock);
			uvm_wait("ubc_fault");
			rw_enter(uobj->vmobjlock, RW_WRITER);
		}
	}
	/* Must make VA visible before the unlock. */
	pmap_update(ufi->orig_map->pmap);
	rw_exit(uobj->vmobjlock);

	return 0;
}

/*
 * local functions
 */

static struct ubc_map *
ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

/*
 * ubc_alloc: allocate a file mapping window
 */
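
/*
 * In rough outline, mirroring ubc_uiomove() below (a sketch of the
 * calling convention, not a verbatim caller):
 *
 *	int npages = __arraycount(pgs);
 *	vsize_t len = todo;
 *	void *win = ubc_alloc(uobj, off, &len, advice, UBC_WRITE,
 *	    pgs, &npages);
 *	error = uiomove(win, len, uio);
 *	ubc_release(win, flags, pgs, npages);
 *
 * len is trimmed to the end of the window, so callers loop until the
 * whole request has been transferred.
 */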

static void * __noinline
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
    int flags, struct vm_page **pgs, int *npagesp)
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(ubchist, "uobj %#jx offset %#jx len %#jx",
	    (uintptr_t)uobj, offset, *lenp, 0);

	KASSERT(*lenp > 0);
	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);
	KASSERT(*lenp > 0);

	rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
again:
	/*
	 * The UVM object is already referenced.
	 * Lock order: UBC object -> ubc_map::uobj.
	 */
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		struct uvm_object *oobj;

		UBC_EVCNT_INCR(wincachemiss);
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			rw_exit(ubc_object.uobj.vmobjlock);
			kpause("ubc_alloc", false, hz >> 2, NULL);
			rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
			goto again;
		}

		va = UBC_UMAP_ADDR(umap);
		oobj = umap->uobj;

		/*
		 * Remove from old hash (if any), add to new hash.
		 */

		if (oobj != NULL) {
			/*
			 * Mapping must be removed before the list entry,
			 * since there is a race with ubc_purge().
			 */
			if (umap->flags & UMAP_MAPPING_CACHED) {
				umap->flags &= ~UMAP_MAPPING_CACHED;
				rw_enter(oobj->vmobjlock, RW_WRITER);
				pmap_remove(pmap_kernel(), va,
				    va + ubc_winsize);
				pmap_update(pmap_kernel());
				rw_exit(oobj->vmobjlock);
			}
			LIST_REMOVE(umap, hash);
			LIST_REMOVE(umap, list);
		} else {
			KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		LIST_INSERT_HEAD(&uobj->uo_ubc, umap, list);
	} else {
		UBC_EVCNT_INCR(wincachehit);
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

	if (flags & UBC_WRITE) {
		KASSERTMSG(umap->writeoff == 0 && umap->writelen == 0,
		    "ubc_alloc: concurrent writes to uobj %p", uobj);
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	umap->advice = advice;
	rw_exit(ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %#jx refs %jd va %#jx flags %#jx",
	    (uintptr_t)umap, umap->refcount, (uintptr_t)va, flags);

	if (flags & UBC_FAULTBUSY) {
		int npages = (*lenp + (offset & (PAGE_SIZE - 1)) +
		    PAGE_SIZE - 1) >> PAGE_SHIFT;
		int gpflags =
		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
		    PGO_NOTIMESTAMP;
		int i;
		KDASSERT(flags & UBC_WRITE);
		KASSERT(npages <= *npagesp);
		KASSERT(umap->refcount == 1);

		UBC_EVCNT_INCR(faultbusy);
again_faultbusy:
		rw_enter(uobj->vmobjlock, RW_WRITER);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
		memset(pgs, 0, *npagesp * sizeof(pgs[0]));

		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %jd", error, 0, 0, 0);
		if (error) {
			/*
			 * Flush: the mapping above might have been removed.
			 */
			pmap_update(pmap_kernel());
			goto out;
		}
		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[i];

			KASSERT(pg->uobject == uobj);
			if (pg->loan_count != 0) {
				rw_enter(uobj->vmobjlock, RW_WRITER);
				if (pg->loan_count != 0) {
					pg = uvm_loanbreak(pg);
				}
				if (pg == NULL) {
					pmap_kremove(va, ubc_winsize);
					pmap_update(pmap_kernel());
					uvm_page_unbusy(pgs, npages);
					rw_exit(uobj->vmobjlock);
					uvm_wait("ubc_alloc");
					goto again_faultbusy;
				}
				rw_exit(uobj->vmobjlock);
				pgs[i] = pg;
			}
			pmap_kenter_pa(
			    va + trunc_page(slot_offset) + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE, 0);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
		*npagesp = npages;
	} else {
		KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	}

out:
	return (void *)(va + slot_offset);
}

/*
 * ubc_release: free a file mapping window.
 */

static void __noinline
ubc_release(void *va, int flags, struct vm_page **pgs, int npages)
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	bool unmapped;
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(ubchist, "va %#jx", (uintptr_t)va, 0, 0, 0);

	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	if (umap->flags & UMAP_PAGES_LOCKED) {
		const voff_t endoff = umap->writeoff + umap->writelen;
		const voff_t zerolen = round_page(endoff) - endoff;

		KASSERT(npages == (round_page(endoff) -
		    trunc_page(umap->writeoff)) >> PAGE_SHIFT);
		KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		rw_enter(uobj->vmobjlock, RW_WRITER);
		for (u_int i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[i];
#ifdef DIAGNOSTIC
			paddr_t pa;
			bool rv;
			rv = pmap_extract(pmap_kernel(), umapva +
			    umap->writeoff + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			KASSERT(PHYS_TO_VM_PAGE(pa) == pg);
#endif
			pg->flags &= ~PG_FAKE;
			KASSERTMSG(uvm_pagegetdirty(pg) ==
			    UVM_PAGE_STATUS_DIRTY,
			    "page %p not dirty", pg);
			KASSERT(pg->loan_count == 0);
			if (uvmpdpol_pageactivate_p(pg)) {
				uvm_pagelock(pg);
				uvm_pageactivate(pg);
				uvm_pageunlock(pg);
			}
		}
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		uvm_page_unbusy(pgs, npages);
		rw_exit(uobj->vmobjlock);
		unmapped = true;
	} else {
		unmapped = false;
	}

	rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {
			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */
			rw_enter(uobj->vmobjlock, RW_WRITER);
			pmap_remove(pmap_kernel(), umapva,
			    umapva + ubc_winsize);
			pmap_update(pmap_kernel());
			rw_exit(uobj->vmobjlock);

			umap->flags &= ~UMAP_MAPPING_CACHED;
			LIST_REMOVE(umap, hash);
			LIST_REMOVE(umap, list);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %#jx refs %jd", (uintptr_t)umap,
	    umap->refcount, 0, 0);
	rw_exit(ubc_object.uobj.vmobjlock);
}

/*
 * ubc_uiomove: move data to/from an object.
 */
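
/*
 * For example (a sketch only; "vp" and "bytes" are illustrative, and
 * real file system read/write paths also clamp the length at EOF and
 * choose advice and flags per vnode):
 *
 *	error = ubc_uiomove(&vp->v_uobj, uio, bytes, UVM_ADV_SEQUENTIAL,
 *	    UBC_READ | UBC_PARTIALOK);
 */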

int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
    int flags)
{
	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
	struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];
	voff_t off;
	int error, npages;

	KASSERT(todo <= uio->uio_resid);
	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
	    ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));

#ifdef UBC_USE_PMAP_DIRECT
	/*
	 * during direct access pages need to be held busy to prevent them
	 * changing identity, and therefore if we read or write an object
	 * into a mapped view of the same object we could deadlock while
	 * faulting.
	 *
	 * avoid the problem by disallowing direct access if the object
	 * might be visible somewhere via mmap().
	 *
	 * XXX concurrent reads cause thundering herd issues with PG_BUSY.
	 * In the future enable by default for writes or if ncpu<=2, and
	 * make the toggle override that.
	 */
	if ((ubc_direct && (flags & UBC_ISMAPPED) == 0) ||
	    (flags & UBC_FAULTBUSY) != 0) {
		return ubc_uiomove_direct(uobj, uio, todo, advice, flags);
	}
#endif

	off = uio->uio_offset;
	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;
		void *win;

		npages = __arraycount(pgs);
		win = ubc_alloc(uobj, off, &bytelen, advice, flags, pgs,
		    &npages);
		if (error == 0) {
			error = uiomove(win, bytelen, uio);
		}
		if (error != 0 && overwrite) {
			/*
			 * if we haven't initialized the pages yet,
			 * do it now.  it's safe to use memset here
			 * because we just mapped the pages above.
			 */
			memset(win, 0, bytelen);
		}
		ubc_release(win, flags, pgs, npages);
		off += bytelen;
		todo -= bytelen;
		if (error != 0 && (flags & UBC_PARTIALOK) != 0) {
			break;
		}
	}

	return error;
}

/*
 * ubc_zerorange: set a range of bytes in an object to zero.
 */
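
/*
 * For example, a truncate path shortening a file to a non-page-aligned
 * length might zero the stale tail of the final page with something
 * like (a sketch; "vp" and "newsize" are illustrative, and the flags
 * argument would be 0 or UBC_UNMAP depending on the platform's
 * cache-alias needs):
 *
 *	ubc_zerorange(&vp->v_uobj, newsize,
 *	    round_page(newsize) - newsize, 0);
 */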

void
ubc_zerorange(struct uvm_object *uobj, off_t off, size_t len, int flags)
{
	struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];
	int npages;

#ifdef UBC_USE_PMAP_DIRECT
	if (ubc_direct || (flags & UBC_FAULTBUSY) != 0) {
		ubc_zerorange_direct(uobj, off, len, flags);
		return;
	}
#endif

	/*
	 * XXXUBC invent kzero() and use it
	 */

	while (len) {
		void *win;
		vsize_t bytelen = len;

		npages = __arraycount(pgs);
		win = ubc_alloc(uobj, off, &bytelen, UVM_ADV_NORMAL, UBC_WRITE,
		    pgs, &npages);
		memset(win, 0, bytelen);
		ubc_release(win, flags, pgs, npages);

		off += bytelen;
		len -= bytelen;
	}
}

#ifdef UBC_USE_PMAP_DIRECT
/* Copy data using direct map */

/*
 * ubc_alloc_direct: allocate a file mapping window using direct map
 */
static int __noinline
ubc_alloc_direct(struct uvm_object *uobj, voff_t offset, vsize_t *lenp,
    int advice, int flags, struct vm_page **pgs, int *npages)
{
	voff_t pgoff;
	int error;
	int gpflags = flags | PGO_NOTIMESTAMP | PGO_SYNCIO;
	int access_type = VM_PROT_READ;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	if (flags & UBC_WRITE) {
		if (flags & UBC_FAULTBUSY)
			gpflags |= PGO_OVERWRITE | PGO_NOBLOCKALLOC;
#if 0
		KASSERT(!UVM_OBJ_NEEDS_WRITEFAULT(uobj));
#endif

		/*
		 * Tell genfs_getpages() we already have the journal lock,
		 * allow allocation past current EOF.
		 */
		gpflags |= PGO_JOURNALLOCKED | PGO_PASTEOF;
		access_type |= VM_PROT_WRITE;
	} else {
		/* Don't need the empty blocks allocated, PG_RDONLY is okay */
		gpflags |= PGO_NOBLOCKALLOC;
	}

	pgoff = (offset & PAGE_MASK);
	*lenp = MIN(*lenp, ubc_winsize - pgoff);

again:
	*npages = (*lenp + pgoff + PAGE_SIZE - 1) >> PAGE_SHIFT;
	KASSERT((*npages * PAGE_SIZE) <= ubc_winsize);
	KASSERT(*lenp + pgoff <= ubc_winsize);
	memset(pgs, 0, *npages * sizeof(pgs[0]));

	rw_enter(uobj->vmobjlock, RW_WRITER);
	error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
	    npages, 0, access_type, advice, gpflags);
	UVMHIST_LOG(ubchist, "alloc_direct getpages %jd", error, 0, 0, 0);
	if (error) {
		if (error == EAGAIN) {
			kpause("ubc_alloc_directg", false, hz >> 2, NULL);
			goto again;
		}
		return error;
	}

	rw_enter(uobj->vmobjlock, RW_WRITER);
	for (int i = 0; i < *npages; i++) {
		struct vm_page *pg = pgs[i];

		KASSERT(pg != NULL);
		KASSERT(pg != PGO_DONTCARE);
		KASSERT((pg->flags & PG_FAKE) == 0 || (gpflags & PGO_OVERWRITE));
		KASSERT(pg->uobject->vmobjlock == uobj->vmobjlock);

		/* Avoid breaking loan if possible, only do it on write */
		if ((flags & UBC_WRITE) && pg->loan_count != 0) {
			pg = uvm_loanbreak(pg);
			if (pg == NULL) {
				uvm_page_unbusy(pgs, *npages);
				rw_exit(uobj->vmobjlock);
				uvm_wait("ubc_alloc_directl");
				goto again;
			}
			pgs[i] = pg;
		}

		/* Page must be writable by now */
		KASSERT((pg->flags & PG_RDONLY) == 0 || (flags & UBC_WRITE) == 0);

		/*
		 * XXX For aobj pages.  No managed mapping - mark the page
		 * dirty.
		 */
		if ((flags & UBC_WRITE) != 0) {
			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
		}
	}
	rw_exit(uobj->vmobjlock);

	return 0;
}

static void __noinline
ubc_direct_release(struct uvm_object *uobj,
    int flags, struct vm_page **pgs, int npages)
{
	rw_enter(uobj->vmobjlock, RW_WRITER);
	for (int i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];

		pg->flags &= ~PG_BUSY;
		UVM_PAGE_OWN(pg, NULL);
		if (pg->flags & PG_RELEASED) {
			pg->flags &= ~PG_RELEASED;
			uvm_pagefree(pg);
			continue;
		}

		if (uvm_pagewanted_p(pg) || uvmpdpol_pageactivate_p(pg)) {
			uvm_pagelock(pg);
			uvm_pageactivate(pg);
			uvm_pagewakeup(pg);
			uvm_pageunlock(pg);
		}

		/* Page was changed, no longer fake and neither clean. */
		if (flags & UBC_WRITE) {
			KASSERTMSG(uvm_pagegetdirty(pg) ==
			    UVM_PAGE_STATUS_DIRTY,
			    "page %p not dirty", pg);
			pg->flags &= ~PG_FAKE;
		}
	}
	rw_exit(uobj->vmobjlock);
}

static int
ubc_uiomove_process(void *win, size_t len, void *arg)
{
	struct uio *uio = (struct uio *)arg;

	return uiomove(win, len, uio);
}

static int
ubc_zerorange_process(void *win, size_t len, void *arg)
{
	memset(win, 0, len);
	return 0;
}

static int __noinline
ubc_uiomove_direct(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
    int flags)
{
	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
	voff_t off;
	int error, npages;
	struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];

	KASSERT(todo <= uio->uio_resid);
	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
	    ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));

	off = uio->uio_offset;
	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;

		error = ubc_alloc_direct(uobj, off, &bytelen, advice, flags,
		    pgs, &npages);
		if (error != 0) {
			/* can't do anything, failed to get the pages */
			break;
		}

		if (error == 0) {
			error = uvm_direct_process(pgs, npages, off, bytelen,
			    ubc_uiomove_process, uio);
		}

		if (overwrite) {
			voff_t endoff;

			/*
			 * if we haven't initialized the pages yet due to an
			 * error above, do it now.
			 */
			if (error != 0) {
				(void) uvm_direct_process(pgs, npages, off,
				    bytelen, ubc_zerorange_process, NULL);
			}

			off += bytelen;
			todo -= bytelen;
			endoff = off & (PAGE_SIZE - 1);

			/*
			 * zero out the remaining portion of the final page
			 * (if any).
			 */
			if (todo == 0 && endoff != 0) {
				vsize_t zlen = PAGE_SIZE - endoff;
				(void) uvm_direct_process(pgs + npages - 1, 1,
				    off, zlen, ubc_zerorange_process, NULL);
			}
		} else {
			off += bytelen;
			todo -= bytelen;
		}

		ubc_direct_release(uobj, flags, pgs, npages);

		if (error != 0 && ISSET(flags, UBC_PARTIALOK)) {
			break;
		}
	}

	return error;
}

static void __noinline
ubc_zerorange_direct(struct uvm_object *uobj, off_t off, size_t todo, int flags)
{
	int error, npages;
	struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];

	flags |= UBC_WRITE;

	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;

		error = ubc_alloc_direct(uobj, off, &bytelen, UVM_ADV_NORMAL,
		    flags, pgs, &npages);
		if (error != 0) {
			/* can't do anything, failed to get the pages */
			break;
		}

		error = uvm_direct_process(pgs, npages, off, bytelen,
		    ubc_zerorange_process, NULL);

		ubc_direct_release(uobj, flags, pgs, npages);

		off += bytelen;
		todo -= bytelen;
	}
}

#endif /* UBC_USE_PMAP_DIRECT */

/*
 * ubc_purge: disassociate ubc_map structures from an empty uvm_object.
 */
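
/*
 * The object must already have no pages (asserted below), so no pmap
 * mappings of its windows can remain either; any ubc_maps still hashed
 * to the object are simply detached and left on their inactive queues.
 */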

void
ubc_purge(struct uvm_object *uobj)
{
	struct ubc_map *umap;
	vaddr_t va;

	KASSERT(uobj->uo_npages == 0);

	/*
	 * Safe to check without lock held, as ubc_alloc() removes
	 * the mapping and list entry in the correct order.
	 */
	if (__predict_true(LIST_EMPTY(&uobj->uo_ubc))) {
		return;
	}
	rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
	while ((umap = LIST_FIRST(&uobj->uo_ubc)) != NULL) {
		KASSERT(umap->refcount == 0);
		for (va = 0; va < ubc_winsize; va += PAGE_SIZE) {
			KASSERT(!pmap_extract(pmap_kernel(),
			    va + UBC_UMAP_ADDR(umap), NULL));
		}
		LIST_REMOVE(umap, list);
		LIST_REMOVE(umap, hash);
		umap->flags &= ~UMAP_MAPPING_CACHED;
		umap->uobj = NULL;
	}
	rw_exit(ubc_object.uobj.vmobjlock);
}

static int
ubchash_stats(struct hashstat_sysctl *hs, bool fill)
{
	struct ubc_map *umap;
	uint64_t chain;

	strlcpy(hs->hash_name, "ubchash", sizeof(hs->hash_name));
	strlcpy(hs->hash_desc, "ubc object hash", sizeof(hs->hash_desc));
	if (!fill)
		return 0;

	hs->hash_size = ubc_object.hashmask + 1;

	for (size_t i = 0; i < hs->hash_size; i++) {
		chain = 0;
		rw_enter(ubc_object.uobj.vmobjlock, RW_READER);
		LIST_FOREACH(umap, &ubc_object.hash[i], hash) {
			chain++;
		}
		rw_exit(ubc_object.uobj.vmobjlock);
		if (chain > 0) {
			hs->hash_used++;
			hs->hash_items += chain;
			if (chain > hs->hash_maxchain)
				hs->hash_maxchain = chain;
		}
		preempt_point();
	}

	return 0;
}