/*	$NetBSD: uvm_bio.c,v 1.106 2020/03/17 18:31:39 ad Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.106 2020/03/17 18:31:39 ad Exp $");

#include "opt_uvmhist.h"
#include "opt_ubc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

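/*
 * If the architecture provides a pmap direct map (PMAP_DIRECT), UBC can
 * copy to and from pages through that mapping instead of setting up
 * temporary kernel windows; see ubc_uiomove_direct() and
 * ubc_zerorange_direct() below.  The ubc_direct switch controls whether
 * that path is used.
 */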
#ifdef PMAP_DIRECT
#  define UBC_USE_PMAP_DIRECT
#endif

/*
 * local functions
 */

static int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
			  int, int, vm_prot_t, int);
static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);
#ifdef UBC_USE_PMAP_DIRECT
static int __noinline ubc_uiomove_direct(struct uvm_object *, struct uio *,
			  vsize_t, int, int);
static void __noinline ubc_zerorange_direct(struct uvm_object *, off_t,
			  size_t, int);

bool ubc_direct = false; /* XXX */
#endif

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset)						\
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
	    ubc_object.hashmask)

#define UBC_QUEUE(offset)						\
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) &	\
	    (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u)						\
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))

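/*
 * The UBC object provides a single kernel virtual region that is divided
 * into ubc_nwins windows of ubc_winsize bytes each.  Every window can be
 * mapped onto a ubc_winsize-aligned range of some uvm_object (typically a
 * vnode's pager object).  UBC_HASH() locates a cached window for an
 * (object, offset) pair, UBC_QUEUE() selects the inactive queue a free
 * window is recycled from, and UBC_UMAP_ADDR() converts a ubc_map back to
 * its kernel virtual address.
 */
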
#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map {
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */
	int			advice;

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
	LIST_ENTRY(ubc_map)	list;		/* per-object list */
};

TAILQ_HEAD(ubc_inactive_head, ubc_map);
static struct ubc_object {
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	struct ubc_inactive_head *inactive;
					/* inactive queues for ubc_map's */
} ubc_object;

const struct uvm_pagerops ubc_pager = {
	.pgo_fault = ubc_fault,
	/* ... rest are NULL */
};

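/*
 * Tunables: ubc_nwins is the number of mapping windows and ubc_winshift is
 * log2 of the window size; ubc_winsize is derived from ubc_winshift in
 * ubc_init().  The defaults (UBC_NWINS, UBC_WINSHIFT) come from the
 * platform headers and may be overridden by kernel options.
 */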
int ubc_nwins = UBC_NWINS;
int ubc_winshift __read_mostly = UBC_WINSHIFT;
int ubc_winsize __read_mostly;
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif

#if defined(UBC_STATS)

#define	UBC_EVCNT_DEFINE(name) \
struct evcnt ubc_evcnt_##name = \
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "ubc", #name); \
EVCNT_ATTACH_STATIC(ubc_evcnt_##name);
#define	UBC_EVCNT_INCR(name) ubc_evcnt_##name.ev_count++

#else /* defined(UBC_STATS) */

#define	UBC_EVCNT_DEFINE(name)	/* nothing */
#define	UBC_EVCNT_INCR(name)	/* nothing */

#endif /* defined(UBC_STATS) */

UBC_EVCNT_DEFINE(wincachehit)
UBC_EVCNT_DEFINE(wincachemiss)
UBC_EVCNT_DEFINE(faultbusy)

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	/*
	 * Make sure ubc_winshift is sane.
	 */
	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;
	ubc_winsize = 1 << ubc_winshift;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	uvm_obj_init(&ubc_object.uobj, &ubc_pager, true, UVM_OBJ_KERN);

	ubc_object.umap = kmem_zalloc(ubc_nwins * sizeof(struct ubc_map),
	    KM_SLEEP);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");

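	/*
	 * Seed va with a nonzero value so that PMAP_PREFER() below rounds it
	 * to the pmap's preferred cache-alignment boundary; ubc_nqueues then
	 * becomes the number of windows per alias period, so windows
	 * recycled for nearby offsets get compatibly "colored" virtual
	 * addresses.  The resulting va is also passed to uvm_map() below as
	 * the mapping alignment.  (Descriptive note; the exact behaviour of
	 * PMAP_PREFER() is machine-dependent.)
	 */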
	vaddr_t va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_object.inactive = kmem_alloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), KM_SLEEP);
	for (int i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (int i = 0; i < ubc_nwins; i++) {
		struct ubc_map *umap;
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
		    umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, true,
	    &ubc_object.hashmask);
	for (int i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
	    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
}

void
ubchist_init(void)
{

	UVMHIST_INIT(ubchist, 300);
}

/*
 * ubc_fault_page: helper of ubc_fault to handle a single page.
 *
 * => Caller has UVM object locked.
 * => Caller will perform pmap_update().
 */

static inline int
ubc_fault_page(const struct uvm_faultinfo *ufi, const struct ubc_map *umap,
    struct vm_page *pg, vm_prot_t prot, vm_prot_t access_type, vaddr_t va)
{
	vm_prot_t mask;
	int error;
	bool rdonly;

	KASSERT(rw_write_held(pg->uobject->vmobjlock));

	KASSERT((pg->flags & PG_FAKE) == 0);
	if (pg->flags & PG_RELEASED) {
		uvm_pagefree(pg);
		return 0;
	}
	if (pg->loan_count != 0) {

		/*
		 * Avoid unneeded loan break, if possible.
		 */

		if ((access_type & VM_PROT_WRITE) == 0) {
			prot &= ~VM_PROT_WRITE;
		}
		if (prot & VM_PROT_WRITE) {
			struct vm_page *newpg;

			newpg = uvm_loanbreak(pg);
			if (newpg == NULL) {
				uvm_page_unbusy(&pg, 1);
				return ENOMEM;
			}
			pg = newpg;
		}
	}

	/*
	 * Note that a page whose backing store is only partially allocated
	 * is marked as PG_RDONLY.
	 *
	 * It is the responsibility of ubc_alloc's caller to allocate the
	 * backing blocks before writing to the window.
	 */

	KASSERT((pg->flags & PG_RDONLY) == 0 ||
	    (access_type & VM_PROT_WRITE) == 0 ||
	    pg->offset < umap->writeoff ||
	    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);

	rdonly = uvm_pagereadonly_p(pg);
	mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;

	error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
	    prot & mask, PMAP_CANFAIL | (access_type & mask));

	uvm_pagelock(pg);
	uvm_pageactivate(pg);
	uvm_pagewakeup(pg);
	uvm_pageunlock(pg);
	pg->flags &= ~PG_BUSY;
	UVM_PAGE_OWN(pg, NULL);

	return error;
}

/*
 * ubc_fault: fault routine for ubc mapping
 */
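/*
 * A fault on the UBC mapping means some thread touched a window set up by
 * ubc_alloc() (typically via uiomove()).  We work out which window and
 * which object it maps, derive the real access type from the write range
 * recorded by ubc_alloc(), fetch the pages with pgo_get(), and enter
 * mappings for them.
 */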

static int
ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
    int ign3, int ign4, vm_prot_t access_type, int flags)
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT];
	int i, error, npages;
	vm_prot_t prot;

	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;
	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/*
	 * some platforms cannot write to individual bytes atomically, so
	 * software has to do read/modify/write of larger quantities instead.
	 * this means that the access_type for "write" operations
	 * can be VM_PROT_READ, which confuses us mightily.
	 *
	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
	 */

	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
	UVMHIST_LOG(ubchist, "va 0x%jx ubc_offset 0x%jx access_type %jd",
	    va, ubc_offset, access_type, 0);

	if ((access_type & VM_PROT_WRITE) != 0) {
#ifndef PRIxOFF		/* XXX */
#define PRIxOFF "jx"	/* XXX */
#endif			/* XXX */
		KASSERTMSG((trunc_page(umap->writeoff) <= slot_offset),
		    "out of range write: slot=%#"PRIxVSIZE" off=%#"PRIxOFF,
		    slot_offset, (intmax_t)umap->writeoff);
		KASSERTMSG((slot_offset < umap->writeoff + umap->writelen),
		    "out of range write: slot=%#"PRIxVADDR
		    " off=%#"PRIxOFF" len=%#"PRIxVSIZE,
		    slot_offset, (intmax_t)umap->writeoff, umap->writelen);
	}

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;

	if ((access_type & VM_PROT_WRITE) == 0) {
		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
	} else {
		npages = (round_page(umap->offset + umap->writeoff +
		    umap->writelen) - (umap->offset + slot_offset))
		    >> PAGE_SHIFT;
		flags |= PGO_PASTEOF;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	rw_enter(uobj->vmobjlock, RW_WRITER);

	UVMHIST_LOG(ubchist, "slot_offset 0x%jx writeoff 0x%jx writelen 0x%jx ",
	    slot_offset, umap->writeoff, umap->writelen, 0);
	UVMHIST_LOG(ubchist, "getpages uobj %#jx offset 0x%jx npages %jd",
	    (uintptr_t)uobj, umap->offset + slot_offset, npages, 0);

	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
	    &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP);
	UVMHIST_LOG(ubchist, "getpages error %jd npages %jd", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		kpause("ubc_fault", false, hz >> 2, NULL);
		goto again;
	}
	if (error) {
		return error;
	}

	/*
	 * For virtually-indexed, virtually-tagged caches we should avoid
	 * creating writable mappings when we do not absolutely need them,
	 * since the "compatible alias" trick does not work on such caches.
	 * Otherwise, we can always map the pages writable.
	 */

#ifdef PMAP_CACHE_VIVT
	prot = VM_PROT_READ | access_type;
#else
	prot = VM_PROT_READ | VM_PROT_WRITE;
#endif

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%jx eva 0x%jx", va, eva, 0, 0);

	/*
	 * Note: normally all returned pages would have the same UVM object.
	 * However, layered file systems and e.g. tmpfs may return pages
	 * which belong to an underlying UVM object.  In such a case, the
	 * lock is shared amongst the objects.
	 */
	rw_enter(uobj->vmobjlock, RW_WRITER);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		struct vm_page *pg;

		UVMHIST_LOG(ubchist, "pgs[%jd] = %#jx", i, (uintptr_t)pgs[i],
		    0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
		error = ubc_fault_page(ufi, umap, pg, prot, access_type, va);
		if (error) {
			/*
			 * Flush (there might be pages entered), drop the lock,
			 * and perform uvm_wait().  Note: page will re-fault.
			 */
			pmap_update(ufi->orig_map->pmap);
			rw_exit(uobj->vmobjlock);
			uvm_wait("ubc_fault");
			rw_enter(uobj->vmobjlock, RW_WRITER);
		}
	}
	/* Must make VA visible before the unlock. */
	pmap_update(ufi->orig_map->pmap);
	rw_exit(uobj->vmobjlock);

	return 0;
}

/*
 * local functions
 */

static struct ubc_map *
ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

/*
 * ubc_alloc: allocate a file mapping window
 */
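/*
 * The value returned is a kernel virtual address inside one of the UBC
 * windows; at most *lenp bytes (clamped to the end of the window) may be
 * accessed through it.  For UBC_WRITE the requested range is remembered in
 * the ubc_map so that ubc_fault() can treat accesses to it as writes.
 * Every ubc_alloc() must be paired with a ubc_release() of the returned
 * address, as done by ubc_uiomove() and ubc_zerorange() below.
 */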

static void * __noinline
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
    int flags)
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %#jx offset 0x%jx len 0x%jx",
	    (uintptr_t)uobj, offset, *lenp, 0);

	KASSERT(*lenp > 0);
	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);

	rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
again:
	/*
	 * The UVM object is already referenced.
	 * Lock order: UBC object -> ubc_map::uobj.
	 */
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		struct uvm_object *oobj;

		UBC_EVCNT_INCR(wincachemiss);
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			rw_exit(ubc_object.uobj.vmobjlock);
			kpause("ubc_alloc", false, hz >> 2, NULL);
			rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
			goto again;
		}

		va = UBC_UMAP_ADDR(umap);
		oobj = umap->uobj;

		/*
		 * Remove from old hash (if any), add to new hash.
		 */

		if (oobj != NULL) {
			/*
			 * Mapping must be removed before the list entry,
			 * since there is a race with ubc_purge().
			 */
			if (umap->flags & UMAP_MAPPING_CACHED) {
				umap->flags &= ~UMAP_MAPPING_CACHED;
				rw_enter(oobj->vmobjlock, RW_WRITER);
				pmap_remove(pmap_kernel(), va,
				    va + ubc_winsize);
				pmap_update(pmap_kernel());
				rw_exit(oobj->vmobjlock);
			}
			LIST_REMOVE(umap, hash);
			LIST_REMOVE(umap, list);
		} else {
			KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		LIST_INSERT_HEAD(&uobj->uo_ubc, umap, list);
	} else {
		UBC_EVCNT_INCR(wincachehit);
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

	if (flags & UBC_WRITE) {
		KASSERTMSG(umap->writeoff == 0 && umap->writelen == 0,
		    "ubc_alloc: concurrent writes to uobj %p", uobj);
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	umap->advice = advice;
	rw_exit(ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %#jx refs %jd va %#jx flags 0x%jx",
	    (uintptr_t)umap, umap->refcount, (uintptr_t)va, flags);

	if (flags & UBC_FAULTBUSY) {
		/* XXX add offset from slot_offset? */
		int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		int gpflags =
		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
		    PGO_NOTIMESTAMP;
		int i;
		KDASSERT(flags & UBC_WRITE);
		KASSERT(umap->refcount == 1);

		UBC_EVCNT_INCR(faultbusy);
again_faultbusy:
		rw_enter(uobj->vmobjlock, RW_WRITER);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
		memset(pgs, 0, sizeof(pgs));

		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %jd", error, 0, 0, 0);
		if (error) {
			/*
			 * Flush: the mapping above might have been removed.
			 */
			pmap_update(pmap_kernel());
			goto out;
		}
		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[i];

			KASSERT(pg->uobject == uobj);
			if (pg->loan_count != 0) {
				rw_enter(uobj->vmobjlock, RW_WRITER);
				if (pg->loan_count != 0) {
					pg = uvm_loanbreak(pg);
				}
				if (pg == NULL) {
					pmap_kremove(va, ubc_winsize);
					pmap_update(pmap_kernel());
					uvm_page_unbusy(pgs, npages);
					rw_exit(uobj->vmobjlock);
					uvm_wait("ubc_alloc");
					goto again_faultbusy;
				}
				rw_exit(uobj->vmobjlock);
				pgs[i] = pg;
			}
			pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE, 0);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
	} else {
		KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	}

out:
	return (void *)(va + slot_offset);
}

/*
 * ubc_release: free a file mapping window.
 */
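/*
 * For windows set up with UBC_FAULTBUSY (UMAP_PAGES_LOCKED), ubc_release()
 * zeroes the tail of the final page, unmaps and unbusies the pages, and
 * activates them.  Otherwise it just drops the window's reference; on the
 * last reference the window either keeps its cached kernel mapping or, if
 * UBC_UNMAP was requested (e.g. to avoid cache aliases on some platforms),
 * is unmapped and its object association dropped.
 */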

static void __noinline
ubc_release(void *va, int flags)
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	bool unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %#jx", (uintptr_t)va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	if (umap->flags & UMAP_PAGES_LOCKED) {
		const voff_t slot_offset = umap->writeoff;
		const voff_t endoff = umap->writeoff + umap->writelen;
		const voff_t zerolen = round_page(endoff) - endoff;
		const u_int npages = (round_page(endoff) -
		    trunc_page(slot_offset)) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];

		KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		rw_enter(uobj->vmobjlock, RW_WRITER);
		for (u_int i = 0; i < npages; i++) {
			paddr_t pa;
			bool rv __diagused;

			rv = pmap_extract(pmap_kernel(),
			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
			pgs[i]->flags &= ~PG_FAKE;
			KASSERTMSG(uvm_pagegetdirty(pgs[i]) ==
			    UVM_PAGE_STATUS_DIRTY,
			    "page %p not dirty", pgs[i]);
			KASSERT(pgs[i]->loan_count == 0);
			uvm_pagelock(pgs[i]);
			uvm_pageactivate(pgs[i]);
			uvm_pageunlock(pgs[i]);
		}
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		uvm_page_unbusy(pgs, npages);
		rw_exit(uobj->vmobjlock);
		unmapped = true;
	} else {
		unmapped = false;
	}

	rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {
			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */
			rw_enter(uobj->vmobjlock, RW_WRITER);
			pmap_remove(pmap_kernel(), umapva,
			    umapva + ubc_winsize);
			pmap_update(pmap_kernel());
			rw_exit(uobj->vmobjlock);

			umap->flags &= ~UMAP_MAPPING_CACHED;
			LIST_REMOVE(umap, hash);
			LIST_REMOVE(umap, list);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %#jx refs %jd", (uintptr_t)umap,
	    umap->refcount, 0, 0);
	rw_exit(ubc_object.uobj.vmobjlock);
}

/*
 * ubc_uiomove: move data to/from an object.
 */
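/*
 * A typical caller is a file system's read/write vnode operation, which
 * loops over the request and lets UBC do the copy through the page cache.
 * A minimal sketch (not taken from this file; exact flags vary by caller):
 *
 *	vsize_t bytelen = MIN(uio->uio_resid, vp->v_size - uio->uio_offset);
 *	error = ubc_uiomove(&vp->v_uobj, uio, bytelen,
 *	    IO_ADV_DECODE(ioflag), UBC_READ | UBC_PARTIALOK);
 */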

int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
    int flags)
{
	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
	voff_t off;
	int error;

	KASSERT(todo <= uio->uio_resid);
	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
	    ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));

#ifdef UBC_USE_PMAP_DIRECT
	if (ubc_direct) {
		return ubc_uiomove_direct(uobj, uio, todo, advice, flags);
	}
#endif

	off = uio->uio_offset;
	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;
		void *win;

		win = ubc_alloc(uobj, off, &bytelen, advice, flags);
		if (error == 0) {
			error = uiomove(win, bytelen, uio);
		}
		if (error != 0 && overwrite) {
			/*
			 * if we haven't initialized the pages yet,
			 * do it now.  it's safe to use memset here
			 * because we just mapped the pages above.
			 */
			printf("%s: error=%d\n", __func__, error);
			memset(win, 0, bytelen);
		}
		ubc_release(win, flags);
		off += bytelen;
		todo -= bytelen;
		if (error != 0 && (flags & UBC_PARTIALOK) != 0) {
			break;
		}
	}

	return error;
}

/*
 * ubc_zerorange: set a range of bytes in an object to zero.
 */
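/*
 * Used, for example, by file systems to clear the portion of the last
 * block beyond EOF after a truncate or an extending write.  A hypothetical
 * caller might do (flags depend on the file system and platform):
 *
 *	ubc_zerorange(&vp->v_uobj, off, len, UBC_UNMAP);
 */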

void
ubc_zerorange(struct uvm_object *uobj, off_t off, size_t len, int flags)
{

#ifdef UBC_USE_PMAP_DIRECT
	if (ubc_direct) {
		ubc_zerorange_direct(uobj, off, len, flags);
		return;
	}
#endif

	/*
	 * XXXUBC invent kzero() and use it
	 */

	while (len) {
		void *win;
		vsize_t bytelen = len;

		win = ubc_alloc(uobj, off, &bytelen, UVM_ADV_NORMAL, UBC_WRITE);
		memset(win, 0, bytelen);
		ubc_release(win, flags);

		off += bytelen;
		len -= bytelen;
	}
}

#ifdef UBC_USE_PMAP_DIRECT
/* Copy data using direct map */

/*
 * ubc_alloc_direct: allocate a file mapping window using direct map
 */
static int __noinline
ubc_alloc_direct(struct uvm_object *uobj, voff_t offset, vsize_t *lenp,
    int advice, int flags, struct vm_page **pgs, int *npages)
{
	voff_t pgoff;
	int error;
	int gpflags = flags | PGO_NOTIMESTAMP | PGO_SYNCIO | PGO_ALLPAGES;
	int access_type = VM_PROT_READ;
	UVMHIST_FUNC("ubc_alloc_direct"); UVMHIST_CALLED(ubchist);

	if (flags & UBC_WRITE) {
		if (flags & UBC_FAULTBUSY)
			gpflags |= PGO_OVERWRITE;
#if 0
		KASSERT(!UVM_OBJ_NEEDS_WRITEFAULT(uobj));
#endif

		/*
		 * Tell genfs_getpages() we already have the journal lock,
		 * allow allocation past current EOF.
		 */
		gpflags |= PGO_JOURNALLOCKED | PGO_PASTEOF;
		access_type |= VM_PROT_WRITE;
	} else {
		/* Don't need the empty blocks allocated, PG_RDONLY is okay */
		gpflags |= PGO_NOBLOCKALLOC;
	}

	pgoff = (offset & PAGE_MASK);
	*lenp = MIN(*lenp, ubc_winsize - pgoff);

again:
	*npages = (*lenp + pgoff + PAGE_SIZE - 1) >> PAGE_SHIFT;
	KASSERT((*npages * PAGE_SIZE) <= ubc_winsize);
	KASSERT(*lenp + pgoff <= ubc_winsize);
	memset(pgs, 0, *npages * sizeof(pgs[0]));

	rw_enter(uobj->vmobjlock, RW_WRITER);
	error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
	    npages, 0, access_type, advice, gpflags);
	UVMHIST_LOG(ubchist, "alloc_direct getpages %jd", error, 0, 0, 0);
	if (error) {
		if (error == EAGAIN) {
			kpause("ubc_alloc_directg", false, hz >> 2, NULL);
			goto again;
		}
		return error;
	}

	rw_enter(uobj->vmobjlock, RW_WRITER);
	for (int i = 0; i < *npages; i++) {
		struct vm_page *pg = pgs[i];

		KASSERT(pg != NULL);
		KASSERT(pg != PGO_DONTCARE);
		KASSERT((pg->flags & PG_FAKE) == 0 || (gpflags & PGO_OVERWRITE));
		KASSERT(pg->uobject->vmobjlock == uobj->vmobjlock);

		/* Avoid breaking loan if possible, only do it on write */
		if ((flags & UBC_WRITE) && pg->loan_count != 0) {
			pg = uvm_loanbreak(pg);
			if (pg == NULL) {
				uvm_page_unbusy(pgs, *npages);
				rw_exit(uobj->vmobjlock);
				uvm_wait("ubc_alloc_directl");
				goto again;
			}
			pgs[i] = pg;
		}

		/* Page must be writable by now */
		KASSERT((pg->flags & PG_RDONLY) == 0 || (flags & UBC_WRITE) == 0);
	}
	rw_exit(uobj->vmobjlock);

	return 0;
}

static void __noinline
ubc_direct_release(struct uvm_object *uobj,
	int flags, struct vm_page **pgs, int npages)
{
	rw_enter(uobj->vmobjlock, RW_WRITER);
	for (int i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];

		uvm_pagelock(pg);
		uvm_pageactivate(pg);
		uvm_pageunlock(pg);

		/*
		 * The page was changed: it is no longer fake and no longer
		 * clean.  There is no managed mapping in the direct case,
		 * so mark the page dirty manually.
		 */
		if (flags & UBC_WRITE) {
			pg->flags &= ~PG_FAKE;
			KASSERTMSG(uvm_pagegetdirty(pg) ==
			    UVM_PAGE_STATUS_DIRTY,
			    "page %p not dirty", pg);
			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
		}
	}
	uvm_page_unbusy(pgs, npages);
	rw_exit(uobj->vmobjlock);
}

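/*
 * The two helpers below are the per-chunk callbacks handed to
 * uvm_direct_process(), which walks the given pages through the pmap
 * direct map and invokes the callback on each directly-mapped chunk.
 */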
static int
ubc_uiomove_process(void *win, size_t len, void *arg)
{
	struct uio *uio = (struct uio *)arg;

	return uiomove(win, len, uio);
}

static int
ubc_zerorange_process(void *win, size_t len, void *arg)
{
	memset(win, 0, len);
	return 0;
}

static int __noinline
ubc_uiomove_direct(struct uvm_object *uobj, struct uio *uio, vsize_t todo,
    int advice, int flags)
{
	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
	voff_t off;
	int error, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT];

	KASSERT(todo <= uio->uio_resid);
	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
	    ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));

	off = uio->uio_offset;
	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;

		error = ubc_alloc_direct(uobj, off, &bytelen, advice, flags,
		    pgs, &npages);
		if (error != 0) {
			/* can't do anything, failed to get the pages */
			break;
		}

		if (error == 0) {
			error = uvm_direct_process(pgs, npages, off, bytelen,
			    ubc_uiomove_process, uio);
		}
		if (error != 0 && overwrite) {
			/*
			 * If we haven't initialized the pages yet, do it
			 * now.  It's safe to zero them here because the
			 * pages are still held busy from
			 * ubc_alloc_direct() above.
			 */
			printf("%s: error=%d\n", __func__, error);
			(void) uvm_direct_process(pgs, npages, off, bytelen,
			    ubc_zerorange_process, NULL);
		}

		ubc_direct_release(uobj, flags, pgs, npages);

		off += bytelen;
		todo -= bytelen;

		if (error != 0 && ISSET(flags, UBC_PARTIALOK)) {
			break;
		}
	}

	return error;
}

static void __noinline
ubc_zerorange_direct(struct uvm_object *uobj, off_t off, size_t todo, int flags)
{
	int error, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT];

	flags |= UBC_WRITE;

	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;

		error = ubc_alloc_direct(uobj, off, &bytelen, UVM_ADV_NORMAL,
		    flags, pgs, &npages);
		if (error != 0) {
			/* can't do anything, failed to get the pages */
			break;
		}

		error = uvm_direct_process(pgs, npages, off, bytelen,
		    ubc_zerorange_process, NULL);

		ubc_direct_release(uobj, flags, pgs, npages);

		off += bytelen;
		todo -= bytelen;
	}
}

#endif /* UBC_USE_PMAP_DIRECT */

/*
 * ubc_purge: disassociate ubc_map structures from an empty uvm_object.
 */

void
ubc_purge(struct uvm_object *uobj)
{
	struct ubc_map *umap;
	vaddr_t va;

	KASSERT(uobj->uo_npages == 0);

	/*
	 * Safe to check without lock held, as ubc_alloc() removes
	 * the mapping and list entry in the correct order.
	 */
	if (__predict_true(LIST_EMPTY(&uobj->uo_ubc))) {
		return;
	}
	rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
	while ((umap = LIST_FIRST(&uobj->uo_ubc)) != NULL) {
		KASSERT(umap->refcount == 0);
		for (va = 0; va < ubc_winsize; va += PAGE_SIZE) {
			KASSERT(!pmap_extract(pmap_kernel(),
			    va + UBC_UMAP_ADDR(umap), NULL));
		}
		LIST_REMOVE(umap, list);
		LIST_REMOVE(umap, hash);
		umap->flags &= ~UMAP_MAPPING_CACHED;
		umap->uobj = NULL;
	}
	rw_exit(ubc_object.uobj.vmobjlock);
}