/*	$NetBSD: uvm_bio.c,v 1.117 2020/05/25 19:29:08 ad Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.117 2020/05/25 19:29:08 ad Exp $");

#include "opt_uvmhist.h"
#include "opt_ubc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>

#ifdef PMAP_DIRECT
#  define UBC_USE_PMAP_DIRECT
#endif

/*
 * local functions
 */

static int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
			  int, int, vm_prot_t, int);
static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);
#ifdef UBC_USE_PMAP_DIRECT
static int __noinline ubc_uiomove_direct(struct uvm_object *, struct uio *, vsize_t,
			  int, int);
static void __noinline ubc_zerorange_direct(struct uvm_object *, off_t, size_t, int);

/* XXX disabled by default until the kinks are worked out. */
bool ubc_direct = false;
#endif

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset) 					\
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)

#define UBC_QUEUE(offset)					\
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) &	\
			     (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u)					\
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))
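
/*
 * Illustrative sketch (not compiled): UBC_UMAP_ADDR() and the reverse
 * lookup done by ubc_release() below are inverses of each other.
 * "u" here is an assumed example ubc_map pointer:
 *
 *	struct ubc_map *u = &ubc_object.umap[17];
 *	vaddr_t va = UBC_UMAP_ADDR(u);
 *	KASSERT(&ubc_object.umap[((char *)va - ubc_object.kva) >>
 *	    ubc_winshift] == u);
 */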


#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map {
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */
	int			advice;

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
	LIST_ENTRY(ubc_map)	list;		/* per-object list */
};

TAILQ_HEAD(ubc_inactive_head, ubc_map);
static struct ubc_object {
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	struct ubc_inactive_head *inactive;
					/* inactive queues for ubc_map's */
} ubc_object;

const struct uvm_pagerops ubc_pager = {
	.pgo_fault = ubc_fault,
	/* ... rest are NULL */
};

int ubc_nwins = UBC_NWINS;
int ubc_winshift __read_mostly = UBC_WINSHIFT;
int ubc_winsize __read_mostly;
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif

#if defined(UBC_STATS)

#define	UBC_EVCNT_DEFINE(name) \
struct evcnt ubc_evcnt_##name = \
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "ubc", #name); \
EVCNT_ATTACH_STATIC(ubc_evcnt_##name);
#define	UBC_EVCNT_INCR(name) ubc_evcnt_##name.ev_count++

#else /* defined(UBC_STATS) */

#define	UBC_EVCNT_DEFINE(name)	/* nothing */
#define	UBC_EVCNT_INCR(name)	/* nothing */

#endif /* defined(UBC_STATS) */

UBC_EVCNT_DEFINE(wincachehit)
UBC_EVCNT_DEFINE(wincachemiss)
UBC_EVCNT_DEFINE(faultbusy)

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	/*
	 * Make sure ubc_winshift is sane.
	 */
	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;
	ubc_winsize = 1 << ubc_winshift;
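
	/*
	 * For example (illustrative numbers only): with a 4KB page
	 * (PAGE_SHIFT == 12) and ubc_winshift == 13, each window covers
	 * ubc_winsize == 8KB, i.e. two pages, and ubc_nwins such windows
	 * are mapped into the kernel map below.
	 */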

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	uvm_obj_init(&ubc_object.uobj, &ubc_pager, true, UVM_OBJ_KERN);

	ubc_object.umap = kmem_zalloc(ubc_nwins * sizeof(struct ubc_map),
	    KM_SLEEP);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");

	vaddr_t va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_object.inactive = kmem_alloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), KM_SLEEP);
	for (int i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (int i = 0; i < ubc_nwins; i++) {
		struct ubc_map *umap;
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
		    umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, true,
	    &ubc_object.hashmask);
	for (int i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
	    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
}

void
ubchist_init(void)
{

	UVMHIST_INIT(ubchist, 300);
}

/*
 * ubc_fault_page: helper of ubc_fault to handle a single page.
 *
 * => Caller has UVM object locked.
 * => Caller will perform pmap_update().
 */

static inline int
ubc_fault_page(const struct uvm_faultinfo *ufi, const struct ubc_map *umap,
    struct vm_page *pg, vm_prot_t prot, vm_prot_t access_type, vaddr_t va)
{
	vm_prot_t mask;
	int error;
	bool rdonly;

	KASSERT(rw_write_held(pg->uobject->vmobjlock));

	KASSERT((pg->flags & PG_FAKE) == 0);
	if (pg->flags & PG_RELEASED) {
		uvm_pagefree(pg);
		return 0;
	}
	if (pg->loan_count != 0) {

		/*
		 * Avoid unneeded loan break, if possible.
		 */

		if ((access_type & VM_PROT_WRITE) == 0) {
			prot &= ~VM_PROT_WRITE;
		}
		if (prot & VM_PROT_WRITE) {
			struct vm_page *newpg;

			newpg = uvm_loanbreak(pg);
			if (newpg == NULL) {
				uvm_page_unbusy(&pg, 1);
				return ENOMEM;
			}
			pg = newpg;
		}
	}

	/*
	 * Note that a page whose backing store is partially allocated
	 * is marked as PG_RDONLY.
	 *
	 * It is the responsibility of ubc_alloc's caller to allocate
	 * the backing blocks before writing to the window.
	 */

	KASSERT((pg->flags & PG_RDONLY) == 0 ||
	    (access_type & VM_PROT_WRITE) == 0 ||
	    pg->offset < umap->writeoff ||
	    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);

	rdonly = uvm_pagereadonly_p(pg);
	mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;

	error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
	    prot & mask, PMAP_CANFAIL | (access_type & mask));

	uvm_pagelock(pg);
	uvm_pageactivate(pg);
	uvm_pagewakeup(pg);
	uvm_pageunlock(pg);
	pg->flags &= ~PG_BUSY;
	UVM_PAGE_OWN(pg, NULL);

	return error;
}

/*
 * ubc_fault: fault routine for ubc mapping
 */

static int
ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
    int ign3, int ign4, vm_prot_t access_type, int flags)
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT];
	int i, error, npages;
	vm_prot_t prot;

	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;
	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/*
	 * some platforms cannot write to individual bytes atomically, so
	 * software has to do read/modify/write of larger quantities instead.
	 * this means that the access_type for "write" operations
	 * can be VM_PROT_READ, which confuses us mightily.
	 *
	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
	 */

	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
	UVMHIST_LOG(ubchist, "va 0x%jx ubc_offset 0x%jx access_type %jd",
	    va, ubc_offset, access_type, 0);

	if ((access_type & VM_PROT_WRITE) != 0) {
#ifndef PRIxOFF		/* XXX */
#define PRIxOFF "jx"	/* XXX */
#endif /* XXX */
		KASSERTMSG((trunc_page(umap->writeoff) <= slot_offset),
		    "out of range write: slot=%#"PRIxVSIZE" off=%#"PRIxOFF,
		    slot_offset, (intmax_t)umap->writeoff);
		KASSERTMSG((slot_offset < umap->writeoff + umap->writelen),
		    "out of range write: slot=%#"PRIxVADDR
		    " off=%#"PRIxOFF" len=%#"PRIxVSIZE,
		    slot_offset, (intmax_t)umap->writeoff, umap->writelen);
	}

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;

	if ((access_type & VM_PROT_WRITE) == 0) {
		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
	} else {
		npages = (round_page(umap->offset + umap->writeoff +
		    umap->writelen) - (umap->offset + slot_offset))
		    >> PAGE_SHIFT;
		flags |= PGO_PASTEOF;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	rw_enter(uobj->vmobjlock, RW_WRITER);

	UVMHIST_LOG(ubchist, "slot_offset 0x%jx writeoff 0x%jx writelen 0x%jx ",
	    slot_offset, umap->writeoff, umap->writelen, 0);
	UVMHIST_LOG(ubchist, "getpages uobj %#jx offset 0x%jx npages %jd",
	    (uintptr_t)uobj, umap->offset + slot_offset, npages, 0);

	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
	    &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP);
	UVMHIST_LOG(ubchist, "getpages error %jd npages %jd", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		kpause("ubc_fault", false, hz >> 2, NULL);
		goto again;
	}
	if (error) {
		return error;
	}

	/*
	 * For virtually-indexed, virtually-tagged caches we should avoid
	 * creating writable mappings when we do not absolutely need them,
	 * since the "compatible alias" trick does not work on such caches.
	 * Otherwise, we can always map the pages writable.
	 */

#ifdef PMAP_CACHE_VIVT
	prot = VM_PROT_READ | access_type;
#else
	prot = VM_PROT_READ | VM_PROT_WRITE;
#endif

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%jx eva 0x%jx", va, eva, 0, 0);

	/*
	 * Note: normally all returned pages would have the same UVM object.
	 * However, layered file-systems and e.g. tmpfs may return pages
	 * which belong to the underlying UVM object.  In such a case,
	 * the lock is shared amongst the objects.
	 */
	rw_enter(uobj->vmobjlock, RW_WRITER);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		struct vm_page *pg;

		UVMHIST_LOG(ubchist, "pgs[%jd] = %#jx", i, (uintptr_t)pgs[i],
		    0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
		error = ubc_fault_page(ufi, umap, pg, prot, access_type, va);
		if (error) {
			/*
			 * Flush (there might be pages entered), drop the lock,
			 * and perform uvm_wait().  Note: page will re-fault.
			 */
			pmap_update(ufi->orig_map->pmap);
			rw_exit(uobj->vmobjlock);
			uvm_wait("ubc_fault");
			rw_enter(uobj->vmobjlock, RW_WRITER);
		}
	}
	/* Must make VA visible before the unlock. */
	pmap_update(ufi->orig_map->pmap);
	rw_exit(uobj->vmobjlock);

	return 0;
}

/*
 * local functions
 */

static struct ubc_map *
ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

/*
 * ubc_alloc: allocate a file mapping window
 */
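
/*
 * Illustrative sketch of how a window is used (the real consumer is
 * ubc_uiomove() below; "off", "len", "advice", "uio" and the error
 * handling are assumed locals).  For UBC_WRITE windows the caller must
 * have allocated the backing blocks first, since faults on the window
 * will not allocate them:
 *
 *	vsize_t bytelen = len;
 *	int npages = __arraycount(pgs);
 *	void *win = ubc_alloc(uobj, off, &bytelen, advice, UBC_WRITE,
 *	    pgs, &npages);
 *	error = uiomove(win, bytelen, uio);
 *	ubc_release(win, flags, pgs, npages);
 */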

static void * __noinline
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
    int flags, struct vm_page **pgs, int *npagesp)
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %#jx offset 0x%jx len 0x%jx",
	    (uintptr_t)uobj, offset, *lenp, 0);

	KASSERT(*lenp > 0);
	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);
	KASSERT(*lenp > 0);

	rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
again:
	/*
	 * The UVM object is already referenced.
	 * Lock order: UBC object -> ubc_map::uobj.
	 */
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		struct uvm_object *oobj;

		UBC_EVCNT_INCR(wincachemiss);
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			rw_exit(ubc_object.uobj.vmobjlock);
			kpause("ubc_alloc", false, hz >> 2, NULL);
			rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
			goto again;
		}

		va = UBC_UMAP_ADDR(umap);
		oobj = umap->uobj;

		/*
		 * Remove from old hash (if any), add to new hash.
		 */

		if (oobj != NULL) {
			/*
			 * Mapping must be removed before the list entry,
			 * since there is a race with ubc_purge().
			 */
			if (umap->flags & UMAP_MAPPING_CACHED) {
				umap->flags &= ~UMAP_MAPPING_CACHED;
				rw_enter(oobj->vmobjlock, RW_WRITER);
				pmap_remove(pmap_kernel(), va,
				    va + ubc_winsize);
				pmap_update(pmap_kernel());
				rw_exit(oobj->vmobjlock);
			}
			LIST_REMOVE(umap, hash);
			LIST_REMOVE(umap, list);
		} else {
			KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		LIST_INSERT_HEAD(&uobj->uo_ubc, umap, list);
	} else {
		UBC_EVCNT_INCR(wincachehit);
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

	if (flags & UBC_WRITE) {
		KASSERTMSG(umap->writeoff == 0 && umap->writelen == 0,
		    "ubc_alloc: concurrent writes to uobj %p", uobj);
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	umap->advice = advice;
	rw_exit(ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %#jx refs %jd va %#jx flags 0x%jx",
	    (uintptr_t)umap, umap->refcount, (uintptr_t)va, flags);

	if (flags & UBC_FAULTBUSY) {
		int npages = (*lenp + (offset & (PAGE_SIZE - 1)) +
		    PAGE_SIZE - 1) >> PAGE_SHIFT;
		int gpflags =
		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
		    PGO_NOTIMESTAMP;
		int i;
		KDASSERT(flags & UBC_WRITE);
		KASSERT(npages <= *npagesp);
		KASSERT(umap->refcount == 1);

		UBC_EVCNT_INCR(faultbusy);
again_faultbusy:
		rw_enter(uobj->vmobjlock, RW_WRITER);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
		memset(pgs, 0, *npagesp * sizeof(pgs[0]));

		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %jd", error, 0, 0, 0);
		if (error) {
			/*
			 * Flush: the mapping above might have been removed.
			 */
			pmap_update(pmap_kernel());
			goto out;
		}
		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[i];

			KASSERT(pg->uobject == uobj);
			if (pg->loan_count != 0) {
				rw_enter(uobj->vmobjlock, RW_WRITER);
				if (pg->loan_count != 0) {
					pg = uvm_loanbreak(pg);
				}
				if (pg == NULL) {
					pmap_kremove(va, ubc_winsize);
					pmap_update(pmap_kernel());
					uvm_page_unbusy(pgs, npages);
					rw_exit(uobj->vmobjlock);
					uvm_wait("ubc_alloc");
					goto again_faultbusy;
				}
				rw_exit(uobj->vmobjlock);
				pgs[i] = pg;
			}
			/* pmap_kenter_pa() needs a page-aligned VA. */
			pmap_kenter_pa(
			    va + trunc_page(slot_offset) + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE, 0);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
		*npagesp = npages;
	} else {
		KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	}

out:
	return (void *)(va + slot_offset);
}

/*
 * ubc_release: free a file mapping window.
 */

static void __noinline
ubc_release(void *va, int flags, struct vm_page **pgs, int npages)
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	bool unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %#jx", (uintptr_t)va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	if (umap->flags & UMAP_PAGES_LOCKED) {
		const voff_t endoff = umap->writeoff + umap->writelen;
		const voff_t zerolen = round_page(endoff) - endoff;

		KASSERT(npages == (round_page(endoff) -
		    trunc_page(umap->writeoff)) >> PAGE_SHIFT);
		KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		rw_enter(uobj->vmobjlock, RW_WRITER);
		for (u_int i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[i];
#ifdef DIAGNOSTIC
			paddr_t pa;
			bool rv;
			rv = pmap_extract(pmap_kernel(), umapva +
			    umap->writeoff + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			KASSERT(PHYS_TO_VM_PAGE(pa) == pg);
#endif
			pg->flags &= ~PG_FAKE;
			KASSERTMSG(uvm_pagegetdirty(pg) ==
			    UVM_PAGE_STATUS_DIRTY,
			    "page %p not dirty", pg);
			KASSERT(pg->loan_count == 0);
			if (uvmpdpol_pageactivate_p(pg)) {
				uvm_pagelock(pg);
				uvm_pageactivate(pg);
				uvm_pageunlock(pg);
			}
		}
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		uvm_page_unbusy(pgs, npages);
		rw_exit(uobj->vmobjlock);
		unmapped = true;
	} else {
		unmapped = false;
	}

	rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {
			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */
			rw_enter(uobj->vmobjlock, RW_WRITER);
			pmap_remove(pmap_kernel(), umapva,
			    umapva + ubc_winsize);
			pmap_update(pmap_kernel());
			rw_exit(uobj->vmobjlock);

			umap->flags &= ~UMAP_MAPPING_CACHED;
			LIST_REMOVE(umap, hash);
			LIST_REMOVE(umap, list);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %#jx refs %jd", (uintptr_t)umap,
	    umap->refcount, 0, 0);
	rw_exit(ubc_object.uobj.vmobjlock);
}

/*
 * ubc_uiomove: move data to/from an object.
 */
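
/*
 * Illustrative sketch of a typical caller (not part of this file): a
 * vnode read path moves "bytelen" bytes out of the vnode's page cache
 * roughly like this, where "vp", "uio", "bytelen" and "ioflag" are
 * assumed locals of the file system's read routine:
 *
 *	error = ubc_uiomove(&vp->v_uobj, uio, bytelen,
 *	    IO_ADV_DECODE(ioflag), UBC_READ | UBC_PARTIALOK);
 */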

int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
    int flags)
{
	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT];
	voff_t off;
	int error, npages;

	KASSERT(todo <= uio->uio_resid);
	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
	    ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));

#ifdef UBC_USE_PMAP_DIRECT
	/*
	 * during direct access pages need to be held busy to prevent them
	 * changing identity, and therefore if we read or write an object
	 * into a mapped view of the same object we could deadlock while
	 * faulting.
	 *
	 * avoid the problem by disallowing direct access if the object
	 * might be visible somewhere via mmap().
	 *
	 * XXX concurrent reads cause thundering herd issues with PG_BUSY.
	 * In the future enable by default for writes or if ncpu<=2, and
	 * make the toggle override that.
	 */
	if ((ubc_direct && (flags & UBC_ISMAPPED) == 0) ||
	    (flags & UBC_FAULTBUSY) != 0) {
		return ubc_uiomove_direct(uobj, uio, todo, advice, flags);
	}
#endif

	off = uio->uio_offset;
	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;
		void *win;

		npages = __arraycount(pgs);
		win = ubc_alloc(uobj, off, &bytelen, advice, flags, pgs,
		    &npages);
		if (error == 0) {
			error = uiomove(win, bytelen, uio);
		}
		if (error != 0 && overwrite) {
			/*
			 * if we haven't initialized the pages yet,
			 * do it now.  it's safe to use memset here
			 * because we just mapped the pages above.
			 */
			printf("%s: error=%d\n", __func__, error);
			memset(win, 0, bytelen);
		}
		ubc_release(win, flags, pgs, npages);
		off += bytelen;
		todo -= bytelen;
		if (error != 0 && (flags & UBC_PARTIALOK) != 0) {
			break;
		}
	}

	return error;
}

/*
 * ubc_zerorange: set a range of bytes in an object to zero.
 */
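
/*
 * Illustrative sketch (not part of this file): a file system typically
 * zeroes the now-exposed tail of the last block through this interface
 * when extending or truncating a file, with assumed locals "vp", "off"
 * and "len", and where UBC_VNODE_FLAGS(vp) (an assumption here) stands
 * for whatever UBC_UNMAP/UBC_ISMAPPED bits suit the vnode:
 *
 *	ubc_zerorange(&vp->v_uobj, off, len, UBC_VNODE_FLAGS(vp));
 */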

void
ubc_zerorange(struct uvm_object *uobj, off_t off, size_t len, int flags)
{
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT];
	int npages;

#ifdef UBC_USE_PMAP_DIRECT
	if (ubc_direct || (flags & UBC_FAULTBUSY) != 0) {
		ubc_zerorange_direct(uobj, off, len, flags);
		return;
	}
#endif

	/*
	 * XXXUBC invent kzero() and use it
	 */

	while (len) {
		void *win;
		vsize_t bytelen = len;

		npages = __arraycount(pgs);
		win = ubc_alloc(uobj, off, &bytelen, UVM_ADV_NORMAL, UBC_WRITE,
		    pgs, &npages);
		memset(win, 0, bytelen);
		ubc_release(win, flags, pgs, npages);

		off += bytelen;
		len -= bytelen;
	}
}

#ifdef UBC_USE_PMAP_DIRECT
/* Copy data using direct map */

/*
 * ubc_alloc_direct: allocate a file mapping window using direct map
 */
static int __noinline
ubc_alloc_direct(struct uvm_object *uobj, voff_t offset, vsize_t *lenp,
    int advice, int flags, struct vm_page **pgs, int *npages)
{
	voff_t pgoff;
	int error;
	int gpflags = flags | PGO_NOTIMESTAMP | PGO_SYNCIO;
	int access_type = VM_PROT_READ;
	UVMHIST_FUNC("ubc_alloc_direct"); UVMHIST_CALLED(ubchist);

	if (flags & UBC_WRITE) {
		if (flags & UBC_FAULTBUSY)
			gpflags |= PGO_OVERWRITE | PGO_NOBLOCKALLOC;
#if 0
		KASSERT(!UVM_OBJ_NEEDS_WRITEFAULT(uobj));
#endif

		/*
		 * Tell genfs_getpages() we already have the journal lock,
		 * allow allocation past current EOF.
		 */
		gpflags |= PGO_JOURNALLOCKED | PGO_PASTEOF;
		access_type |= VM_PROT_WRITE;
	} else {
		/* Don't need the empty blocks allocated, PG_RDONLY is okay */
		gpflags |= PGO_NOBLOCKALLOC;
	}

	pgoff = (offset & PAGE_MASK);
	*lenp = MIN(*lenp, ubc_winsize - pgoff);

again:
	*npages = (*lenp + pgoff + PAGE_SIZE - 1) >> PAGE_SHIFT;
	KASSERT((*npages * PAGE_SIZE) <= ubc_winsize);
	KASSERT(*lenp + pgoff <= ubc_winsize);
	memset(pgs, 0, *npages * sizeof(pgs[0]));

	rw_enter(uobj->vmobjlock, RW_WRITER);
	error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
	    npages, 0, access_type, advice, gpflags);
	UVMHIST_LOG(ubchist, "alloc_direct getpages %jd", error, 0, 0, 0);
	if (error) {
		if (error == EAGAIN) {
			kpause("ubc_alloc_directg", false, hz >> 2, NULL);
			goto again;
		}
		return error;
	}

	rw_enter(uobj->vmobjlock, RW_WRITER);
	for (int i = 0; i < *npages; i++) {
		struct vm_page *pg = pgs[i];

		KASSERT(pg != NULL);
		KASSERT(pg != PGO_DONTCARE);
		KASSERT((pg->flags & PG_FAKE) == 0 || (gpflags & PGO_OVERWRITE));
		KASSERT(pg->uobject->vmobjlock == uobj->vmobjlock);

		/* Avoid breaking loan if possible, only do it on write */
		if ((flags & UBC_WRITE) && pg->loan_count != 0) {
			pg = uvm_loanbreak(pg);
			if (pg == NULL) {
				uvm_page_unbusy(pgs, *npages);
				rw_exit(uobj->vmobjlock);
				uvm_wait("ubc_alloc_directl");
				goto again;
			}
			pgs[i] = pg;
		}

		/* Page must be writable by now */
		KASSERT((pg->flags & PG_RDONLY) == 0 || (flags & UBC_WRITE) == 0);

		/*
		 * XXX For aobj pages.  No managed mapping - mark the page
		 * dirty.
		 */
		if ((flags & UBC_WRITE) != 0) {
			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
		}
	}
	rw_exit(uobj->vmobjlock);

	return 0;
}

static void __noinline
ubc_direct_release(struct uvm_object *uobj,
	int flags, struct vm_page **pgs, int npages)
{
	rw_enter(uobj->vmobjlock, RW_WRITER);
	for (int i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];

		pg->flags &= ~PG_BUSY;
		UVM_PAGE_OWN(pg, NULL);
		if (pg->flags & PG_RELEASED) {
			pg->flags &= ~PG_RELEASED;
			uvm_pagefree(pg);
			continue;
		}

		if (uvm_pagewanted_p(pg) || uvmpdpol_pageactivate_p(pg)) {
			uvm_pagelock(pg);
			uvm_pageactivate(pg);
			uvm_pagewakeup(pg);
			uvm_pageunlock(pg);
		}

		/* Page was changed, no longer fake and neither clean. */
		if (flags & UBC_WRITE) {
			KASSERTMSG(uvm_pagegetdirty(pg) ==
			    UVM_PAGE_STATUS_DIRTY,
			    "page %p not dirty", pg);
			pg->flags &= ~PG_FAKE;
		}
	}
	rw_exit(uobj->vmobjlock);
}

static int
ubc_uiomove_process(void *win, size_t len, void *arg)
{
	struct uio *uio = (struct uio *)arg;

	return uiomove(win, len, uio);
}

static int
ubc_zerorange_process(void *win, size_t len, void *arg)
{
	memset(win, 0, len);
	return 0;
}

static int __noinline
ubc_uiomove_direct(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
    int flags)
{
	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
	voff_t off;
	int error, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT];

	KASSERT(todo <= uio->uio_resid);
	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
	    ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));

	off = uio->uio_offset;
	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;

		error = ubc_alloc_direct(uobj, off, &bytelen, advice, flags,
		    pgs, &npages);
		if (error != 0) {
			/* can't do anything, failed to get the pages */
			break;
		}

		if (error == 0) {
			error = uvm_direct_process(pgs, npages, off, bytelen,
			    ubc_uiomove_process, uio);
		}

		if (overwrite) {
			voff_t endoff;

			/*
			 * if we haven't initialized the pages yet due to an
			 * error above, do it now.
			 */
			if (error != 0) {
				printf("%s: error=%d\n", __func__, error);
				(void) uvm_direct_process(pgs, npages, off,
				    bytelen, ubc_zerorange_process, NULL);
			}

			off += bytelen;
			todo -= bytelen;
			endoff = off & (PAGE_SIZE - 1);

			/*
			 * zero out the remaining portion of the final page
			 * (if any).
			 */
			if (todo == 0 && endoff != 0) {
				vsize_t zlen = PAGE_SIZE - endoff;
				(void) uvm_direct_process(pgs + npages - 1, 1,
				    off, zlen, ubc_zerorange_process, NULL);
			}
		} else {
			off += bytelen;
			todo -= bytelen;
		}

		ubc_direct_release(uobj, flags, pgs, npages);

		if (error != 0 && ISSET(flags, UBC_PARTIALOK)) {
			break;
		}
	}

	return error;
}

static void __noinline
ubc_zerorange_direct(struct uvm_object *uobj, off_t off, size_t todo, int flags)
{
	int error, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT];

	flags |= UBC_WRITE;

	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;

		error = ubc_alloc_direct(uobj, off, &bytelen, UVM_ADV_NORMAL,
		    flags, pgs, &npages);
		if (error != 0) {
			/* can't do anything, failed to get the pages */
			break;
		}

		error = uvm_direct_process(pgs, npages, off, bytelen,
		    ubc_zerorange_process, NULL);

		ubc_direct_release(uobj, flags, pgs, npages);

		off += bytelen;
		todo -= bytelen;
	}
}

#endif /* UBC_USE_PMAP_DIRECT */

/*
 * ubc_purge: disassociate ubc_map structures from an empty uvm_object.
 */
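
/*
 * Illustrative sketch of the assumed call site (not from this file):
 * object teardown is expected to purge any remaining windows once the
 * object is known to be empty, roughly:
 *
 *	KASSERT(uobj->uo_npages == 0);
 *	ubc_purge(uobj);
 */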

void
ubc_purge(struct uvm_object *uobj)
{
	struct ubc_map *umap;
	vaddr_t va;

	KASSERT(uobj->uo_npages == 0);

	/*
	 * Safe to check without lock held, as ubc_alloc() removes
	 * the mapping and list entry in the correct order.
	 */
	if (__predict_true(LIST_EMPTY(&uobj->uo_ubc))) {
		return;
	}
	rw_enter(ubc_object.uobj.vmobjlock, RW_WRITER);
	while ((umap = LIST_FIRST(&uobj->uo_ubc)) != NULL) {
		KASSERT(umap->refcount == 0);
		for (va = 0; va < ubc_winsize; va += PAGE_SIZE) {
			KASSERT(!pmap_extract(pmap_kernel(),
			    va + UBC_UMAP_ADDR(umap), NULL));
		}
		LIST_REMOVE(umap, list);
		LIST_REMOVE(umap, hash);
		umap->flags &= ~UMAP_MAPPING_CACHED;
		umap->uobj = NULL;
	}
	rw_exit(ubc_object.uobj.vmobjlock);
}