/*	$NetBSD: uvm_bio.c,v 1.94 2018/04/20 18:58:10 jdolecek Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */
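
/*
 * Overview: UBC keeps a cache of kernel virtual address windows, each
 * ubc_winsize bytes, that are mapped on demand over (uvm_object, offset)
 * ranges, typically for vnodes.  ubc_alloc() hands out a window, the
 * caller copies data through it (pages are faulted in via ubc_fault()),
 * and ubc_release() returns the window to the cache.  Windows stay
 * hashed by (object, offset) so that repeated i/o to the same range can
 * reuse an existing mapping.
 */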

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.94 2018/04/20 18:58:10 jdolecek Exp $");

#include "opt_uvmhist.h"
#include "opt_ubc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

/*
 * local functions
 */

static int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
			  int, int, vm_prot_t, int);
static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);

/*
 * local data structures
 */

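/*
 * UBC_HASH maps an (object, offset) pair to a bucket of the window hash
 * table.  UBC_QUEUE picks one of the UBC_NQUEUES inactive queues by
 * window-aligned offset, so a recycled window tends to have a virtual
 * address whose cache color is compatible with the new offset (see the
 * PMAP_PREFER handling in ubc_init()).  UBC_UMAP_ADDR turns a ubc_map
 * pointer back into the kernel virtual address of its window.
 */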
#define UBC_HASH(uobj, offset) \
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)

#define UBC_QUEUE(offset) \
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) & \
			     (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u) \
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))


#define UMAP_PAGES_LOCKED	0x0001	/* window pages busied by UBC_FAULTBUSY */
#define UMAP_MAPPING_CACHED	0x0002	/* kernel pmap mapping still valid */

struct ubc_map {
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */
	int			advice;		/* access pattern hint (UVM_ADV_*) */

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
	LIST_ENTRY(ubc_map)	list;		/* per-object list */
};
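
/*
 * A ubc_map with refcount == 0 sits on one of the inactive queues but
 * keeps its hash and per-object list membership, so ubc_alloc() can
 * either revive it cheaply for the same (object, offset) or recycle it
 * for a different one.  umap->uobj is NULL only while the map is not
 * associated with any object.
 */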

TAILQ_HEAD(ubc_inactive_head, ubc_map);
static struct ubc_object {
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	struct ubc_inactive_head *inactive;
					/* inactive queues for ubc_map's */
} ubc_object;

const struct uvm_pagerops ubc_pager = {
	.pgo_fault = ubc_fault,
	/* ... rest are NULL */
};

int ubc_nwins = UBC_NWINS;
int ubc_winshift __read_mostly = UBC_WINSHIFT;
int ubc_winsize __read_mostly;
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif
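
/*
 * On platforms with PMAP_PREFER (virtually-indexed caches), the windows
 * are spread over one inactive queue per cache color so that UBC_QUEUE()
 * can hand back a window whose virtual address is alias-compatible with
 * the file offset being mapped; otherwise a single queue suffices.
 */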

#if defined(UBC_STATS)

#define	UBC_EVCNT_DEFINE(name) \
struct evcnt ubc_evcnt_##name = \
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "ubc", #name); \
EVCNT_ATTACH_STATIC(ubc_evcnt_##name);
#define	UBC_EVCNT_INCR(name) ubc_evcnt_##name.ev_count++

#else /* defined(UBC_STATS) */

#define	UBC_EVCNT_DEFINE(name)	/* nothing */
#define	UBC_EVCNT_INCR(name)	/* nothing */

#endif /* defined(UBC_STATS) */

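/*
 * Window cache hit/miss and UBC_FAULTBUSY counters; with "options
 * UBC_STATS" they become evcnt(9) counters, visible e.g. via vmstat -e.
 */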
UBC_EVCNT_DEFINE(wincachehit)
UBC_EVCNT_DEFINE(wincachemiss)
UBC_EVCNT_DEFINE(faultbusy)

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * Make sure ubc_winshift is sane.
	 */
	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	uvm_obj_init(&ubc_object.uobj, &ubc_pager, true, UVM_OBJ_KERN);

	ubc_object.umap = kmem_zalloc(ubc_nwins * sizeof(struct ubc_map),
	    KM_SLEEP);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");

	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = kmem_alloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), KM_SLEEP);
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
		    umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, true,
	    &ubc_object.hashmask);
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
		    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
		    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
}
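
/*
 * Sizing example (illustrative only; the actual values are port- and
 * option-dependent): with ubc_nwins = 1024 and ubc_winshift = 13
 * (8 KB windows), the uvm_map() above reserves 1024 << 13 = 8 MB of
 * kernel virtual address space for the window cache.
 */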

void
ubchist_init(void)
{

	UVMHIST_INIT(ubchist, 300);
}

/*
 * ubc_fault_page: helper of ubc_fault to handle a single page.
 *
 * => Caller has UVM object locked.
 * => Caller will perform pmap_update().
 */

static inline int
ubc_fault_page(const struct uvm_faultinfo *ufi, const struct ubc_map *umap,
    struct vm_page *pg, vm_prot_t prot, vm_prot_t access_type, vaddr_t va)
{
	struct uvm_object *uobj;
	vm_prot_t mask;
	int error;
	bool rdonly;

	uobj = pg->uobject;
	KASSERT(mutex_owned(uobj->vmobjlock));

	if (pg->flags & PG_WANTED) {
		wakeup(pg);
	}
	KASSERT((pg->flags & PG_FAKE) == 0);
	if (pg->flags & PG_RELEASED) {
		mutex_enter(&uvm_pageqlock);
		uvm_pagefree(pg);
		mutex_exit(&uvm_pageqlock);
		return 0;
	}
	if (pg->loan_count != 0) {

		/*
		 * Avoid unneeded loan break, if possible.
		 */

		if ((access_type & VM_PROT_WRITE) == 0) {
			prot &= ~VM_PROT_WRITE;
		}
		if (prot & VM_PROT_WRITE) {
			struct vm_page *newpg;

			newpg = uvm_loanbreak(pg);
			if (newpg == NULL) {
				uvm_page_unbusy(&pg, 1);
				return ENOMEM;
			}
			pg = newpg;
		}
	}

	/*
	 * Note that a page whose backing store is partially allocated
	 * is marked as PG_RDONLY.
	 */

	KASSERT((pg->flags & PG_RDONLY) == 0 ||
	    (access_type & VM_PROT_WRITE) == 0 ||
	    pg->offset < umap->writeoff ||
	    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);

	rdonly = ((access_type & VM_PROT_WRITE) == 0 &&
	    (pg->flags & PG_RDONLY) != 0) ||
	    UVM_OBJ_NEEDS_WRITEFAULT(uobj);
	mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;

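	/*
	 * Enter the mapping with PMAP_CANFAIL, so that on pmap resource
	 * shortage we get an error back instead of sleeping; the caller
	 * (ubc_fault()) then does uvm_wait() and lets the access re-fault.
	 */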
	error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
	    prot & mask, PMAP_CANFAIL | (access_type & mask));

	mutex_enter(&uvm_pageqlock);
	uvm_pageactivate(pg);
	mutex_exit(&uvm_pageqlock);
	pg->flags &= ~(PG_BUSY|PG_WANTED);
	UVM_PAGE_OWN(pg, NULL);

	return error;
}

/*
 * ubc_fault: fault routine for ubc mapping
 */

static int
ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
    int ign3, int ign4, vm_prot_t access_type, int flags)
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT];
	int i, error, npages;
	vm_prot_t prot;

	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;
	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/*
	 * some platforms cannot write to individual bytes atomically, so
	 * software has to do read/modify/write of larger quantities instead.
	 * this means that the access_type for "write" operations
	 * can be VM_PROT_READ, which confuses us mightily.
	 *
	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
	 */

	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
	UVMHIST_LOG(ubchist, "va 0x%jx ubc_offset 0x%jx access_type %jd",
	    va, ubc_offset, access_type, 0);

	if ((access_type & VM_PROT_WRITE) != 0) {
#ifndef PRIxOFF		/* XXX */
#define PRIxOFF "jx"	/* XXX */
#endif /* XXX */
		KASSERTMSG((trunc_page(umap->writeoff) <= slot_offset),
		    "out of range write: slot=%#"PRIxVSIZE" off=%#"PRIxOFF,
		    slot_offset, (intmax_t)umap->writeoff);
		KASSERTMSG((slot_offset < umap->writeoff + umap->writelen),
355 "out of range write: slot=%#"PRIxVADDR
356 " off=%#"PRIxOFF" len=%#"PRIxVSIZE,
357 slot_offset, (intmax_t)umap->writeoff, umap->writelen);
358 }
359
360 /* no umap locking needed since we have a ref on the umap */
361 uobj = umap->uobj;
362
363 if ((access_type & VM_PROT_WRITE) == 0) {
364 npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
365 } else {
366 npages = (round_page(umap->offset + umap->writeoff +
367 umap->writelen) - (umap->offset + slot_offset))
368 >> PAGE_SHIFT;
369 flags |= PGO_PASTEOF;
370 }
371
372 again:
373 memset(pgs, 0, sizeof (pgs));
374 mutex_enter(uobj->vmobjlock);
375
376 UVMHIST_LOG(ubchist, "slot_offset 0x%jx writeoff 0x%jx writelen 0x%jx ",
377 slot_offset, umap->writeoff, umap->writelen, 0);
378 UVMHIST_LOG(ubchist, "getpages uobj %#jx offset 0x%jx npages %jd",
379 (uintptr_t)uobj, umap->offset + slot_offset, npages, 0);
380
381 error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
382 &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
383 PGO_NOTIMESTAMP);
384 UVMHIST_LOG(ubchist, "getpages error %jd npages %jd", error, npages, 0,
385 0);
386
387 if (error == EAGAIN) {
388 kpause("ubc_fault", false, hz >> 2, NULL);
389 goto again;
390 }
391 if (error) {
392 return error;
393 }
394
395 /*
396 * For virtually-indexed, virtually-tagged caches we should avoid
397 * creating writable mappings when we do not absolutely need them,
398 * since the "compatible alias" trick does not work on such caches.
399 * Otherwise, we can always map the pages writable.
400 */
401
402 #ifdef PMAP_CACHE_VIVT
403 prot = VM_PROT_READ | access_type;
404 #else
405 prot = VM_PROT_READ | VM_PROT_WRITE;
406 #endif
407
408 va = ufi->orig_rvaddr;
409 eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);
410
411 UVMHIST_LOG(ubchist, "va 0x%jx eva 0x%jx", va, eva, 0, 0);
412
413 /*
414 * Note: normally all returned pages would have the same UVM object.
415 * However, layered file-systems and e.g. tmpfs, may return pages
416 * which belong to underlying UVM object. In such case, lock is
417 * shared amongst the objects.
418 */
419 mutex_enter(uobj->vmobjlock);
420 for (i = 0; va < eva; i++, va += PAGE_SIZE) {
421 struct vm_page *pg;
422
423 UVMHIST_LOG(ubchist, "pgs[%jd] = %#jx", i, (uintptr_t)pgs[i],
424 0, 0);
425 pg = pgs[i];
426
427 if (pg == NULL || pg == PGO_DONTCARE) {
428 continue;
429 }
430 KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
431 error = ubc_fault_page(ufi, umap, pg, prot, access_type, va);
432 if (error) {
433 /*
434 * Flush (there might be pages entered), drop the lock,
435 * and perform uvm_wait(). Note: page will re-fault.
436 */
437 pmap_update(ufi->orig_map->pmap);
438 mutex_exit(uobj->vmobjlock);
439 uvm_wait("ubc_fault");
440 mutex_enter(uobj->vmobjlock);
441 }
442 }
443 /* Must make VA visible before the unlock. */
444 pmap_update(ufi->orig_map->pmap);
445 mutex_exit(uobj->vmobjlock);
446
447 return 0;
448 }

/*
 * local functions
 */

static struct ubc_map *
ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

/*
 * ubc_alloc: allocate a file mapping window
 */

static void * __noinline
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
    int flags)
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %#jx offset 0x%jx len 0x%jx",
	    (uintptr_t)uobj, offset, *lenp, 0);

	KASSERT(*lenp > 0);
	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);

	mutex_enter(ubc_object.uobj.vmobjlock);
again:
	/*
	 * The UVM object is already referenced.
	 * Lock order: UBC object -> ubc_map::uobj.
	 */
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		struct uvm_object *oobj;

		UBC_EVCNT_INCR(wincachemiss);
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			kpause("ubc_alloc", false, hz >> 2,
			    ubc_object.uobj.vmobjlock);
			goto again;
		}

		va = UBC_UMAP_ADDR(umap);
		oobj = umap->uobj;

		/*
		 * Remove from old hash (if any), add to new hash.
		 */

		if (oobj != NULL) {
			/*
			 * Mapping must be removed before the list entry,
			 * since there is a race with ubc_purge().
			 */
			if (umap->flags & UMAP_MAPPING_CACHED) {
				umap->flags &= ~UMAP_MAPPING_CACHED;
				mutex_enter(oobj->vmobjlock);
				pmap_remove(pmap_kernel(), va,
				    va + ubc_winsize);
				pmap_update(pmap_kernel());
				mutex_exit(oobj->vmobjlock);
			}
			LIST_REMOVE(umap, hash);
			LIST_REMOVE(umap, list);
		} else {
			KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		LIST_INSERT_HEAD(&uobj->uo_ubc, umap, list);
	} else {
		UBC_EVCNT_INCR(wincachehit);
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

	if (flags & UBC_WRITE) {
		KASSERTMSG(umap->writeoff == 0 && umap->writelen == 0,
		    "ubc_alloc: concurrent writes to uobj %p", uobj);
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	umap->advice = advice;
	mutex_exit(ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %#jx refs %jd va %#jx flags 0x%jx",
	    (uintptr_t)umap, umap->refcount, (uintptr_t)va, flags);

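	/*
	 * UBC_FAULTBUSY: for overwrite-style writes, get and busy all the
	 * pages up front (PGO_OVERWRITE spares reading in data that is
	 * about to be overwritten) and enter kernel mappings with
	 * pmap_kenter_pa(), so the subsequent copy cannot fault.  The
	 * pages remain busy until ubc_release().
	 */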
	if (flags & UBC_FAULTBUSY) {
		int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		int gpflags =
		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
		    PGO_NOTIMESTAMP;
		int i;
		KDASSERT(flags & UBC_WRITE);
		KASSERT(umap->refcount == 1);

		UBC_EVCNT_INCR(faultbusy);
again_faultbusy:
		mutex_enter(uobj->vmobjlock);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
		memset(pgs, 0, sizeof(pgs));

		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %jd", error, 0, 0, 0);
		if (error) {
			/*
			 * Flush: the mapping above might have been removed.
			 */
			pmap_update(pmap_kernel());
			goto out;
		}
		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[i];

			KASSERT(pg->uobject == uobj);
			if (pg->loan_count != 0) {
				mutex_enter(uobj->vmobjlock);
				if (pg->loan_count != 0) {
					pg = uvm_loanbreak(pg);
				}
				if (pg == NULL) {
					pmap_kremove(va, ubc_winsize);
					pmap_update(pmap_kernel());
					uvm_page_unbusy(pgs, npages);
					mutex_exit(uobj->vmobjlock);
					uvm_wait("ubc_alloc");
					goto again_faultbusy;
				}
				mutex_exit(uobj->vmobjlock);
				pgs[i] = pg;
			}
			pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE, 0);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
	} else {
		KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	}

out:
	return (void *)(va + slot_offset);
}

/*
 * ubc_release: free a file mapping window.
 */

static void __noinline
ubc_release(void *va, int flags)
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	bool unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %#jx", (uintptr_t)va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	if (umap->flags & UMAP_PAGES_LOCKED) {
		const voff_t slot_offset = umap->writeoff;
		const voff_t endoff = umap->writeoff + umap->writelen;
		const voff_t zerolen = round_page(endoff) - endoff;
		const u_int npages = (round_page(endoff) -
		    trunc_page(slot_offset)) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];

		KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		mutex_enter(uobj->vmobjlock);
		mutex_enter(&uvm_pageqlock);
		for (u_int i = 0; i < npages; i++) {
			paddr_t pa;
			bool rv __diagused;

			rv = pmap_extract(pmap_kernel(),
			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
			KASSERT(pgs[i]->loan_count == 0);
			uvm_pageactivate(pgs[i]);
		}
		mutex_exit(&uvm_pageqlock);
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		uvm_page_unbusy(pgs, npages);
		mutex_exit(uobj->vmobjlock);
		unmapped = true;
	} else {
		unmapped = false;
	}

	mutex_enter(ubc_object.uobj.vmobjlock);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {
			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */
			mutex_enter(uobj->vmobjlock);
			pmap_remove(pmap_kernel(), umapva,
			    umapva + ubc_winsize);
			pmap_update(pmap_kernel());
			mutex_exit(uobj->vmobjlock);

			umap->flags &= ~UMAP_MAPPING_CACHED;
			LIST_REMOVE(umap, hash);
			LIST_REMOVE(umap, list);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %#jx refs %jd", (uintptr_t)umap,
	    umap->refcount, 0, 0);
	mutex_exit(ubc_object.uobj.vmobjlock);
}

/*
 * ubc_uiomove: move data to/from an object.
 */

int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
    int flags)
{
	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
	voff_t off;
	int error;

	KASSERT(todo <= uio->uio_resid);
	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
	    ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));

	off = uio->uio_offset;
	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;
		void *win;

		win = ubc_alloc(uobj, off, &bytelen, advice, flags);
		if (error == 0) {
			error = uiomove(win, bytelen, uio);
		}
		if (error != 0 && overwrite) {
			/*
			 * if we haven't initialized the pages yet,
			 * do it now.  it's safe to use memset here
			 * because we just mapped the pages above.
			 */
			printf("%s: error=%d\n", __func__, error);
			memset(win, 0, bytelen);
		}
		ubc_release(win, flags);
		off += bytelen;
		todo -= bytelen;
		if (error != 0 && (flags & UBC_PARTIALOK) != 0) {
			break;
		}
	}

	return error;
}
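
/*
 * Usage sketch (illustrative, not from this file): a vnode read path
 * built on UBC looks roughly like
 *
 *	vsize_t bytelen = MIN(uio->uio_resid, vp->v_size - uio->uio_offset);
 *	error = ubc_uiomove(&vp->v_uobj, uio, bytelen, advice,
 *	    UBC_READ | UBC_PARTIALOK);
 *
 * with "vp", "uio" and "advice" supplied by the caller; real callers
 * also or in UBC_UNMAP_FLAG(vp) where virtual cache aliasing requires
 * unmapping on release.
 */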

/*
 * ubc_zerorange: set a range of bytes in an object to zero.
 */

void
ubc_zerorange(struct uvm_object *uobj, off_t off, size_t len, int flags)
{
	void *win;

	/*
	 * XXXUBC invent kzero() and use it
	 */

	while (len) {
		vsize_t bytelen = len;

		win = ubc_alloc(uobj, off, &bytelen, UVM_ADV_NORMAL, UBC_WRITE);
		memset(win, 0, bytelen);
		ubc_release(win, flags);

		off += bytelen;
		len -= bytelen;
	}
}
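
/*
 * Usage sketch (illustrative): truncation paths use this to clear the
 * partial block beyond a new end-of-file, roughly
 *
 *	ubc_zerorange(&vp->v_uobj, newsize, round_block(newsize) - newsize,
 *	    UBC_UNMAP_FLAG(vp));
 *
 * where "round_block" stands in for whatever block rounding the
 * particular file system uses.
 */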

/*
 * ubc_purge: disassociate ubc_map structures from an empty uvm_object.
 */

void
ubc_purge(struct uvm_object *uobj)
{
	struct ubc_map *umap;
	vaddr_t va;

	KASSERT(uobj->uo_npages == 0);

	/*
	 * Safe to check without lock held, as ubc_alloc() removes
	 * the mapping and list entry in the correct order.
	 */
	if (__predict_true(LIST_EMPTY(&uobj->uo_ubc))) {
		return;
	}
	mutex_enter(ubc_object.uobj.vmobjlock);
	while ((umap = LIST_FIRST(&uobj->uo_ubc)) != NULL) {
		KASSERT(umap->refcount == 0);
		for (va = 0; va < ubc_winsize; va += PAGE_SIZE) {
			KASSERT(!pmap_extract(pmap_kernel(),
			    va + UBC_UMAP_ADDR(umap), NULL));
		}
		LIST_REMOVE(umap, list);
		LIST_REMOVE(umap, hash);
		umap->flags &= ~UMAP_MAPPING_CACHED;
		umap->uobj = NULL;
	}
	mutex_exit(ubc_object.uobj.vmobjlock);
}