/*	$NetBSD: uvm_bio.c,v 1.73 2011/06/12 03:36:02 rmind Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.73 2011/06/12 03:36:02 rmind Exp $");

#include "opt_uvmhist.h"
#include "opt_ubc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

/*
 * local functions
 */

static int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
		    int, int, vm_prot_t, int);
static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset) \
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
	    ubc_object.hashmask)

#define UBC_QUEUE(offset) \
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) & \
	    (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u) \
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))

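/*
 * Illustrative example (numbers are hypothetical): with ubc_winshift = 13
 * each window is 8 kB, so ubc_map #3 covers the kernel VA range
 * [ubc_object.kva + 0x6000, ubc_object.kva + 0x8000), and UBC_QUEUE()
 * spreads file offsets across the inactive queues in 8 kB strides.
 */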

#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map {
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */
	int			advice;

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
	LIST_ENTRY(ubc_map)	list;		/* per-object list */
};

static struct ubc_object {
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */
} ubc_object;
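
/*
 * Lifecycle note: a ubc_map is found by (uobj, offset) via the hash table,
 * holds a non-zero refcount while ubc_alloc() has handed out its window,
 * and sits on one of the inactive queues whenever the refcount is zero so
 * that it can be recycled for another object and offset.
 */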

const struct uvm_pagerops ubc_pager = {
	.pgo_fault = ubc_fault,
	/* ... rest are NULL */
};

int ubc_nwins = UBC_NWINS;
int ubc_winshift = UBC_WINSHIFT;
int ubc_winsize;
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif

#if defined(UBC_STATS)

#define	UBC_EVCNT_DEFINE(name) \
struct evcnt ubc_evcnt_##name = \
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "ubc", #name); \
EVCNT_ATTACH_STATIC(ubc_evcnt_##name);
#define	UBC_EVCNT_INCR(name) ubc_evcnt_##name.ev_count++

#else /* defined(UBC_STATS) */

#define	UBC_EVCNT_DEFINE(name)	/* nothing */
#define	UBC_EVCNT_INCR(name)	/* nothing */

#endif /* defined(UBC_STATS) */

UBC_EVCNT_DEFINE(wincachehit)
UBC_EVCNT_DEFINE(wincachemiss)
UBC_EVCNT_DEFINE(faultbusy)

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * Make sure ubc_winshift is sane.
	 */
	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	uvm_obj_init(&ubc_object.uobj, &ubc_pager, true, UVM_OBJ_KERN);

	ubc_object.umap = kmem_zalloc(ubc_nwins * sizeof(struct ubc_map),
	    KM_SLEEP);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");

	if (ubc_winshift < PAGE_SHIFT) {
		ubc_winshift = PAGE_SHIFT;
	}
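
	/*
	 * On machines with virtually-indexed caches, PMAP_PREFER() yields
	 * the alignment (alias distance) that kernel mappings should share
	 * with user mappings of the same pages; "va" is reused below as the
	 * alignment passed to uvm_map().  Keeping one inactive queue per
	 * window-sized slice of that alignment lets UBC_QUEUE() hand back a
	 * window whose kernel VA is cache-alias compatible with the file
	 * offset being mapped.
	 */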
	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = kmem_alloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), KM_SLEEP);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
		    umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, true,
	    &ubc_object.hashmask);
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
	    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}

/*
 * ubc_fault_page: helper of ubc_fault to handle a single page.
 *
 * => Caller has UVM object locked.
 * => Caller will perform pmap_update().
 */

static inline int
ubc_fault_page(const struct uvm_faultinfo *ufi, const struct ubc_map *umap,
    struct vm_page *pg, vm_prot_t prot, vm_prot_t access_type, vaddr_t va)
{
	struct uvm_object *uobj;
	vm_prot_t mask;
	int error;
	bool rdonly;

	uobj = pg->uobject;
	KASSERT(mutex_owned(uobj->vmobjlock));

	if (pg->flags & PG_WANTED) {
		wakeup(pg);
	}
	KASSERT((pg->flags & PG_FAKE) == 0);
	if (pg->flags & PG_RELEASED) {
		mutex_enter(&uvm_pageqlock);
		uvm_pagefree(pg);
		mutex_exit(&uvm_pageqlock);
		return 0;
	}
	if (pg->loan_count != 0) {

		/*
		 * Avoid unneeded loan break, if possible.
		 */

		if ((access_type & VM_PROT_WRITE) == 0) {
			prot &= ~VM_PROT_WRITE;
		}
		if (prot & VM_PROT_WRITE) {
			struct vm_page *newpg;

			newpg = uvm_loanbreak(pg);
			if (newpg == NULL) {
				uvm_page_unbusy(&pg, 1);
				return ENOMEM;
			}
			pg = newpg;
		}
	}

	/*
	 * Note that a page whose backing store is partially allocated
	 * is marked as PG_RDONLY.
	 */

	KASSERT((pg->flags & PG_RDONLY) == 0 ||
	    (access_type & VM_PROT_WRITE) == 0 ||
	    pg->offset < umap->writeoff ||
	    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);

	rdonly = ((access_type & VM_PROT_WRITE) == 0 &&
	    (pg->flags & PG_RDONLY) != 0) ||
	    UVM_OBJ_NEEDS_WRITEFAULT(uobj);
	mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;

	error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
	    prot & mask, PMAP_CANFAIL | (access_type & mask));

	mutex_enter(&uvm_pageqlock);
	uvm_pageactivate(pg);
	mutex_exit(&uvm_pageqlock);
	pg->flags &= ~(PG_BUSY|PG_WANTED);
	UVM_PAGE_OWN(pg, NULL);

	return error;
}

/*
 * ubc_fault: fault routine for ubc mapping
 */

static int
ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
    int ign3, int ign4, vm_prot_t access_type, int flags)
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT];
	int i, error, npages;
	vm_prot_t prot;

	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;
	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/*
	 * some platforms cannot write to individual bytes atomically, so
	 * software has to do read/modify/write of larger quantities instead.
	 * this means that the access_type for "write" operations
	 * can be VM_PROT_READ, which confuses us mightily.
	 *
	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
	 */
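	/*
	 * (For example, a pre-BWX Alpha CPU has no byte store instruction:
	 * a one-byte write is done as a quadword load, insert and store,
	 * so the access that faults first is the read.)
	 */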

	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx access_type %d",
	    va, ubc_offset, access_type, 0);

#ifdef DIAGNOSTIC
	if ((access_type & VM_PROT_WRITE) != 0) {
		if (slot_offset < trunc_page(umap->writeoff) ||
		    umap->writeoff + umap->writelen <= slot_offset) {
			panic("ubc_fault: out of range write");
		}
	}
#endif

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;

	if ((access_type & VM_PROT_WRITE) == 0) {
		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
	} else {
		npages = (round_page(umap->offset + umap->writeoff +
		    umap->writelen) - (umap->offset + slot_offset))
		    >> PAGE_SHIFT;
		flags |= PGO_PASTEOF;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	mutex_enter(uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x ",
	    slot_offset, umap->writeoff, umap->writelen, 0);
	UVMHIST_LOG(ubchist, "getpages uobj %p offset 0x%x npages %d",
	    uobj, umap->offset + slot_offset, npages, 0);

	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
	    &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		kpause("ubc_fault", false, hz >> 2, NULL);
		goto again;
	}
	if (error) {
		return error;
	}

	/*
	 * For virtually-indexed, virtually-tagged caches we should avoid
	 * creating writable mappings when we do not absolutely need them,
	 * since the "compatible alias" trick does not work on such caches.
	 * Otherwise, we can always map the pages writable.
	 */

#ifdef PMAP_CACHE_VIVT
	prot = VM_PROT_READ | access_type;
#else
	prot = VM_PROT_READ | VM_PROT_WRITE;
#endif

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0, 0);

	/*
	 * Note: normally all returned pages would have the same UVM object.
	 * However, layered file-systems and e.g. tmpfs, may return pages
	 * which belong to underlying UVM object.  In such case, lock is
	 * shared amongst the objects.
	 */
	mutex_enter(uobj->vmobjlock);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		struct vm_page *pg;

		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i], 0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
		error = ubc_fault_page(ufi, umap, pg, prot, access_type, va);
		if (error) {
			/*
			 * Flush (there might be pages entered), drop the lock,
			 * and perform uvm_wait().  Note: page will re-fault.
			 */
			pmap_update(ufi->orig_map->pmap);
			mutex_exit(uobj->vmobjlock);
			uvm_wait("ubc_fault");
			mutex_enter(uobj->vmobjlock);
		}
	}
	/* Must make VA visible before the unlock. */
	pmap_update(ufi->orig_map->pmap);
	mutex_exit(uobj->vmobjlock);

	return 0;
}

/*
 * local functions
 */

static struct ubc_map *
ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

/*
 * ubc_alloc: allocate a file mapping window
 */
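
/*
 * Window protocol sketch (hypothetical caller, error handling omitted):
 * the returned kernel VA is valid for at most
 * ubc_winsize - (offset & (ubc_winsize - 1)) bytes, so callers loop:
 *
 *	while (todo > 0) {
 *		vsize_t bytelen = todo;
 *		void *win = ubc_alloc(uobj, off, &bytelen, advice, flags);
 *		... copy bytelen bytes to or from win ...
 *		ubc_release(win, flags);
 *		off += bytelen;
 *		todo -= bytelen;
 *	}
 *
 * ubc_uiomove() below wraps exactly this loop around uiomove().
 */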

void *
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
    int flags)
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx",
	    uobj, offset, *lenp, 0);

	KASSERT(*lenp > 0);
	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);

	/*
	 * The object is already referenced, so we do not need to add a ref.
	 * Lock order: UBC object -> ubc_map::uobj.
	 */
	mutex_enter(ubc_object.uobj.vmobjlock);
again:
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		struct uvm_object *oobj;

		UBC_EVCNT_INCR(wincachemiss);
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			kpause("ubc_alloc", false, hz >> 2,
			    ubc_object.uobj.vmobjlock);
			goto again;
		}

		va = UBC_UMAP_ADDR(umap);
		oobj = umap->uobj;

		/*
		 * remove from old hash (if any), add to new hash.
		 */

		if (oobj != NULL) {
			LIST_REMOVE(umap, hash);
			LIST_REMOVE(umap, list);
			if (umap->flags & UMAP_MAPPING_CACHED) {
				umap->flags &= ~UMAP_MAPPING_CACHED;
				mutex_enter(oobj->vmobjlock);
				pmap_remove(pmap_kernel(), va,
				    va + ubc_winsize);
				pmap_update(pmap_kernel());
				mutex_exit(oobj->vmobjlock);
			}
		} else {
			KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		LIST_INSERT_HEAD(&uobj->uo_ubc, umap, list);
	} else {
		UBC_EVCNT_INCR(wincachehit);
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

	if (flags & UBC_WRITE) {
		KASSERTMSG(umap->writeoff == 0 && umap->writelen == 0,
		    ("ubc_alloc: concurrent writes to uobj %p", uobj));
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	umap->advice = advice;
	mutex_exit(ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p flags 0x%x",
	    umap, umap->refcount, va, flags);

	if (flags & UBC_FAULTBUSY) {
		int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		int gpflags =
		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
		    PGO_NOTIMESTAMP;
		int i;
		KDASSERT(flags & UBC_WRITE);
		KASSERT(umap->refcount == 1);

		UBC_EVCNT_INCR(faultbusy);
again_faultbusy:
		mutex_enter(uobj->vmobjlock);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
		memset(pgs, 0, sizeof(pgs));

		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %d", error, 0, 0, 0);
		if (error) {
			goto out;
		}
		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[i];

			KASSERT(pg->uobject == uobj);
			if (pg->loan_count != 0) {
				mutex_enter(uobj->vmobjlock);
				if (pg->loan_count != 0) {
					pg = uvm_loanbreak(pg);
				}
				if (pg == NULL) {
					pmap_kremove(va, ubc_winsize);
					pmap_update(pmap_kernel());
					uvm_page_unbusy(pgs, npages);
					mutex_exit(uobj->vmobjlock);
					uvm_wait("ubc_alloc");
					goto again_faultbusy;
				}
				mutex_exit(uobj->vmobjlock);
				pgs[i] = pg;
			}
			pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE, 0);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
	} else {
		KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	}

out:
	return (void *)(va + slot_offset);
}

/*
 * ubc_release: free a file mapping window.
 */

void
ubc_release(void *va, int flags)
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	bool unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	if (umap->flags & UMAP_PAGES_LOCKED) {
		const voff_t slot_offset = umap->writeoff;
		const voff_t endoff = umap->writeoff + umap->writelen;
		const voff_t zerolen = round_page(endoff) - endoff;
		const u_int npages = (round_page(endoff) -
		    trunc_page(slot_offset)) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];

		KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		mutex_enter(uobj->vmobjlock);
		mutex_enter(&uvm_pageqlock);
		for (u_int i = 0; i < npages; i++) {
			paddr_t pa;
			bool rv;

			rv = pmap_extract(pmap_kernel(),
			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
			KASSERT(pgs[i]->loan_count == 0);
			uvm_pageactivate(pgs[i]);
		}
		mutex_exit(&uvm_pageqlock);
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		uvm_page_unbusy(pgs, npages);
		mutex_exit(uobj->vmobjlock);
		unmapped = true;
	} else {
		unmapped = false;
	}

	mutex_enter(ubc_object.uobj.vmobjlock);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {
			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */
			mutex_enter(uobj->vmobjlock);
			pmap_remove(pmap_kernel(), umapva,
			    umapva + ubc_winsize);
			pmap_update(pmap_kernel());
			mutex_exit(uobj->vmobjlock);
			umap->flags &= ~UMAP_MAPPING_CACHED;
			LIST_REMOVE(umap, hash);
			LIST_REMOVE(umap, list);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount, 0, 0);
	mutex_exit(ubc_object.uobj.vmobjlock);
}

/*
 * ubc_uiomove: move data to/from an object.
 */
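
/*
 * Usage sketch (hypothetical caller; vnode locking omitted): a sequential
 * file read of "len" bytes at uio->uio_offset could be issued as
 *
 *	error = ubc_uiomove(&vp->v_uobj, uio, len,
 *	    UVM_ADV_SEQUENTIAL, UBC_READ | UBC_PARTIALOK);
 *
 * A write path passes UBC_WRITE instead (plus UBC_FAULTBUSY when whole
 * pages will be overwritten), and adds UBC_UNMAP when UBC_WANT_UNMAP(vp)
 * says the mapping should not be cached.
 */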

int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
    int flags)
{
	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
	voff_t off;
	int error;

	KASSERT(todo <= uio->uio_resid);
	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
	    ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));

	off = uio->uio_offset;
	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;
		void *win;

		win = ubc_alloc(uobj, off, &bytelen, advice, flags);
		if (error == 0) {
			error = uiomove(win, bytelen, uio);
		}
		if (error != 0 && overwrite) {
			/*
			 * if we haven't initialized the pages yet,
			 * do it now.  it's safe to use memset here
			 * because we just mapped the pages above.
			 */
			printf("%s: error=%d\n", __func__, error);
			memset(win, 0, bytelen);
		}
		ubc_release(win, flags);
		off += bytelen;
		todo -= bytelen;
		if (error != 0 && (flags & UBC_PARTIALOK) != 0) {
			break;
		}
	}

	return error;
}

/*
 * uvm_vnp_zerorange: set a range of bytes in a file to zero.
 */

void
uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
{
	void *win;
	int flags;

	/*
	 * XXXUBC invent kzero() and use it
	 */

	while (len) {
		vsize_t bytelen = len;

		win = ubc_alloc(&vp->v_uobj, off, &bytelen, UVM_ADV_NORMAL,
		    UBC_WRITE);
		memset(win, 0, bytelen);
		flags = UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0;
		ubc_release(win, flags);

		off += bytelen;
		len -= bytelen;
	}
}

/*
 * ubc_purge: disassociate ubc_map structures from an empty uvm_object.
 */

void
ubc_purge(struct uvm_object *uobj)
{
	struct ubc_map *umap;
	vaddr_t va;

	KASSERT(uobj->uo_npages == 0);

	mutex_enter(ubc_object.uobj.vmobjlock);
	while ((umap = LIST_FIRST(&uobj->uo_ubc)) != NULL) {
		KASSERT(umap->refcount == 0);
		for (va = 0; va < ubc_winsize; va += PAGE_SIZE) {
			KASSERT(!pmap_extract(pmap_kernel(),
			    va + UBC_UMAP_ADDR(umap), NULL));
		}
		LIST_REMOVE(umap, list);
		LIST_REMOVE(umap, hash);
		umap->flags &= ~UMAP_MAPPING_CACHED;
		umap->uobj = NULL;
	}
	mutex_exit(ubc_object.uobj.vmobjlock);
}