/*	$NetBSD: vm_vfs.c,v 1.34.34.1 2021/07/06 04:22:34 martin Exp $	*/

/*
 * Copyright (c) 2008-2011 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_vfs.c,v 1.34.34.1 2021/07/06 04:22:34 martin Exp $");

#include <sys/param.h>

#include <sys/buf.h>
#include <sys/kmem.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

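/*
 * Finish i/o on a set of pages that all belong to the same uvm_object:
 * pages freshly allocated for a read (PG_FAKE) now hold valid data, so
 * put them on the page queues; finally unbusy everything.
 */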
void
uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
{
	struct uvm_object *uobj = pgs[0]->uobject;
	struct vm_page *pg;
	int i;

	mutex_enter(uobj->vmobjlock);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		KASSERT((pg->flags & PG_PAGEOUT) == 0 ||
			(pg->flags & PG_FAKE) == 0);

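		/*
		 * A fake page was allocated to satisfy this read and now
		 * holds valid data: clear PG_FAKE, put the page on the
		 * page queues and clear any stray modified bit.
		 */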
		if (pg->flags & PG_FAKE) {
			KASSERT(!write);
			pg->flags &= ~PG_FAKE;
			KASSERT((pg->flags & PG_CLEAN) != 0);
			uvm_pageenqueue(pg);
			pmap_clear_modify(pg);
		}
	}
	uvm_page_unbusy(pgs, npages);
	mutex_exit(uobj->vmobjlock);
}

/*
 * Release resources held during async i/o: recover the pages behind
 * the buffer's pager mapping, finish i/o on them, and free the buffer.
 */
void
uvm_aio_aiodone(struct buf *bp)
{
	struct uvm_object *uobj = NULL;
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page **pgs;
	vaddr_t va;
	int i, error;
	bool write;

	error = bp->b_error;
	write = BUF_ISWRITE(bp);

	KASSERT(npages > 0);
	pgs = kmem_alloc(npages * sizeof(*pgs), KM_SLEEP);
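	/*
	 * b_data is mapped in pager space, so each page-sized chunk can
	 * be translated back to its vm_page with uvm_pageratop().  All
	 * pages must belong to the same uvm_object.
	 */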
	for (i = 0; i < npages; i++) {
		va = (vaddr_t)bp->b_data + (i << PAGE_SHIFT);
		pgs[i] = uvm_pageratop(va);

		if (uobj == NULL) {
			uobj = pgs[i]->uobject;
			KASSERT(uobj != NULL);
		} else {
			KASSERT(uobj == pgs[i]->uobject);
		}
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);

	uvm_aio_aiodone_pages(pgs, npages, write, error);

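	/*
	 * For writes, vwakeup() drops the vnode's pending-output count
	 * and wakes up anyone waiting for the output to drain.
	 */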
	if (write && (bp->b_cflags & BC_AGE) != 0) {
		mutex_enter(bp->b_objlock);
		vwakeup(bp);
		mutex_exit(bp->b_objlock);
	}

	putiobuf(bp);

	kmem_free(pgs, npages * sizeof(*pgs));
}

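/*
 * "bio done" gets the same treatment as "aio done".
 */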
void
uvm_aio_biodone(struct buf *bp)
{

	uvm_aio_aiodone(bp);
}

/*
 * UBC: instead of mapping a window over the object, fetch the pages
 * with the object's pager and access their backing memory directly.
 */

#define PAGERFLAGS (PGO_SYNCIO | PGO_NOBLOCKALLOC | PGO_NOTIMESTAMP)

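/*
 * Zero a byte range in an object by fetching the affected pages and
 * clearing them in place.
 */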
void
ubc_zerorange(struct uvm_object *uobj, off_t off, size_t len, int flags)
{
	struct vm_page **pgs;
	int maxpages = MIN(32, round_page(len) >> PAGE_SHIFT);
	int npages, i;

	if (maxpages == 0)
		return;

	pgs = kmem_alloc(maxpages * sizeof(*pgs), KM_SLEEP);
	mutex_enter(uobj->vmobjlock);
	while (len) {
		npages = MIN(maxpages, round_page(len) >> PAGE_SHIFT);
		memset(pgs, 0, npages * sizeof(struct vm_page *));
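		/*
		 * The pager is entered with the object locked and returns
		 * with it unlocked and the pages busy, hence the lock is
		 * reacquired before the pages are touched.
		 */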
		(void)uobj->pgops->pgo_get(uobj, trunc_page(off),
		    pgs, &npages, 0, VM_PROT_READ | VM_PROT_WRITE,
		    0, PAGERFLAGS | PGO_PASTEOF);
		KASSERT(npages > 0);

		mutex_enter(uobj->vmobjlock);
		for (i = 0; i < npages; i++) {
			struct vm_page *pg;
			uint8_t *start;
			size_t chunkoff, chunklen;

			pg = pgs[i];
			if (pg == NULL)
				break;

			KASSERT(pg->uobject != NULL);
			KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);

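			/*
			 * In the rump kernel the page's backing memory hangs
			 * off pg->uanon, so the range can be zeroed in place.
			 */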
			chunkoff = off & PAGE_MASK;
			chunklen = MIN(PAGE_SIZE - chunkoff, len);
			start = (uint8_t *)pg->uanon + chunkoff;

			memset(start, 0, chunklen);
			pg->flags &= ~PG_CLEAN;

			off += chunklen;
			len -= chunklen;
		}
		uvm_page_unbusy(pgs, npages);
	}
	mutex_exit(uobj->vmobjlock);
	kmem_free(pgs, maxpages * sizeof(*pgs));
}

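/*
 * Number of pages spanned by the byte range [off, off+len): e.g. with
 * 4k pages, off = 0x1800 and len = 0x1000 span two pages.
 */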
#define len2npages(off, len)						\
    ((round_page(off+len) - trunc_page(off)) >> PAGE_SHIFT)

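/*
 * Move data between a uio and an object: fetch the pages with the
 * object's pager and copy to or from their backing memory with
 * uiomove().
 */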
int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo,
	int advice, int flags)
{
	struct vm_page **pgs;
	int npages = len2npages(uio->uio_offset, todo);
	size_t pgalloc;
	int i, rv, pagerflags;
	vm_prot_t prot;

	pgalloc = npages * sizeof(*pgs);
	pgs = kmem_alloc(pgalloc, KM_SLEEP);

	pagerflags = PAGERFLAGS;
	if (flags & UBC_WRITE)
		pagerflags |= PGO_PASTEOF;
	if (flags & UBC_FAULTBUSY)
		pagerflags |= PGO_OVERWRITE;

	prot = VM_PROT_READ;
	if (flags & UBC_WRITE)
		prot |= VM_PROT_WRITE;

	mutex_enter(uobj->vmobjlock);
	do {
		npages = len2npages(uio->uio_offset, todo);
		memset(pgs, 0, pgalloc);
		rv = uobj->pgops->pgo_get(uobj, trunc_page(uio->uio_offset),
		    pgs, &npages, 0, prot, 0, pagerflags);
		if (rv)
			goto out;

		mutex_enter(uobj->vmobjlock);
		for (i = 0; i < npages; i++) {
			struct vm_page *pg;
			size_t xfersize;
			off_t pageoff;

			pg = pgs[i];
			if (pg == NULL)
				break;

			KASSERT(pg->uobject != NULL);
			KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
			pageoff = uio->uio_offset & PAGE_MASK;

			xfersize = MIN(MIN(todo, PAGE_SIZE), PAGE_SIZE-pageoff);
			KASSERT(xfersize > 0);
			rv = uiomove((uint8_t *)pg->uanon + pageoff,
			    xfersize, uio);
			if (rv) {
				uvm_page_unbusy(pgs, npages);
				mutex_exit(uobj->vmobjlock);
				goto out;
			}
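			/*
			 * A write dirtied the page and made its contents
			 * valid, so clear PG_CLEAN and PG_FAKE.
			 */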
			if (uio->uio_rw == UIO_WRITE)
				pg->flags &= ~(PG_CLEAN | PG_FAKE);
			todo -= xfersize;
		}
		uvm_page_unbusy(pgs, npages);
	} while (todo);
	mutex_exit(uobj->vmobjlock);

 out:
	kmem_free(pgs, pgalloc);
	return rv;
}