/*	$NetBSD: vm_vfs.c,v 1.39 2020/10/18 18:22:29 chs Exp $	*/

/*
 * Copyright (c) 2008-2011 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_vfs.c,v 1.39 2020/10/18 18:22:29 chs Exp $");

#include <sys/param.h>

#include <sys/buf.h>
#include <sys/kmem.h>	/* kmem_alloc()/kmem_free() used below */
#include <sys/vnode.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/*
 * Finish off pages after an async I/O has completed: none of the pages
 * are expected to be PG_FAKE, so all that is left is to unbusy them.
 * The write and error arguments are unused here.
 */
void
uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
{
	struct uvm_object *uobj = pgs[0]->uobject;
	struct vm_page *pg;
	int i;

	rw_enter(uobj->vmobjlock, RW_WRITER);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		KASSERT((pg->flags & PG_FAKE) == 0);
	}
	uvm_page_unbusy(pgs, npages);
	rw_exit(uobj->vmobjlock);
}

/*
 * Release resources held during async io.
 */
void
uvm_aio_aiodone(struct buf *bp)
{
	struct uvm_object *uobj = NULL;
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page **pgs;
	vaddr_t va;
	int i, error;
	bool write;

	error = bp->b_error;
	write = BUF_ISWRITE(bp);

	KASSERT(npages > 0);
	pgs = kmem_alloc(npages * sizeof(*pgs), KM_SLEEP);
	for (i = 0; i < npages; i++) {
		va = (vaddr_t)bp->b_data + (i << PAGE_SHIFT);
		pgs[i] = uvm_pageratop(va);

		if (uobj == NULL) {
			uobj = pgs[i]->uobject;
			KASSERT(uobj != NULL);
		} else {
			KASSERT(uobj == pgs[i]->uobject);
		}
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);

	uvm_aio_aiodone_pages(pgs, npages, write, error);

	if (write && (bp->b_cflags & BC_AGE) != 0) {
		mutex_enter(bp->b_objlock);
		vwakeup(bp);
		mutex_exit(bp->b_objlock);
	}

	putiobuf(bp);

	kmem_free(pgs, npages * sizeof(*pgs));
}
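
/*
 * Illustrative sketch (not part of this file): uvm_aio_aiodone() is meant
 * to be hung off a buffer as its completion callback, so that biodone()
 * runs the cleanup above once an async pager I/O finishes.  The function
 * name below is hypothetical and the buffer/page setup is omitted.
 */
#if 0	/* illustrative sketch, not compiled */
static void
example_start_async_io(struct vnode *vp, struct buf *bp)
{

	/* Have biodone() run the cleanup above when the I/O completes. */
	bp->b_iodone = uvm_aio_aiodone;
	(void)VOP_STRATEGY(vp, bp);
}
#endif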

/*
 * UBC
 */

#define PAGERFLAGS (PGO_SYNCIO | PGO_NOBLOCKALLOC | PGO_NOTIMESTAMP)

/*
 * Zero the byte range [off, off + len) of the object, getting the
 * backing pages (possibly past EOF) and marking them dirty.
 */
void
ubc_zerorange(struct uvm_object *uobj, off_t off, size_t len, int flags)
{
	struct vm_page **pgs;
	int maxpages = MIN(32, round_page(len) >> PAGE_SHIFT);
	int npages, i;

	if (maxpages == 0)
		return;

	pgs = kmem_alloc(maxpages * sizeof(*pgs), KM_SLEEP);
	rw_enter(uobj->vmobjlock, RW_WRITER);
	while (len) {
		npages = MIN(maxpages, round_page(len) >> PAGE_SHIFT);
		memset(pgs, 0, npages * sizeof(struct vm_page *));

		/*
		 * pgo_get is entered with the object lock held and
		 * returns with it released, so re-take the lock before
		 * touching the pages.
		 */
		(void)uobj->pgops->pgo_get(uobj, trunc_page(off),
		    pgs, &npages, 0, VM_PROT_READ | VM_PROT_WRITE,
		    0, PAGERFLAGS | PGO_PASTEOF);
		KASSERT(npages > 0);

		rw_enter(uobj->vmobjlock, RW_WRITER);
		for (i = 0; i < npages; i++) {
			struct vm_page *pg;
			uint8_t *start;
			size_t chunkoff, chunklen;

			pg = pgs[i];
			if (pg == NULL)
				break;

			KASSERT(pg->uobject != NULL);
			KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);

			/* Zero the part of this page covered by the range. */
			chunkoff = off & PAGE_MASK;
			chunklen = MIN(PAGE_SIZE - chunkoff, len);
			start = (uint8_t *)pg->uanon + chunkoff;

			memset(start, 0, chunklen);
			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);

			off += chunklen;
			len -= chunklen;
		}
		uvm_page_unbusy(pgs, npages);
	}
	rw_exit(uobj->vmobjlock);
	kmem_free(pgs, maxpages * sizeof(*pgs));
}
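
/*
 * Illustrative sketch (not part of this file): a file system growing a
 * file would typically zero the newly exposed byte range through the
 * vnode's uvm_object, roughly as below.  The function name is
 * hypothetical and passing 0 for the flags is an assumption.
 */
#if 0	/* illustrative sketch, not compiled */
static void
example_zero_extension(struct vnode *vp, off_t oldeof, off_t neweof)
{

	/* Zero [oldeof, neweof) so stale page contents are never exposed. */
	ubc_zerorange(&vp->v_uobj, oldeof, (size_t)(neweof - oldeof), 0);
}
#endif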

#define len2npages(off, len)						\
    ((round_page((off) + (len)) - trunc_page(off)) >> PAGE_SHIFT)

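/*
 * Worked example (assuming 4 kB pages): len2npages(4000, 200) covers the
 * byte range [4000, 4200), which straddles a page boundary, so it yields
 * (round_page(4200) - trunc_page(4000)) >> PAGE_SHIFT = (8192 - 0) >> 12 = 2.
 */
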
/*
 * Move data between the object's pages and the uio, getting pages as
 * needed (and possibly past EOF for writes).
 */
int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo,
	int advice, int flags)
{
	struct vm_page **pgs;
	int npages = len2npages(uio->uio_offset, todo);
	size_t pgalloc;
	int i, rv, pagerflags;
	vm_prot_t prot;

	/* The page count only shrinks as todo shrinks, so size the array once. */
	pgalloc = npages * sizeof(*pgs);
	pgs = kmem_alloc(pgalloc, KM_SLEEP);

	pagerflags = PAGERFLAGS;
	if (flags & UBC_WRITE)
		pagerflags |= PGO_PASTEOF;
	if (flags & UBC_FAULTBUSY)
		pagerflags |= PGO_OVERWRITE;

	prot = VM_PROT_READ;
	if (flags & UBC_WRITE)
		prot |= VM_PROT_WRITE;

	rw_enter(uobj->vmobjlock, RW_WRITER);
	do {
		npages = len2npages(uio->uio_offset, todo);
		memset(pgs, 0, pgalloc);

		/*
		 * pgo_get is entered with the object lock held and
		 * returns with it released, so re-take the lock before
		 * touching the pages.
		 */
		rv = uobj->pgops->pgo_get(uobj, trunc_page(uio->uio_offset),
		    pgs, &npages, 0, prot, 0, pagerflags);
		if (rv)
			goto out;

		rw_enter(uobj->vmobjlock, RW_WRITER);
		for (i = 0; i < npages; i++) {
			struct vm_page *pg;
			size_t xfersize;
			off_t pageoff;

			pg = pgs[i];
			if (pg == NULL)
				break;

			KASSERT(pg->uobject != NULL);
			KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
			pageoff = uio->uio_offset & PAGE_MASK;

			xfersize = MIN(MIN(todo, PAGE_SIZE), PAGE_SIZE - pageoff);
			KASSERT(xfersize > 0);
			rv = uiomove((uint8_t *)pg->uanon + pageoff,
			    xfersize, uio);
			if (rv) {
				uvm_page_unbusy(pgs, npages);
				rw_exit(uobj->vmobjlock);
				goto out;
			}
			if (uio->uio_rw == UIO_WRITE) {
				pg->flags &= ~PG_FAKE;
				uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
			}
			todo -= xfersize;
		}
		uvm_page_unbusy(pgs, npages);
	} while (todo);
	rw_exit(uobj->vmobjlock);

 out:
	kmem_free(pgs, pgalloc);
	return rv;
}
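
/*
 * Illustrative sketch (not part of this file): a vnode read path would
 * move data between the file's pages and the caller's uio roughly like
 * this, in the style of ffs_read().  The function name is hypothetical;
 * locking and most error handling are omitted.
 */
#if 0	/* illustrative sketch, not compiled */
static int
example_vop_read(struct vnode *vp, struct uio *uio)
{
	vsize_t bytelen;
	int error = 0;

	while (uio->uio_resid > 0 && uio->uio_offset < vp->v_size) {
		/* Transfer up to the bytes remaining in the file. */
		bytelen = MIN(uio->uio_resid,
		    vp->v_size - uio->uio_offset);
		error = ubc_uiomove(&vp->v_uobj, uio, bytelen,
		    UVM_ADV_NORMAL, UBC_READ | UBC_PARTIALOK);
		if (error)
			break;
	}
	return error;
}
#endif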