/*	$NetBSD: vm_vfs.c,v 1.36 2020/01/15 17:55:44 ad Exp $	*/

/*
 * Copyright (c) 2008-2011 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_vfs.c,v 1.36 2020/01/15 17:55:44 ad Exp $");

#include <sys/param.h>

#include <sys/buf.h>
#include <sys/kmem.h>		/* kmem_alloc(), kmem_free() */
#include <sys/vnode.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/*
 * Release resources held during async I/O.  This is almost the same
 * as uvm_aio_aiodone() from uvm_pager.c; it only lacks the call to
 * uvm_aio_aiodone_pages() and instead unbusies the pages directly here.
 */
void
uvm_aio_aiodone(struct buf *bp)
{
	struct uvm_object *uobj = NULL;
	int i, npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page **pgs;
	vaddr_t va;
	int pageout = 0;

	KASSERT(npages > 0);
	pgs = kmem_alloc(npages * sizeof(*pgs), KM_SLEEP);
	/* look up the pages behind the pager-mapped buffer */
	for (i = 0; i < npages; i++) {
		va = (vaddr_t)bp->b_data + (i << PAGE_SHIFT);
		pgs[i] = uvm_pageratop(va);

		if (uobj == NULL) {
			uobj = pgs[i]->uobject;
			KASSERT(uobj != NULL);
			mutex_enter(uobj->vmobjlock);
		} else {
			/* all pages must belong to the same object */
			KASSERT(uobj == pgs[i]->uobject);
		}

		if (pgs[i]->flags & PG_PAGEOUT) {
			KASSERT((pgs[i]->flags & PG_FAKE) == 0);
			pageout++;
			/* pageout is done: free the page once it is unbusied */
			pgs[i]->flags &= ~PG_PAGEOUT;
			pgs[i]->flags |= PG_RELEASED;
		}
	}
	KASSERT(mutex_owned(uobj->vmobjlock));

	uvm_page_unbusy(pgs, npages);
	mutex_exit(uobj->vmobjlock);

	/* remove the pager mapping of the buffer */
	uvm_pagermapout((vaddr_t)bp->b_data, npages);
	uvm_pageout_done(pageout);

	if (BUF_ISWRITE(bp) && (bp->b_cflags & BC_AGE) != 0) {
		mutex_enter(bp->b_objlock);
		vwakeup(bp);
		mutex_exit(bp->b_objlock);
	}

	putiobuf(bp);

	kmem_free(pgs, npages * sizeof(*pgs));
}

/* uvm_aio_biodone: same treatment as uvm_aio_aiodone(). */
void
uvm_aio_biodone(struct buf *bp)
{

	uvm_aio_aiodone(bp);
}
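
/*
 * Illustrative sketch only (not part of this file): the handlers above
 * are normally run from biodone() after the pager has pointed the
 * buffer's completion hook at them before issuing async I/O, roughly:
 *
 *	bp->b_iodone = uvm_aio_aiodone;
 *	VOP_STRATEGY(devvp, bp);
 *
 * "devvp" is a hypothetical caller-side name; the real setup lives in
 * the pager I/O code, not here.
 */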

/*
 * UBC (Unified Buffer Cache)
 */

#define PAGERFLAGS (PGO_SYNCIO | PGO_NOBLOCKALLOC | PGO_NOTIMESTAMP)

/*
 * Zero a byte range of the object by fetching the affected pages
 * through the pager and clearing the relevant parts by hand.
 */
void
ubc_zerorange(struct uvm_object *uobj, off_t off, size_t len, int flags)
{
	struct vm_page **pgs;
	int maxpages = MIN(32, round_page(len) >> PAGE_SHIFT);
	int npages, i;

	if (maxpages == 0)
		return;

	pgs = kmem_alloc(maxpages * sizeof(*pgs), KM_SLEEP);
	mutex_enter(uobj->vmobjlock);
	while (len) {
		npages = MIN(maxpages, round_page(len) >> PAGE_SHIFT);
		memset(pgs, 0, npages * sizeof(struct vm_page *));
		/* pgo_get is entered with the object lock held and drops it */
		(void)uobj->pgops->pgo_get(uobj, trunc_page(off),
		    pgs, &npages, 0, VM_PROT_READ | VM_PROT_WRITE,
		    0, PAGERFLAGS | PGO_PASTEOF);
		KASSERT(npages > 0);

		mutex_enter(uobj->vmobjlock);
		for (i = 0; i < npages; i++) {
			struct vm_page *pg;
			uint8_t *start;
			size_t chunkoff, chunklen;

			pg = pgs[i];
			if (pg == NULL)
				break;

			KASSERT(pg->uobject != NULL);
			KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);

			chunkoff = off & PAGE_MASK;
			chunklen = MIN(PAGE_SIZE - chunkoff, len);
			/* page contents live at pg->uanon in the rump VM */
			start = (uint8_t *)pg->uanon + chunkoff;

			memset(start, 0, chunklen);
			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);

			off += chunklen;
			len -= chunklen;
		}
		uvm_page_unbusy(pgs, npages);
	}
	mutex_exit(uobj->vmobjlock);
	kmem_free(pgs, maxpages * sizeof(*pgs));
}
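
/*
 * Typical use (illustrative; the actual callers are file systems, not
 * this file): zeroing the tail of a partially valid block when a file
 * is extended or truncated, roughly:
 *
 *	ubc_zerorange(&vp->v_uobj, newsize, blkend - newsize, 0);
 *
 * "vp", "newsize" and "blkend" are hypothetical caller-side names.
 */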

#define len2npages(off, len)						\
    ((round_page((off) + (len)) - trunc_page(off)) >> PAGE_SHIFT)
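
/*
 * Example, assuming 4 kB pages: off = 0x1800, len = 0x2000 covers bytes
 * 0x1800..0x37ff, which touch pages 1, 2 and 3; accordingly
 * (round_page(0x3800) - trunc_page(0x1800)) >> PAGE_SHIFT == 3.
 */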

/*
 * Move data between the uio and the object's pages, in the direction
 * given by uio->uio_rw.  Pages are fetched through the pager and
 * accessed directly through their backing memory.
 */
int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo,
	int advice, int flags)
{
	struct vm_page **pgs;
	int npages = len2npages(uio->uio_offset, todo);
	size_t pgalloc;
	int i, rv, pagerflags;
	vm_prot_t prot;

	pgalloc = npages * sizeof(*pgs);
	pgs = kmem_alloc(pgalloc, KM_SLEEP);

	pagerflags = PAGERFLAGS;
	if (flags & UBC_WRITE)
		pagerflags |= PGO_PASTEOF;
	if (flags & UBC_FAULTBUSY)
		pagerflags |= PGO_OVERWRITE;

	prot = VM_PROT_READ;
	if (flags & UBC_WRITE)
		prot |= VM_PROT_WRITE;

	mutex_enter(uobj->vmobjlock);
	do {
		npages = len2npages(uio->uio_offset, todo);
		memset(pgs, 0, pgalloc);
		/* pgo_get is entered with the object lock held and drops it */
		rv = uobj->pgops->pgo_get(uobj, trunc_page(uio->uio_offset),
		    pgs, &npages, 0, prot, 0, pagerflags);
		if (rv)
			goto out;

		mutex_enter(uobj->vmobjlock);
		for (i = 0; i < npages; i++) {
			struct vm_page *pg;
			size_t xfersize;
			off_t pageoff;

			pg = pgs[i];
			if (pg == NULL)
				break;

			KASSERT(pg->uobject != NULL);
			KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
			pageoff = uio->uio_offset & PAGE_MASK;

			xfersize = MIN(MIN(todo, PAGE_SIZE), PAGE_SIZE-pageoff);
			KASSERT(xfersize > 0);
			/* copy to/from the page's backing memory (pg->uanon) */
			rv = uiomove((uint8_t *)pg->uanon + pageoff,
			    xfersize, uio);
			if (rv) {
				uvm_page_unbusy(pgs, npages);
				mutex_exit(uobj->vmobjlock);
				goto out;
			}
			if (uio->uio_rw == UIO_WRITE) {
				pg->flags &= ~PG_FAKE;
				uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
			}
			todo -= xfersize;
		}
		uvm_page_unbusy(pgs, npages);
	} while (todo);
	mutex_exit(uobj->vmobjlock);

 out:
	kmem_free(pgs, pgalloc);
	return rv;
}
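
/*
 * Typical use (illustrative; real callers live in the file systems):
 * a vnode read/write path hands the vnode's uvm_object and the uio
 * describing the caller's buffer to ubc_uiomove(), roughly:
 *
 *	error = ubc_uiomove(&vp->v_uobj, uio, bytelen, advice, UBC_READ);
 *
 * "vp", "bytelen" and "advice" are hypothetical caller-side names.
 */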