/*	$NetBSD: vm_vfs.c,v 1.26 2011/02/27 13:37:39 pooka Exp $	*/

/*
 * Copyright (c) 2008 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_vfs.c,v 1.26 2011/02/27 13:37:39 pooka Exp $");

#include <sys/param.h>

#include <sys/buf.h>
#include <sys/kmem.h>	/* kmem_alloc()/kmem_free() are used below */
#include <sys/vnode.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/*
 * Release resources held during async I/O.  This is almost the same
 * as uvm_aio_aiodone() from uvm_pager.c; the only difference is that
 * we do not call uvm_aio_aiodone_pages() and instead unbusy the
 * pages directly here.
 */
void
uvm_aio_aiodone(struct buf *bp)
{
	struct uvm_object *uobj;
	int i, npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page **pgs;
	vaddr_t va;
	int pageout = 0;

	KASSERT(npages > 0);
	pgs = kmem_alloc(npages * sizeof(*pgs), KM_SLEEP);

	/* locate the pages backing the buffer and clear any pageout state */
	for (i = 0; i < npages; i++) {
		va = (vaddr_t)bp->b_data + (i << PAGE_SHIFT);
		pgs[i] = uvm_pageratop(va);
		if (pgs[i]->flags & PG_PAGEOUT) {
			KASSERT((pgs[i]->flags & PG_FAKE) == 0);
			pageout++;
			pgs[i]->flags &= ~PG_PAGEOUT;
			pgs[i]->flags |= PG_RELEASED;
		}
	}

	uvm_pagermapout((vaddr_t)bp->b_data, npages);

	/* get uobj because we need it after pages might be recycled */
	uobj = pgs[0]->uobject;
	KASSERT(uobj);

	mutex_enter(&uobj->vmobjlock);
	mutex_enter(&uvm_pageqlock);
	uvm_page_unbusy(pgs, npages);
	mutex_exit(&uvm_pageqlock);
	mutex_exit(&uobj->vmobjlock);

	uvm_pageout_done(pageout);

	/* for writes, update the vnode's pending-output count */
	if (BUF_ISWRITE(bp) && (bp->b_cflags & BC_AGE) != 0) {
		mutex_enter(bp->b_objlock);
		vwakeup(bp);
		mutex_exit(bp->b_objlock);
	}

	putiobuf(bp);

	kmem_free(pgs, npages * sizeof(*pgs));
}

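/*
 * Biodone hook: simply delegate to the aiodone handler, which both
 * unmaps the pager window and unbusies the pages.
 */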
void
uvm_aio_biodone(struct buf *bp)
{

	uvm_aio_aiodone(bp);
}

/*
 * UBC
 */

/* common pgo_get flags: synchronous, no blocking allocations, no timestamp updates */
#define PAGERFLAGS (PGO_SYNCIO | PGO_NOBLOCKALLOC | PGO_NOTIMESTAMP)

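/*
 * Zero out the byte range [off, off+len) of a vnode: get the backing
 * pages from the vnode pager (up to 32 at a time), clear the relevant
 * parts with memset(), and mark the pages dirty.
 */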
void
uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
{
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page **pgs;
	struct uvm_object *pguobj;
	int maxpages = MIN(32, round_page(len) >> PAGE_SHIFT);
	int rv, npages, i;

	if (maxpages == 0)
		return;

	pgs = kmem_alloc(maxpages * sizeof(*pgs), KM_SLEEP);
	mutex_enter(&uobj->vmobjlock);
	while (len) {
		npages = MIN(maxpages, round_page(len) >> PAGE_SHIFT);
		memset(pgs, 0, npages * sizeof(struct vm_page *));
		rv = uobj->pgops->pgo_get(uobj, trunc_page(off),
		    pgs, &npages, 0, VM_PROT_READ | VM_PROT_WRITE,
		    0, PAGERFLAGS | PGO_PASTEOF);
		/* with PGO_SYNCIO the pager is expected to succeed */
		KASSERT(rv == 0);
		KASSERT(npages > 0);

		for (i = 0, pguobj = NULL; i < npages; i++) {
			struct vm_page *pg;
			uint8_t *start;
			size_t chunkoff, chunklen;

			pg = pgs[i];
			if (pg == NULL)
				break;
			/* all pages are expected to belong to one object */
			if (pguobj == NULL)
				pguobj = pg->uobject;
			KASSERT(pguobj == pg->uobject);

			chunkoff = off & PAGE_MASK;
			chunklen = MIN(PAGE_SIZE - chunkoff, len);
			/* in rump, pg->uanon points at the page's memory */
			start = (uint8_t *)pg->uanon + chunkoff;

			memset(start, 0, chunklen);
			pg->flags &= ~PG_CLEAN;

			off += chunklen;
			len -= chunklen;
		}
		mutex_enter(&pguobj->vmobjlock);
		uvm_page_unbusy(pgs, npages);
		if (pguobj != uobj) {
			mutex_exit(&pguobj->vmobjlock);
			mutex_enter(&uobj->vmobjlock);
		}
	}
	mutex_exit(&uobj->vmobjlock);
	kmem_free(pgs, maxpages * sizeof(*pgs));
}

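/* number of pages needed to back the byte range [off, off+len) */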
#define len2npages(off, len)						\
    ((round_page((off)+(len)) - trunc_page(off)) >> PAGE_SHIFT)

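/*
 * Move data between the uio and the vnode's pages: fetch the pages
 * with pgo_get(), uiomove() directly to/from the page memory, and
 * dirty written pages.  Returns 0 or an errno.
 */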
int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo,
	int advice, int flags)
{
	struct vm_page **pgs;
	struct uvm_object *pguobj;
	int npages = len2npages(uio->uio_offset, todo);
	size_t pgalloc;
	int i, rv, pagerflags;

	pgalloc = npages * sizeof(*pgs);
	pgs = kmem_alloc(pgalloc, KM_SLEEP);

	pagerflags = PAGERFLAGS;
	if (flags & UBC_WRITE)
		pagerflags |= PGO_PASTEOF;
	if (flags & UBC_FAULTBUSY)
		pagerflags |= PGO_OVERWRITE;

	mutex_enter(&uobj->vmobjlock);
	do {
		npages = len2npages(uio->uio_offset, todo);
		memset(pgs, 0, pgalloc);
		rv = uobj->pgops->pgo_get(uobj, trunc_page(uio->uio_offset),
		    pgs, &npages, 0, VM_PROT_READ | VM_PROT_WRITE, 0,
		    pagerflags);
		if (rv)
			goto out;

		for (i = 0, pguobj = NULL; i < npages; i++) {
			struct vm_page *pg;
			size_t xfersize;
			off_t pageoff;

			pg = pgs[i];
			if (pg == NULL)
				break;
			/* all pages are expected to belong to one object */
			if (pguobj == NULL)
				pguobj = pg->uobject;
			KASSERT(pguobj == pg->uobject);

			/* copy at most to the end of the current page */
			pageoff = uio->uio_offset & PAGE_MASK;
			xfersize = MIN(MIN(todo, PAGE_SIZE), PAGE_SIZE - pageoff);
			KASSERT(xfersize > 0);
			rv = uiomove((uint8_t *)pg->uanon + pageoff,
			    xfersize, uio);
			if (rv) {
				mutex_enter(&pguobj->vmobjlock);
				uvm_page_unbusy(pgs, npages);
				mutex_exit(&pguobj->vmobjlock);
				goto out;
			}
			if (uio->uio_rw == UIO_WRITE)
				pg->flags &= ~(PG_CLEAN | PG_FAKE);
			todo -= xfersize;
		}
		mutex_enter(&pguobj->vmobjlock);
		uvm_page_unbusy(pgs, npages);
		if (pguobj != uobj) {
			mutex_exit(&pguobj->vmobjlock);
			mutex_enter(&uobj->vmobjlock);
		}
	} while (todo);
	mutex_exit(&uobj->vmobjlock);

 out:
	kmem_free(pgs, pgalloc);
	return rv;
}

    236