vm_vfs.c revision 1.30
/*	$NetBSD: vm_vfs.c,v 1.30 2011/06/16 09:21:03 hannken Exp $	*/

/*
 * Copyright (c) 2008-2011 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_vfs.c,v 1.30 2011/06/16 09:21:03 hannken Exp $");

#include <sys/param.h>

#include <sys/buf.h>
#include <sys/kmem.h>		/* kmem_alloc/kmem_free */
#include <sys/vnode.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/*
 * Release resources held during async i/o.  This is almost the
 * same as uvm_aio_aiodone() from uvm_pager.c; it only lacks the
 * call to uvm_aio_aiodone_pages() and instead unbusies the pages
 * directly here.
 */
void
uvm_aio_aiodone(struct buf *bp)
{
	struct uvm_object *uobj;
	int i, npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page **pgs;
	vaddr_t va;
	int pageout = 0;

	KASSERT(npages > 0);
	pgs = kmem_alloc(npages * sizeof(*pgs), KM_SLEEP);

	/*
	 * Locate the pages behind the buffer.  Pages that were being
	 * paged out are flagged PG_RELEASED so uvm_page_unbusy() will
	 * free them below.
	 */
	for (i = 0; i < npages; i++) {
		va = (vaddr_t)bp->b_data + (i << PAGE_SHIFT);
		pgs[i] = uvm_pageratop(va);
		if (pgs[i]->flags & PG_PAGEOUT) {
			KASSERT((pgs[i]->flags & PG_FAKE) == 0);
			pageout++;
			pgs[i]->flags &= ~PG_PAGEOUT;
			pgs[i]->flags |= PG_RELEASED;
		}
	}

	uvm_pagermapout((vaddr_t)bp->b_data, npages);

	/* get uobj because we need it after pages might be recycled */
	uobj = pgs[0]->uobject;
	KASSERT(uobj);

	mutex_enter(uobj->vmobjlock);
	mutex_enter(&uvm_pageqlock);
	uvm_page_unbusy(pgs, npages);
	mutex_exit(&uvm_pageqlock);
	mutex_exit(uobj->vmobjlock);

	uvm_pageout_done(pageout);

	if (BUF_ISWRITE(bp) && (bp->b_cflags & BC_AGE) != 0) {
		mutex_enter(bp->b_objlock);
		vwakeup(bp);
		mutex_exit(bp->b_objlock);
	}

	putiobuf(bp);

	kmem_free(pgs, npages * sizeof(*pgs));
}

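/*
 * Async i/o completion entry point called from biodone().  A rump
 * kernel has no separate aiodone worker thread, so the i/o can be
 * finished directly in this context.
 */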
void
uvm_aio_biodone(struct buf *bp)
{

	uvm_aio_aiodone(bp);
}

/*
 * UBC
 */

#define PAGERFLAGS (PGO_SYNCIO | PGO_NOBLOCKALLOC | PGO_NOTIMESTAMP)

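/*
 * ubc_zerorange: zero the byte range [off, off+len) of uobj.  The
 * backing pages are fetched with pgo_get and the affected part of
 * each page is cleared directly through pg->uanon, which in the
 * rump VM points at the page's backing memory.
 */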
void
ubc_zerorange(struct uvm_object *uobj, off_t off, size_t len, int flags)
{
	struct vm_page **pgs;
	struct uvm_object *pguobj;
	int maxpages = MIN(32, round_page(len) >> PAGE_SHIFT);
	int rv, npages, i;

	if (maxpages == 0)
		return;

	pgs = kmem_alloc(maxpages * sizeof(*pgs), KM_SLEEP);
	mutex_enter(uobj->vmobjlock);
	while (len) {
		npages = MIN(maxpages, round_page(len) >> PAGE_SHIFT);
		memset(pgs, 0, npages * sizeof(struct vm_page *));
		rv = uobj->pgops->pgo_get(uobj, trunc_page(off),
		    pgs, &npages, 0, VM_PROT_READ | VM_PROT_WRITE,
		    0, PAGERFLAGS | PGO_PASTEOF);
		KASSERT(npages > 0);

		for (i = 0, pguobj = NULL; i < npages; i++) {
			struct vm_page *pg;
			uint8_t *start;
			size_t chunkoff, chunklen;

			pg = pgs[i];
			if (pg == NULL)
				break;

			/*
			 * The pages may belong to another object than
			 * the one we asked, e.g. when a layered file
			 * system hands the request to the lower vnode.
			 */
			if (pguobj == NULL)
				pguobj = pg->uobject;
			KASSERT(pguobj == pg->uobject);

			chunkoff = off & PAGE_MASK;
			chunklen = MIN(PAGE_SIZE - chunkoff, len);
			start = (uint8_t *)pg->uanon + chunkoff;

			memset(start, 0, chunklen);
			pg->flags &= ~PG_CLEAN;

			off += chunklen;
			len -= chunklen;
		}
		mutex_enter(pguobj->vmobjlock);
		uvm_page_unbusy(pgs, npages);
		if (pguobj != uobj) {
			mutex_exit(pguobj->vmobjlock);
			mutex_enter(uobj->vmobjlock);
		}
	}
	mutex_exit(uobj->vmobjlock);
	kmem_free(pgs, maxpages * sizeof(*pgs));

	return;
}

/* number of pages spanned by the byte range [off, off+len) */
#define len2npages(off, len)						\
    ((round_page(off+len) - trunc_page(off)) >> PAGE_SHIFT)

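/*
 * ubc_uiomove: move data between a uio and the pages of uobj.  Pages
 * are fetched a batch at a time with pgo_get and the data is copied
 * with uiomove() directly on the page memory; pages written to have
 * PG_CLEAN cleared so they are flushed out later.
 */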
int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo,
	int advice, int flags)
{
	struct vm_page **pgs;
	struct uvm_object *pguobj;
	int npages = len2npages(uio->uio_offset, todo);
	size_t pgalloc;
	int i, rv, pagerflags;
	vm_prot_t prot;

	pgalloc = npages * sizeof(*pgs);
	pgs = kmem_alloc(pgalloc, KM_SLEEP);

	pagerflags = PAGERFLAGS;
	if (flags & UBC_WRITE)
		pagerflags |= PGO_PASTEOF;
	if (flags & UBC_FAULTBUSY)
		pagerflags |= PGO_OVERWRITE;

	prot = VM_PROT_READ;
	if (flags & UBC_WRITE)
		prot |= VM_PROT_WRITE;

	mutex_enter(uobj->vmobjlock);
	do {
		npages = len2npages(uio->uio_offset, todo);
		memset(pgs, 0, pgalloc);
		rv = uobj->pgops->pgo_get(uobj, trunc_page(uio->uio_offset),
		    pgs, &npages, 0, prot, 0, pagerflags);
		if (rv)
			goto out;

		for (i = 0, pguobj = NULL; i < npages; i++) {
			struct vm_page *pg;
			size_t xfersize;
			off_t pageoff;

			pg = pgs[i];
			if (pg == NULL)
				break;

			/*
			 * As in ubc_zerorange(), the pages may belong
			 * to another (lower) object.
			 */
			if (pguobj == NULL)
				pguobj = pg->uobject;
			KASSERT(pguobj == pg->uobject);

			pageoff = uio->uio_offset & PAGE_MASK;
			xfersize = MIN(MIN(todo, PAGE_SIZE), PAGE_SIZE-pageoff);
			KASSERT(xfersize > 0);
			rv = uiomove((uint8_t *)pg->uanon + pageoff,
			    xfersize, uio);
			if (rv) {
				mutex_enter(pguobj->vmobjlock);
				uvm_page_unbusy(pgs, npages);
				mutex_exit(pguobj->vmobjlock);
				goto out;
			}
			if (uio->uio_rw == UIO_WRITE)
				pg->flags &= ~(PG_CLEAN | PG_FAKE);
			todo -= xfersize;
		}
		mutex_enter(pguobj->vmobjlock);
		uvm_page_unbusy(pgs, npages);
		if (pguobj != uobj) {
			mutex_exit(pguobj->vmobjlock);
			mutex_enter(uobj->vmobjlock);
		}
	} while (todo);
	mutex_exit(uobj->vmobjlock);

 out:
	kmem_free(pgs, pgalloc);
	return rv;
}
    236