/*	$NetBSD: vm_vfs.c,v 1.42 2023/04/22 13:53:53 riastradh Exp $	*/

/*
 * Copyright (c) 2008-2011 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_vfs.c,v 1.42 2023/04/22 13:53:53 riastradh Exp $");

#include <sys/param.h>

#include <sys/buf.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/*
 * Complete I/O on a run of busy pages that all belong to one uvm_object
 * (taken from pgs[0]->uobject): strip PG_FAKE from pages that were just
 * read in, put them on the paging queues, and unbusy the whole set while
 * holding the object lock as writer.
 *
 * "write" is consulted only in assertions here, and "error" is not
 * consulted at all — both are kept for interface compatibility with the
 * non-rump aiodone path (NOTE(review): presumably; confirm against the
 * genfs/uvm caller).
 */
void
uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
{
	struct uvm_object *uobj = pgs[0]->uobject;
	struct vm_page *pg;
	int i;

	rw_enter(uobj->vmobjlock, RW_WRITER);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		/* A page still being faked in cannot also be a pageout target. */
		KASSERT((pg->flags & PG_PAGEOUT) == 0 ||
		    (pg->flags & PG_FAKE) == 0);

		if (pg->flags & PG_FAKE) {
			/* PG_FAKE pages can only come from a read. */
			KASSERT(!write);
			pg->flags &= ~PG_FAKE;
			/* Freshly read-in pages must still be clean. */
			KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN);
			/* Page-interlock is required around queue operations. */
			uvm_pagelock(pg);
			uvm_pageenqueue(pg);
			uvm_pageunlock(pg);
		}

	}
	uvm_page_unbusy(pgs, npages);
	rw_exit(uobj->vmobjlock);
}

/*
 * Release resources held during async io.
 *
 * Collect the pages backing the buffer's pager-map window, tear the
 * mapping down, finish the pages via uvm_aio_aiodone_pages(), and
 * release the buf itself.  Every page must belong to the same
 * uvm_object (asserted below).
 */
void
uvm_aio_aiodone(struct buf *bp)
{
	struct uvm_object *uobj = NULL;
	/* b_bufsize is page-aligned here, so the shift loses nothing. */
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page **pgs;
	vaddr_t va;
	int i, error;
	bool write;

	error = bp->b_error;
	write = BUF_ISWRITE(bp);

	KASSERT(npages > 0);
	pgs = kmem_alloc(npages * sizeof(*pgs), KM_SLEEP);
	for (i = 0; i < npages; i++) {
		/* Translate each page of the pager-map window back to its vm_page. */
		va = (vaddr_t)bp->b_data + (i << PAGE_SHIFT);
		pgs[i] = uvm_pageratop(va);

		if (uobj == NULL) {
			uobj = pgs[i]->uobject;
			KASSERT(uobj != NULL);
		} else {
			/* All pages in one buf must share the same object. */
			KASSERT(uobj == pgs[i]->uobject);
		}
	}
	/* Unmap the pager window before completing the pages. */
	uvm_pagermapout((vaddr_t)bp->b_data, npages);

	uvm_aio_aiodone_pages(pgs, npages, write, error);

	/*
	 * For aged write buffers, wake anyone in vwakeup() waiting on the
	 * vnode's output count; b_objlock protects that bookkeeping.
	 */
	if (write && (bp->b_cflags & BC_AGE) != 0) {
		mutex_enter(bp->b_objlock);
		vwakeup(bp);
		mutex_exit(bp->b_objlock);
	}

	putiobuf(bp);

	kmem_free(pgs, npages * sizeof(*pgs));
}