/*	$NetBSD: uvm_object.c,v 1.7.4.2 2010/04/23 21:18:00 rmind Exp $	*/

/*
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * uvm_object.c: operations on UVM memory objects.
 *
 * TODO:
 *  1. Support PG_RELEASED-using objects
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.7.4.2 2010/04/23 21:18:00 rmind Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rb.h>

#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>

/* Number of pages to fetch per step. */
#define	FETCH_PAGECOUNT	16

/*
 * uvm_obj_init: initialize UVM memory object.
 */
void
uvm_obj_init(struct uvm_object *uo, const struct uvm_pagerops *ops,
    kmutex_t *lockptr, u_int refs)
{

	if (lockptr == NULL) {
		/* No lock supplied: allocate a private one for the object. */
		uo->vmobjlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	} else {
		/* Share the caller-supplied lock, initializing it here. */
		uo->vmobjlock = lockptr;
		mutex_init(uo->vmobjlock, MUTEX_DEFAULT, IPL_NONE);
	}
	uo->pgops = ops;
	TAILQ_INIT(&uo->memq);
	uo->uo_npages = 0;
	uo->uo_refs = refs;
	rb_tree_init(&uo->rb_tree, &uvm_page_tree_ops);
}

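/*
 * Usage sketch (illustrative only, not part of this file; the names
 * example_softc, example_pagerops and example_attach are hypothetical):
 *
 *	struct example_softc {
 *		struct uvm_object sc_uobj;
 *		kmutex_t sc_lock;
 *	};
 *
 *	static void
 *	example_attach(struct example_softc *sc,
 *	    const struct uvm_pagerops *example_pagerops)
 *	{
 *		uvm_obj_init(&sc->sc_uobj, example_pagerops, &sc->sc_lock, 1);
 *	}
 *
 * Here the object shares the embedding structure's lock, which
 * uvm_obj_init() initializes on the caller's behalf; passing NULL as
 * lockptr would instead allocate a private vmobjlock via
 * mutex_obj_alloc().
 */
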
/*
 * uvm_obj_destroy: destroy UVM memory object.
 */
void
uvm_obj_destroy(struct uvm_object *uo, kmutex_t *lockptr)
{
#ifdef DIAGNOSTIC
	/* The page tree is keyed by voff_t; the object must be empty. */
	voff_t offset = 0;

	KASSERT(rb_tree_find_node_geq(&uo->rb_tree, &offset) == NULL);
#endif
	if (lockptr) {
		KASSERT(uo->vmobjlock == lockptr);
		mutex_destroy(uo->vmobjlock);
	} else {
		mutex_obj_free(uo->vmobjlock);
	}
}

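/*
 * Matching teardown for the sketch above (hypothetical names): the
 * lockptr argument must mirror the choice made at uvm_obj_init() time,
 * since it selects between mutex_destroy() and mutex_obj_free():
 *
 *	static void
 *	example_detach(struct example_softc *sc)
 *	{
 *		uvm_obj_destroy(&sc->sc_uobj, &sc->sc_lock);
 *	}
 */
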
/*
 * uvm_obj_wirepages: wire the pages in the given range of a UVM object.
 *
 * => NOTE: this function should only be used for types of objects
 *  where the PG_RELEASED flag is never set (e.g. aobj objects)
 * => caller must pass page-aligned start and end values
 */

int
uvm_obj_wirepages(struct uvm_object *uobj, off_t start, off_t end)
{
	int i, npages, error;
	struct vm_page *pgs[FETCH_PAGECOUNT], *pg = NULL;
	off_t offset = start, left;

	left = (end - start) >> PAGE_SHIFT;

	mutex_enter(uobj->vmobjlock);
	while (left) {

		npages = MIN(FETCH_PAGECOUNT, left);

		/* Get the pages */
		memset(pgs, 0, sizeof(pgs));
		error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
			VM_PROT_READ | VM_PROT_WRITE, UVM_ADV_SEQUENTIAL,
			PGO_ALLPAGES | PGO_SYNCIO);

		if (error)
			goto error;

		/* pgo_get() with PGO_SYNCIO drops the object lock on return. */
		mutex_enter(uobj->vmobjlock);
		for (i = 0; i < npages; i++) {

			KASSERT(pgs[i] != NULL);
			KASSERT(!(pgs[i]->flags & PG_RELEASED));

			/*
			 * Loan break
			 */
			if (pgs[i]->loan_count) {
				while (pgs[i]->loan_count) {
					pg = uvm_loanbreak(pgs[i]);
					if (!pg) {
						mutex_exit(uobj->vmobjlock);
						uvm_wait("uobjwirepg");
						mutex_enter(uobj->vmobjlock);
						continue;
					}
				}
				pgs[i] = pg;
			}

			if (pgs[i]->pqflags & PQ_AOBJ) {
				pgs[i]->flags &= ~(PG_CLEAN);
				/* Drop the swap slot for this page index. */
				uao_dropswap(uobj, pgs[i]->offset >> PAGE_SHIFT);
			}
		}

		/* Wire the pages */
		mutex_enter(&uvm_pageqlock);
		for (i = 0; i < npages; i++) {
			uvm_pagewire(pgs[i]);
		}
		mutex_exit(&uvm_pageqlock);

		/* Unbusy the pages */
		uvm_page_unbusy(pgs, npages);

		left -= npages;
		offset += npages << PAGE_SHIFT;
	}
	mutex_exit(uobj->vmobjlock);

	return 0;

error:
	/* Unwire the pages which have already been wired */
	uvm_obj_unwirepages(uobj, start, offset);

	return error;
}

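/*
 * Usage sketch (illustrative only): wiring every page of an anonymous
 * object, in the style of the SYSV shared memory code.  uao_create()
 * and uao_detach() are the real aobj entry points; "size" is assumed
 * to be a multiple of PAGE_SIZE, per the page-aligned start/end rule.
 *
 *	static int
 *	example_wired_aobj(vsize_t size, struct uvm_object **uobjp)
 *	{
 *		struct uvm_object *uobj;
 *		int error;
 *
 *		uobj = uao_create(size, 0);
 *		error = uvm_obj_wirepages(uobj, 0, size);
 *		if (error) {
 *			uao_detach(uobj);
 *			return error;
 *		}
 *		*uobjp = uobj;
 *		return 0;
 *	}
 */
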
/*
 * uvm_obj_unwirepages: unwire the pages in the given range of a UVM object.
 *
 * => NOTE: this function should only be used for types of objects
 *  where the PG_RELEASED flag is never set
 * => caller must pass page-aligned start and end values
 */

void
uvm_obj_unwirepages(struct uvm_object *uobj, off_t start, off_t end)
{
	struct vm_page *pg;
	off_t offset;

	mutex_enter(uobj->vmobjlock);
	mutex_enter(&uvm_pageqlock);
	for (offset = start; offset < end; offset += PAGE_SIZE) {
		pg = uvm_pagelookup(uobj, offset);

		KASSERT(pg != NULL);
		KASSERT(!(pg->flags & PG_RELEASED));

		uvm_pageunwire(pg);
	}
	mutex_exit(&uvm_pageqlock);
	mutex_exit(uobj->vmobjlock);
}

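/*
 * Matching unwire for the wiring sketch above: the same page-aligned
 * [start, end) range is passed back before the reference is dropped.
 *
 *	static void
 *	example_unwire_aobj(struct uvm_object *uobj, vsize_t size)
 *	{
 *		uvm_obj_unwirepages(uobj, 0, size);
 *		uao_detach(uobj);
 *	}
 */
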
#if defined(DDB) || defined(DEBUGPRINT)

/*
 * uvm_object_printit: actually prints the object
 */
void
uvm_object_printit(struct uvm_object *uobj, bool full,
    void (*pr)(const char *, ...))
{
	struct vm_page *pg;
	int cnt = 0;

	(*pr)("OBJECT %p: locked=%d, pgops=%p, npages=%d, ",
	    uobj, mutex_owned(uobj->vmobjlock), uobj->pgops, uobj->uo_npages);
	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		(*pr)("refs=<SYSTEM>\n");
	else
		(*pr)("refs=%d\n", uobj->uo_refs);

	if (!full) {
		return;
	}
	(*pr)("  PAGES <pg,offset>:\n  ");
	TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
		cnt++;
		(*pr)("<%p,0x%llx> ", pg, (long long)pg->offset);
		if ((cnt % 3) == 0) {
			(*pr)("\n  ");
		}
	}
	if ((cnt % 3) != 0) {
		(*pr)("\n");
	}
}

#endif /* DDB || DEBUGPRINT */