/*	$NetBSD: uvm_object.c,v 1.8 2011/06/12 03:36:03 rmind Exp $	*/

/*
 * Copyright (c) 2006, 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_object.c: operations on memory objects
 *
 * TODO:
 *  1. Support PG_RELEASED-using objects
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.8 2011/06/12 03:36:03 rmind Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rbtree.h>

#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>

/* Page count to fetch per single step. */
#define	FETCH_PAGECOUNT			16

/*
 * uvm_obj_init: initialize UVM memory object.
 */
void
uvm_obj_init(struct uvm_object *uo, const struct uvm_pagerops *ops,
    bool alock, u_int refs)
{

	if (alock) {
		/* Allocate and assign a lock. */
		uo->vmobjlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	} else {
		/* The lock will need to be set via uvm_obj_setlock(). */
		uo->vmobjlock = NULL;
	}
	uo->pgops = ops;
	TAILQ_INIT(&uo->memq);
	LIST_INIT(&uo->uo_ubc);
	uo->uo_npages = 0;
	uo->uo_refs = refs;
	rb_tree_init(&uo->rb_tree, &uvm_page_tree_ops);
}

/*
 * uvm_obj_destroy: destroy UVM memory object.
 */
void
uvm_obj_destroy(struct uvm_object *uo, bool dlock)
{

	KASSERT(rb_tree_iterate(&uo->rb_tree, NULL, RB_DIR_LEFT) == NULL);

	/* Purge any UBC entries associated with this object. */
	if (__predict_false(!LIST_EMPTY(&uo->uo_ubc))) {
		ubc_purge(uo);
	}
	/* Destroy the lock, if requested. */
	if (dlock) {
		mutex_obj_free(uo->vmobjlock);
	}
}
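
/*
 * Usage sketch (illustrative, not part of the original file): a minimal
 * init/destroy lifecycle for an embedded UVM object that owns a
 * dynamically allocated vmobjlock.  The function name and the pager ops
 * below are hypothetical placeholders.
 */
#if 0
static void
example_obj_lifecycle(void)
{
	extern const struct uvm_pagerops my_pager_ops;	/* hypothetical */
	struct uvm_object uo;

	/* Initialize with a freshly allocated lock and one reference. */
	uvm_obj_init(&uo, &my_pager_ops, true, 1);

	/* ... the object is now ready for pgo_get() and friends ... */

	/* Tear down; 'true' also frees the dynamically allocated lock. */
	uvm_obj_destroy(&uo, true);
}
#endif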

/*
 * uvm_obj_setlock: assign a vmobjlock to the UVM object.
 *
 * => Caller is responsible for ensuring that the UVM object is not in use.
 * => Only a dynamic lock may be previously set; its reference is dropped.
 */
void
uvm_obj_setlock(struct uvm_object *uo, kmutex_t *lockptr)
{
	kmutex_t *olockptr = uo->vmobjlock;

	if (olockptr) {
		/* Drop the reference on the old lock. */
		mutex_obj_free(olockptr);
	}
	if (lockptr == NULL) {
		/* If a new lock was not passed, allocate the default one. */
		lockptr = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	}
	uo->vmobjlock = lockptr;
}
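
/*
 * Usage sketch (illustrative, not part of the original file): making two
 * objects share a single vmobjlock.  Assumes 'b' was initialized with
 * alock=false, so no lock has been set yet; mutex_obj_hold() takes an
 * extra reference so that each object can later drop its own.
 */
#if 0
static void
example_share_lock(struct uvm_object *a, struct uvm_object *b)
{

	mutex_obj_hold(a->vmobjlock);		/* reference for 'b' */
	uvm_obj_setlock(b, a->vmobjlock);
	KASSERT(a->vmobjlock == b->vmobjlock);
}
#endif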

/*
 * uvm_obj_wirepages: wire a range of pages of a UVM object.
 *
 * => NOTE: this function should only be used for types of objects
 *  where the PG_RELEASED flag is never set (aobj objects)
 * => caller must pass page-aligned start and end values
 */
int
uvm_obj_wirepages(struct uvm_object *uobj, off_t start, off_t end)
{
	int i, npages, error;
	struct vm_page *pgs[FETCH_PAGECOUNT], *pg = NULL;
	off_t offset = start, left;

	left = (end - start) >> PAGE_SHIFT;

	mutex_enter(uobj->vmobjlock);
	while (left) {

		npages = MIN(FETCH_PAGECOUNT, left);

		/* Get the pages */
		memset(pgs, 0, sizeof(pgs));
		error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
			VM_PROT_READ | VM_PROT_WRITE, UVM_ADV_SEQUENTIAL,
			PGO_ALLPAGES | PGO_SYNCIO);

		if (error)
			goto error;

		/* pgo_get() releases the object lock; reacquire it. */
		mutex_enter(uobj->vmobjlock);
		for (i = 0; i < npages; i++) {

			KASSERT(pgs[i] != NULL);
			KASSERT(!(pgs[i]->flags & PG_RELEASED));

			/*
			 * Loan break
			 */
			if (pgs[i]->loan_count) {
				while (pgs[i]->loan_count) {
					pg = uvm_loanbreak(pgs[i]);
					if (!pg) {
						mutex_exit(uobj->vmobjlock);
						uvm_wait("uobjwirepg");
						mutex_enter(uobj->vmobjlock);
						continue;
					}
				}
				pgs[i] = pg;
			}

			if (pgs[i]->pqflags & PQ_AOBJ) {
				pgs[i]->flags &= ~(PG_CLEAN);
				uao_dropswap(uobj, i);
			}
		}

		/* Wire the pages */
		mutex_enter(&uvm_pageqlock);
		for (i = 0; i < npages; i++) {
			uvm_pagewire(pgs[i]);
		}
		mutex_exit(&uvm_pageqlock);

		/* Unbusy the pages */
		uvm_page_unbusy(pgs, npages);

		left -= npages;
		offset += npages << PAGE_SHIFT;
	}
	mutex_exit(uobj->vmobjlock);

	return 0;

error:
	/* Unwire the pages which have already been wired */
	uvm_obj_unwirepages(uobj, start, offset);

	return error;
}

/*
 * uvm_obj_unwirepages: unwire a range of pages of a UVM object.
 *
 * => NOTE: this function should only be used for types of objects
 *  where the PG_RELEASED flag is never set
 * => caller must pass page-aligned start and end values
 */
void
uvm_obj_unwirepages(struct uvm_object *uobj, off_t start, off_t end)
{
	struct vm_page *pg;
	off_t offset;

	mutex_enter(uobj->vmobjlock);
	mutex_enter(&uvm_pageqlock);
	for (offset = start; offset < end; offset += PAGE_SIZE) {
		pg = uvm_pagelookup(uobj, offset);

		KASSERT(pg != NULL);
		KASSERT(!(pg->flags & PG_RELEASED));

		uvm_pageunwire(pg);
	}
	mutex_exit(&uvm_pageqlock);
	mutex_exit(uobj->vmobjlock);
}
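
/*
 * Usage sketch (illustrative, not part of the original file): wiring a
 * page-aligned window of an object and unwiring it when done.  The
 * function name and the four-page range are hypothetical; the object
 * must be of a type that never sets PG_RELEASED.
 */
#if 0
static int
example_wire_window(struct uvm_object *uobj)
{
	const off_t start = 0;
	const off_t end = 4 * PAGE_SIZE;
	int error;

	error = uvm_obj_wirepages(uobj, start, end);
	if (error) {
		/* On failure, any pages wired so far were unwired. */
		return error;
	}

	/* ... the pages in [start, end) stay resident ... */

	uvm_obj_unwirepages(uobj, start, end);
	return 0;
}
#endif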

#if defined(DDB) || defined(DEBUGPRINT)

/*
 * uvm_object_printit: actually prints the object
 */
void
uvm_object_printit(struct uvm_object *uobj, bool full,
    void (*pr)(const char *, ...))
{
	struct vm_page *pg;
	int cnt = 0;

	(*pr)("OBJECT %p: locked=%d, pgops=%p, npages=%d, ",
	    uobj, mutex_owned(uobj->vmobjlock), uobj->pgops, uobj->uo_npages);
	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		(*pr)("refs=<SYSTEM>\n");
	else
		(*pr)("refs=%d\n", uobj->uo_refs);

	if (!full) {
		return;
	}
	(*pr)("  PAGES <pg,offset>:\n  ");
	TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
		cnt++;
		(*pr)("<%p,0x%llx> ", pg, (long long)pg->offset);
		if ((cnt % 3) == 0) {
			(*pr)("\n  ");
		}
	}
	if ((cnt % 3) != 0) {
		(*pr)("\n");
	}
}

#endif /* DDB || DEBUGPRINT */