uvm_object.c, revision 1.7.4.1

/*	$NetBSD: uvm_object.c,v 1.7.4.1 2010/03/16 15:38:18 rmind Exp $	*/

/*
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_object.c: operations on memory objects
 *
 * TODO:
 *  1. Support PG_RELEASED-using objects
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.7.4.1 2010/03/16 15:38:18 rmind Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rb.h>

#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>

/* We will fetch this page count per step */
#define	FETCH_PAGECOUNT	16

/*
 * uvm_obj_init: initialize UVM memory object.
 */
void
uvm_obj_init(struct uvm_object *uo, const struct uvm_pagerops *ops,
    kmutex_t *lockptr, u_int refs)
{

	if (lockptr == NULL) {
		uo->vmobjlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	} else {
		uo->vmobjlock = lockptr;
		mutex_init(uo->vmobjlock, MUTEX_DEFAULT, IPL_NONE);
	}
	uo->pgops = ops;
	TAILQ_INIT(&uo->memq);
	uo->uo_npages = 0;
	uo->uo_refs = refs;
	rb_tree_init(&uo->rb_tree, &uvm_page_tree_ops);
}

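/*
 * Illustrative sketch, not part of the original file: how a caller might
 * set up an embedded uvm_object.  Passing a NULL lockptr makes
 * uvm_obj_init() allocate a private vmobjlock; passing caller-provided
 * kmutex_t storage makes it initialize and use that lock instead.  The
 * names example_pagerops and example_obj_attach are hypothetical.
 */
#if 0
static const struct uvm_pagerops example_pagerops;

static void
example_obj_attach(struct uvm_object *uo, kmutex_t *lockptr)
{

	/* One initial reference; lockptr handling as described above. */
	uvm_obj_init(uo, &example_pagerops, lockptr, 1);
}
#endif
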
/*
 * uvm_obj_destroy: destroy UVM memory object.
 */
void
uvm_obj_destroy(struct uvm_object *uo, kmutex_t *lockptr)
{
#ifdef DIAGNOSTIC
	void *tmp = NULL;
	KASSERT(rb_tree_find_node_geq(&uo->rb_tree, &tmp) == NULL);
#endif
	if (lockptr) {
		KASSERT(uo->vmobjlock == lockptr);
		mutex_destroy(uo->vmobjlock);
	} else {
		mutex_obj_free(uo->vmobjlock);
	}
}

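/*
 * Illustrative sketch, not part of the original file: the matching
 * teardown for the attach sketch above.  The lockptr argument must be the
 * same one given to uvm_obj_init(): the caller-provided mutex, or NULL
 * when uvm_obj_init() allocated the lock itself.  example_obj_detach is a
 * hypothetical name.
 */
#if 0
static void
example_obj_detach(struct uvm_object *uo, kmutex_t *lockptr)
{

	/* The object must have no pages left in its rb_tree at this point. */
	uvm_obj_destroy(uo, lockptr);
}
#endif
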
/*
 * uobj_wirepages: wire the pages of the entire uobj
 *
 * => NOTE: this function should only be used for types of objects
 *  where the PG_RELEASED flag is never set (aobj objects)
 * => caller must pass page-aligned start and end values
 */

int
uobj_wirepages(struct uvm_object *uobj, off_t start, off_t end)
{
	int i, npages, error;
	struct vm_page *pgs[FETCH_PAGECOUNT], *pg = NULL;
	off_t offset = start, left;

	left = (end - start) >> PAGE_SHIFT;

	mutex_enter(uobj->vmobjlock);
	while (left) {

		npages = MIN(FETCH_PAGECOUNT, left);

		/* Get the pages */
		memset(pgs, 0, sizeof(pgs));
		error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
			VM_PROT_READ | VM_PROT_WRITE, UVM_ADV_SEQUENTIAL,
			PGO_ALLPAGES | PGO_SYNCIO);

		if (error)
			goto error;

		mutex_enter(uobj->vmobjlock);
		for (i = 0; i < npages; i++) {

			KASSERT(pgs[i] != NULL);
			KASSERT(!(pgs[i]->flags & PG_RELEASED));

			/*
			 * Loan break: if the page is loaned out, break the
			 * loan (waiting for memory if necessary) so that it
			 * can be wired.
			 */
			if (pgs[i]->loan_count) {
				while (pgs[i]->loan_count) {
					pg = uvm_loanbreak(pgs[i]);
					if (!pg) {
						mutex_exit(uobj->vmobjlock);
						uvm_wait("uobjwirepg");
						mutex_enter(uobj->vmobjlock);
						continue;
					}
				}
				pgs[i] = pg;
			}

			if (pgs[i]->pqflags & PQ_AOBJ) {
				pgs[i]->flags &= ~(PG_CLEAN);
				uao_dropswap(uobj, i);
			}
		}

		/* Wire the pages */
		mutex_enter(&uvm_pageqlock);
		for (i = 0; i < npages; i++) {
			uvm_pagewire(pgs[i]);
		}
		mutex_exit(&uvm_pageqlock);

		/* Unbusy the pages */
		uvm_page_unbusy(pgs, npages);

		left -= npages;
		offset += npages << PAGE_SHIFT;
	}
	mutex_exit(uobj->vmobjlock);

	return 0;

error:
	/* Unwire the pages which have been wired */
	uobj_unwirepages(uobj, start, offset);

	return error;
}

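/*
 * Illustrative sketch, not part of the original file: wiring a
 * page-aligned range of an aobj-backed object.  On failure,
 * uobj_wirepages() has already unwired whatever part of the range it
 * managed to wire, so the caller only propagates the error.
 * example_wire_range is a hypothetical name.
 */
#if 0
static int
example_wire_range(struct uvm_object *uobj, off_t start, off_t end)
{

	KASSERT((start & PAGE_MASK) == 0);
	KASSERT((end & PAGE_MASK) == 0);
	return uobj_wirepages(uobj, start, end);
}
#endif
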
/*
 * uobj_unwirepages: unwire the pages of the entire uobj
 *
 * => NOTE: this function should only be used for types of objects
 *  where the PG_RELEASED flag is never set
 * => caller must pass page-aligned start and end values
 */

void
uobj_unwirepages(struct uvm_object *uobj, off_t start, off_t end)
{
	struct vm_page *pg;
	off_t offset;

	mutex_enter(uobj->vmobjlock);
	mutex_enter(&uvm_pageqlock);
	for (offset = start; offset < end; offset += PAGE_SIZE) {
		pg = uvm_pagelookup(uobj, offset);

		KASSERT(pg != NULL);
		KASSERT(!(pg->flags & PG_RELEASED));

		uvm_pageunwire(pg);
	}
	mutex_exit(&uvm_pageqlock);
	mutex_exit(uobj->vmobjlock);
}

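/*
 * Illustrative sketch, not part of the original file: releasing a range
 * wired with uobj_wirepages() once the caller is done with it, using the
 * same page-aligned bounds.  example_unwire_range is a hypothetical name.
 */
#if 0
static void
example_unwire_range(struct uvm_object *uobj, off_t start, off_t end)
{

	uobj_unwirepages(uobj, start, end);
}
#endif
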
#if defined(DDB) || defined(DEBUGPRINT)

/*
 * uvm_object_printit: actually prints the object
 */
void
uvm_object_printit(struct uvm_object *uobj, bool full,
    void (*pr)(const char *, ...))
{
	struct vm_page *pg;
	int cnt = 0;

	(*pr)("OBJECT %p: locked=%d, pgops=%p, npages=%d, ",
	    uobj, mutex_owned(uobj->vmobjlock), uobj->pgops, uobj->uo_npages);
	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		(*pr)("refs=<SYSTEM>\n");
	else
		(*pr)("refs=%d\n", uobj->uo_refs);

	if (!full) {
		return;
	}
	(*pr)("  PAGES <pg,offset>:\n  ");
	TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
		cnt++;
		(*pr)("<%p,0x%llx> ", pg, (long long)pg->offset);
		if ((cnt % 3) == 0) {
			(*pr)("\n  ");
		}
	}
	if ((cnt % 3) != 0) {
		(*pr)("\n");
	}
}

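/*
 * Illustrative sketch, not part of the original file: calling the printer
 * from debug code.  With full set to true every resident page is listed.
 * The print routine passed here is the kernel printf; DDB callers would
 * pass the debugger's own output function instead.  example_dump_object
 * is a hypothetical name.
 */
#if 0
static void
example_dump_object(struct uvm_object *uobj)
{

	uvm_object_printit(uobj, true, printf);
}
#endif
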
#endif /* DDB || DEBUGPRINT */