Home | History | Annotate | Line # | Download | only in uvm
uvm_pglist.c revision 1.3
      1 /*	$NetBSD: uvm_pglist.c,v 1.3 1998/03/09 00:58:59 mrg Exp $	*/
      2 
      3 #define VM_PAGE_ALLOC_MEMORY_STATS
      4 
      5 /*-
      6  * Copyright (c) 1997 The NetBSD Foundation, Inc.
      7  * All rights reserved.
      8  *
      9  * This code is derived from software contributed to The NetBSD Foundation
     10  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
     11  * NASA Ames Research Center.
     12  *
     13  * Redistribution and use in source and binary forms, with or without
     14  * modification, are permitted provided that the following conditions
     15  * are met:
     16  * 1. Redistributions of source code must retain the above copyright
     17  *    notice, this list of conditions and the following disclaimer.
     18  * 2. Redistributions in binary form must reproduce the above copyright
     19  *    notice, this list of conditions and the following disclaimer in the
     20  *    documentation and/or other materials provided with the distribution.
     21  * 3. All advertising materials mentioning features or use of this software
     22  *    must display the following acknowledgement:
     23  *      This product includes software developed by the NetBSD
     24  *      Foundation, Inc. and its contributors.
     25  * 4. Neither the name of The NetBSD Foundation nor the names of its
     26  *    contributors may be used to endorse or promote products derived
     27  *    from this software without specific prior written permission.
     28  *
     29  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     30  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     31  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     32  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     33  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     34  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     35  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     36  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     37  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     38  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     39  * POSSIBILITY OF SUCH DAMAGE.
     40  */
     41 
     42 /*
     43  * uvm_pglist.c: pglist functions
     44  *
 * XXX: was part of uvm_page but has an incompatible copyright so it
     46  * gets its own file now.
     47  */
     48 
     49 #include <sys/param.h>
     50 #include <sys/systm.h>
     51 #include <sys/malloc.h>
     52 #include <sys/mount.h>
     53 #include <sys/proc.h>
     54 
     55 #include <vm/vm.h>
     56 #include <vm/vm_page.h>
     57 #include <vm/vm_kern.h>
     58 
     59 #include <sys/syscallargs.h>
     60 
     61 #include <uvm/uvm.h>
     62 
     63 #ifdef VM_PAGE_ALLOC_MEMORY_STATS
     64 #define	STAT_INCR(v)	(v)++
     65 #define	STAT_DECR(v)	do { \
     66 		if ((v) == 0) \
     67 			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
     68 		else \
     69 			(v)--; \
     70 	} while (0)
     71 u_long	uvm_pglistalloc_npages;
     72 #else
     73 #define	STAT_INCR(v)
     74 #define	STAT_DECR(v)
     75 #endif
     76 
     77 /*
     78  * uvm_pglistalloc: allocate a list of pages
     79  *
     80  * => allocated pages are placed at the tail of rlist.  rlist is
     81  *    assumed to be properly initialized by caller.
     82  * => returns 0 on success or errno on failure
 * => XXX: implementation allocates only a single segment, also
 *	might be able to take better advantage of vm_physseg[].
     85  * => doesn't take into account clean non-busy pages on inactive list
     86  *	that could be used(?)
     87  * => params:
     88  *	size		the size of the allocation, rounded to page size.
     89  *	low		the low address of the allowed allocation range.
     90  *	high		the high address of the allowed allocation range.
     91  *	alignment	memory must be aligned to this power-of-two boundary.
     92  *	boundary	no segment in the allocation may cross this
     93  *			power-of-two boundary (relative to zero).
     94  */
     95 
     96 int
     97 uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
     98 	vm_size_t size;
     99 	vm_offset_t low, high, alignment, boundary;
    100 	struct pglist *rlist;
    101 	int nsegs, waitok;
    102 {
    103 	vm_offset_t try, idxpa, lastidxpa;
    104 	int psi;
    105 	struct vm_page *pgs;
    106 	int s, tryidx, idx, end, error;
    107 	vm_page_t m;
    108 	u_long pagemask;
    109 #ifdef DEBUG
    110 	vm_page_t tp;
    111 #endif
    112 
    113 #ifdef DIAGNOSTIC
    114 	if ((alignment & (alignment - 1)) != 0)
    115 		panic("vm_page_alloc_memory: alignment must be power of 2");
    116 
    117 	if ((boundary & (boundary - 1)) != 0)
    118 		panic("vm_page_alloc_memory: boundary must be power of 2");
    119 #endif
    120 
    121 	/*
    122 	 * Our allocations are always page granularity, so our alignment
    123 	 * must be, too.
    124 	 */
    125 	if (alignment < PAGE_SIZE)
    126 		alignment = PAGE_SIZE;
    127 
    128 	size = round_page(size);
    129 	try = roundup(low, alignment);
    130 
    131 	if (boundary != 0 && boundary < size)
    132 		return (EINVAL);
    133 
    134 	pagemask = ~(boundary - 1);
    135 
    136 	/* Default to "lose". */
    137 	error = ENOMEM;
    138 
    139 	/*
    140 	 * Block all memory allocation and lock the free list.
    141 	 */
    142 	s = splimp();
    143 	uvm_lock_fpageq();            /* lock free page queue */
    144 
    145 	/* Are there even any free pages? */
    146 	if (uvm.page_free.tqh_first == NULL)
    147 		goto out;
    148 
    149 	for (;; try += alignment) {
    150 		if (try + size > high) {
    151 			/*
    152 			 * We've run past the allowable range.
    153 			 */
    154 			goto out;
    155 		}
    156 
    157 		/*
    158 		 * Make sure this is a managed physical page.
    159 		 */
    160 
    161 		if ((psi = vm_physseg_find(atop(try), &idx)) == -1)
    162 			continue; /* managed? */
    163 		if (vm_physseg_find(atop(try + size), NULL) != psi)
    164 			continue; /* end must be in this segment */
    165 
    166 		tryidx = idx;
    167 		end = idx + (size / PAGE_SIZE);
    168 		pgs = vm_physmem[psi].pgs;
    169 
    170 		/*
    171 		 * Found a suitable starting page.  See of the range is free.
    172 		 */
    173 		for (; idx < end; idx++) {
    174 			if (VM_PAGE_IS_FREE(&pgs[idx]) == 0) {
    175 				/*
    176 				 * Page not available.
    177 				 */
    178 				break;
    179 			}
    180 
    181 			idxpa = VM_PAGE_TO_PHYS(&pgs[idx]);
    182 
    183 			if (idx > tryidx) {
    184 				lastidxpa = VM_PAGE_TO_PHYS(&pgs[idx - 1]);
    185 
    186 				if ((lastidxpa + PAGE_SIZE) != idxpa) {
    187 					/*
    188 					 * Region not contiguous.
    189 					 */
    190 					break;
    191 				}
    192 				if (boundary != 0 &&
    193 				    ((lastidxpa ^ idxpa) & pagemask) != 0) {
    194 					/*
    195 					 * Region crosses boundary.
    196 					 */
    197 					break;
    198 				}
    199 			}
    200 		}
    201 
    202 		if (idx == end) {
    203 			/*
    204 			 * Woo hoo!  Found one.
    205 			 */
    206 			break;
    207 		}
    208 	}
    209 
    210 	/*
    211 	 * we have a chunk of memory that conforms to the requested constraints.
    212 	 */
    213 	idx = tryidx;
    214 	while (idx < end) {
    215 		m = &pgs[idx];
    216 #ifdef DEBUG
    217 		for (tp = uvm.page_free.tqh_first; tp != NULL;
    218 		    tp = tp->pageq.tqe_next) {
    219 			if (tp == m)
    220 				break;
    221 		}
    222 		if (tp == NULL)
    223 			panic("uvm_pglistalloc: page not on freelist");
    224 #endif
    225 		TAILQ_REMOVE(&uvm.page_free, m, pageq);
    226 		uvmexp.free--;
    227 		m->flags = PG_CLEAN;
    228 		m->pqflags = 0;
    229 		m->uobject = NULL;
    230 		m->uanon = NULL;
    231 		m->wire_count = 0;
    232 		m->loan_count = 0;
    233 		TAILQ_INSERT_TAIL(rlist, m, pageq);
    234 		idx++;
    235 		STAT_INCR(uvm_pglistalloc_npages);
    236 	}
    237 	error = 0;
    238 
    239 out:
    240 	uvm_unlock_fpageq();
    241 	splx(s);
    242 
    243 	/*
    244 	 * check to see if we need to generate some free pages waking
    245 	 * the pagedaemon.
    246 	 * XXX: we read uvm.free without locking
    247 	 */
    248 
    249 	if (uvmexp.free < uvmexp.freemin ||
    250 	    (uvmexp.free < uvmexp.freetarg &&
    251 	    uvmexp.inactive < uvmexp.inactarg))
    252 		thread_wakeup(&uvm.pagedaemon);
    253 
    254 	return (error);
    255 }
    256 
    257 /*
    258  * uvm_pglistfree: free a list of pages
    259  *
    260  * => pages should already be unmapped
    261  */
    262 
    263 void
    264 uvm_pglistfree(list)
    265 	struct pglist *list;
    266 {
    267 	vm_page_t m;
    268 	int s;
    269 
    270 	/*
    271 	 * Block all memory allocation and lock the free list.
    272 	 */
    273 	s = splimp();
    274 	uvm_lock_fpageq();
    275 
    276 	while ((m = list->tqh_first) != NULL) {
    277 #ifdef DIAGNOSTIC
    278 		if (m->pqflags & (PQ_ACTIVE|PQ_INACTIVE))
    279 			panic("uvm_pglistfree: active/inactive page!");
    280 #endif
    281 		TAILQ_REMOVE(list, m, pageq);
    282 		m->pqflags = PQ_FREE;
    283 		TAILQ_INSERT_TAIL(&uvm.page_free, m, pageq);
    284 		uvmexp.free++;
    285 		STAT_DECR(uvm_pglistalloc_npages);
    286 	}
    287 
    288 	uvm_unlock_fpageq();
    289 	splx(s);
    290 }
    291