/*	$NetBSD: uvm_pglist.c,v 1.5 1998/07/08 04:28:28 thorpej Exp $	*/

#define VM_PAGE_ALLOC_MEMORY_STATS

/*-
 * Copyright (c) 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the NetBSD
 *      Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_pglist.c: pglist functions
 *
 * XXX: was part of uvm_page but has an incompatible copyright, so it
 * gets its own file now.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

#ifdef VM_PAGE_ALLOC_MEMORY_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
	} while (0)
u_long	uvm_pglistalloc_npages;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif

/*
 * uvm_pglistalloc: allocate a list of pages
 *
 * => allocated pages are placed at the tail of rlist.  rlist is
 *    assumed to be properly initialized by caller.
 * => returns 0 on success or errno on failure
 * => XXX: implementation allocates only a single segment; it also
 *	might be able to take better advantage of vm_physmem[].
 * => doesn't take into account clean, non-busy pages on the inactive
 *	list that could be used(?)
 * => params:
 *	size		the size of the allocation, rounded to page size.
 *	low		the low address of the allowed allocation range.
 *	high		the high address of the allowed allocation range.
 *	alignment	memory must be aligned to this power-of-two boundary.
 *	boundary	no segment in the allocation may cross this
 *			power-of-two boundary (relative to zero).
 */
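
/*
 * Usage sketch: a hypothetical driver that needs four physically
 * contiguous, page-aligned pages below 16MB might call this roughly as
 * follows (the address limits, segment count, and waitok flag here are
 * illustrative only, not taken from this file):
 *
 *	struct pglist mlist;
 *	struct vm_page *m;
 *	vm_offset_t pa;
 *	int error;
 *
 *	TAILQ_INIT(&mlist);			(caller must init rlist)
 *	error = uvm_pglistalloc(4 * PAGE_SIZE, 0, 0xffffff,
 *	    PAGE_SIZE, 0, &mlist, 1, TRUE);
 *	if (error)
 *		return (error);
 *	for (m = mlist.tqh_first; m != NULL; m = m->pageq.tqe_next) {
 *		pa = VM_PAGE_TO_PHYS(m);
 *		(map or hand pa to the device)
 *	}
 *	...
 *	uvm_pglistfree(&mlist);			(release when done)
 */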

int
uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
	vm_size_t size;
	vm_offset_t low, high, alignment, boundary;
	struct pglist *rlist;
	int nsegs, waitok;
{
	vm_offset_t try, idxpa, lastidxpa;
	int psi;
	struct vm_page *pgs;
	int s, tryidx, idx, end, error, free_list;
	vm_page_t m;
	u_long pagemask;
#ifdef DEBUG
	vm_page_t tp;
#endif

#ifdef DIAGNOSTIC
	if ((alignment & (alignment - 1)) != 0)
		panic("uvm_pglistalloc: alignment must be power of 2");

	if ((boundary & (boundary - 1)) != 0)
		panic("uvm_pglistalloc: boundary must be power of 2");
#endif

	/*
	 * Our allocations are always page granularity, so our alignment
	 * must be, too.
	 */
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;

	size = round_page(size);
	try = roundup(low, alignment);

	if (boundary != 0 && boundary < size)
		return (EINVAL);

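	/*
	 * pagemask keeps the bits of a physical address that identify
	 * which boundary-sized block the address falls in; two addresses
	 * lie in the same block exactly when those bits match.  For
	 * example, with a boundary of 0x1000000 (16MB), pagemask is
	 * ~0xffffff, so 0x0ffff000 and 0x10000000 differ under the mask
	 * and a range spanning both would be rejected below as crossing
	 * the boundary.
	 */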
	pagemask = ~(boundary - 1);

	/* Default to "lose". */
	error = ENOMEM;

	/*
	 * Block all memory allocation and lock the free list.
	 */
	s = splimp();
	uvm_lock_fpageq();            /* lock free page queue */

	/* Are there even any free pages? */
	for (idx = 0; idx < VM_NFREELIST; idx++)
		if (uvm.page_free[idx].tqh_first != NULL)
			break;
	if (idx == VM_NFREELIST)
		goto out;

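	/*
	 * Scan candidate start addresses, stepping by `alignment' each
	 * time.  A candidate is accepted only if the whole range lies
	 * within one managed physical segment and every page in it is
	 * free, physically contiguous with its predecessor, and does not
	 * cross `boundary'.
	 */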
	for (;; try += alignment) {
		if (try + size > high) {
			/*
			 * We've run past the allowable range.
			 */
			goto out;
		}

		/*
		 * Make sure this is a managed physical page.
		 */

		if ((psi = vm_physseg_find(atop(try), &idx)) == -1)
			continue; /* managed? */
		if (vm_physseg_find(atop(try + size), NULL) != psi)
			continue; /* end must be in this segment */

		tryidx = idx;
		end = idx + (size / PAGE_SIZE);
		pgs = vm_physmem[psi].pgs;

		/*
		 * Found a suitable starting page.  See if the range is free.
		 */
		for (; idx < end; idx++) {
			if (VM_PAGE_IS_FREE(&pgs[idx]) == 0) {
				/*
				 * Page not available.
				 */
				break;
			}

			idxpa = VM_PAGE_TO_PHYS(&pgs[idx]);

			if (idx > tryidx) {
				lastidxpa = VM_PAGE_TO_PHYS(&pgs[idx - 1]);

				if ((lastidxpa + PAGE_SIZE) != idxpa) {
					/*
					 * Region not contiguous.
					 */
					break;
				}
				if (boundary != 0 &&
				    ((lastidxpa ^ idxpa) & pagemask) != 0) {
					/*
					 * Region crosses boundary.
					 */
					break;
				}
			}
		}

		if (idx == end) {
			/*
			 * Woo hoo!  Found one.
			 */
			break;
		}
	}

	/*
	 * we have a chunk of memory that conforms to the requested constraints.
	 */
	idx = tryidx;
	while (idx < end) {
		m = &pgs[idx];
		free_list = uvm_page_lookup_freelist(m);
#ifdef DEBUG
		for (tp = uvm.page_free[free_list].tqh_first;
		     tp != NULL; tp = tp->pageq.tqe_next) {
			if (tp == m)
				break;
		}
		if (tp == NULL)
			panic("uvm_pglistalloc: page not on freelist");
#endif
		TAILQ_REMOVE(&uvm.page_free[free_list], m, pageq);
		uvmexp.free--;
		m->flags = PG_CLEAN;
		m->pqflags = 0;
		m->uobject = NULL;
		m->uanon = NULL;
		m->wire_count = 0;
		m->loan_count = 0;
		TAILQ_INSERT_TAIL(rlist, m, pageq);
		idx++;
		STAT_INCR(uvm_pglistalloc_npages);
	}
	error = 0;

out:
	uvm_unlock_fpageq();
	splx(s);

	/*
	 * check to see if we need to generate some free pages by waking
	 * the pagedaemon.
	 * XXX: we read uvmexp.free without locking
	 */

	if (uvmexp.free < uvmexp.freemin ||
	    (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.inactive < uvmexp.inactarg))
		thread_wakeup(&uvm.pagedaemon);

	return (error);
}

/*
 * uvm_pglistfree: free a list of pages
 *
 * => pages should already be unmapped
 */

void
uvm_pglistfree(list)
	struct pglist *list;
{
	vm_page_t m;
	int s;

	/*
	 * Block all memory allocation and lock the free list.
	 */
	s = splimp();
	uvm_lock_fpageq();

	while ((m = list->tqh_first) != NULL) {
#ifdef DIAGNOSTIC
		if (m->pqflags & (PQ_ACTIVE|PQ_INACTIVE))
			panic("uvm_pglistfree: active/inactive page!");
#endif
		TAILQ_REMOVE(list, m, pageq);
		m->pqflags = PQ_FREE;
		TAILQ_INSERT_TAIL(&uvm.page_free[uvm_page_lookup_freelist(m)],
		    m, pageq);
		uvmexp.free++;
		STAT_DECR(uvm_pglistalloc_npages);
	}

	uvm_unlock_fpageq();
	splx(s);
}