/*	$NetBSD: uvm_map.c,v 1.397 2022/06/04 23:09:57 riastradh Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.c    8.3 (Berkeley) 1/12/94
 * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_map.c: uvm map operations
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.397 2022/06/04 23:09:57 riastradh Exp $");

#include "opt_ddb.h"
#include "opt_pax.h"
#include "opt_uvmhist.h"
#include "opt_uvm.h"
#include "opt_sysv.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/pax.h>
#include <sys/vnode.h>
#include <sys/filedesc.h>
#include <sys/lockdebug.h>
#include <sys/atomic.h>
#include <sys/sysctl.h>
#ifndef __USER_VA0_IS_SAFE
#include <sys/kauth.h>
#include "opt_user_va0_disable_default.h"
#endif

#include <sys/shm.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

#if defined(DDB) || defined(DEBUGPRINT)
#include <uvm/uvm_ddb.h>
#endif

#ifdef UVMHIST
#ifndef UVMHIST_MAPHIST_SIZE
#define UVMHIST_MAPHIST_SIZE 100
#endif
static struct kern_history_ent maphistbuf[UVMHIST_MAPHIST_SIZE];
UVMHIST_DEFINE(maphist) = UVMHIST_INITIALIZER(maphist, maphistbuf);
#endif

#if !defined(UVMMAP_COUNTERS)

#define	UVMMAP_EVCNT_DEFINE(name)	/* nothing */
#define UVMMAP_EVCNT_INCR(ev)		/* nothing */
#define UVMMAP_EVCNT_DECR(ev)		/* nothing */

#else /* defined(UVMMAP_COUNTERS) */

#include <sys/evcnt.h>
#define	UVMMAP_EVCNT_DEFINE(name) \
struct evcnt uvmmap_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
    "uvmmap", #name); \
EVCNT_ATTACH_STATIC(uvmmap_evcnt_##name);
#define	UVMMAP_EVCNT_INCR(ev)		uvmmap_evcnt_##ev.ev_count++
#define	UVMMAP_EVCNT_DECR(ev)		uvmmap_evcnt_##ev.ev_count--

#endif /* !defined(UVMMAP_COUNTERS) */

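/*
 * Illustrative sketch (not part of the original file): how a counter
 * defined with UVMMAP_EVCNT_DEFINE() would be declared and bumped when
 * UVMMAP_COUNTERS is enabled.  The counter name "foomerge" and the
 * helper below are hypothetical.
 */
#if 0
UVMMAP_EVCNT_DEFINE(foomerge)

static void
example_count_foomerge(void)
{

	/* appears as "uvmmap foomerge" in vmstat -e output */
	UVMMAP_EVCNT_INCR(foomerge);
}
#endif
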
UVMMAP_EVCNT_DEFINE(ubackmerge)
UVMMAP_EVCNT_DEFINE(uforwmerge)
UVMMAP_EVCNT_DEFINE(ubimerge)
UVMMAP_EVCNT_DEFINE(unomerge)
UVMMAP_EVCNT_DEFINE(kbackmerge)
UVMMAP_EVCNT_DEFINE(kforwmerge)
UVMMAP_EVCNT_DEFINE(kbimerge)
UVMMAP_EVCNT_DEFINE(knomerge)
UVMMAP_EVCNT_DEFINE(map_call)
UVMMAP_EVCNT_DEFINE(mlk_call)
UVMMAP_EVCNT_DEFINE(mlk_hint)
UVMMAP_EVCNT_DEFINE(mlk_tree)
UVMMAP_EVCNT_DEFINE(mlk_treeloop)

const char vmmapbsy[] = "vmmapbsy";

/*
 * cache for vmspace structures.
 */

static struct pool_cache uvm_vmspace_cache;

/*
 * cache for dynamically-allocated map entries.
 */

static struct pool_cache uvm_map_entry_cache;

#ifdef PMAP_GROWKERNEL
/*
 * This global represents the end of the kernel virtual address
 * space.  If we want to exceed this, we must grow the kernel
 * virtual address space dynamically.
 *
 * Note, this variable is locked by kernel_map's lock.
 */
vaddr_t uvm_maxkaddr;
#endif

#ifndef __USER_VA0_IS_SAFE
#ifndef __USER_VA0_DISABLE_DEFAULT
#define __USER_VA0_DISABLE_DEFAULT 1
#endif
#ifdef USER_VA0_DISABLE_DEFAULT /* kernel config option overrides */
#undef __USER_VA0_DISABLE_DEFAULT
#define __USER_VA0_DISABLE_DEFAULT USER_VA0_DISABLE_DEFAULT
#endif
int user_va0_disable = __USER_VA0_DISABLE_DEFAULT;
#endif

/*
 * macros
 */

/*
 * uvm_map_align_va: round down or up virtual address
 */
static __inline void
uvm_map_align_va(vaddr_t *vap, vsize_t align, int topdown)
{

	KASSERT(powerof2(align));

	if (align != 0 && (*vap & (align - 1)) != 0) {
		if (topdown)
			*vap = rounddown2(*vap, align);
		else
			*vap = roundup2(*vap, align);
	}
}
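
/*
 * Worked example (illustrative, not part of the original file): with
 * align == 0x1000, an unaligned hint is snapped to an aligned boundary
 * in the direction the allocation grows.
 */
#if 0
static void
example_align_va(void)
{
	vaddr_t va;

	va = 0x12345;
	uvm_map_align_va(&va, 0x1000, 1);	/* topdown: va == 0x12000 */

	va = 0x12345;
	uvm_map_align_va(&va, 0x1000, 0);	/* bottom-up: va == 0x13000 */
}
#endif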

/*
 * UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging
 */
extern struct vm_map *pager_map;

#define	UVM_ET_ISCOMPATIBLE(ent, type, uobj, meflags, \
    prot, maxprot, inh, adv, wire) \
	((ent)->etype == (type) && \
	(((ent)->flags ^ (meflags)) & (UVM_MAP_NOMERGE)) == 0 && \
	(ent)->object.uvm_obj == (uobj) && \
	(ent)->protection == (prot) && \
	(ent)->max_protection == (maxprot) && \
	(ent)->inheritance == (inh) && \
	(ent)->advice == (adv) && \
	(ent)->wired_count == (wire))
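
/*
 * Illustrative note (not part of the original file): adjacent entries
 * merge only when every attribute above matches; e.g. two anonymous
 * entries that differ only in advice (UVM_ADV_RANDOM vs UVM_ADV_NORMAL)
 * fail UVM_ET_ISCOMPATIBLE and remain separate map entries.
 */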

/*
 * uvm_map_entry_link: insert entry into a map
 *
 * => map must be locked
 */
#define uvm_map_entry_link(map, after_where, entry) do { \
	uvm_mapent_check(entry); \
	(map)->nentries++; \
	(entry)->prev = (after_where); \
	(entry)->next = (after_where)->next; \
	(entry)->prev->next = (entry); \
	(entry)->next->prev = (entry); \
	uvm_rb_insert((map), (entry)); \
} while (/*CONSTCOND*/ 0)

/*
 * uvm_map_entry_unlink: remove entry from a map
 *
 * => map must be locked
 */
#define uvm_map_entry_unlink(map, entry) do { \
	KASSERT((entry) != (map)->first_free); \
	KASSERT((entry) != (map)->hint); \
	uvm_mapent_check(entry); \
	(map)->nentries--; \
	(entry)->next->prev = (entry)->prev; \
	(entry)->prev->next = (entry)->next; \
	uvm_rb_remove((map), (entry)); \
} while (/*CONSTCOND*/ 0)

/*
 * SAVE_HINT: saves the specified entry as the hint for future lookups.
 *
 * => map need not be locked.
 */
#define SAVE_HINT(map, check, value) do { \
	if ((map)->hint == (check)) \
		(map)->hint = (value); \
} while (/*CONSTCOND*/ 0)

/*
 * clear_hints: ensure that hints don't point to the entry.
 *
 * => map must be write-locked.
 */
static void
clear_hints(struct vm_map *map, struct vm_map_entry *ent)
{

	SAVE_HINT(map, ent, ent->prev);
	if (map->first_free == ent) {
		map->first_free = ent->prev;
	}
}

/*
 * VM_MAP_RANGE_CHECK: check and correct range
 *
 * => map must at least be read locked
 */

#define VM_MAP_RANGE_CHECK(map, start, end) do { \
	if (start < vm_map_min(map))		\
		start = vm_map_min(map);	\
	if (end > vm_map_max(map))		\
		end = vm_map_max(map);		\
	if (start > end)			\
		start = end;			\
} while (/*CONSTCOND*/ 0)
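
/*
 * Illustrative sketch (not part of the original file): clamping a
 * caller-supplied range to the map's bounds before use.  The helper
 * name is hypothetical.
 */
#if 0
static void
example_range_check(struct vm_map *map)
{
	vaddr_t start = 0, end = (vaddr_t)-1;

	VM_MAP_RANGE_CHECK(map, start, end);
	/* now vm_map_min(map) <= start <= end <= vm_map_max(map) */
}
#endif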

/*
 * local prototypes
 */

static struct vm_map_entry *
		uvm_mapent_alloc(struct vm_map *, int);
static void	uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *);
static void	uvm_mapent_free(struct vm_map_entry *);
#if defined(DEBUG)
static void	_uvm_mapent_check(const struct vm_map_entry *, int);
#define	uvm_mapent_check(map)	_uvm_mapent_check(map, __LINE__)
#else /* defined(DEBUG) */
#define	uvm_mapent_check(e)	/* nothing */
#endif /* defined(DEBUG) */

static void	uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *);
static void	uvm_map_reference_amap(struct vm_map_entry *, int);
static int	uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int,
		    int, struct vm_map_entry *);
static void	uvm_map_unreference_amap(struct vm_map_entry *, int);

int _uvm_map_sanity(struct vm_map *);
int _uvm_tree_sanity(struct vm_map *);
static vsize_t uvm_rb_maxgap(const struct vm_map_entry *);

#define	ROOT_ENTRY(map)		((struct vm_map_entry *)(map)->rb_tree.rbt_root)
#define	LEFT_ENTRY(entry)	((struct vm_map_entry *)(entry)->rb_node.rb_left)
#define	RIGHT_ENTRY(entry)	((struct vm_map_entry *)(entry)->rb_node.rb_right)
#define	PARENT_ENTRY(map, entry) \
	(ROOT_ENTRY(map) == (entry) \
	    ? NULL : (struct vm_map_entry *)RB_FATHER(&(entry)->rb_node))

/*
 * These get filled in if/when the SYSVSHM shared memory code is loaded.
 *
 * We do this with function pointers rather than #ifdef SYSVSHM so the
 * SYSVSHM code can be loaded and unloaded.
 */
void (*uvm_shmexit)(struct vmspace *) = NULL;
void (*uvm_shmfork)(struct vmspace *, struct vmspace *) = NULL;

static int
uvm_map_compare_nodes(void *ctx, const void *nparent, const void *nkey)
{
	const struct vm_map_entry *eparent = nparent;
	const struct vm_map_entry *ekey = nkey;

	KASSERT(eparent->start < ekey->start || eparent->start >= ekey->end);
	KASSERT(ekey->start < eparent->start || ekey->start >= eparent->end);

	if (eparent->start < ekey->start)
		return -1;
	if (eparent->end >= ekey->start)
		return 1;
	return 0;
}

static int
uvm_map_compare_key(void *ctx, const void *nparent, const void *vkey)
{
	const struct vm_map_entry *eparent = nparent;
	const vaddr_t va = *(const vaddr_t *) vkey;

	if (eparent->start < va)
		return -1;
	if (eparent->end >= va)
		return 1;
	return 0;
}

static const rb_tree_ops_t uvm_map_tree_ops = {
	.rbto_compare_nodes = uvm_map_compare_nodes,
	.rbto_compare_key = uvm_map_compare_key,
	.rbto_node_offset = offsetof(struct vm_map_entry, rb_node),
	.rbto_context = NULL
};

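/*
 * Illustrative sketch (not part of the original file): how an rb_tree
 * using the ops table above is initialized and searched with rbtree(9).
 * The real map setup code performs this initialization elsewhere; this
 * fragment only demonstrates the API, and the helper name is hypothetical.
 */
#if 0
static struct vm_map_entry *
example_tree_lookup(struct vm_map *map, vaddr_t va)
{

	rb_tree_init(&map->rb_tree, &uvm_map_tree_ops);
	/* ... entries would be inserted via rb_tree_insert_node() ... */
	return rb_tree_find_node(&map->rb_tree, &va);
}
#endif
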
/*
 * uvm_rb_gap: return the gap size between our entry and next entry.
 */
static inline vsize_t
uvm_rb_gap(const struct vm_map_entry *entry)
{

	KASSERT(entry->next != NULL);
	return entry->next->start - entry->end;
}

static vsize_t
uvm_rb_maxgap(const struct vm_map_entry *entry)
{
	struct vm_map_entry *child;
	vsize_t maxgap = entry->gap;

	/*
	 * We need maxgap to be the largest gap of us or any of our
	 * descendants.  Since each of our children's maxgap is the
	 * cached value of their largest gap of themselves or their
	 * descendants, we can just use that value and avoid recursing
	 * down the tree to calculate it.
	 */
	if ((child = LEFT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
		maxgap = child->maxgap;

	if ((child = RIGHT_ENTRY(entry)) != NULL && maxgap < child->maxgap)
		maxgap = child->maxgap;

	return maxgap;
}
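
/*
 * Worked example (illustrative, not part of the original file):
 *
 *	A = [0x1000, 0x2000)	gap(A) = 0x3000 - 0x2000 = 0x1000
 *	B = [0x3000, 0x4000)	gap(B) = 0x8000 - 0x4000 = 0x4000
 *	C = [0x8000, 0x9000)
 *
 * If B is a subtree root with A below it, then maxgap(B) =
 * max(gap(B), maxgap(A)) = 0x4000, so a search for a hole of 0x2000
 * bytes can consult B's cached maxgap without visiting A's subtree.
 */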

static void
uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_map_entry *parent;

	KASSERT(entry->gap == uvm_rb_gap(entry));
	entry->maxgap = uvm_rb_maxgap(entry);

	while ((parent = PARENT_ENTRY(map, entry)) != NULL) {
		struct vm_map_entry *brother;
		vsize_t maxgap = parent->gap;
		unsigned int which;

		KDASSERT(parent->gap == uvm_rb_gap(parent));
		if (maxgap < entry->maxgap)
			maxgap = entry->maxgap;
		/*
		 * Since we work towards the root, we know entry's maxgap
		 * value is OK, but its brother's may now be out-of-date
		 * due to rebalancing, so refresh it.
		 */
		which = RB_POSITION(&entry->rb_node) ^ RB_DIR_OTHER;
		brother = (struct vm_map_entry *)parent->rb_node.rb_nodes[which];
		if (brother != NULL) {
			KDASSERT(brother->gap == uvm_rb_gap(brother));
			brother->maxgap = uvm_rb_maxgap(brother);
			if (maxgap < brother->maxgap)
				maxgap = brother->maxgap;
		}

		parent->maxgap = maxgap;
		entry = parent;
	}
}

static void
uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_map_entry *ret __diagused;

	entry->gap = entry->maxgap = uvm_rb_gap(entry);
	if (entry->prev != &map->header)
		entry->prev->gap = uvm_rb_gap(entry->prev);

	ret = rb_tree_insert_node(&map->rb_tree, entry);
	KASSERTMSG(ret == entry,
	    "uvm_rb_insert: map %p: duplicate entry %p", map, ret);

	/*
	 * If the previous entry is not our immediate left child, then it's an
	 * ancestor and will be fixed up on the way to the root.  We don't
	 * have to check entry->prev against &map->header since &map->header
	 * will never be in the tree.
	 */
	uvm_rb_fixup(map,
	    LEFT_ENTRY(entry) == entry->prev ? entry->prev : entry);
}

static void
uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
{
	struct vm_map_entry *prev_parent = NULL, *next_parent = NULL;

	/*
	 * If we are removing an interior node, then an adjacent node will
	 * be used to replace its position in the tree.  Therefore we will
	 * need to fixup the tree starting at the parent of the replacement
	 * node.  So record their parents for later use.
	 */
	if (entry->prev != &map->header)
		prev_parent = PARENT_ENTRY(map, entry->prev);
	if (entry->next != &map->header)
		next_parent = PARENT_ENTRY(map, entry->next);

	rb_tree_remove_node(&map->rb_tree, entry);

	/*
	 * If the previous node has a new parent, fixup the tree starting
	 * at the previous node's old parent.
	 */
	if (entry->prev != &map->header) {
		/*
		 * Update the previous entry's gap due to our absence.
		 */
		entry->prev->gap = uvm_rb_gap(entry->prev);
		uvm_rb_fixup(map, entry->prev);
		if (prev_parent != NULL
		    && prev_parent != entry
		    && prev_parent != PARENT_ENTRY(map, entry->prev))
			uvm_rb_fixup(map, prev_parent);
	}

	/*
	 * If the next node has a new parent, fixup the tree starting
	 * at the next node's old parent.
	 */
	if (entry->next != &map->header) {
		uvm_rb_fixup(map, entry->next);
		if (next_parent != NULL
		    && next_parent != entry
		    && next_parent != PARENT_ENTRY(map, entry->next))
			uvm_rb_fixup(map, next_parent);
	}
}

#if defined(DEBUG)
int uvm_debug_check_map = 0;
int uvm_debug_check_rbtree = 0;
#define uvm_map_check(map, name) \
	_uvm_map_check((map), (name), __FILE__, __LINE__)
static void
_uvm_map_check(struct vm_map *map, const char *name,
    const char *file, int line)
{

	if ((uvm_debug_check_map && _uvm_map_sanity(map)) ||
	    (uvm_debug_check_rbtree && _uvm_tree_sanity(map))) {
		panic("uvm_map_check failed: \"%s\" map=%p (%s:%d)",
		    name, map, file, line);
	}
}
#else /* defined(DEBUG) */
#define uvm_map_check(map, name)	/* nothing */
#endif /* defined(DEBUG) */

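/*
 * Usage note (illustrative, not part of the original file): the checks
 * above are compiled in under DEBUG but disabled by default; they can
 * be enabled at run time, e.g. from ddb with "w uvm_debug_check_map 1",
 * after which every uvm_map_check() call site validates the whole map.
 */
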
#if defined(DEBUG) || defined(DDB)
int
_uvm_map_sanity(struct vm_map *map)
{
	bool first_free_found = false;
	bool hint_found = false;
	const struct vm_map_entry *e;
	struct vm_map_entry *hint = map->hint;

	e = &map->header;
	for (;;) {
		if (map->first_free == e) {
			first_free_found = true;
		} else if (!first_free_found && e->next->start > e->end) {
			printf("first_free %p should be %p\n",
			    map->first_free, e);
			return -1;
		}
		if (hint == e) {
			hint_found = true;
		}

		e = e->next;
		if (e == &map->header) {
			break;
		}
	}
	if (!first_free_found) {
		printf("stale first_free\n");
		return -1;
	}
	if (!hint_found) {
		printf("stale hint\n");
		return -1;
	}
	return 0;
}

int
_uvm_tree_sanity(struct vm_map *map)
{
	struct vm_map_entry *tmp, *trtmp;
	int n = 0, i = 1;

	for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
		if (tmp->gap != uvm_rb_gap(tmp)) {
			printf("%d/%d gap %#lx != %#lx %s\n",
			    n + 1, map->nentries,
			    (ulong)tmp->gap, (ulong)uvm_rb_gap(tmp),
			    tmp->next == &map->header ? "(last)" : "");
			goto error;
		}
		/*
		 * If any entries are out of order, the unsigned subtraction
		 * in uvm_rb_gap() wraps around, so tmp->gap will likely
		 * exceed the size of the map.
		 */
		if (tmp->gap >= vm_map_max(map) - vm_map_min(map)) {
			printf("too large gap %zu\n", (size_t)tmp->gap);
			goto error;
		}
		n++;
	}

	if (n != map->nentries) {
		printf("nentries: %d vs %d\n", n, map->nentries);
		goto error;
	}

	trtmp = NULL;
	for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) {
		if (tmp->maxgap != uvm_rb_maxgap(tmp)) {
			printf("maxgap %#lx != %#lx\n",
			    (ulong)tmp->maxgap,
			    (ulong)uvm_rb_maxgap(tmp));
			goto error;
		}
		if (trtmp != NULL && trtmp->start >= tmp->start) {
			printf("corrupt: %#"PRIxVADDR" >= %#"PRIxVADDR"\n",
			    trtmp->start, tmp->start);
			goto error;
		}

		trtmp = tmp;
	}

	for (tmp = map->header.next; tmp != &map->header;
	    tmp = tmp->next, i++) {
		trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_LEFT);
		if (trtmp == NULL)
			trtmp = &map->header;
		if (tmp->prev != trtmp) {
			printf("lookup: %d: %p->prev=%p: %p\n",
			    i, tmp, tmp->prev, trtmp);
			goto error;
		}
		trtmp = rb_tree_iterate(&map->rb_tree, tmp, RB_DIR_RIGHT);
		if (trtmp == NULL)
			trtmp = &map->header;
		if (tmp->next != trtmp) {
			printf("lookup: %d: %p->next=%p: %p\n",
			    i, tmp, tmp->next, trtmp);
			goto error;
		}
		trtmp = rb_tree_find_node(&map->rb_tree, &tmp->start);
		if (trtmp != tmp) {
			printf("lookup: %d: %p - %p: %p\n", i, tmp, trtmp,
			    PARENT_ENTRY(map, tmp));
			goto error;
		}
	}

	return (0);
 error:
	return (-1);
}
#endif /* defined(DEBUG) || defined(DDB) */

/*
 * vm_map_lock: acquire an exclusive (write) lock on a map.
 *
 * => The locking protocol provides for guaranteed upgrade from shared ->
 *    exclusive by whichever thread currently has the map marked busy.
 *    See "LOCKING PROTOCOL NOTES" in uvm_map.h.  This is horrible; among
 *    other problems, it defeats any fairness guarantees provided by RW
 *    locks.
 */

void
vm_map_lock(struct vm_map *map)
{

	for (;;) {
		rw_enter(&map->lock, RW_WRITER);
		if (map->busy == NULL || map->busy == curlwp) {
			break;
		}
		mutex_enter(&map->misc_lock);
		rw_exit(&map->lock);
		if (map->busy != NULL) {
			cv_wait(&map->cv, &map->misc_lock);
		}
		mutex_exit(&map->misc_lock);
	}
	map->timestamp++;
}

/*
 * vm_map_lock_try: try to lock a map, failing if it is already locked.
 */

bool
vm_map_lock_try(struct vm_map *map)
{

	if (!rw_tryenter(&map->lock, RW_WRITER)) {
		return false;
	}
	if (map->busy != NULL) {
		rw_exit(&map->lock);
		return false;
	}
	map->timestamp++;
	return true;
}

/*
 * vm_map_unlock: release an exclusive lock on a map.
 */

void
vm_map_unlock(struct vm_map *map)
{

	KASSERT(rw_write_held(&map->lock));
	KASSERT(map->busy == NULL || map->busy == curlwp);
	rw_exit(&map->lock);
}

/*
 * vm_map_unbusy: mark the map as unbusy, and wake any waiters that
 *     want an exclusive lock.
 */

void
vm_map_unbusy(struct vm_map *map)
{

	KASSERT(map->busy == curlwp);

	/*
	 * Safe to clear 'busy' and 'waiters' with only a read lock held:
	 *
	 * o they can only be set with a write lock held
	 * o writers are blocked out with a read or write hold
	 * o at any time, only one thread owns the set of values
	 */
	mutex_enter(&map->misc_lock);
	map->busy = NULL;
	cv_broadcast(&map->cv);
	mutex_exit(&map->misc_lock);
}

/*
 * vm_map_lock_read: acquire a shared (read) lock on a map.
 */

void
vm_map_lock_read(struct vm_map *map)
{

	rw_enter(&map->lock, RW_READER);
}

/*
 * vm_map_unlock_read: release a shared lock on a map.
 */

void
vm_map_unlock_read(struct vm_map *map)
{

	rw_exit(&map->lock);
}

/*
 * vm_map_busy: mark a map as busy.
 *
 * => the caller must hold the map write locked
 */

void
vm_map_busy(struct vm_map *map)
{

	KASSERT(rw_write_held(&map->lock));
	KASSERT(map->busy == NULL);

	map->busy = curlwp;
}

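/*
 * Illustrative sketch (not part of the original file): the typical
 * busy-protocol sequence.  The busy holder may drop the write lock,
 * sleep, and later reacquire it; vm_map_lock() admits the busy holder
 * while excluding other writers.  The helper name is hypothetical.
 */
#if 0
static void
example_busy_protocol(struct vm_map *map)
{

	vm_map_lock(map);	/* exclusive (write) lock */
	vm_map_busy(map);	/* must be done while write locked */
	vm_map_unlock(map);

	/* ... sleep or do long-running work; readers still get in ... */

	vm_map_lock(map);	/* re-entry is guaranteed for the busy holder */
	vm_map_unbusy(map);	/* wakes threads parked in vm_map_lock() */
	vm_map_unlock(map);
}
#endif
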
/*
 * vm_map_locked_p: return true if the map is write locked.
 *
 * => only for debug purposes like KASSERTs.
 * => should not be used to verify that a map is not locked.
 */

bool
vm_map_locked_p(struct vm_map *map)
{

	return rw_write_held(&map->lock);
}

/*
 * uvm_mapent_alloc: allocate a map entry
 */

static struct vm_map_entry *
uvm_mapent_alloc(struct vm_map *map, int flags)
{
	struct vm_map_entry *me;
	int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	me = pool_cache_get(&uvm_map_entry_cache, pflags);
	if (__predict_false(me == NULL)) {
		return NULL;
	}
	me->flags = 0;

	UVMHIST_LOG(maphist, "<- new entry=%#jx [kentry=%jd]", (uintptr_t)me,
	    (map == kernel_map), 0, 0);
	return me;
}

/*
 * uvm_mapent_free: free map entry
 */

static void
uvm_mapent_free(struct vm_map_entry *me)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "<- freeing map entry=%#jx [flags=%#jx]",
	    (uintptr_t)me, me->flags, 0, 0);
	pool_cache_put(&uvm_map_entry_cache, me);
}

/*
 * uvm_mapent_copy: copy a map entry, preserving flags
 */

static inline void
uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
{

	memcpy(dst, src, sizeof(*dst));
	dst->flags = 0;
}

#if defined(DEBUG)
static void
_uvm_mapent_check(const struct vm_map_entry *entry, int line)
{

	if (entry->start >= entry->end) {
		goto bad;
	}
	if (UVM_ET_ISOBJ(entry)) {
		if (entry->object.uvm_obj == NULL) {
			goto bad;
		}
	} else if (UVM_ET_ISSUBMAP(entry)) {
		if (entry->object.sub_map == NULL) {
			goto bad;
		}
	} else {
		if (entry->object.uvm_obj != NULL ||
		    entry->object.sub_map != NULL) {
			goto bad;
		}
	}
	if (!UVM_ET_ISOBJ(entry)) {
		if (entry->offset != 0) {
			goto bad;
		}
	}

	return;

bad:
	panic("%s: bad entry %p, line %d", __func__, entry, line);
}
#endif /* defined(DEBUG) */

/*
 * uvm_map_entry_unwire: unwire a map entry
 *
 * => map should be locked by caller
 */

static inline void
uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
{

	entry->wired_count = 0;
	uvm_fault_unwire_locked(map, entry->start, entry->end);
}


/*
 * wrapper for calling amap_ref()
 */
static inline void
uvm_map_reference_amap(struct vm_map_entry *entry, int flags)
{

	amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * wrapper for calling amap_unref()
 */
static inline void
uvm_map_unreference_amap(struct vm_map_entry *entry, int flags)
{

	amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    (entry->end - entry->start) >> PAGE_SHIFT, flags);
}


/*
 * uvm_map_init: init mapping system at boot time.
 */

void
uvm_map_init(void)
{
	/*
	 * first, init logging system.
	 */

	UVMHIST_FUNC(__func__);
	UVMHIST_LINK_STATIC(maphist);
	UVMHIST_LINK_STATIC(pdhist);
	UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist, "<starting uvm map system>", 0, 0, 0, 0);

	/*
	 * initialize the global lock for kernel map entry.
	 */

	mutex_init(&uvm_kentry_lock, MUTEX_DRIVER, IPL_VM);
}

/*
 * uvm_map_init_caches: init mapping system caches.
 */
void
uvm_map_init_caches(void)
{
	/*
	 * initialize caches.
	 */

	pool_cache_bootstrap(&uvm_map_entry_cache, sizeof(struct vm_map_entry),
	    coherency_unit, 0, PR_LARGECACHE, "vmmpepl", NULL, IPL_NONE, NULL,
	    NULL, NULL);
	pool_cache_bootstrap(&uvm_vmspace_cache, sizeof(struct vmspace),
	    0, 0, 0, "vmsppl", NULL, IPL_NONE, NULL, NULL, NULL);
}

/*
 * clippers
 */

/*
 * uvm_mapent_splitadj: adjust map entries for splitting, after uvm_mapent_copy.
 */

static void
uvm_mapent_splitadj(struct vm_map_entry *entry1, struct vm_map_entry *entry2,
    vaddr_t splitat)
{
	vaddr_t adj;

	KASSERT(entry1->start < splitat);
	KASSERT(splitat < entry1->end);

	adj = splitat - entry1->start;
	entry1->end = entry2->start = splitat;

	if (entry1->aref.ar_amap) {
		amap_splitref(&entry1->aref, &entry2->aref, adj);
	}
	if (UVM_ET_ISSUBMAP(entry1)) {
		/* ... unlikely to happen, but play it safe */
		uvm_map_reference(entry1->object.sub_map);
	} else if (UVM_ET_ISOBJ(entry1)) {
		KASSERT(entry1->object.uvm_obj != NULL); /* suppress coverity */
		entry2->offset += adj;
		if (entry1->object.uvm_obj->pgops &&
		    entry1->object.uvm_obj->pgops->pgo_reference)
			entry1->object.uvm_obj->pgops->pgo_reference(
			    entry1->object.uvm_obj);
	}
}

/*
 * uvm_map_clip_start: ensure that the entry begins at or after
 *	the starting address; if it doesn't, we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_START macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
    vaddr_t start)
{
	struct vm_map_entry *new_entry;

	/* uvm_map_simplify_entry(map, entry); */ /* XXX */

	uvm_map_check(map, "clip_start entry");
	uvm_mapent_check(entry);

	/*
	 * Split off the front portion.  note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */
	new_entry = uvm_mapent_alloc(map, 0);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
	uvm_mapent_splitadj(new_entry, entry, start);
	uvm_map_entry_link(map, entry->prev, new_entry);

	uvm_map_check(map, "clip_start leave");
}

/*
 * uvm_map_clip_end: ensure that the entry ends at or before
 *	the ending address; if it doesn't, we split the entry.
 *
 * => caller should use UVM_MAP_CLIP_END macro rather than calling
 *    this directly
 * => map must be locked by caller
 */

void
uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end)
{
	struct vm_map_entry *new_entry;

	uvm_map_check(map, "clip_end entry");
	uvm_mapent_check(entry);

	/*
	 *	Create a new entry and insert it
	 *	AFTER the specified entry
	 */
	new_entry = uvm_mapent_alloc(map, 0);
	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
	uvm_mapent_splitadj(entry, new_entry, end);
	uvm_map_entry_link(map, entry, new_entry);

	uvm_map_check(map, "clip_end leave");
}
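
/*
 * Worked example (illustrative, not part of the original file): given an
 * entry covering [0x2000, 0x6000), uvm_map_clip_start(map, entry, 0x3000)
 * inserts a new entry for [0x2000, 0x3000) before it and shrinks the
 * original to [0x3000, 0x6000); uvm_map_clip_end(map, entry, 0x5000)
 * would instead keep [0x2000, 0x5000) and insert [0x5000, 0x6000) after.
 */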

/*
 *   M A P   -   m a i n   e n t r y   p o i n t
 */
/*
 * uvm_map: establish a valid mapping in a map
 *
 * => assume startp is page aligned.
 * => assume size is a multiple of PAGE_SIZE.
 * => assume sys_mmap provides enough of a "hint" to have us skip
 *	over text/data/bss area.
 * => map must be unlocked (we will lock it)
 * => <uobj,uoffset> value meanings (4 cases):
 *	 [1] <NULL,uoffset>		== uoffset is a hint for PMAP_PREFER
 *	 [2] <NULL,UVM_UNKNOWN_OFFSET>	== don't PMAP_PREFER
 *	 [3] <uobj,uoffset>		== normal mapping
 *	 [4] <uobj,UVM_UNKNOWN_OFFSET>	== uvm_map finds offset based on VA
 *
 *    case [4] is for kernel mappings where we don't know the offset until
 *    we've found a virtual address.   note that kernel object offsets are
 *    always relative to vm_map_min(kernel_map).
 *
 * => if `align' is non-zero, we align the virtual address to the specified
 *	alignment.
 *	this is provided as a mechanism for large pages.
 *
 * => XXXCDC: need way to map in external amap?
 */

int
uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size,
    struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags)
{
	struct uvm_map_args args;
	struct vm_map_entry *new_entry;
	int error;

	KASSERT((size & PAGE_MASK) == 0);
	KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);

	/*
	 * for pager_map, allocate the new entry first to avoid sleeping
	 * for memory while we have the map locked.
	 */

	new_entry = NULL;
	if (map == pager_map) {
		new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
		if (__predict_false(new_entry == NULL))
			return ENOMEM;
		flags |= UVM_FLAG_NOMERGE;
	}

	error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align,
	    flags, &args);
	if (!error) {
		error = uvm_map_enter(map, &args, new_entry);
		*startp = args.uma_start;
	} else if (new_entry) {
		uvm_mapent_free(new_entry);
	}

#if defined(DEBUG)
	if (!error && VM_MAP_IS_KERNEL(map) && (flags & UVM_FLAG_NOWAIT) == 0) {
		uvm_km_check_empty(map, *startp, *startp + size);
	}
#endif /* defined(DEBUG) */

	return error;
}
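
/*
 * Illustrative sketch (not part of the original file): a typical call
 * establishing an anonymous, pageable mapping (case [2] above).  The
 * helper name is hypothetical and error handling is elided.
 */
#if 0
static int
example_map_anon(struct vm_map *map, vsize_t size, vaddr_t *vap)
{

	*vap = vm_map_min(map);		/* hint: let uvm_map find space */
	return uvm_map(map, vap, round_page(size), NULL, UVM_UNKNOWN_OFFSET,
	    0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
	    UVM_ADV_RANDOM, 0));
}
#endif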
   1106  1.174      yamt 
   1107  1.307      yamt /*
   1108  1.307      yamt  * uvm_map_prepare:
   1109  1.307      yamt  *
   1110  1.307      yamt  * called with map unlocked.
   1111  1.307      yamt  * on success, returns the map locked.
   1112  1.307      yamt  */
   1113  1.307      yamt 
   1114  1.174      yamt int
   1115  1.174      yamt uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size,
   1116  1.174      yamt     struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags,
   1117  1.174      yamt     struct uvm_map_args *args)
   1118  1.174      yamt {
   1119  1.174      yamt 	struct vm_map_entry *prev_entry;
   1120  1.174      yamt 	vm_prot_t prot = UVM_PROTECTION(flags);
   1121  1.174      yamt 	vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
   1122  1.174      yamt 
   1123  1.385     skrll 	UVMHIST_FUNC(__func__);
   1124  1.391     skrll 	UVMHIST_CALLARGS(maphist, "(map=%#jx, start=%#jx, size=%jx, flags=%#jx)",
   1125  1.353  pgoyette 	    (uintptr_t)map, start, size, flags);
   1126  1.353  pgoyette 	UVMHIST_LOG(maphist, "  uobj/offset %#jx/%jd", (uintptr_t)uobj,
   1127  1.353  pgoyette 	    uoffset,0,0);
   1128  1.107       chs 
   1129  1.107       chs 	/*
   1130  1.107       chs 	 * detect a popular device driver bug.
   1131  1.107       chs 	 */
   1132  1.107       chs 
   1133  1.314     rmind 	KASSERT(doing_shutdown || curlwp != NULL);
   1134    1.1       mrg 
   1135   1.10       mrg 	/*
   1136  1.144      yamt 	 * zero-sized mapping doesn't make any sense.
   1137  1.144      yamt 	 */
   1138  1.144      yamt 	KASSERT(size > 0);
   1139  1.144      yamt 
   1140  1.180      yamt 	KASSERT((~flags & (UVM_FLAG_NOWAIT | UVM_FLAG_WAITVA)) != 0);
   1141  1.180      yamt 
   1142  1.222      yamt 	uvm_map_check(map, "map entry");
   1143  1.144      yamt 
   1144  1.144      yamt 	/*
   1145  1.106       chs 	 * check sanity of protection code
   1146   1.10       mrg 	 */
   1147    1.1       mrg 
   1148   1.10       mrg 	if ((prot & maxprot) != prot) {
   1149  1.353  pgoyette 		UVMHIST_LOG(maphist, "<- prot. failure:  prot=%#jx, max=%#jx",
   1150   1.10       mrg 		prot, maxprot,0,0);
   1151   1.94       chs 		return EACCES;
   1152   1.10       mrg 	}
   1153    1.1       mrg 
   1154   1.10       mrg 	/*
   1155  1.106       chs 	 * figure out where to put new VM range
   1156   1.10       mrg 	 */
   1157  1.180      yamt retry:
   1158  1.234   thorpej 	if (vm_map_lock_try(map) == false) {
   1159  1.314     rmind 		if ((flags & UVM_FLAG_TRYLOCK) != 0) {
   1160   1.94       chs 			return EAGAIN;
   1161  1.106       chs 		}
   1162   1.10       mrg 		vm_map_lock(map); /* could sleep here */
   1163   1.10       mrg 	}
   1164  1.349       chs 	if (flags & UVM_FLAG_UNMAP) {
   1165  1.349       chs 		KASSERT(flags & UVM_FLAG_FIXED);
   1166  1.347       chs 		KASSERT((flags & UVM_FLAG_NOWAIT) == 0);
   1167  1.347       chs 
   1168  1.347       chs 		/*
   1169  1.347       chs 		 * Set prev_entry to what it will need to be after any existing
   1170  1.347       chs 		 * entries are removed later in uvm_map_enter().
   1171  1.347       chs 		 */
   1172  1.347       chs 
   1173  1.347       chs 		if (uvm_map_lookup_entry(map, start, &prev_entry)) {
   1174  1.347       chs 			if (start == prev_entry->start)
   1175  1.347       chs 				prev_entry = prev_entry->prev;
   1176  1.347       chs 			else
   1177  1.347       chs 				UVM_MAP_CLIP_END(map, prev_entry, start);
   1178  1.347       chs 			SAVE_HINT(map, map->hint, prev_entry);
   1179  1.347       chs 		}
   1180  1.347       chs 	} else {
   1181  1.347       chs 		prev_entry = uvm_map_findspace(map, start, size, &start,
   1182  1.347       chs 		    uobj, uoffset, align, flags);
   1183  1.347       chs 	}
   1184  1.226      yamt 	if (prev_entry == NULL) {
   1185  1.180      yamt 		unsigned int timestamp;
   1186  1.180      yamt 
   1187  1.180      yamt 		timestamp = map->timestamp;
   1188  1.353  pgoyette 		UVMHIST_LOG(maphist,"waiting va timestamp=%#jx",
   1189  1.180      yamt 			    timestamp,0,0,0);
   1190  1.180      yamt 		map->flags |= VM_MAP_WANTVA;
   1191   1.10       mrg 		vm_map_unlock(map);
   1192  1.180      yamt 
   1193  1.180      yamt 		/*
    1194  1.226      yamt 		 * try to reclaim kva and wait until someone does an unmap.
    1195  1.238        ad 		 * the locking here is fragile, so we wake up every second to
    1196  1.238        ad 		 * recheck the condition.
   1197  1.180      yamt 		 */
   1198  1.180      yamt 
   1199  1.238        ad 		mutex_enter(&map->misc_lock);
   1200  1.180      yamt 		while ((map->flags & VM_MAP_WANTVA) != 0 &&
   1201  1.180      yamt 		   map->timestamp == timestamp) {
   1202  1.226      yamt 			if ((flags & UVM_FLAG_WAITVA) == 0) {
   1203  1.238        ad 				mutex_exit(&map->misc_lock);
   1204  1.226      yamt 				UVMHIST_LOG(maphist,
   1205  1.226      yamt 				    "<- uvm_map_findspace failed!", 0,0,0,0);
   1206  1.226      yamt 				return ENOMEM;
   1207  1.226      yamt 			} else {
   1208  1.238        ad 				cv_timedwait(&map->cv, &map->misc_lock, hz);
   1209  1.226      yamt 			}
   1210  1.180      yamt 		}
   1211  1.238        ad 		mutex_exit(&map->misc_lock);
   1212  1.180      yamt 		goto retry;
   1213   1.10       mrg 	}
   1214    1.1       mrg 
   1215   1.40   thorpej #ifdef PMAP_GROWKERNEL
   1216  1.152    simonb 	/*
   1217  1.152    simonb 	 * If the kernel pmap can't map the requested space,
   1218  1.152    simonb 	 * then allocate more resources for it.
   1219  1.152    simonb 	 */
   1220  1.229      yamt 	if (map == kernel_map && uvm_maxkaddr < (start + size))
   1221  1.229      yamt 		uvm_maxkaddr = pmap_growkernel(start + size);
   1222   1.10       mrg #endif
   1223   1.10       mrg 
   1224  1.207      yamt 	UVMMAP_EVCNT_INCR(map_call);
   1225   1.10       mrg 
   1226   1.10       mrg 	/*
   1227   1.10       mrg 	 * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
   1228   1.98       chs 	 * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET.   in
   1229   1.98       chs 	 * either case we want to zero it  before storing it in the map entry
   1230   1.10       mrg 	 * (because it looks strange and confusing when debugging...)
   1231   1.98       chs 	 *
   1232   1.98       chs 	 * if uobj is not null
   1233   1.10       mrg 	 *   if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
   1234   1.10       mrg 	 *      and we do not need to change uoffset.
   1235   1.10       mrg 	 *   if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
   1236   1.10       mrg 	 *      now (based on the starting address of the map).   this case is
   1237   1.10       mrg 	 *      for kernel object mappings where we don't know the offset until
   1238   1.10       mrg 	 *      the virtual address is found (with uvm_map_findspace).   the
   1239   1.10       mrg 	 *      offset is the distance we are from the start of the map.
   1240   1.10       mrg 	 */
   1241   1.10       mrg 
   1242   1.10       mrg 	if (uobj == NULL) {
   1243   1.10       mrg 		uoffset = 0;
   1244   1.10       mrg 	} else {
   1245   1.10       mrg 		if (uoffset == UVM_UNKNOWN_OFFSET) {
   1246   1.85       chs 			KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
   1247  1.174      yamt 			uoffset = start - vm_map_min(kernel_map);
   1248   1.10       mrg 		}
   1249   1.10       mrg 	}
   1250   1.10       mrg 
   1251  1.174      yamt 	args->uma_flags = flags;
   1252  1.174      yamt 	args->uma_prev = prev_entry;
   1253  1.174      yamt 	args->uma_start = start;
   1254  1.174      yamt 	args->uma_size = size;
   1255  1.174      yamt 	args->uma_uobj = uobj;
   1256  1.174      yamt 	args->uma_uoffset = uoffset;
   1257  1.174      yamt 
   1258  1.276      matt 	UVMHIST_LOG(maphist, "<- done!", 0,0,0,0);
   1259  1.174      yamt 	return 0;
   1260  1.174      yamt }
   1261  1.174      yamt 
   1262  1.307      yamt /*
   1263  1.307      yamt  * uvm_map_enter:
   1264  1.307      yamt  *
   1265  1.307      yamt  * called with map locked.
   1266  1.307      yamt  * unlock the map before returning.
   1267  1.307      yamt  */
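
                          /*
                           * Note (illustrative): uvm_map_enter() consumes "new_entry"
                           * whether or not it ends up in the map.  Callers that must not
                           * sleep with the map locked, such as uvm_map() for pager_map
                           * above, pre-allocate it:
                           *
                           *	new_entry = uvm_mapent_alloc(map, UVM_FLAG_NOWAIT);
                           *	...
                           *	error = uvm_map_enter(map, &args, new_entry);
                           *
                           * Passing NULL lets uvm_map_enter() allocate an entry itself
                           * when no merge is possible.
                           */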
   1268  1.307      yamt 
   1269  1.174      yamt int
   1270  1.174      yamt uvm_map_enter(struct vm_map *map, const struct uvm_map_args *args,
   1271  1.174      yamt     struct vm_map_entry *new_entry)
   1272  1.174      yamt {
   1273  1.174      yamt 	struct vm_map_entry *prev_entry = args->uma_prev;
   1274  1.347       chs 	struct vm_map_entry *dead = NULL, *dead_entries = NULL;
   1275  1.174      yamt 
   1276  1.174      yamt 	const uvm_flag_t flags = args->uma_flags;
   1277  1.174      yamt 	const vm_prot_t prot = UVM_PROTECTION(flags);
   1278  1.174      yamt 	const vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
   1279  1.174      yamt 	const vm_inherit_t inherit = UVM_INHERIT(flags);
   1280  1.174      yamt 	const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ?
   1281  1.174      yamt 	    AMAP_EXTEND_NOWAIT : 0;
   1282  1.174      yamt 	const int advice = UVM_ADVICE(flags);
   1283  1.174      yamt 
   1284  1.174      yamt 	vaddr_t start = args->uma_start;
   1285  1.174      yamt 	vsize_t size = args->uma_size;
   1286  1.174      yamt 	struct uvm_object *uobj = args->uma_uobj;
   1287  1.174      yamt 	voff_t uoffset = args->uma_uoffset;
   1288  1.174      yamt 
   1289  1.174      yamt 	const int kmap = (vm_map_pmap(map) == pmap_kernel());
   1290  1.174      yamt 	int merged = 0;
   1291  1.174      yamt 	int error;
   1292  1.176      yamt 	int newetype;
   1293  1.174      yamt 
   1294  1.385     skrll 	UVMHIST_FUNC(__func__);
   1295  1.385     skrll 	UVMHIST_CALLARGS(maphist, "(map=%#jx, start=%#jx, size=%ju, flags=%#jx)",
   1296  1.353  pgoyette 	    (uintptr_t)map, start, size, flags);
   1297  1.353  pgoyette 	UVMHIST_LOG(maphist, "  uobj/offset %#jx/%jd", (uintptr_t)uobj,
   1298  1.353  pgoyette 	    uoffset,0,0);
   1299  1.174      yamt 
   1300  1.221      yamt 	KASSERT(map->hint == prev_entry); /* bimerge case assumes this */
   1301  1.307      yamt 	KASSERT(vm_map_locked_p(map));
   1302  1.349       chs 	KASSERT((flags & (UVM_FLAG_NOWAIT | UVM_FLAG_UNMAP)) !=
   1303  1.349       chs 		(UVM_FLAG_NOWAIT | UVM_FLAG_UNMAP));
   1304  1.221      yamt 
   1305  1.176      yamt 	if (uobj)
   1306  1.176      yamt 		newetype = UVM_ET_OBJ;
   1307  1.176      yamt 	else
   1308  1.176      yamt 		newetype = 0;
   1309  1.176      yamt 
   1310  1.176      yamt 	if (flags & UVM_FLAG_COPYONW) {
   1311  1.176      yamt 		newetype |= UVM_ET_COPYONWRITE;
   1312  1.176      yamt 		if ((flags & UVM_FLAG_OVERLAY) == 0)
   1313  1.176      yamt 			newetype |= UVM_ET_NEEDSCOPY;
   1314  1.176      yamt 	}
   1315  1.176      yamt 
   1316   1.10       mrg 	/*
   1317  1.349       chs 	 * For mappings with unmap, remove any old entries now.  Adding the new
   1318  1.347       chs 	 * entry cannot fail because that can only happen if UVM_FLAG_NOWAIT
   1319  1.349       chs 	 * is set, and we do not support nowait and unmap together.
   1320  1.347       chs 	 */
   1321  1.347       chs 
   1322  1.349       chs 	if (flags & UVM_FLAG_UNMAP) {
   1323  1.349       chs 		KASSERT(flags & UVM_FLAG_FIXED);
   1324  1.347       chs 		uvm_unmap_remove(map, start, start + size, &dead_entries, 0);
   1325  1.347       chs #ifdef DEBUG
   1326  1.350  christos 		struct vm_map_entry *tmp_entry __diagused;
   1327  1.350  christos 		bool rv __diagused;
   1328  1.347       chs 
   1329  1.347       chs 		rv = uvm_map_lookup_entry(map, start, &tmp_entry);
   1330  1.347       chs 		KASSERT(!rv);
   1331  1.347       chs 		KASSERTMSG(prev_entry == tmp_entry,
   1332  1.347       chs 			   "args %p prev_entry %p tmp_entry %p",
   1333  1.347       chs 			   args, prev_entry, tmp_entry);
   1334  1.347       chs #endif
   1335  1.347       chs 		SAVE_HINT(map, map->hint, prev_entry);
   1336  1.347       chs 	}
   1337  1.347       chs 
   1338  1.347       chs 	/*
   1339  1.106       chs 	 * try and insert in map by extending previous entry, if possible.
   1340   1.10       mrg 	 * XXX: we don't try and pull back the next entry.   might be useful
   1341   1.10       mrg 	 * for a stack, but we are currently allocating our stack in advance.
   1342   1.10       mrg 	 */
   1343   1.10       mrg 
   1344  1.121    atatat 	if (flags & UVM_FLAG_NOMERGE)
   1345  1.121    atatat 		goto nomerge;
   1346  1.121    atatat 
   1347  1.194      yamt 	if (prev_entry->end == start &&
   1348  1.121    atatat 	    prev_entry != &map->header &&
   1349  1.312     rmind 	    UVM_ET_ISCOMPATIBLE(prev_entry, newetype, uobj, 0,
   1350  1.194      yamt 	    prot, maxprot, inherit, advice, 0)) {
   1351  1.161      matt 
   1352   1.10       mrg 		if (uobj && prev_entry->offset +
   1353   1.10       mrg 		    (prev_entry->end - prev_entry->start) != uoffset)
   1354  1.121    atatat 			goto forwardmerge;
   1355   1.10       mrg 
   1356   1.10       mrg 		/*
   1357   1.98       chs 		 * can't extend a shared amap.  note: no need to lock amap to
   1358   1.34     chuck 		 * look at refs since we don't care about its exact value.
    1359   1.10       mrg 	 * if it is one (i.e. we have the only reference) it will stay there.
   1360   1.10       mrg 		 */
   1361   1.85       chs 
   1362   1.10       mrg 		if (prev_entry->aref.ar_amap &&
   1363   1.34     chuck 		    amap_refs(prev_entry->aref.ar_amap) != 1) {
   1364  1.121    atatat 			goto forwardmerge;
   1365   1.10       mrg 		}
   1366   1.85       chs 
   1367  1.119       chs 		if (prev_entry->aref.ar_amap) {
   1368  1.139     enami 			error = amap_extend(prev_entry, size,
   1369  1.126    bouyer 			    amapwaitflag | AMAP_EXTEND_FORWARDS);
   1370  1.174      yamt 			if (error)
   1371  1.191      yamt 				goto nomerge;
   1372  1.119       chs 		}
   1373   1.10       mrg 
   1374  1.258        ad 		if (kmap) {
   1375  1.207      yamt 			UVMMAP_EVCNT_INCR(kbackmerge);
   1376  1.258        ad 		} else {
   1377  1.207      yamt 			UVMMAP_EVCNT_INCR(ubackmerge);
   1378  1.258        ad 		}
   1379   1.10       mrg 		UVMHIST_LOG(maphist,"  starting back merge", 0, 0, 0, 0);
   1380   1.10       mrg 
   1381   1.10       mrg 		/*
   1382   1.10       mrg 		 * drop our reference to uobj since we are extending a reference
   1383   1.10       mrg 		 * that we already have (the ref count can not drop to zero).
   1384   1.10       mrg 		 */
   1385  1.119       chs 
   1386   1.10       mrg 		if (uobj && uobj->pgops->pgo_detach)
   1387   1.10       mrg 			uobj->pgops->pgo_detach(uobj);
   1388   1.10       mrg 
   1389  1.263      matt 		/*
   1390  1.263      matt 		 * Now that we've merged the entries, note that we've grown
   1391  1.263      matt 		 * and our gap has shrunk.  Then fix the tree.
   1392  1.263      matt 		 */
   1393   1.10       mrg 		prev_entry->end += size;
   1394  1.263      matt 		prev_entry->gap -= size;
   1395  1.145      yamt 		uvm_rb_fixup(map, prev_entry);
   1396  1.145      yamt 
   1397  1.222      yamt 		uvm_map_check(map, "map backmerged");
   1398   1.10       mrg 
   1399   1.10       mrg 		UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
   1400  1.121    atatat 		merged++;
   1401  1.106       chs 	}
   1402   1.10       mrg 
   1403  1.121    atatat forwardmerge:
   1404  1.194      yamt 	if (prev_entry->next->start == (start + size) &&
   1405  1.121    atatat 	    prev_entry->next != &map->header &&
   1406  1.312     rmind 	    UVM_ET_ISCOMPATIBLE(prev_entry->next, newetype, uobj, 0,
   1407  1.194      yamt 	    prot, maxprot, inherit, advice, 0)) {
   1408  1.161      matt 
   1409  1.121    atatat 		if (uobj && prev_entry->next->offset != uoffset + size)
   1410  1.121    atatat 			goto nomerge;
   1411  1.121    atatat 
   1412  1.121    atatat 		/*
   1413  1.121    atatat 		 * can't extend a shared amap.  note: no need to lock amap to
   1414  1.121    atatat 		 * look at refs since we don't care about its exact value.
    1415  1.122    atatat 		 * if it is one (i.e. we have the only reference) it will stay there.
   1416  1.122    atatat 		 *
   1417  1.122    atatat 		 * note that we also can't merge two amaps, so if we
   1418  1.122    atatat 		 * merged with the previous entry which has an amap,
   1419  1.122    atatat 		 * and the next entry also has an amap, we give up.
   1420  1.122    atatat 		 *
   1421  1.125    atatat 		 * Interesting cases:
   1422  1.125    atatat 		 * amap, new, amap -> give up second merge (single fwd extend)
   1423  1.125    atatat 		 * amap, new, none -> double forward extend (extend again here)
   1424  1.125    atatat 		 * none, new, amap -> double backward extend (done here)
   1425  1.125    atatat 		 * uobj, new, amap -> single backward extend (done here)
   1426  1.125    atatat 		 *
   1427  1.122    atatat 		 * XXX should we attempt to deal with someone refilling
   1428  1.122    atatat 		 * the deallocated region between two entries that are
   1429  1.122    atatat 		 * backed by the same amap (ie, arefs is 2, "prev" and
   1430  1.122    atatat 		 * "next" refer to it, and adding this allocation will
   1431  1.122    atatat 		 * close the hole, thus restoring arefs to 1 and
   1432  1.122    atatat 		 * deallocating the "next" vm_map_entry)?  -- @@@
   1433  1.121    atatat 		 */
   1434  1.121    atatat 
   1435  1.121    atatat 		if (prev_entry->next->aref.ar_amap &&
   1436  1.122    atatat 		    (amap_refs(prev_entry->next->aref.ar_amap) != 1 ||
   1437  1.122    atatat 		     (merged && prev_entry->aref.ar_amap))) {
   1438  1.121    atatat 			goto nomerge;
   1439  1.121    atatat 		}
   1440  1.121    atatat 
   1441  1.122    atatat 		if (merged) {
   1442  1.123    atatat 			/*
   1443  1.123    atatat 			 * Try to extend the amap of the previous entry to
   1444  1.123    atatat 			 * cover the next entry as well.  If it doesn't work
   1445  1.123    atatat 			 * just skip on, don't actually give up, since we've
   1446  1.123    atatat 			 * already completed the back merge.
   1447  1.123    atatat 			 */
   1448  1.125    atatat 			if (prev_entry->aref.ar_amap) {
   1449  1.125    atatat 				if (amap_extend(prev_entry,
   1450  1.125    atatat 				    prev_entry->next->end -
   1451  1.125    atatat 				    prev_entry->next->start,
   1452  1.126    bouyer 				    amapwaitflag | AMAP_EXTEND_FORWARDS))
   1453  1.142     enami 					goto nomerge;
   1454  1.125    atatat 			}
   1455  1.125    atatat 
   1456  1.125    atatat 			/*
   1457  1.125    atatat 			 * Try to extend the amap of the *next* entry
   1458  1.125    atatat 			 * back to cover the new allocation *and* the
   1459  1.125    atatat 			 * previous entry as well (the previous merge
   1460  1.125    atatat 			 * didn't have an amap already otherwise we
   1461  1.125    atatat 			 * wouldn't be checking here for an amap).  If
   1462  1.125    atatat 			 * it doesn't work just skip on, again, don't
   1463  1.125    atatat 			 * actually give up, since we've already
   1464  1.125    atatat 			 * completed the back merge.
   1465  1.125    atatat 			 */
   1466  1.125    atatat 			else if (prev_entry->next->aref.ar_amap) {
   1467  1.125    atatat 				if (amap_extend(prev_entry->next,
   1468  1.125    atatat 				    prev_entry->end -
   1469  1.141    atatat 				    prev_entry->start,
   1470  1.126    bouyer 				    amapwaitflag | AMAP_EXTEND_BACKWARDS))
   1471  1.142     enami 					goto nomerge;
   1472  1.125    atatat 			}
   1473  1.125    atatat 		} else {
   1474  1.125    atatat 			/*
   1475  1.125    atatat 			 * Pull the next entry's amap backwards to cover this
   1476  1.125    atatat 			 * new allocation.
   1477  1.125    atatat 			 */
   1478  1.125    atatat 			if (prev_entry->next->aref.ar_amap) {
   1479  1.125    atatat 				error = amap_extend(prev_entry->next, size,
   1480  1.126    bouyer 				    amapwaitflag | AMAP_EXTEND_BACKWARDS);
   1481  1.174      yamt 				if (error)
   1482  1.191      yamt 					goto nomerge;
   1483  1.125    atatat 			}
   1484  1.122    atatat 		}
   1485  1.122    atatat 
   1486  1.121    atatat 		if (merged) {
   1487  1.121    atatat 			if (kmap) {
   1488  1.207      yamt 				UVMMAP_EVCNT_DECR(kbackmerge);
   1489  1.207      yamt 				UVMMAP_EVCNT_INCR(kbimerge);
   1490  1.121    atatat 			} else {
   1491  1.207      yamt 				UVMMAP_EVCNT_DECR(ubackmerge);
   1492  1.207      yamt 				UVMMAP_EVCNT_INCR(ubimerge);
   1493  1.121    atatat 			}
   1494  1.122    atatat 		} else {
   1495  1.258        ad 			if (kmap) {
   1496  1.207      yamt 				UVMMAP_EVCNT_INCR(kforwmerge);
   1497  1.258        ad 			} else {
   1498  1.207      yamt 				UVMMAP_EVCNT_INCR(uforwmerge);
   1499  1.258        ad 			}
   1500  1.121    atatat 		}
   1501  1.121    atatat 		UVMHIST_LOG(maphist,"  starting forward merge", 0, 0, 0, 0);
   1502   1.10       mrg 
   1503  1.121    atatat 		/*
   1504  1.121    atatat 		 * drop our reference to uobj since we are extending a reference
   1505  1.121    atatat 		 * that we already have (the ref count can not drop to zero).
   1506  1.121    atatat 		 */
   1507  1.319       chs 		if (uobj && uobj->pgops->pgo_detach)
   1508  1.121    atatat 			uobj->pgops->pgo_detach(uobj);
   1509    1.1       mrg 
   1510  1.121    atatat 		if (merged) {
   1511  1.174      yamt 			dead = prev_entry->next;
   1512  1.121    atatat 			prev_entry->end = dead->end;
   1513  1.121    atatat 			uvm_map_entry_unlink(map, dead);
   1514  1.125    atatat 			if (dead->aref.ar_amap != NULL) {
   1515  1.125    atatat 				prev_entry->aref = dead->aref;
   1516  1.125    atatat 				dead->aref.ar_amap = NULL;
   1517  1.125    atatat 			}
   1518  1.121    atatat 		} else {
   1519  1.121    atatat 			prev_entry->next->start -= size;
   1520  1.263      matt 			if (prev_entry != &map->header) {
   1521  1.263      matt 				prev_entry->gap -= size;
   1522  1.263      matt 				KASSERT(prev_entry->gap == uvm_rb_gap(prev_entry));
   1523  1.145      yamt 				uvm_rb_fixup(map, prev_entry);
   1524  1.263      matt 			}
   1525  1.121    atatat 			if (uobj)
   1526  1.121    atatat 				prev_entry->next->offset = uoffset;
   1527  1.121    atatat 		}
   1528  1.145      yamt 
   1529  1.222      yamt 		uvm_map_check(map, "map forwardmerged");
   1530    1.1       mrg 
   1531  1.121    atatat 		UVMHIST_LOG(maphist,"<- done forwardmerge", 0, 0, 0, 0);
   1532  1.121    atatat 		merged++;
   1533  1.106       chs 	}
   1534  1.121    atatat 
   1535  1.121    atatat nomerge:
   1536  1.121    atatat 	if (!merged) {
   1537  1.121    atatat 		UVMHIST_LOG(maphist,"  allocating new map entry", 0, 0, 0, 0);
   1538  1.258        ad 		if (kmap) {
   1539  1.207      yamt 			UVMMAP_EVCNT_INCR(knomerge);
   1540  1.258        ad 		} else {
   1541  1.207      yamt 			UVMMAP_EVCNT_INCR(unomerge);
   1542  1.258        ad 		}
   1543  1.106       chs 
   1544   1.10       mrg 		/*
   1545  1.121    atatat 		 * allocate new entry and link it in.
   1546   1.10       mrg 		 */
   1547  1.106       chs 
   1548  1.121    atatat 		if (new_entry == NULL) {
   1549  1.126    bouyer 			new_entry = uvm_mapent_alloc(map,
   1550  1.127   thorpej 				(flags & UVM_FLAG_NOWAIT));
   1551  1.126    bouyer 			if (__predict_false(new_entry == NULL)) {
   1552  1.174      yamt 				error = ENOMEM;
   1553  1.174      yamt 				goto done;
   1554  1.126    bouyer 			}
   1555  1.121    atatat 		}
   1556  1.174      yamt 		new_entry->start = start;
   1557  1.121    atatat 		new_entry->end = new_entry->start + size;
   1558  1.121    atatat 		new_entry->object.uvm_obj = uobj;
   1559  1.121    atatat 		new_entry->offset = uoffset;
   1560  1.121    atatat 
   1561  1.176      yamt 		new_entry->etype = newetype;
   1562  1.121    atatat 
   1563  1.161      matt 		if (flags & UVM_FLAG_NOMERGE) {
   1564  1.161      matt 			new_entry->flags |= UVM_MAP_NOMERGE;
   1565  1.161      matt 		}
   1566  1.121    atatat 
   1567  1.121    atatat 		new_entry->protection = prot;
   1568  1.121    atatat 		new_entry->max_protection = maxprot;
   1569  1.121    atatat 		new_entry->inheritance = inherit;
   1570  1.121    atatat 		new_entry->wired_count = 0;
   1571  1.121    atatat 		new_entry->advice = advice;
   1572  1.121    atatat 		if (flags & UVM_FLAG_OVERLAY) {
   1573  1.121    atatat 
   1574  1.121    atatat 			/*
   1575  1.121    atatat 			 * to_add: for BSS we overallocate a little since we
   1576  1.121    atatat 			 * are likely to extend
   1577  1.121    atatat 			 */
   1578  1.121    atatat 
   1579  1.121    atatat 			vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
   1580  1.121    atatat 				UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
   1581  1.126    bouyer 			struct vm_amap *amap = amap_alloc(size, to_add,
   1582  1.227      yamt 			    (flags & UVM_FLAG_NOWAIT));
   1583  1.126    bouyer 			if (__predict_false(amap == NULL)) {
   1584  1.174      yamt 				error = ENOMEM;
   1585  1.174      yamt 				goto done;
   1586  1.126    bouyer 			}
   1587  1.121    atatat 			new_entry->aref.ar_pageoff = 0;
   1588  1.121    atatat 			new_entry->aref.ar_amap = amap;
   1589  1.121    atatat 		} else {
   1590  1.121    atatat 			new_entry->aref.ar_pageoff = 0;
   1591  1.121    atatat 			new_entry->aref.ar_amap = NULL;
   1592  1.121    atatat 		}
   1593  1.121    atatat 		uvm_map_entry_link(map, prev_entry, new_entry);
   1594    1.1       mrg 
   1595  1.121    atatat 		/*
   1596  1.121    atatat 		 * Update the free space hint
   1597  1.121    atatat 		 */
   1598   1.10       mrg 
   1599  1.121    atatat 		if ((map->first_free == prev_entry) &&
   1600  1.121    atatat 		    (prev_entry->end >= new_entry->start))
   1601  1.121    atatat 			map->first_free = new_entry;
   1602  1.174      yamt 
   1603  1.174      yamt 		new_entry = NULL;
   1604  1.121    atatat 	}
   1605   1.10       mrg 
   1606  1.146      yamt 	map->size += size;
   1607  1.146      yamt 
   1608   1.10       mrg 	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
   1609  1.174      yamt 
   1610  1.174      yamt 	error = 0;
   1611  1.347       chs 
   1612  1.174      yamt done:
   1613  1.311      para 	vm_map_unlock(map);
   1614  1.311      para 
   1615  1.311      para 	if (new_entry) {
   1616  1.311      para 		uvm_mapent_free(new_entry);
   1617  1.174      yamt 	}
   1618  1.174      yamt 	if (dead) {
   1619  1.174      yamt 		KDASSERT(merged);
   1620  1.311      para 		uvm_mapent_free(dead);
   1621  1.248        ad 	}
   1622  1.347       chs 	if (dead_entries)
   1623  1.347       chs 		uvm_unmap_detach(dead_entries, 0);
   1624  1.311      para 
   1625  1.174      yamt 	return error;
   1626    1.1       mrg }
   1627    1.1       mrg 
   1628    1.1       mrg /*
   1629  1.247      yamt  * uvm_map_lookup_entry_bytree: lookup an entry in tree
   1630  1.247      yamt  */
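
                          /*
                           * Worked example (illustrative): for a map holding entries
                           * [10,20) and [30,40), looking up address 15 returns true with
                           * *entry set to [10,20); looking up 25 finds no containing entry
                           * and returns false with *entry set to [10,20), the nearest
                           * entry below 25.
                           */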
   1631  1.247      yamt 
   1632  1.263      matt static inline bool
   1633  1.247      yamt uvm_map_lookup_entry_bytree(struct vm_map *map, vaddr_t address,
   1634  1.247      yamt     struct vm_map_entry **entry	/* OUT */)
   1635  1.247      yamt {
   1636  1.247      yamt 	struct vm_map_entry *prev = &map->header;
   1637  1.263      matt 	struct vm_map_entry *cur = ROOT_ENTRY(map);
   1638  1.247      yamt 
   1639  1.247      yamt 	while (cur) {
   1640  1.263      matt 		UVMMAP_EVCNT_INCR(mlk_treeloop);
   1641  1.247      yamt 		if (address >= cur->start) {
   1642  1.247      yamt 			if (address < cur->end) {
   1643  1.247      yamt 				*entry = cur;
   1644  1.247      yamt 				return true;
   1645  1.247      yamt 			}
   1646  1.247      yamt 			prev = cur;
   1647  1.263      matt 			cur = RIGHT_ENTRY(cur);
   1648  1.247      yamt 		} else
   1649  1.263      matt 			cur = LEFT_ENTRY(cur);
   1650  1.247      yamt 	}
   1651  1.247      yamt 	*entry = prev;
   1652  1.247      yamt 	return false;
   1653  1.247      yamt }
   1654  1.247      yamt 
   1655  1.247      yamt /*
   1656    1.1       mrg  * uvm_map_lookup_entry: find map entry at or before an address
   1657    1.1       mrg  *
   1658    1.1       mrg  * => map must at least be read-locked by caller
   1659    1.1       mrg  * => entry is returned in "entry"
   1660    1.1       mrg  * => return value is true if address is in the returned entry
   1661    1.1       mrg  */
   1662    1.1       mrg 
   1663  1.233   thorpej bool
   1664  1.138     enami uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
   1665  1.138     enami     struct vm_map_entry **entry	/* OUT */)
   1666    1.1       mrg {
   1667   1.99       chs 	struct vm_map_entry *cur;
   1668  1.385     skrll 	UVMHIST_FUNC(__func__);
   1669  1.385     skrll 	UVMHIST_CALLARGS(maphist,"(map=%#jx,addr=%#jx,ent=%#jx)",
   1670  1.353  pgoyette 	    (uintptr_t)map, address, (uintptr_t)entry, 0);
   1671    1.1       mrg 
   1672    1.1       mrg 	/*
   1673  1.373        ad 	 * make a quick check to see if we are already looking at
   1674  1.373        ad 	 * the entry we want (which is usually the case).  note also
   1675  1.373        ad 	 * that we don't need to save the hint here...  it is the
   1676  1.373        ad 	 * same hint (unless we are at the header, in which case the
   1677  1.373        ad 	 * hint didn't buy us anything anyway).
   1678    1.1       mrg 	 */
   1679    1.1       mrg 
   1680    1.1       mrg 	cur = map->hint;
   1681  1.207      yamt 	UVMMAP_EVCNT_INCR(mlk_call);
   1682  1.373        ad 	if (cur != &map->header &&
   1683  1.373        ad 	    address >= cur->start && cur->end > address) {
   1684  1.373        ad 		UVMMAP_EVCNT_INCR(mlk_hint);
   1685  1.373        ad 		*entry = cur;
   1686  1.373        ad 		UVMHIST_LOG(maphist,"<- got it via hint (%#jx)",
   1687  1.373        ad 		    (uintptr_t)cur, 0, 0, 0);
   1688  1.373        ad 		uvm_mapent_check(*entry);
   1689  1.373        ad 		return (true);
   1690  1.144      yamt 	}
   1691  1.222      yamt 	uvm_map_check(map, __func__);
   1692  1.144      yamt 
   1693    1.1       mrg 	/*
   1694  1.373        ad 	 * lookup in the tree.
   1695    1.1       mrg 	 */
   1696    1.1       mrg 
   1697  1.373        ad 	UVMMAP_EVCNT_INCR(mlk_tree);
   1698  1.373        ad 	if (__predict_true(uvm_map_lookup_entry_bytree(map, address, entry))) {
   1699  1.373        ad 		SAVE_HINT(map, map->hint, *entry);
   1700  1.373        ad 		UVMHIST_LOG(maphist,"<- search got it (%#jx)",
   1701  1.373        ad 		    (uintptr_t)cur, 0, 0, 0);
   1702  1.373        ad 		KDASSERT((*entry)->start <= address);
   1703  1.373        ad 		KDASSERT(address < (*entry)->end);
   1704  1.373        ad 		uvm_mapent_check(*entry);
   1705  1.373        ad 		return (true);
   1706  1.373        ad 	}
   1707    1.1       mrg 
   1708  1.374        ad 	SAVE_HINT(map, map->hint, *entry);
   1709    1.1       mrg 	UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
   1710  1.147      yamt 	KDASSERT((*entry) == &map->header || (*entry)->end <= address);
   1711  1.144      yamt 	KDASSERT((*entry)->next == &map->header ||
   1712  1.144      yamt 	    address < (*entry)->next->start);
   1713  1.234   thorpej 	return (false);
   1714    1.1       mrg }
   1715    1.1       mrg 
   1716    1.1       mrg /*
    1717  1.140     enami  * See if the range between start and start + length fits in the gap
    1718  1.140     enami  * between entry->end and entry->next->start.  Returns 1 if it fits,
    1719  1.140     enami  * 0 if it doesn't fit, and -1 if the address wraps around.
   1720  1.140     enami  */
   1721  1.203   thorpej static int
   1722  1.232      yamt uvm_map_space_avail(vaddr_t *start, vsize_t length, voff_t uoffset,
   1723  1.304      matt     vsize_t align, int flags, int topdown, struct vm_map_entry *entry)
   1724  1.140     enami {
   1725  1.140     enami 	vaddr_t end;
   1726  1.140     enami 
   1727  1.140     enami #ifdef PMAP_PREFER
   1728  1.140     enami 	/*
   1729  1.140     enami 	 * push start address forward as needed to avoid VAC alias problems.
   1730  1.140     enami 	 * we only do this if a valid offset is specified.
   1731  1.140     enami 	 */
   1732  1.140     enami 
   1733  1.140     enami 	if (uoffset != UVM_UNKNOWN_OFFSET)
   1734  1.182    atatat 		PMAP_PREFER(uoffset, start, length, topdown);
   1735  1.140     enami #endif
   1736  1.304      matt 	if ((flags & UVM_FLAG_COLORMATCH) != 0) {
   1737  1.304      matt 		KASSERT(align < uvmexp.ncolors);
   1738  1.304      matt 		if (uvmexp.ncolors > 1) {
   1739  1.304      matt 			const u_int colormask = uvmexp.colormask;
   1740  1.304      matt 			const u_int colorsize = colormask + 1;
   1741  1.304      matt 			vaddr_t hint = atop(*start);
   1742  1.304      matt 			const u_int color = hint & colormask;
   1743  1.304      matt 			if (color != align) {
   1744  1.304      matt 				hint -= color;	/* adjust to color boundary */
   1745  1.304      matt 				KASSERT((hint & colormask) == 0);
   1746  1.304      matt 				if (topdown) {
   1747  1.304      matt 					if (align > color)
   1748  1.304      matt 						hint -= colorsize;
   1749  1.304      matt 				} else {
   1750  1.304      matt 					if (align < color)
   1751  1.304      matt 						hint += colorsize;
   1752  1.304      matt 				}
   1753  1.304      matt 				*start = ptoa(hint + align); /* adjust to color */
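                          				/*
                          				 * Worked example (illustrative):
                          				 * with ncolors = 4, a hint on
                          				 * page 13 (color 1) and a
                          				 * requested color of 3, bottom-up
                          				 * yields page 15 and topdown
                          				 * yields page 11; both have color
                          				 * 3 and stay on the correct side
                          				 * of the original hint.
                          				 */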
   1754  1.304      matt 			}
   1755  1.304      matt 		}
   1756  1.365       rin 	} else {
   1757  1.365       rin 		KASSERT(powerof2(align));
   1758  1.365       rin 		uvm_map_align_va(start, align, topdown);
   1759  1.140     enami 		/*
   1760  1.140     enami 		 * XXX Should we PMAP_PREFER() here again?
   1761  1.182    atatat 		 * eh...i think we're okay
   1762  1.140     enami 		 */
   1763  1.140     enami 	}
   1764  1.140     enami 
   1765  1.140     enami 	/*
   1766  1.140     enami 	 * Find the end of the proposed new region.  Be sure we didn't
   1767  1.140     enami 	 * wrap around the address; if so, we lose.  Otherwise, if the
   1768  1.140     enami 	 * proposed new region fits before the next entry, we win.
   1769  1.140     enami 	 */
   1770  1.140     enami 
   1771  1.140     enami 	end = *start + length;
   1772  1.140     enami 	if (end < *start)
   1773  1.140     enami 		return (-1);
   1774  1.140     enami 
   1775  1.140     enami 	if (entry->next->start >= end && *start >= entry->end)
   1776  1.140     enami 		return (1);
   1777  1.140     enami 
   1778  1.140     enami 	return (0);
   1779  1.140     enami }
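
                          /*
                           * Worked example (illustrative, assuming no alignment or color
                           * adjustment moves *start): for a gap with entry->end = 0x2000
                           * and entry->next->start = 0x6000, *start = 0x3000 with
                           * length = 0x2000 gives end = 0x5000 and fits (returns 1);
                           * length = 0x4000 would give end = 0x7000 and not fit
                           * (returns 0).
                           */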
   1780  1.140     enami 
   1781  1.140     enami /*
   1782    1.1       mrg  * uvm_map_findspace: find "length" sized space in "map".
   1783    1.1       mrg  *
   1784  1.167  junyoung  * => "hint" is a hint about where we want it, unless UVM_FLAG_FIXED is
   1785  1.167  junyoung  *	set in "flags" (in which case we insist on using "hint").
   1786    1.1       mrg  * => "result" is VA returned
   1787    1.1       mrg  * => uobj/uoffset are to be used to handle VAC alignment, if required
   1788  1.167  junyoung  * => if "align" is non-zero, we attempt to align to that value.
   1789    1.1       mrg  * => caller must at least have read-locked map
   1790    1.1       mrg  * => returns NULL on failure, or pointer to prev. map entry if success
   1791    1.1       mrg  * => note this is a cross between the old vm_map_findspace and vm_map_find
   1792    1.1       mrg  */
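
                          /*
                           * Illustrative sketch of a non-fixed call (identifiers assumed):
                           *
                           *	struct vm_map_entry *prev;
                           *	vaddr_t va;
                           *	prev = uvm_map_findspace(map, hint, PAGE_SIZE, &va,
                           *	    NULL, UVM_UNKNOWN_OFFSET, 0, 0);
                           *
                           * On success "va" holds the chosen address and "prev" is the
                           * entry the new mapping would follow; NULL means no usable space.
                           */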
   1793    1.1       mrg 
   1794   1.99       chs struct vm_map_entry *
   1795  1.138     enami uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length,
   1796  1.232      yamt     vaddr_t *result /* OUT */, struct uvm_object *uobj, voff_t uoffset,
   1797  1.138     enami     vsize_t align, int flags)
   1798    1.1       mrg {
   1799  1.140     enami 	struct vm_map_entry *entry;
   1800  1.144      yamt 	struct vm_map_entry *child, *prev, *tmp;
   1801  1.326    martin 	vaddr_t orig_hint __diagused;
   1802  1.131    atatat 	const int topdown = map->flags & VM_MAP_TOPDOWN;
   1803  1.385     skrll 	UVMHIST_FUNC(__func__);
   1804  1.391     skrll 	UVMHIST_CALLARGS(maphist, "(map=%#jx, hint=%#jx, len=%ju, flags=%#jx...",
   1805  1.385     skrll 	    (uintptr_t)map, hint, length, flags);
   1806  1.391     skrll 	UVMHIST_LOG(maphist, " uobj=%#jx, uoffset=%#jx, align=%#jx)",
   1807  1.391     skrll 	    (uintptr_t)uobj, uoffset, align, 0);
   1808    1.1       mrg 
   1809  1.365       rin 	KASSERT((flags & UVM_FLAG_COLORMATCH) != 0 || powerof2(align));
   1810  1.304      matt 	KASSERT((flags & UVM_FLAG_COLORMATCH) == 0 || align < uvmexp.ncolors);
   1811   1.85       chs 	KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);
   1812   1.81   thorpej 
   1813  1.222      yamt 	uvm_map_check(map, "map_findspace entry");
   1814  1.144      yamt 
   1815   1.81   thorpej 	/*
   1816  1.395  riastrad 	 * Clamp the hint to the VM map's min/max address, and remmeber
   1817  1.395  riastrad 	 * the clamped original hint.  Remember the original hint,
   1818  1.395  riastrad 	 * clamped to the min/max address.  If we are aligning, then we
   1819  1.395  riastrad 	 * may have to try again with no alignment constraint if we
   1820  1.395  riastrad 	 * fail the first time.
   1821  1.395  riastrad 	 *
   1822  1.395  riastrad 	 * We use the original hint to verify later that the search has
   1823  1.395  riastrad 	 * been monotonic -- that is, nonincreasing or nondecreasing,
   1824  1.395  riastrad 	 * according to topdown or !topdown respectively.  But the
   1825  1.395  riastrad 	 * clamping is not monotonic.
   1826   1.81   thorpej 	 */
   1827  1.184       chs 	if (hint < vm_map_min(map)) {	/* check ranges ... */
   1828   1.81   thorpej 		if (flags & UVM_FLAG_FIXED) {
   1829    1.1       mrg 			UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
   1830  1.139     enami 			return (NULL);
   1831    1.1       mrg 		}
   1832  1.184       chs 		hint = vm_map_min(map);
   1833    1.1       mrg 	}
   1834  1.184       chs 	if (hint > vm_map_max(map)) {
   1835  1.353  pgoyette 		UVMHIST_LOG(maphist,"<- VA %#jx > range [%#jx->%#jx]",
   1836  1.184       chs 		    hint, vm_map_min(map), vm_map_max(map), 0);
   1837  1.139     enami 		return (NULL);
   1838    1.1       mrg 	}
   1839  1.395  riastrad 	orig_hint = hint;
   1840    1.1       mrg 
   1841  1.391     skrll 	UVMHIST_LOG(maphist,"<- VA %#jx vs range [%#jx->%#jx]",
   1842  1.391     skrll 	    hint, vm_map_min(map), vm_map_max(map), 0);
   1843  1.391     skrll 
   1844    1.1       mrg 	/*
   1845  1.365       rin 	 * hint may not be aligned properly; we need round up or down it
   1846  1.365       rin 	 * before proceeding further.
   1847  1.365       rin 	 */
   1848  1.366       rin 	if ((flags & UVM_FLAG_COLORMATCH) == 0)
   1849  1.366       rin 		uvm_map_align_va(&hint, align, topdown);
   1850  1.365       rin 
   1851  1.391     skrll 	UVMHIST_LOG(maphist,"<- VA %#jx vs range [%#jx->%#jx]",
   1852  1.391     skrll 	    hint, vm_map_min(map), vm_map_max(map), 0);
   1853  1.365       rin 	/*
   1854    1.1       mrg 	 * Look for the first possible address; if there's already
   1855    1.1       mrg 	 * something at this address, we have to start after it.
   1856    1.1       mrg 	 */
   1857    1.1       mrg 
   1858  1.131    atatat 	/*
   1859  1.131    atatat 	 * @@@: there are four, no, eight cases to consider.
   1860  1.131    atatat 	 *
   1861  1.131    atatat 	 * 0: found,     fixed,     bottom up -> fail
   1862  1.131    atatat 	 * 1: found,     fixed,     top down  -> fail
   1863  1.140     enami 	 * 2: found,     not fixed, bottom up -> start after entry->end,
   1864  1.140     enami 	 *                                       loop up
   1865  1.140     enami 	 * 3: found,     not fixed, top down  -> start before entry->start,
   1866  1.140     enami 	 *                                       loop down
   1867  1.140     enami 	 * 4: not found, fixed,     bottom up -> check entry->next->start, fail
   1868  1.140     enami 	 * 5: not found, fixed,     top down  -> check entry->next->start, fail
   1869  1.140     enami 	 * 6: not found, not fixed, bottom up -> check entry->next->start,
   1870  1.140     enami 	 *                                       loop up
   1871  1.140     enami 	 * 7: not found, not fixed, top down  -> check entry->next->start,
   1872  1.140     enami 	 *                                       loop down
   1873  1.131    atatat 	 *
   1874  1.131    atatat 	 * as you can see, it reduces to roughly five cases, and that
   1875  1.131    atatat 	 * adding top down mapping only adds one unique case (without
   1876  1.131    atatat 	 * it, there would be four cases).
   1877  1.131    atatat 	 */
   1878  1.131    atatat 
   1879  1.396  riastrad 	if ((flags & UVM_FLAG_FIXED) == 0 &&
   1880  1.396  riastrad 	    hint == (topdown ? vm_map_max(map) : vm_map_min(map))) {
   1881  1.396  riastrad 		/*
   1882  1.396  riastrad 		 * The uvm_map_findspace algorithm is monotonic -- for
   1883  1.396  riastrad 		 * topdown VM it starts with a high hint and returns a
   1884  1.396  riastrad 		 * lower free address; for !topdown VM it starts with a
   1885  1.396  riastrad 		 * low hint and returns a higher free address.  As an
   1886  1.396  riastrad 		 * optimization, start with the first (highest for
   1887  1.396  riastrad 		 * topdown, lowest for !topdown) free address.
   1888  1.396  riastrad 		 *
   1889  1.396  riastrad 		 * XXX This `optimization' probably doesn't actually do
   1890  1.396  riastrad 		 * much in practice unless userland explicitly passes
   1891  1.396  riastrad 		 * the VM map's minimum or maximum address, which
   1892  1.396  riastrad 		 * varies from machine to machine (VM_MAX/MIN_ADDRESS,
   1893  1.396  riastrad 		 * e.g. 0x7fbfdfeff000 on amd64 but 0xfffffffff000 on
   1894  1.396  riastrad 		 * aarch64) and may vary according to other factors
   1895  1.396  riastrad 		 * like sysctl vm.user_va0_disable.  In particular, if
   1896  1.396  riastrad 		 * the user specifies 0 as a hint to mmap, then mmap
   1897  1.396  riastrad 		 * will choose a default address which is usually _not_
   1898  1.396  riastrad 		 * VM_MAX/MIN_ADDRESS but something else instead like
   1899  1.396  riastrad 		 * VM_MAX_ADDRESS - stack size - guard page overhead,
   1900  1.396  riastrad 		 * in which case this branch is never hit.
   1901  1.396  riastrad 		 *
   1902  1.396  riastrad 		 * In fact, this branch appears to have been broken for
   1903  1.396  riastrad 		 * two decades between when topdown was introduced in
   1904  1.396  riastrad 		 * ~2003 and when it was adapted to handle the topdown
   1905  1.396  riastrad 		 * case without violating the monotonicity assertion in
   1906  1.396  riastrad 		 * 2022.  Maybe Someone^TM should either ditch the
   1907  1.396  riastrad 		 * optimization or find a better way to do it.
   1908  1.396  riastrad 		 */
   1909  1.140     enami 		entry = map->first_free;
   1910    1.1       mrg 	} else {
   1911  1.140     enami 		if (uvm_map_lookup_entry(map, hint, &entry)) {
   1912    1.1       mrg 			/* "hint" address already in use ... */
   1913   1.81   thorpej 			if (flags & UVM_FLAG_FIXED) {
   1914  1.140     enami 				UVMHIST_LOG(maphist, "<- fixed & VA in use",
   1915   1.10       mrg 				    0, 0, 0, 0);
   1916  1.139     enami 				return (NULL);
   1917    1.1       mrg 			}
   1918  1.140     enami 			if (topdown)
   1919  1.140     enami 				/* Start from lower gap. */
   1920  1.140     enami 				entry = entry->prev;
   1921  1.140     enami 		} else if (flags & UVM_FLAG_FIXED) {
   1922  1.140     enami 			if (entry->next->start >= hint + length &&
   1923  1.140     enami 			    hint + length > hint)
   1924  1.140     enami 				goto found;
   1925  1.140     enami 
    1926  1.140     enami 			/* "hint" address is a gap but too small */
   1927  1.140     enami 			UVMHIST_LOG(maphist, "<- fixed mapping failed",
   1928  1.140     enami 			    0, 0, 0, 0);
   1929  1.140     enami 			return (NULL); /* only one shot at it ... */
   1930  1.140     enami 		} else {
   1931  1.140     enami 			/*
   1932  1.140     enami 			 * See if given hint fits in this gap.
   1933  1.140     enami 			 */
   1934  1.140     enami 			switch (uvm_map_space_avail(&hint, length,
   1935  1.304      matt 			    uoffset, align, flags, topdown, entry)) {
   1936  1.140     enami 			case 1:
   1937  1.140     enami 				goto found;
   1938  1.140     enami 			case -1:
   1939  1.140     enami 				goto wraparound;
   1940  1.140     enami 			}
   1941  1.140     enami 
   1942  1.148      yamt 			if (topdown) {
   1943  1.140     enami 				/*
   1944  1.140     enami 				 * Still there is a chance to fit
   1945  1.140     enami 				 * if hint > entry->end.
   1946  1.140     enami 				 */
   1947  1.148      yamt 			} else {
   1948  1.168  junyoung 				/* Start from higher gap. */
   1949  1.148      yamt 				entry = entry->next;
   1950  1.148      yamt 				if (entry == &map->header)
   1951  1.148      yamt 					goto notfound;
   1952  1.140     enami 				goto nextgap;
   1953  1.148      yamt 			}
   1954    1.1       mrg 		}
   1955    1.1       mrg 	}
   1956    1.1       mrg 
   1957    1.1       mrg 	/*
    1958  1.144      yamt 	 * Note that the UVM_FLAG_FIXED case has already been handled.
   1959  1.144      yamt 	 */
   1960  1.144      yamt 	KDASSERT((flags & UVM_FLAG_FIXED) == 0);
   1961  1.144      yamt 
   1962  1.144      yamt 	/* Try to find the space in the red-black tree */
   1963  1.144      yamt 
   1964  1.144      yamt 	/* Check slot before any entry */
   1965  1.144      yamt 	hint = topdown ? entry->next->start - length : entry->end;
   1966  1.304      matt 	switch (uvm_map_space_avail(&hint, length, uoffset, align, flags,
   1967  1.144      yamt 	    topdown, entry)) {
   1968  1.144      yamt 	case 1:
   1969  1.144      yamt 		goto found;
   1970  1.144      yamt 	case -1:
   1971  1.144      yamt 		goto wraparound;
   1972  1.144      yamt 	}
   1973  1.144      yamt 
   1974  1.144      yamt nextgap:
   1975  1.148      yamt 	KDASSERT((flags & UVM_FLAG_FIXED) == 0);
   1976  1.144      yamt 	/* If there is not enough space in the whole tree, we fail */
   1977  1.263      matt 	tmp = ROOT_ENTRY(map);
   1978  1.263      matt 	if (tmp == NULL || tmp->maxgap < length)
   1979  1.144      yamt 		goto notfound;
   1980  1.144      yamt 
   1981  1.144      yamt 	prev = NULL; /* previous candidate */
   1982  1.144      yamt 
   1983  1.144      yamt 	/* Find an entry close to hint that has enough space */
   1984  1.144      yamt 	for (; tmp;) {
   1985  1.263      matt 		KASSERT(tmp->next->start == tmp->end + tmp->gap);
   1986  1.144      yamt 		if (topdown) {
   1987  1.144      yamt 			if (tmp->next->start < hint + length &&
   1988  1.144      yamt 			    (prev == NULL || tmp->end > prev->end)) {
   1989  1.263      matt 				if (tmp->gap >= length)
   1990  1.144      yamt 					prev = tmp;
   1991  1.263      matt 				else if ((child = LEFT_ENTRY(tmp)) != NULL
   1992  1.263      matt 				    && child->maxgap >= length)
   1993  1.144      yamt 					prev = tmp;
   1994  1.144      yamt 			}
   1995  1.144      yamt 		} else {
   1996  1.144      yamt 			if (tmp->end >= hint &&
   1997  1.144      yamt 			    (prev == NULL || tmp->end < prev->end)) {
   1998  1.263      matt 				if (tmp->gap >= length)
   1999  1.144      yamt 					prev = tmp;
   2000  1.263      matt 				else if ((child = RIGHT_ENTRY(tmp)) != NULL
   2001  1.263      matt 				    && child->maxgap >= length)
   2002  1.144      yamt 					prev = tmp;
   2003  1.144      yamt 			}
   2004  1.144      yamt 		}
   2005  1.144      yamt 		if (tmp->next->start < hint + length)
   2006  1.263      matt 			child = RIGHT_ENTRY(tmp);
   2007  1.144      yamt 		else if (tmp->end > hint)
   2008  1.263      matt 			child = LEFT_ENTRY(tmp);
   2009  1.144      yamt 		else {
   2010  1.263      matt 			if (tmp->gap >= length)
   2011  1.144      yamt 				break;
   2012  1.144      yamt 			if (topdown)
   2013  1.263      matt 				child = LEFT_ENTRY(tmp);
   2014  1.144      yamt 			else
   2015  1.263      matt 				child = RIGHT_ENTRY(tmp);
   2016  1.144      yamt 		}
   2017  1.263      matt 		if (child == NULL || child->maxgap < length)
   2018  1.144      yamt 			break;
   2019  1.144      yamt 		tmp = child;
   2020  1.144      yamt 	}
   2021  1.144      yamt 
   2022  1.148      yamt 	if (tmp != NULL && tmp->start < hint && hint < tmp->next->start) {
   2023  1.164  junyoung 		/*
    2024  1.144      yamt 		 * Check if the entry that we found satisfies the
   2025  1.144      yamt 		 * space requirement
   2026  1.144      yamt 		 */
   2027  1.148      yamt 		if (topdown) {
   2028  1.149      yamt 			if (hint > tmp->next->start - length)
   2029  1.149      yamt 				hint = tmp->next->start - length;
   2030  1.148      yamt 		} else {
   2031  1.149      yamt 			if (hint < tmp->end)
   2032  1.149      yamt 				hint = tmp->end;
   2033  1.148      yamt 		}
   2034  1.148      yamt 		switch (uvm_map_space_avail(&hint, length, uoffset, align,
   2035  1.304      matt 		    flags, topdown, tmp)) {
   2036  1.148      yamt 		case 1:
   2037  1.144      yamt 			entry = tmp;
   2038  1.144      yamt 			goto found;
   2039  1.148      yamt 		case -1:
   2040  1.148      yamt 			goto wraparound;
   2041  1.144      yamt 		}
   2042  1.263      matt 		if (tmp->gap >= length)
   2043  1.144      yamt 			goto listsearch;
   2044  1.144      yamt 	}
   2045  1.144      yamt 	if (prev == NULL)
   2046  1.144      yamt 		goto notfound;
   2047  1.144      yamt 
   2048  1.148      yamt 	if (topdown) {
   2049  1.150      yamt 		KASSERT(orig_hint >= prev->next->start - length ||
   2050  1.148      yamt 		    prev->next->start - length > prev->next->start);
   2051  1.148      yamt 		hint = prev->next->start - length;
   2052  1.148      yamt 	} else {
   2053  1.150      yamt 		KASSERT(orig_hint <= prev->end);
   2054  1.148      yamt 		hint = prev->end;
   2055  1.148      yamt 	}
   2056  1.148      yamt 	switch (uvm_map_space_avail(&hint, length, uoffset, align,
   2057  1.304      matt 	    flags, topdown, prev)) {
   2058  1.148      yamt 	case 1:
   2059  1.144      yamt 		entry = prev;
   2060  1.144      yamt 		goto found;
   2061  1.148      yamt 	case -1:
   2062  1.148      yamt 		goto wraparound;
   2063  1.144      yamt 	}
   2064  1.263      matt 	if (prev->gap >= length)
   2065  1.144      yamt 		goto listsearch;
   2066  1.164  junyoung 
   2067  1.144      yamt 	if (topdown)
   2068  1.263      matt 		tmp = LEFT_ENTRY(prev);
   2069  1.144      yamt 	else
   2070  1.263      matt 		tmp = RIGHT_ENTRY(prev);
   2071  1.144      yamt 	for (;;) {
   2072  1.263      matt 		KASSERT(tmp && tmp->maxgap >= length);
   2073  1.144      yamt 		if (topdown)
   2074  1.263      matt 			child = RIGHT_ENTRY(tmp);
   2075  1.144      yamt 		else
   2076  1.263      matt 			child = LEFT_ENTRY(tmp);
   2077  1.263      matt 		if (child && child->maxgap >= length) {
   2078  1.144      yamt 			tmp = child;
   2079  1.144      yamt 			continue;
   2080  1.144      yamt 		}
   2081  1.263      matt 		if (tmp->gap >= length)
   2082  1.144      yamt 			break;
   2083  1.144      yamt 		if (topdown)
   2084  1.263      matt 			tmp = LEFT_ENTRY(tmp);
   2085  1.144      yamt 		else
   2086  1.263      matt 			tmp = RIGHT_ENTRY(tmp);
   2087  1.144      yamt 	}
   2088  1.164  junyoung 
   2089  1.148      yamt 	if (topdown) {
   2090  1.150      yamt 		KASSERT(orig_hint >= tmp->next->start - length ||
   2091  1.148      yamt 		    tmp->next->start - length > tmp->next->start);
   2092  1.148      yamt 		hint = tmp->next->start - length;
   2093  1.148      yamt 	} else {
   2094  1.150      yamt 		KASSERT(orig_hint <= tmp->end);
   2095  1.148      yamt 		hint = tmp->end;
   2096  1.148      yamt 	}
   2097  1.144      yamt 	switch (uvm_map_space_avail(&hint, length, uoffset, align,
   2098  1.304      matt 	    flags, topdown, tmp)) {
   2099  1.144      yamt 	case 1:
   2100  1.144      yamt 		entry = tmp;
   2101  1.144      yamt 		goto found;
   2102  1.148      yamt 	case -1:
   2103  1.148      yamt 		goto wraparound;
   2104  1.144      yamt 	}
   2105  1.144      yamt 
   2106  1.164  junyoung 	/*
   2107  1.144      yamt 	 * The tree fails to find an entry because of offset or alignment
   2108  1.144      yamt 	 * restrictions.  Search the list instead.
   2109  1.144      yamt 	 */
   2110  1.144      yamt  listsearch:
   2111  1.144      yamt 	/*
   2112    1.1       mrg 	 * Look through the rest of the map, trying to fit a new region in
   2113    1.1       mrg 	 * the gap between existing regions, or after the very last region.
   2114  1.140     enami 	 * note: entry->end = base VA of current gap,
   2115  1.140     enami 	 *	 entry->next->start = VA of end of current gap
   2116    1.1       mrg 	 */
   2117   1.99       chs 
   2118  1.140     enami 	for (;;) {
   2119  1.140     enami 		/* Update hint for current gap. */
   2120  1.140     enami 		hint = topdown ? entry->next->start - length : entry->end;
   2121  1.140     enami 
   2122  1.140     enami 		/* See if it fits. */
   2123  1.140     enami 		switch (uvm_map_space_avail(&hint, length, uoffset, align,
   2124  1.304      matt 		    flags, topdown, entry)) {
   2125  1.140     enami 		case 1:
   2126  1.140     enami 			goto found;
   2127  1.140     enami 		case -1:
   2128  1.140     enami 			goto wraparound;
   2129  1.140     enami 		}
   2130  1.140     enami 
   2131  1.140     enami 		/* Advance to next/previous gap */
   2132  1.140     enami 		if (topdown) {
   2133  1.140     enami 			if (entry == &map->header) {
   2134  1.140     enami 				UVMHIST_LOG(maphist, "<- failed (off start)",
   2135  1.140     enami 				    0,0,0,0);
   2136  1.140     enami 				goto notfound;
   2137  1.134      matt 			}
   2138  1.140     enami 			entry = entry->prev;
   2139  1.140     enami 		} else {
   2140  1.140     enami 			entry = entry->next;
   2141  1.140     enami 			if (entry == &map->header) {
   2142  1.140     enami 				UVMHIST_LOG(maphist, "<- failed (off end)",
   2143   1.81   thorpej 				    0,0,0,0);
   2144  1.140     enami 				goto notfound;
   2145   1.81   thorpej 			}
   2146    1.1       mrg 		}
   2147    1.1       mrg 	}
   2148  1.140     enami 
   2149  1.140     enami  found:
   2150   1.82   thorpej 	SAVE_HINT(map, map->hint, entry);
   2151    1.1       mrg 	*result = hint;
   2152  1.353  pgoyette 	UVMHIST_LOG(maphist,"<- got it!  (result=%#jx)", hint, 0,0,0);
   2153  1.386     skrll 	KASSERTMSG( topdown || hint >= orig_hint, "hint: %#jx, orig_hint: %#jx",
   2154  1.339    martin 	    (uintmax_t)hint, (uintmax_t)orig_hint);
   2155  1.386     skrll 	KASSERTMSG(!topdown || hint <= orig_hint, "hint: %#jx, orig_hint: %#jx",
   2156  1.339    martin 	    (uintmax_t)hint, (uintmax_t)orig_hint);
   2157  1.144      yamt 	KASSERT(entry->end <= hint);
   2158  1.144      yamt 	KASSERT(hint + length <= entry->next->start);
   2159    1.1       mrg 	return (entry);
   2160  1.140     enami 
   2161  1.140     enami  wraparound:
   2162  1.140     enami 	UVMHIST_LOG(maphist, "<- failed (wrap around)", 0,0,0,0);
   2163  1.140     enami 
   2164  1.165      yamt 	return (NULL);
   2165  1.165      yamt 
   2166  1.140     enami  notfound:
   2167  1.165      yamt 	UVMHIST_LOG(maphist, "<- failed (notfound)", 0,0,0,0);
   2168  1.165      yamt 
   2169  1.140     enami 	return (NULL);
   2170    1.1       mrg }
   2171    1.1       mrg 
   2172    1.1       mrg /*
   2173    1.1       mrg  *   U N M A P   -   m a i n   h e l p e r   f u n c t i o n s
   2174    1.1       mrg  */
   2175    1.1       mrg 
   2176    1.1       mrg /*
   2177    1.1       mrg  * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "stop")
   2178    1.1       mrg  *
   2179   1.98       chs  * => caller must check alignment and size
   2180    1.1       mrg  * => map must be locked by caller
    2181    1.1       mrg  * => we return a list of map entries that we've removed from the map
   2182    1.1       mrg  *    in "entry_list"
   2183    1.1       mrg  */
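
                          /*
                           * Illustrative sketch of the usual calling pattern (cf.
                           * uvm_unmap()): remove entries with the map locked, then drop
                           * the references with the map unlocked.
                           *
                           *	struct vm_map_entry *dead;
                           *	vm_map_lock(map);
                           *	uvm_unmap_remove(map, start, end, &dead, 0);
                           *	vm_map_unlock(map);
                           *	if (dead != NULL)
                           *		uvm_unmap_detach(dead, 0);
                           */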
   2184    1.1       mrg 
   2185   1.94       chs void
   2186  1.138     enami uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
   2187  1.311      para     struct vm_map_entry **entry_list /* OUT */, int flags)
   2188   1.10       mrg {
   2189   1.99       chs 	struct vm_map_entry *entry, *first_entry, *next;
   2190   1.24       eeh 	vaddr_t len;
   2191  1.385     skrll 	UVMHIST_FUNC(__func__);
   2192  1.385     skrll 	UVMHIST_CALLARGS(maphist,"(map=%#jx, start=%#jx, end=%#jx)",
   2193  1.353  pgoyette 	    (uintptr_t)map, start, end, 0);
   2194   1.10       mrg 	VM_MAP_RANGE_CHECK(map, start, end);
   2195   1.10       mrg 
   2196  1.222      yamt 	uvm_map_check(map, "unmap_remove entry");
   2197  1.144      yamt 
   2198   1.10       mrg 	/*
   2199   1.10       mrg 	 * find first entry
   2200   1.10       mrg 	 */
   2201   1.99       chs 
   2202  1.234   thorpej 	if (uvm_map_lookup_entry(map, start, &first_entry) == true) {
   2203   1.29     chuck 		/* clip and go... */
   2204   1.10       mrg 		entry = first_entry;
   2205  1.311      para 		UVM_MAP_CLIP_START(map, entry, start);
   2206   1.10       mrg 		/* critical!  prevents stale hint */
   2207   1.82   thorpej 		SAVE_HINT(map, entry, entry->prev);
   2208   1.10       mrg 	} else {
   2209   1.10       mrg 		entry = first_entry->next;
   2210   1.10       mrg 	}
   2211   1.10       mrg 
   2212   1.10       mrg 	/*
   2213  1.371        ad 	 * save the free space hint
   2214   1.10       mrg 	 */
   2215   1.10       mrg 
   2216  1.220      yamt 	if (map->first_free != &map->header && map->first_free->start >= start)
   2217   1.10       mrg 		map->first_free = entry->prev;
   2218   1.10       mrg 
   2219   1.10       mrg 	/*
   2220   1.10       mrg 	 * note: we now re-use first_entry for a different task.  we remove
   2221   1.10       mrg 	 * a number of map entries from the map and save them in a linked
   2222   1.10       mrg 	 * list headed by "first_entry".  once we remove them from the map
   2223   1.10       mrg 	 * the caller should unlock the map and drop the references to the
    2224    1.1       mrg 	 * backing objects [cf. uvm_unmap_detach].  the objective is to
   2225  1.100       wiz 	 * separate unmapping from reference dropping.  why?
   2226   1.10       mrg 	 *   [1] the map has to be locked for unmapping
   2227   1.10       mrg 	 *   [2] the map need not be locked for reference dropping
   2228   1.10       mrg 	 *   [3] dropping references may trigger pager I/O, and if we hit
   2229   1.10       mrg 	 *       a pager that does synchronous I/O we may have to wait for it.
   2230   1.10       mrg 	 *   [4] we would like all waiting for I/O to occur with maps unlocked
   2231   1.98       chs 	 *       so that we don't block other threads.
   2232   1.10       mrg 	 */
   2233   1.99       chs 
   2234   1.10       mrg 	first_entry = NULL;
   2235  1.106       chs 	*entry_list = NULL;
   2236   1.10       mrg 
   2237   1.10       mrg 	/*
   2238   1.98       chs 	 * break up the area into map entry sized regions and unmap.  note
   2239   1.10       mrg 	 * that all mappings have to be removed before we can even consider
   2240   1.10       mrg 	 * dropping references to amaps or VM objects (otherwise we could end
   2241   1.10       mrg 	 * up with a mapping to a page on the free list which would be very bad)
   2242   1.10       mrg 	 */
   2243   1.10       mrg 
   2244   1.10       mrg 	while ((entry != &map->header) && (entry->start < end)) {
   2245  1.311      para 		KASSERT((entry->flags & UVM_MAP_STATIC) == 0);
   2246  1.174      yamt 
   2247  1.311      para 		UVM_MAP_CLIP_END(map, entry, end);
   2248   1.10       mrg 		next = entry->next;
   2249   1.10       mrg 		len = entry->end - entry->start;
   2250   1.81   thorpej 
   2251   1.10       mrg 		/*
   2252   1.10       mrg 		 * unwire before removing addresses from the pmap; otherwise
   2253   1.10       mrg 		 * unwiring will put the entries back into the pmap (XXX).
   2254   1.10       mrg 		 */
   2255    1.1       mrg 
   2256  1.106       chs 		if (VM_MAPENT_ISWIRED(entry)) {
   2257   1.10       mrg 			uvm_map_entry_unwire(map, entry);
   2258  1.106       chs 		}
   2259  1.187      yamt 		if (flags & UVM_FLAG_VAONLY) {
   2260  1.187      yamt 
   2261  1.187      yamt 			/* nothing */
   2262  1.187      yamt 
   2263  1.187      yamt 		} else if ((map->flags & VM_MAP_PAGEABLE) == 0) {
   2264   1.10       mrg 
   2265  1.106       chs 			/*
   2266  1.106       chs 			 * if the map is non-pageable, any pages mapped there
   2267  1.106       chs 			 * must be wired and entered with pmap_kenter_pa(),
   2268  1.106       chs 			 * and we should free any such pages immediately.
   2269  1.287     joerg 			 * this is mostly used for kmem_map.
   2270  1.106       chs 			 */
   2271  1.292     rmind 			KASSERT(vm_map_pmap(map) == pmap_kernel());
   2272   1.99       chs 
   2273  1.323      para 			uvm_km_pgremove_intrsafe(map, entry->start, entry->end);
   2274  1.106       chs 		} else if (UVM_ET_ISOBJ(entry) &&
   2275  1.106       chs 			   UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
   2276  1.300      yamt 			panic("%s: kernel object %p %p\n",
   2277  1.300      yamt 			    __func__, map, entry);
   2278  1.106       chs 		} else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) {
   2279   1.29     chuck 			/*
   2280  1.298     rmind 			 * remove mappings the standard way.  lock object
   2281  1.298     rmind 			 * and/or amap to ensure vm_page state does not
   2282  1.298     rmind 			 * change while in pmap_remove().
   2283  1.139     enami 			 */
   2284   1.99       chs 
   2285  1.376        ad #ifdef __HAVE_UNLOCKED_PMAP /* XXX temporary */
   2286  1.372        ad 			uvm_map_lock_entry(entry, RW_WRITER);
   2287  1.376        ad #else
   2288  1.376        ad 			uvm_map_lock_entry(entry, RW_READER);
   2289  1.376        ad #endif
   2290   1.29     chuck 			pmap_remove(map->pmap, entry->start, entry->end);
   2291  1.371        ad 
   2292  1.371        ad 			/*
   2293  1.371        ad 			 * note: if map is dying, leave pmap_update() for
   2294  1.378        ad 			 * later.  if the map is to be reused (exec) then
   2295  1.378        ad 			 * pmap_update() will be called.  if the map is
   2296  1.378        ad 			 * being disposed of (exit) then pmap_destroy()
   2297  1.378        ad 			 * will be called.
   2298  1.371        ad 			 */
   2299  1.371        ad 
   2300  1.371        ad 			if ((map->flags & VM_MAP_DYING) == 0) {
   2301  1.371        ad 				pmap_update(vm_map_pmap(map));
   2302  1.371        ad 			} else {
   2303  1.371        ad 				KASSERT(vm_map_pmap(map) != pmap_kernel());
   2304  1.371        ad 			}
   2305  1.371        ad 
   2306  1.298     rmind 			uvm_map_unlock_entry(entry);
   2307   1.10       mrg 		}
   2308   1.10       mrg 
   2309  1.331  christos #if defined(UVMDEBUG)
   2310  1.323      para 		/*
    2311  1.323      para 		 * check if there are any remaining mappings,
    2312  1.323      para 		 * which would indicate a bug in the caller.
   2313  1.323      para 		 */
   2314  1.177      yamt 
   2315  1.323      para 		vaddr_t va;
   2316  1.323      para 		for (va = entry->start; va < entry->end;
   2317  1.323      para 		    va += PAGE_SIZE) {
   2318  1.323      para 			if (pmap_extract(vm_map_pmap(map), va, NULL)) {
   2319  1.323      para 				panic("%s: %#"PRIxVADDR" has mapping",
   2320  1.323      para 				    __func__, va);
   2321  1.177      yamt 			}
   2322  1.323      para 		}
   2323  1.187      yamt 
   2324  1.333  christos 		if (VM_MAP_IS_KERNEL(map) && (flags & UVM_FLAG_NOWAIT) == 0) {
   2325  1.323      para 			uvm_km_check_empty(map, entry->start,
   2326  1.323      para 			    entry->end);
   2327  1.177      yamt 		}
   2328  1.331  christos #endif /* defined(UVMDEBUG) */
   2329  1.177      yamt 
   2330   1.10       mrg 		/*
   2331   1.98       chs 		 * remove entry from map and put it on our list of entries
   2332  1.106       chs 		 * that we've nuked.  then go to next entry.
   2333   1.10       mrg 		 */
   2334   1.99       chs 
   2335  1.353  pgoyette 		UVMHIST_LOG(maphist, "  removed map entry %#jx",
   2336  1.353  pgoyette 		    (uintptr_t)entry, 0, 0, 0);
   2337   1.82   thorpej 
   2338   1.82   thorpej 		/* critical!  prevents stale hint */
   2339   1.82   thorpej 		SAVE_HINT(map, entry, entry->prev);
   2340   1.82   thorpej 
   2341   1.10       mrg 		uvm_map_entry_unlink(map, entry);
   2342  1.146      yamt 		KASSERT(map->size >= len);
   2343   1.10       mrg 		map->size -= len;
   2344  1.131    atatat 		entry->prev = NULL;
   2345   1.10       mrg 		entry->next = first_entry;
   2346   1.10       mrg 		first_entry = entry;
   2347  1.106       chs 		entry = next;
   2348   1.10       mrg 	}
   2349  1.292     rmind 
   2350  1.222      yamt 	uvm_map_check(map, "unmap_remove leave");
   2351  1.144      yamt 
   2352   1.10       mrg 	/*
   2353   1.10       mrg 	 * now we've cleaned up the map and are ready for the caller to drop
   2354   1.98       chs 	 * references to the mapped objects.
   2355   1.10       mrg 	 */
   2356   1.10       mrg 
   2357   1.10       mrg 	*entry_list = first_entry;
   2358   1.10       mrg 	UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
   2359  1.180      yamt 
   2360  1.180      yamt 	if (map->flags & VM_MAP_WANTVA) {
   2361  1.238        ad 		mutex_enter(&map->misc_lock);
   2362  1.180      yamt 		map->flags &= ~VM_MAP_WANTVA;
   2363  1.238        ad 		cv_broadcast(&map->cv);
   2364  1.238        ad 		mutex_exit(&map->misc_lock);
   2365  1.180      yamt 	}
   2366    1.1       mrg }
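                          
                          /*
                           * Illustrative sketch (kept out of the build with #if 0): the
                           * two-phase unmap described above.  "map", "start" and "end"
                           * are placeholders supplied by the caller; this mirrors the
                           * sequence uvm_unmap() is expected to perform.
                           */
                          #if 0
                          static void
                          example_unmap(struct vm_map *map, vaddr_t start, vaddr_t end)
                          {
                          	struct vm_map_entry *dead_entries;
                          
                          	/* phase 1: unlink the entries with the map write-locked */
                          	vm_map_lock(map);
                          	uvm_unmap_remove(map, start, end, &dead_entries, 0);
                          	vm_map_unlock(map);
                          
                          	/* phase 2: drop object/amap references, map unlocked */
                          	if (dead_entries != NULL)
                          		uvm_unmap_detach(dead_entries, 0);
                          }
                          #endif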
   2367    1.1       mrg 
   2368    1.1       mrg /*
   2369    1.1       mrg  * uvm_unmap_detach: drop references in a chain of map entries
   2370    1.1       mrg  *
   2371    1.1       mrg  * => we will free the map entries as we traverse the list.
   2372    1.1       mrg  */
   2373    1.1       mrg 
   2374   1.10       mrg void
   2375  1.138     enami uvm_unmap_detach(struct vm_map_entry *first_entry, int flags)
   2376    1.1       mrg {
   2377   1.99       chs 	struct vm_map_entry *next_entry;
   2378  1.385     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
   2379    1.1       mrg 
   2380   1.10       mrg 	while (first_entry) {
   2381   1.85       chs 		KASSERT(!VM_MAPENT_ISWIRED(first_entry));
   2382   1.10       mrg 		UVMHIST_LOG(maphist,
   2383  1.353  pgoyette 		    "  detach %#jx: amap=%#jx, obj=%#jx, submap?=%jd",
   2384  1.353  pgoyette 		    (uintptr_t)first_entry,
   2385  1.353  pgoyette 		    (uintptr_t)first_entry->aref.ar_amap,
   2386  1.353  pgoyette 		    (uintptr_t)first_entry->object.uvm_obj,
   2387   1.29     chuck 		    UVM_ET_ISSUBMAP(first_entry));
   2388    1.1       mrg 
   2389   1.10       mrg 		/*
   2390   1.10       mrg 		 * drop reference to amap, if we've got one
   2391   1.10       mrg 		 */
   2392   1.10       mrg 
   2393   1.10       mrg 		if (first_entry->aref.ar_amap)
   2394   1.85       chs 			uvm_map_unreference_amap(first_entry, flags);
   2395   1.10       mrg 
   2396   1.10       mrg 		/*
   2397   1.10       mrg 		 * drop reference to our backing object, if we've got one
   2398   1.10       mrg 		 */
   2399   1.85       chs 
   2400  1.120       chs 		KASSERT(!UVM_ET_ISSUBMAP(first_entry));
   2401  1.120       chs 		if (UVM_ET_ISOBJ(first_entry) &&
   2402  1.120       chs 		    first_entry->object.uvm_obj->pgops->pgo_detach) {
   2403  1.120       chs 			(*first_entry->object.uvm_obj->pgops->pgo_detach)
   2404  1.120       chs 				(first_entry->object.uvm_obj);
   2405   1.10       mrg 		}
   2406   1.10       mrg 		next_entry = first_entry->next;
   2407   1.10       mrg 		uvm_mapent_free(first_entry);
   2408   1.10       mrg 		first_entry = next_entry;
   2409   1.10       mrg 	}
   2410   1.10       mrg 	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
   2411    1.1       mrg }
   2412    1.1       mrg 
   2413    1.1       mrg /*
   2414    1.1       mrg  *   E X T R A C T I O N   F U N C T I O N S
   2415    1.1       mrg  */
   2416    1.1       mrg 
   2417   1.98       chs /*
   2418    1.1       mrg  * uvm_map_reserve: reserve space in a vm_map for future use.
   2419    1.1       mrg  *
   2420   1.98       chs  * => we reserve space in a map by putting a dummy map entry in the
   2421    1.1       mrg  *    map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
   2422    1.1       mrg  * => map should be unlocked (we will write lock it)
   2423    1.1       mrg  * => we return true if we were able to reserve space
   2424    1.1       mrg  * => XXXCDC: should be inline?
   2425    1.1       mrg  */
   2426    1.1       mrg 
   2427   1.10       mrg int
   2428  1.138     enami uvm_map_reserve(struct vm_map *map, vsize_t size,
   2429  1.138     enami     vaddr_t offset	/* hint for pmap_prefer */,
   2430  1.243      yamt     vsize_t align	/* alignment */,
   2431  1.210      yamt     vaddr_t *raddr	/* IN:hint, OUT: reserved VA */,
   2432  1.324      matt     uvm_flag_t flags	/* UVM_FLAG_FIXED or UVM_FLAG_COLORMATCH or 0 */)
   2433    1.1       mrg {
   2434  1.385     skrll 	UVMHIST_FUNC(__func__);
   2435  1.385     skrll 	UVMHIST_CALLARGS(maphist, "(map=%#jx, size=%#jx, offset=%#jx, addr=%#jx)",
   2436  1.353  pgoyette 	    (uintptr_t)map, size, offset, (uintptr_t)raddr);
   2437   1.85       chs 
   2438   1.10       mrg 	size = round_page(size);
   2439   1.85       chs 
   2440   1.10       mrg 	/*
   2441   1.10       mrg 	 * reserve some virtual space.
   2442   1.10       mrg 	 */
   2443   1.85       chs 
   2444  1.243      yamt 	if (uvm_map(map, raddr, size, NULL, offset, align,
   2445   1.10       mrg 	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
   2446  1.210      yamt 	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE|flags)) != 0) {
    2447   1.10       mrg 		UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
   2448  1.234   thorpej 		return (false);
   2449   1.98       chs 	}
   2450   1.85       chs 
   2451  1.353  pgoyette 	UVMHIST_LOG(maphist, "<- done (*raddr=%#jx)", *raddr,0,0,0);
   2452  1.234   thorpej 	return (true);
   2453    1.1       mrg }
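                          
                          /*
                           * Illustrative sketch (#if 0): reserving virtual space, much as
                           * uvm_map_extract() below does when the caller has not already
                           * reserved a destination range.  Names are placeholders.
                           */
                          #if 0
                          static int
                          example_reserve(struct vm_map *map, vsize_t size, vaddr_t *vap)
                          {
                          	vaddr_t va = vm_map_min(map);	/* IN: hint, OUT: reserved VA */
                          
                          	if (!uvm_map_reserve(map, size, 0 /* offset hint */,
                          	    0 /* no extra alignment */, &va, 0 /* flags */))
                          		return ENOMEM;
                          	*vap = va;
                          	return 0;
                          }
                          #endif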
   2454    1.1       mrg 
   2455    1.1       mrg /*
   2456   1.98       chs  * uvm_map_replace: replace a reserved (blank) area of memory with
   2457    1.1       mrg  * real mappings.
   2458    1.1       mrg  *
   2459   1.98       chs  * => caller must WRITE-LOCK the map
   2460  1.234   thorpej  * => we return true if replacement was a success
    2461    1.1       mrg  * => we expect the newents chain to have nnewents entries on it and
   2462    1.1       mrg  *    we expect newents->prev to point to the last entry on the list
   2463    1.1       mrg  * => note newents is allowed to be NULL
   2464    1.1       mrg  */
   2465    1.1       mrg 
   2466  1.275      yamt static int
   2467  1.138     enami uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
   2468  1.275      yamt     struct vm_map_entry *newents, int nnewents, vsize_t nsize,
   2469  1.275      yamt     struct vm_map_entry **oldentryp)
   2470   1.10       mrg {
   2471   1.99       chs 	struct vm_map_entry *oldent, *last;
   2472    1.1       mrg 
   2473  1.222      yamt 	uvm_map_check(map, "map_replace entry");
   2474  1.144      yamt 
   2475   1.10       mrg 	/*
   2476   1.10       mrg 	 * first find the blank map entry at the specified address
   2477   1.10       mrg 	 */
   2478   1.85       chs 
   2479   1.10       mrg 	if (!uvm_map_lookup_entry(map, start, &oldent)) {
   2480  1.234   thorpej 		return (false);
   2481   1.10       mrg 	}
   2482   1.85       chs 
   2483   1.10       mrg 	/*
   2484   1.10       mrg 	 * check to make sure we have a proper blank entry
   2485   1.10       mrg 	 */
   2486    1.1       mrg 
   2487  1.311      para 	if (end < oldent->end) {
   2488  1.311      para 		UVM_MAP_CLIP_END(map, oldent, end);
   2489  1.210      yamt 	}
   2490   1.98       chs 	if (oldent->start != start || oldent->end != end ||
   2491   1.10       mrg 	    oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
   2492  1.234   thorpej 		return (false);
   2493   1.10       mrg 	}
   2494    1.1       mrg 
   2495    1.1       mrg #ifdef DIAGNOSTIC
   2496   1.99       chs 
   2497   1.10       mrg 	/*
   2498   1.10       mrg 	 * sanity check the newents chain
   2499   1.10       mrg 	 */
   2500   1.99       chs 
   2501   1.10       mrg 	{
   2502   1.99       chs 		struct vm_map_entry *tmpent = newents;
   2503   1.10       mrg 		int nent = 0;
   2504  1.275      yamt 		vsize_t sz = 0;
   2505   1.24       eeh 		vaddr_t cur = start;
   2506   1.10       mrg 
   2507   1.10       mrg 		while (tmpent) {
   2508   1.10       mrg 			nent++;
   2509  1.275      yamt 			sz += tmpent->end - tmpent->start;
   2510   1.10       mrg 			if (tmpent->start < cur)
   2511   1.10       mrg 				panic("uvm_map_replace1");
   2512  1.275      yamt 			if (tmpent->start >= tmpent->end || tmpent->end > end) {
   2513  1.286      matt 				panic("uvm_map_replace2: "
   2514  1.334      matt 				    "tmpent->start=%#"PRIxVADDR
   2515  1.334      matt 				    ", tmpent->end=%#"PRIxVADDR
   2516  1.334      matt 				    ", end=%#"PRIxVADDR,
   2517  1.286      matt 				    tmpent->start, tmpent->end, end);
   2518   1.10       mrg 			}
   2519   1.10       mrg 			cur = tmpent->end;
   2520   1.10       mrg 			if (tmpent->next) {
   2521   1.10       mrg 				if (tmpent->next->prev != tmpent)
   2522   1.10       mrg 					panic("uvm_map_replace3");
   2523   1.10       mrg 			} else {
   2524   1.10       mrg 				if (newents->prev != tmpent)
   2525   1.10       mrg 					panic("uvm_map_replace4");
   2526   1.10       mrg 			}
   2527   1.10       mrg 			tmpent = tmpent->next;
   2528   1.10       mrg 		}
   2529   1.10       mrg 		if (nent != nnewents)
   2530   1.10       mrg 			panic("uvm_map_replace5");
   2531  1.275      yamt 		if (sz != nsize)
   2532  1.275      yamt 			panic("uvm_map_replace6");
   2533   1.10       mrg 	}
   2534   1.10       mrg #endif
   2535   1.10       mrg 
   2536   1.10       mrg 	/*
   2537   1.10       mrg 	 * map entry is a valid blank!   replace it.   (this does all the
   2538   1.10       mrg 	 * work of map entry link/unlink...).
   2539   1.10       mrg 	 */
   2540   1.10       mrg 
   2541   1.10       mrg 	if (newents) {
   2542   1.99       chs 		last = newents->prev;
   2543   1.10       mrg 
   2544   1.10       mrg 		/* critical: flush stale hints out of map */
   2545   1.82   thorpej 		SAVE_HINT(map, map->hint, newents);
   2546   1.10       mrg 		if (map->first_free == oldent)
   2547   1.10       mrg 			map->first_free = last;
   2548   1.10       mrg 
   2549   1.10       mrg 		last->next = oldent->next;
   2550   1.10       mrg 		last->next->prev = last;
   2551  1.144      yamt 
   2552  1.144      yamt 		/* Fix RB tree */
   2553  1.144      yamt 		uvm_rb_remove(map, oldent);
   2554  1.144      yamt 
   2555   1.10       mrg 		newents->prev = oldent->prev;
   2556   1.10       mrg 		newents->prev->next = newents;
   2557   1.10       mrg 		map->nentries = map->nentries + (nnewents - 1);
   2558   1.10       mrg 
    2559  1.144      yamt 		/* Fix up the RB tree for the newly inserted entries */
   2560  1.144      yamt 		{
   2561  1.144      yamt 			int i;
   2562  1.144      yamt 			struct vm_map_entry *tmp;
   2563  1.144      yamt 
   2564  1.144      yamt 			tmp = newents;
   2565  1.144      yamt 			for (i = 0; i < nnewents && tmp; i++) {
   2566  1.144      yamt 				uvm_rb_insert(map, tmp);
   2567  1.144      yamt 				tmp = tmp->next;
   2568  1.144      yamt 			}
   2569  1.144      yamt 		}
   2570   1.10       mrg 	} else {
   2571   1.10       mrg 		/* NULL list of new entries: just remove the old one */
   2572  1.221      yamt 		clear_hints(map, oldent);
   2573   1.10       mrg 		uvm_map_entry_unlink(map, oldent);
   2574   1.10       mrg 	}
   2575  1.275      yamt 	map->size -= end - start - nsize;
   2576   1.10       mrg 
   2577  1.222      yamt 	uvm_map_check(map, "map_replace leave");
   2578   1.10       mrg 
   2579   1.10       mrg 	/*
   2580  1.209      yamt 	 * now we can free the old blank entry and return.
   2581   1.10       mrg 	 */
   2582    1.1       mrg 
   2583  1.253      yamt 	*oldentryp = oldent;
   2584  1.234   thorpej 	return (true);
   2585    1.1       mrg }
   2586    1.1       mrg 
   2587    1.1       mrg /*
   2588    1.1       mrg  * uvm_map_extract: extract a mapping from a map and put it somewhere
   2589    1.1       mrg  *	(maybe removing the old mapping)
   2590    1.1       mrg  *
   2591    1.1       mrg  * => maps should be unlocked (we will write lock them)
   2592    1.1       mrg  * => returns 0 on success, error code otherwise
   2593    1.1       mrg  * => start must be page aligned
    2594    1.1       mrg  * => len must be a multiple of the page size
   2595    1.1       mrg  * => flags:
   2596    1.1       mrg  *      UVM_EXTRACT_REMOVE: remove mappings from srcmap
   2597    1.1       mrg  *      UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
   2598    1.1       mrg  *      UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
   2599    1.1       mrg  *      UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
   2600  1.337  christos  *      UVM_EXTRACT_PROT_ALL: set prot to UVM_PROT_ALL as we go
   2601    1.1       mrg  *    >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
    2602    1.1       mrg  *    >>>NOTE: QREFs must be unmapped via the QREF path, thus should only
   2603    1.1       mrg  *             be used from within the kernel in a kernel level map <<<
   2604    1.1       mrg  */
   2605    1.1       mrg 
   2606   1.10       mrg int
   2607  1.138     enami uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
   2608  1.138     enami     struct vm_map *dstmap, vaddr_t *dstaddrp, int flags)
   2609   1.10       mrg {
   2610  1.163   mycroft 	vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge;
   2611   1.99       chs 	struct vm_map_entry *chain, *endchain, *entry, *orig_entry, *newentry,
   2612   1.99       chs 	    *deadentry, *oldentry;
   2613  1.253      yamt 	struct vm_map_entry *resentry = NULL; /* a dummy reservation entry */
   2614  1.325    martin 	vsize_t elen __unused;
   2615   1.10       mrg 	int nchain, error, copy_ok;
   2616  1.275      yamt 	vsize_t nsize;
   2617  1.385     skrll 	UVMHIST_FUNC(__func__);
   2618  1.385     skrll 	UVMHIST_CALLARGS(maphist,"(srcmap=%#jx,start=%#jx, len=%#jx",
   2619  1.353  pgoyette 	    (uintptr_t)srcmap, start, len, 0);
   2620  1.353  pgoyette 	UVMHIST_LOG(maphist," ...,dstmap=%#jx, flags=%#jx)",
   2621  1.353  pgoyette 	    (uintptr_t)dstmap, flags, 0, 0);
   2622   1.10       mrg 
   2623   1.10       mrg 	/*
    2624   1.10       mrg 	 * step 0: sanity check: start must be on a page boundary and the
    2625   1.10       mrg 	 * length must be a multiple of the page size.  can't ask for
    2626   1.10       mrg 	 * CONTIG/QREF if you asked for REMOVE.
   2627   1.10       mrg 	 */
   2628   1.10       mrg 
   2629   1.85       chs 	KASSERT((start & PAGE_MASK) == 0 && (len & PAGE_MASK) == 0);
   2630   1.85       chs 	KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 ||
   2631   1.85       chs 		(flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0);
   2632   1.10       mrg 
   2633   1.10       mrg 	/*
   2634   1.10       mrg 	 * step 1: reserve space in the target map for the extracted area
   2635   1.10       mrg 	 */
   2636   1.10       mrg 
   2637  1.210      yamt 	if ((flags & UVM_EXTRACT_RESERVED) == 0) {
   2638  1.210      yamt 		dstaddr = vm_map_min(dstmap);
   2639  1.380  riastrad 		if (!uvm_map_reserve(dstmap, len, start,
   2640  1.324      matt 		    atop(start) & uvmexp.colormask, &dstaddr,
   2641  1.324      matt 		    UVM_FLAG_COLORMATCH))
   2642  1.210      yamt 			return (ENOMEM);
   2643  1.324      matt 		KASSERT((atop(start ^ dstaddr) & uvmexp.colormask) == 0);
   2644  1.210      yamt 		*dstaddrp = dstaddr;	/* pass address back to caller */
   2645  1.353  pgoyette 		UVMHIST_LOG(maphist, "  dstaddr=%#jx", dstaddr,0,0,0);
   2646  1.210      yamt 	} else {
   2647  1.210      yamt 		dstaddr = *dstaddrp;
   2648  1.210      yamt 	}
   2649   1.10       mrg 
   2650   1.10       mrg 	/*
    2651   1.98       chs 	 * step 2: set up for the extraction process loop by init'ing the
   2652   1.10       mrg 	 * map entry chain, locking src map, and looking up the first useful
   2653   1.10       mrg 	 * entry in the map.
   2654   1.10       mrg 	 */
   2655    1.1       mrg 
   2656   1.10       mrg 	end = start + len;
   2657   1.10       mrg 	newend = dstaddr + len;
   2658   1.10       mrg 	chain = endchain = NULL;
   2659   1.10       mrg 	nchain = 0;
   2660  1.275      yamt 	nsize = 0;
   2661   1.10       mrg 	vm_map_lock(srcmap);
   2662   1.10       mrg 
   2663   1.10       mrg 	if (uvm_map_lookup_entry(srcmap, start, &entry)) {
   2664   1.10       mrg 
   2665   1.10       mrg 		/* "start" is within an entry */
   2666   1.10       mrg 		if (flags & UVM_EXTRACT_QREF) {
   2667   1.85       chs 
   2668   1.10       mrg 			/*
   2669   1.10       mrg 			 * for quick references we don't clip the entry, so
   2670   1.10       mrg 			 * the entry may map space "before" the starting
   2671   1.10       mrg 			 * virtual address... this is the "fudge" factor
   2672   1.10       mrg 			 * (which can be non-zero only the first time
   2673   1.10       mrg 			 * through the "while" loop in step 3).
   2674   1.10       mrg 			 */
   2675   1.85       chs 
   2676   1.10       mrg 			fudge = start - entry->start;
   2677   1.10       mrg 		} else {
   2678   1.85       chs 
   2679   1.10       mrg 			/*
   2680   1.10       mrg 			 * normal reference: we clip the map to fit (thus
   2681   1.10       mrg 			 * fudge is zero)
   2682   1.10       mrg 			 */
   2683   1.85       chs 
   2684  1.311      para 			UVM_MAP_CLIP_START(srcmap, entry, start);
   2685   1.82   thorpej 			SAVE_HINT(srcmap, srcmap->hint, entry->prev);
   2686   1.10       mrg 			fudge = 0;
   2687   1.10       mrg 		}
   2688   1.85       chs 	} else {
   2689    1.1       mrg 
   2690   1.10       mrg 		/* "start" is not within an entry ... skip to next entry */
   2691   1.10       mrg 		if (flags & UVM_EXTRACT_CONTIG) {
   2692   1.10       mrg 			error = EINVAL;
   2693   1.10       mrg 			goto bad;    /* definite hole here ... */
   2694   1.10       mrg 		}
   2695    1.1       mrg 
   2696   1.10       mrg 		entry = entry->next;
   2697   1.10       mrg 		fudge = 0;
   2698   1.10       mrg 	}
   2699   1.85       chs 
   2700   1.10       mrg 	/* save values from srcmap for step 6 */
   2701   1.10       mrg 	orig_entry = entry;
   2702   1.10       mrg 	orig_fudge = fudge;
   2703    1.1       mrg 
   2704   1.10       mrg 	/*
   2705   1.10       mrg 	 * step 3: now start looping through the map entries, extracting
   2706   1.10       mrg 	 * as we go.
   2707   1.10       mrg 	 */
   2708    1.1       mrg 
   2709   1.10       mrg 	while (entry->start < end && entry != &srcmap->header) {
   2710   1.85       chs 
   2711   1.10       mrg 		/* if we are not doing a quick reference, clip it */
   2712   1.10       mrg 		if ((flags & UVM_EXTRACT_QREF) == 0)
   2713  1.311      para 			UVM_MAP_CLIP_END(srcmap, entry, end);
   2714   1.10       mrg 
   2715   1.10       mrg 		/* clear needs_copy (allow chunking) */
   2716   1.10       mrg 		if (UVM_ET_ISNEEDSCOPY(entry)) {
   2717  1.212      yamt 			amap_copy(srcmap, entry,
   2718  1.212      yamt 			    AMAP_COPY_NOWAIT|AMAP_COPY_NOMERGE, start, end);
   2719   1.10       mrg 			if (UVM_ET_ISNEEDSCOPY(entry)) {  /* failed? */
   2720   1.10       mrg 				error = ENOMEM;
   2721   1.10       mrg 				goto bad;
   2722   1.10       mrg 			}
   2723   1.85       chs 
   2724   1.10       mrg 			/* amap_copy could clip (during chunk)!  update fudge */
   2725   1.10       mrg 			if (fudge) {
   2726  1.163   mycroft 				fudge = start - entry->start;
   2727   1.10       mrg 				orig_fudge = fudge;
   2728   1.10       mrg 			}
   2729   1.10       mrg 		}
   2730    1.1       mrg 
   2731   1.10       mrg 		/* calculate the offset of this from "start" */
   2732   1.10       mrg 		oldoffset = (entry->start + fudge) - start;
   2733    1.1       mrg 
   2734   1.10       mrg 		/* allocate a new map entry */
   2735  1.126    bouyer 		newentry = uvm_mapent_alloc(dstmap, 0);
   2736   1.10       mrg 		if (newentry == NULL) {
   2737   1.10       mrg 			error = ENOMEM;
   2738   1.10       mrg 			goto bad;
   2739   1.10       mrg 		}
   2740   1.10       mrg 
   2741   1.10       mrg 		/* set up new map entry */
   2742   1.10       mrg 		newentry->next = NULL;
   2743   1.10       mrg 		newentry->prev = endchain;
   2744   1.10       mrg 		newentry->start = dstaddr + oldoffset;
   2745   1.10       mrg 		newentry->end =
   2746   1.10       mrg 		    newentry->start + (entry->end - (entry->start + fudge));
   2747   1.37       chs 		if (newentry->end > newend || newentry->end < newentry->start)
   2748   1.10       mrg 			newentry->end = newend;
   2749   1.10       mrg 		newentry->object.uvm_obj = entry->object.uvm_obj;
   2750   1.10       mrg 		if (newentry->object.uvm_obj) {
   2751   1.10       mrg 			if (newentry->object.uvm_obj->pgops->pgo_reference)
   2752   1.10       mrg 				newentry->object.uvm_obj->pgops->
   2753   1.10       mrg 				    pgo_reference(newentry->object.uvm_obj);
   2754  1.354       mrg 			newentry->offset = entry->offset + fudge;
   2755   1.10       mrg 		} else {
   2756   1.10       mrg 			newentry->offset = 0;
   2757   1.10       mrg 		}
   2758   1.10       mrg 		newentry->etype = entry->etype;
   2759  1.337  christos 		if (flags & UVM_EXTRACT_PROT_ALL) {
   2760  1.337  christos 			newentry->protection = newentry->max_protection =
   2761  1.337  christos 			    UVM_PROT_ALL;
   2762  1.337  christos 		} else {
   2763  1.337  christos 			newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
   2764  1.337  christos 			    entry->max_protection : entry->protection;
   2765  1.337  christos 			newentry->max_protection = entry->max_protection;
   2766  1.337  christos 		}
   2767   1.10       mrg 		newentry->inheritance = entry->inheritance;
   2768   1.10       mrg 		newentry->wired_count = 0;
   2769   1.10       mrg 		newentry->aref.ar_amap = entry->aref.ar_amap;
   2770   1.10       mrg 		if (newentry->aref.ar_amap) {
   2771   1.34     chuck 			newentry->aref.ar_pageoff =
   2772   1.34     chuck 			    entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
   2773   1.85       chs 			uvm_map_reference_amap(newentry, AMAP_SHARED |
   2774   1.10       mrg 			    ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
   2775   1.10       mrg 		} else {
   2776   1.34     chuck 			newentry->aref.ar_pageoff = 0;
   2777   1.10       mrg 		}
   2778   1.10       mrg 		newentry->advice = entry->advice;
   2779  1.245      yamt 		if ((flags & UVM_EXTRACT_QREF) != 0) {
   2780  1.245      yamt 			newentry->flags |= UVM_MAP_NOMERGE;
   2781  1.245      yamt 		}
   2782   1.10       mrg 
   2783   1.10       mrg 		/* now link it on the chain */
   2784   1.10       mrg 		nchain++;
   2785  1.275      yamt 		nsize += newentry->end - newentry->start;
   2786   1.10       mrg 		if (endchain == NULL) {
   2787   1.10       mrg 			chain = endchain = newentry;
   2788   1.10       mrg 		} else {
   2789   1.10       mrg 			endchain->next = newentry;
   2790   1.10       mrg 			endchain = newentry;
   2791   1.10       mrg 		}
   2792   1.10       mrg 
   2793   1.10       mrg 		/* end of 'while' loop! */
   2794   1.98       chs 		if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
   2795   1.10       mrg 		    (entry->next == &srcmap->header ||
   2796   1.10       mrg 		    entry->next->start != entry->end)) {
   2797   1.10       mrg 			error = EINVAL;
   2798   1.10       mrg 			goto bad;
   2799   1.10       mrg 		}
   2800   1.10       mrg 		entry = entry->next;
   2801   1.10       mrg 		fudge = 0;
   2802   1.10       mrg 	}
   2803   1.10       mrg 
   2804   1.10       mrg 	/*
   2805   1.10       mrg 	 * step 4: close off chain (in format expected by uvm_map_replace)
   2806   1.10       mrg 	 */
   2807   1.10       mrg 
   2808   1.10       mrg 	if (chain)
   2809   1.10       mrg 		chain->prev = endchain;
   2810   1.10       mrg 
   2811   1.10       mrg 	/*
   2812   1.10       mrg 	 * step 5: attempt to lock the dest map so we can pmap_copy.
   2813   1.98       chs 	 * note usage of copy_ok:
   2814   1.10       mrg 	 *   1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
   2815   1.10       mrg 	 *   0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
   2816   1.10       mrg 	 */
   2817   1.85       chs 
   2818  1.234   thorpej 	if (srcmap == dstmap || vm_map_lock_try(dstmap) == true) {
   2819   1.10       mrg 		copy_ok = 1;
   2820   1.10       mrg 		if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
   2821  1.275      yamt 		    nchain, nsize, &resentry)) {
   2822   1.10       mrg 			if (srcmap != dstmap)
   2823   1.10       mrg 				vm_map_unlock(dstmap);
   2824   1.10       mrg 			error = EIO;
   2825   1.10       mrg 			goto bad;
   2826   1.10       mrg 		}
   2827   1.10       mrg 	} else {
   2828   1.10       mrg 		copy_ok = 0;
    2829   1.10       mrg 		/* replace deferred until step 7 */
   2830   1.10       mrg 	}
   2831   1.10       mrg 
   2832   1.10       mrg 	/*
   2833   1.10       mrg 	 * step 6: traverse the srcmap a second time to do the following:
   2834   1.10       mrg 	 *  - if we got a lock on the dstmap do pmap_copy
   2835   1.10       mrg 	 *  - if UVM_EXTRACT_REMOVE remove the entries
   2836   1.10       mrg 	 * we make use of orig_entry and orig_fudge (saved in step 2)
   2837   1.10       mrg 	 */
   2838   1.10       mrg 
   2839   1.10       mrg 	if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {
   2840   1.10       mrg 
   2841   1.10       mrg 		/* purge possible stale hints from srcmap */
   2842   1.10       mrg 		if (flags & UVM_EXTRACT_REMOVE) {
   2843   1.82   thorpej 			SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev);
   2844  1.220      yamt 			if (srcmap->first_free != &srcmap->header &&
   2845  1.220      yamt 			    srcmap->first_free->start >= start)
   2846   1.10       mrg 				srcmap->first_free = orig_entry->prev;
   2847   1.10       mrg 		}
   2848   1.10       mrg 
   2849   1.10       mrg 		entry = orig_entry;
   2850   1.10       mrg 		fudge = orig_fudge;
   2851   1.10       mrg 		deadentry = NULL;	/* for UVM_EXTRACT_REMOVE */
   2852   1.10       mrg 
   2853   1.10       mrg 		while (entry->start < end && entry != &srcmap->header) {
   2854   1.10       mrg 			if (copy_ok) {
   2855   1.74   thorpej 				oldoffset = (entry->start + fudge) - start;
   2856   1.90       chs 				elen = MIN(end, entry->end) -
   2857   1.74   thorpej 				    (entry->start + fudge);
   2858   1.74   thorpej 				pmap_copy(dstmap->pmap, srcmap->pmap,
   2859   1.74   thorpej 				    dstaddr + oldoffset, elen,
   2860   1.74   thorpej 				    entry->start + fudge);
   2861   1.10       mrg 			}
   2862   1.10       mrg 
   2863   1.74   thorpej 			/* we advance "entry" in the following if statement */
   2864   1.10       mrg 			if (flags & UVM_EXTRACT_REMOVE) {
   2865  1.376        ad #ifdef __HAVE_UNLOCKED_PMAP /* XXX temporary */
   2866  1.372        ad 				uvm_map_lock_entry(entry, RW_WRITER);
   2867  1.376        ad #else
   2868  1.376        ad 				uvm_map_lock_entry(entry, RW_READER);
   2869  1.376        ad #endif
   2870   1.98       chs 				pmap_remove(srcmap->pmap, entry->start,
   2871   1.20     chuck 						entry->end);
   2872  1.298     rmind 				uvm_map_unlock_entry(entry);
   2873  1.139     enami 				oldentry = entry;	/* save entry */
   2874  1.139     enami 				entry = entry->next;	/* advance */
   2875   1.20     chuck 				uvm_map_entry_unlink(srcmap, oldentry);
   2876   1.20     chuck 							/* add to dead list */
   2877   1.20     chuck 				oldentry->next = deadentry;
   2878   1.20     chuck 				deadentry = oldentry;
   2879  1.139     enami 			} else {
   2880  1.139     enami 				entry = entry->next;		/* advance */
   2881   1.10       mrg 			}
   2882   1.10       mrg 
   2883   1.10       mrg 			/* end of 'while' loop */
   2884   1.10       mrg 			fudge = 0;
   2885   1.10       mrg 		}
   2886  1.105     chris 		pmap_update(srcmap->pmap);
   2887   1.10       mrg 
   2888   1.10       mrg 		/*
   2889   1.10       mrg 		 * unlock dstmap.  we will dispose of deadentry in
   2890   1.10       mrg 		 * step 7 if needed
   2891   1.10       mrg 		 */
   2892   1.85       chs 
   2893   1.10       mrg 		if (copy_ok && srcmap != dstmap)
   2894   1.10       mrg 			vm_map_unlock(dstmap);
   2895   1.10       mrg 
   2896   1.99       chs 	} else {
   2897   1.99       chs 		deadentry = NULL;
   2898   1.10       mrg 	}
   2899   1.10       mrg 
   2900   1.10       mrg 	/*
   2901   1.10       mrg 	 * step 7: we are done with the source map, unlock.   if copy_ok
   2902   1.10       mrg 	 * is 0 then we have not replaced the dummy mapping in dstmap yet
   2903   1.10       mrg 	 * and we need to do so now.
   2904   1.10       mrg 	 */
   2905   1.10       mrg 
   2906   1.10       mrg 	vm_map_unlock(srcmap);
   2907   1.10       mrg 	if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
   2908   1.10       mrg 		uvm_unmap_detach(deadentry, 0);   /* dispose of old entries */
   2909   1.10       mrg 
   2910   1.10       mrg 	/* now do the replacement if we didn't do it in step 5 */
   2911   1.10       mrg 	if (copy_ok == 0) {
   2912   1.10       mrg 		vm_map_lock(dstmap);
   2913   1.10       mrg 		error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
   2914  1.275      yamt 		    nchain, nsize, &resentry);
   2915   1.10       mrg 		vm_map_unlock(dstmap);
   2916   1.10       mrg 
   2917  1.234   thorpej 		if (error == false) {
   2918   1.10       mrg 			error = EIO;
   2919   1.10       mrg 			goto bad2;
   2920   1.10       mrg 		}
   2921   1.10       mrg 	}
   2922  1.144      yamt 
   2923  1.253      yamt 	if (resentry != NULL)
   2924  1.253      yamt 		uvm_mapent_free(resentry);
   2925  1.253      yamt 
   2926  1.139     enami 	return (0);
   2927   1.10       mrg 
   2928   1.10       mrg 	/*
   2929   1.10       mrg 	 * bad: failure recovery
   2930   1.10       mrg 	 */
   2931   1.10       mrg bad:
   2932   1.10       mrg 	vm_map_unlock(srcmap);
   2933   1.10       mrg bad2:			/* src already unlocked */
   2934   1.10       mrg 	if (chain)
   2935   1.10       mrg 		uvm_unmap_detach(chain,
   2936   1.10       mrg 		    (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
   2937  1.144      yamt 
   2938  1.253      yamt 	if (resentry != NULL)
   2939  1.253      yamt 		uvm_mapent_free(resentry);
   2940  1.253      yamt 
   2941  1.210      yamt 	if ((flags & UVM_EXTRACT_RESERVED) == 0) {
   2942  1.210      yamt 		uvm_unmap(dstmap, dstaddr, dstaddr+len);   /* ??? */
   2943  1.210      yamt 	}
   2944  1.139     enami 	return (error);
   2945   1.10       mrg }
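                          
                          /*
                           * Illustrative sketch (#if 0): one plausible extraction, taking a
                           * quick reference to a source range and mapping it into the kernel
                           * map.  Per the flag rules above, QREF/CONTIG must not be combined
                           * with REMOVE; all names are placeholders.
                           */
                          #if 0
                          static int
                          example_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
                              vaddr_t *kvap)
                          {
                          	/* start and len must be page aligned (see step 0 above) */
                          	return uvm_map_extract(srcmap, start, len, kernel_map, kvap,
                          	    UVM_EXTRACT_QREF | UVM_EXTRACT_CONTIG |
                          	    UVM_EXTRACT_FIXPROT);
                          }
                          #endif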
   2946   1.10       mrg 
   2947   1.10       mrg /* end of extraction functions */
   2948    1.1       mrg 
   2949    1.1       mrg /*
   2950    1.1       mrg  * uvm_map_submap: punch down part of a map into a submap
   2951    1.1       mrg  *
   2952    1.1       mrg  * => only the kernel_map is allowed to be submapped
   2953    1.1       mrg  * => the purpose of submapping is to break up the locking granularity
   2954    1.1       mrg  *	of a larger map
   2955    1.1       mrg  * => the range specified must have been mapped previously with a uvm_map()
   2956    1.1       mrg  *	call [with uobj==NULL] to create a blank map entry in the main map.
   2957    1.1       mrg  *	[And it had better still be blank!]
   2958    1.1       mrg  * => maps which contain submaps should never be copied or forked.
   2959   1.98       chs  * => to remove a submap, use uvm_unmap() on the main map
   2960    1.1       mrg  *	and then uvm_map_deallocate() the submap.
   2961    1.1       mrg  * => main map must be unlocked.
   2962    1.1       mrg  * => submap must have been init'd and have a zero reference count.
   2963    1.1       mrg  *	[need not be locked as we don't actually reference it]
   2964    1.1       mrg  */
   2965   1.85       chs 
   2966   1.10       mrg int
   2967  1.138     enami uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end,
   2968  1.138     enami     struct vm_map *submap)
   2969   1.10       mrg {
   2970   1.99       chs 	struct vm_map_entry *entry;
   2971   1.94       chs 	int error;
   2972    1.1       mrg 
   2973   1.10       mrg 	vm_map_lock(map);
   2974   1.85       chs 	VM_MAP_RANGE_CHECK(map, start, end);
   2975    1.1       mrg 
   2976   1.10       mrg 	if (uvm_map_lookup_entry(map, start, &entry)) {
   2977  1.311      para 		UVM_MAP_CLIP_START(map, entry, start);
   2978  1.311      para 		UVM_MAP_CLIP_END(map, entry, end);	/* to be safe */
   2979   1.94       chs 	} else {
   2980   1.10       mrg 		entry = NULL;
   2981   1.10       mrg 	}
   2982    1.1       mrg 
   2983   1.98       chs 	if (entry != NULL &&
   2984   1.10       mrg 	    entry->start == start && entry->end == end &&
   2985   1.10       mrg 	    entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
   2986   1.10       mrg 	    !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
   2987   1.29     chuck 		entry->etype |= UVM_ET_SUBMAP;
   2988   1.10       mrg 		entry->object.sub_map = submap;
   2989   1.10       mrg 		entry->offset = 0;
   2990   1.10       mrg 		uvm_map_reference(submap);
   2991   1.94       chs 		error = 0;
   2992   1.10       mrg 	} else {
   2993   1.94       chs 		error = EINVAL;
   2994   1.10       mrg 	}
   2995   1.10       mrg 	vm_map_unlock(map);
   2996  1.174      yamt 
   2997   1.94       chs 	return error;
   2998    1.1       mrg }
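                          
                          /*
                           * Illustrative sketch (#if 0) of the two-step sequence required
                           * above: create a blank (uobj == NULL) entry in the kernel map,
                           * then punch the already-initialized, zero-reference submap down
                           * into it.  "submap" and "size" are placeholders.
                           */
                          #if 0
                          static int
                          example_submap(struct vm_map *submap, vaddr_t *vap, vsize_t size)
                          {
                          	int error;
                          
                          	error = uvm_map(kernel_map, vap, size, NULL,
                          	    UVM_UNKNOWN_OFFSET, 0,
                          	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
                          	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
                          	if (error)
                          		return error;
                          	return uvm_map_submap(kernel_map, *vap, *vap + size, submap);
                          }
                          #endif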
   2999    1.1       mrg 
   3000  1.175      yamt /*
   3001  1.344     joerg  * uvm_map_protect_user: change map protection on behalf of the user.
   3002  1.344     joerg  * Enforces PAX settings as necessary.
   3003  1.344     joerg  */
   3004  1.344     joerg int
   3005  1.344     joerg uvm_map_protect_user(struct lwp *l, vaddr_t start, vaddr_t end,
   3006  1.344     joerg     vm_prot_t new_prot)
   3007  1.344     joerg {
   3008  1.344     joerg 	int error;
   3009  1.344     joerg 
   3010  1.344     joerg 	if ((error = PAX_MPROTECT_VALIDATE(l, new_prot)))
   3011  1.344     joerg 		return error;
   3012  1.344     joerg 
   3013  1.344     joerg 	return uvm_map_protect(&l->l_proc->p_vmspace->vm_map, start, end,
   3014  1.344     joerg 	    new_prot, false);
   3015  1.344     joerg }
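                          
                          /*
                           * Illustrative fragment (#if 0): an mprotect(2)-style caller
                           * passes the requesting lwp so the PAX check applies.  "l", "va",
                           * "len" and "prot" are placeholders.
                           */
                          #if 0
                          	error = uvm_map_protect_user(l, va, va + len, prot);
                          #endif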
   3016  1.344     joerg 
   3017  1.344     joerg 
   3018  1.344     joerg /*
   3019    1.1       mrg  * uvm_map_protect: change map protection
   3020    1.1       mrg  *
   3021    1.1       mrg  * => set_max means set max_protection.
   3022    1.1       mrg  * => map must be unlocked.
   3023    1.1       mrg  */
   3024    1.1       mrg 
   3025  1.139     enami #define MASK(entry)	(UVM_ET_ISCOPYONWRITE(entry) ? \
   3026   1.36   mycroft 			 ~VM_PROT_WRITE : VM_PROT_ALL)
   3027    1.1       mrg 
   3028   1.10       mrg int
   3029  1.138     enami uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
   3030  1.233   thorpej     vm_prot_t new_prot, bool set_max)
   3031   1.10       mrg {
   3032   1.99       chs 	struct vm_map_entry *current, *entry;
   3033   1.94       chs 	int error = 0;
   3034  1.385     skrll 	UVMHIST_FUNC(__func__);
   3035  1.385     skrll 	UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_prot=%#jx)",
   3036  1.353  pgoyette 	    (uintptr_t)map, start, end, new_prot);
   3037   1.85       chs 
   3038   1.10       mrg 	vm_map_lock(map);
   3039   1.10       mrg 	VM_MAP_RANGE_CHECK(map, start, end);
   3040   1.10       mrg 	if (uvm_map_lookup_entry(map, start, &entry)) {
   3041  1.311      para 		UVM_MAP_CLIP_START(map, entry, start);
   3042   1.10       mrg 	} else {
   3043   1.10       mrg 		entry = entry->next;
   3044   1.10       mrg 	}
   3045   1.10       mrg 
   3046    1.1       mrg 	/*
   3047   1.10       mrg 	 * make a first pass to check for protection violations.
   3048    1.1       mrg 	 */
   3049    1.1       mrg 
   3050   1.10       mrg 	current = entry;
   3051   1.10       mrg 	while ((current != &map->header) && (current->start < end)) {
   3052   1.65   thorpej 		if (UVM_ET_ISSUBMAP(current)) {
   3053   1.94       chs 			error = EINVAL;
   3054   1.65   thorpej 			goto out;
   3055   1.65   thorpej 		}
   3056   1.10       mrg 		if ((new_prot & current->max_protection) != new_prot) {
   3057   1.94       chs 			error = EACCES;
   3058   1.65   thorpej 			goto out;
   3059  1.112   thorpej 		}
   3060  1.112   thorpej 		/*
   3061  1.112   thorpej 		 * Don't allow VM_PROT_EXECUTE to be set on entries that
   3062  1.112   thorpej 		 * point to vnodes that are associated with a NOEXEC file
   3063  1.112   thorpej 		 * system.
   3064  1.112   thorpej 		 */
   3065  1.112   thorpej 		if (UVM_ET_ISOBJ(current) &&
   3066  1.112   thorpej 		    UVM_OBJ_IS_VNODE(current->object.uvm_obj)) {
   3067  1.112   thorpej 			struct vnode *vp =
   3068  1.112   thorpej 			    (struct vnode *) current->object.uvm_obj;
   3069  1.112   thorpej 
   3070  1.112   thorpej 			if ((new_prot & VM_PROT_EXECUTE) != 0 &&
   3071  1.112   thorpej 			    (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
   3072  1.112   thorpej 				error = EACCES;
   3073  1.112   thorpej 				goto out;
   3074  1.112   thorpej 			}
   3075   1.10       mrg 		}
   3076  1.224      elad 
   3077   1.65   thorpej 		current = current->next;
   3078   1.10       mrg 	}
   3079   1.10       mrg 
    3080   1.10       mrg 	/* go back and fix up protections, clipping each entry's end to the range. */
   3081   1.10       mrg 
   3082   1.10       mrg 	current = entry;
   3083   1.10       mrg 	while ((current != &map->header) && (current->start < end)) {
   3084   1.10       mrg 		vm_prot_t old_prot;
   3085   1.85       chs 
   3086  1.311      para 		UVM_MAP_CLIP_END(map, current, end);
   3087   1.10       mrg 		old_prot = current->protection;
   3088   1.10       mrg 		if (set_max)
   3089   1.10       mrg 			current->protection =
   3090   1.10       mrg 			    (current->max_protection = new_prot) & old_prot;
   3091   1.10       mrg 		else
   3092   1.10       mrg 			current->protection = new_prot;
   3093   1.10       mrg 
   3094   1.10       mrg 		/*
   3095   1.98       chs 		 * update physical map if necessary.  worry about copy-on-write
   3096   1.10       mrg 		 * here -- CHECK THIS XXX
   3097   1.10       mrg 		 */
   3098   1.10       mrg 
   3099   1.10       mrg 		if (current->protection != old_prot) {
   3100   1.29     chuck 			/* update pmap! */
   3101  1.376        ad #ifdef __HAVE_UNLOCKED_PMAP /* XXX temporary */
   3102  1.372        ad 			uvm_map_lock_entry(current, RW_WRITER);
   3103  1.376        ad #else
   3104  1.376        ad 			uvm_map_lock_entry(current, RW_READER);
   3105  1.376        ad #endif
   3106   1.29     chuck 			pmap_protect(map->pmap, current->start, current->end,
   3107  1.358      maxv 			    current->protection & MASK(current));
   3108  1.298     rmind 			uvm_map_unlock_entry(current);
   3109  1.109   thorpej 
   3110  1.109   thorpej 			/*
   3111  1.109   thorpej 			 * If this entry points at a vnode, and the
   3112  1.109   thorpej 			 * protection includes VM_PROT_EXECUTE, mark
   3113  1.111   thorpej 			 * the vnode as VEXECMAP.
   3114  1.109   thorpej 			 */
   3115  1.109   thorpej 			if (UVM_ET_ISOBJ(current)) {
   3116  1.109   thorpej 				struct uvm_object *uobj =
   3117  1.109   thorpej 				    current->object.uvm_obj;
   3118  1.109   thorpej 
   3119  1.109   thorpej 				if (UVM_OBJ_IS_VNODE(uobj) &&
   3120  1.241        ad 				    (current->protection & VM_PROT_EXECUTE)) {
   3121  1.110   thorpej 					vn_markexec((struct vnode *) uobj);
   3122  1.241        ad 				}
   3123  1.109   thorpej 			}
   3124   1.65   thorpej 		}
   3125   1.10       mrg 
   3126   1.65   thorpej 		/*
   3127   1.65   thorpej 		 * If the map is configured to lock any future mappings,
   3128   1.65   thorpej 		 * wire this entry now if the old protection was VM_PROT_NONE
   3129   1.65   thorpej 		 * and the new protection is not VM_PROT_NONE.
   3130   1.65   thorpej 		 */
   3131   1.65   thorpej 
   3132   1.65   thorpej 		if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
   3133  1.358      maxv 		    VM_MAPENT_ISWIRED(current) == 0 &&
   3134   1.65   thorpej 		    old_prot == VM_PROT_NONE &&
   3135   1.65   thorpej 		    new_prot != VM_PROT_NONE) {
   3136  1.360       chs 
   3137  1.360       chs 			/*
   3138  1.360       chs 			 * We must call pmap_update() here because the
   3139  1.360       chs 			 * pmap_protect() call above might have removed some
   3140  1.360       chs 			 * pmap entries and uvm_map_pageable() might create
   3141  1.360       chs 			 * some new pmap entries that rely on the prior
   3142  1.360       chs 			 * removals being completely finished.
   3143  1.360       chs 			 */
   3144  1.360       chs 
   3145  1.360       chs 			pmap_update(map->pmap);
   3146  1.360       chs 
   3147  1.358      maxv 			if (uvm_map_pageable(map, current->start,
   3148  1.358      maxv 			    current->end, false,
   3149   1.94       chs 			    UVM_LK_ENTER|UVM_LK_EXIT) != 0) {
   3150   1.99       chs 
   3151   1.65   thorpej 				/*
   3152   1.65   thorpej 				 * If locking the entry fails, remember the
   3153   1.65   thorpej 				 * error if it's the first one.  Note we
   3154   1.65   thorpej 				 * still continue setting the protection in
   3155   1.94       chs 				 * the map, but will return the error
   3156   1.94       chs 				 * condition regardless.
   3157   1.65   thorpej 				 *
   3158   1.65   thorpej 				 * XXX Ignore what the actual error is,
   3159   1.65   thorpej 				 * XXX just call it a resource shortage
   3160   1.65   thorpej 				 * XXX so that it doesn't get confused
   3161   1.65   thorpej 				 * XXX what uvm_map_protect() itself would
   3162   1.65   thorpej 				 * XXX normally return.
   3163   1.65   thorpej 				 */
   3164   1.99       chs 
   3165   1.94       chs 				error = ENOMEM;
   3166   1.65   thorpej 			}
   3167   1.10       mrg 		}
   3168   1.10       mrg 		current = current->next;
   3169   1.10       mrg 	}
   3170  1.105     chris 	pmap_update(map->pmap);
   3171   1.85       chs 
   3172   1.65   thorpej  out:
   3173   1.10       mrg 	vm_map_unlock(map);
   3174  1.174      yamt 
   3175  1.353  pgoyette 	UVMHIST_LOG(maphist, "<- done, error=%jd",error,0,0,0);
   3176   1.94       chs 	return error;
   3177    1.1       mrg }
   3178    1.1       mrg 
   3179    1.1       mrg #undef  MASK
   3180    1.1       mrg 
   3181   1.98       chs /*
   3182    1.1       mrg  * uvm_map_inherit: set inheritance code for range of addrs in map.
   3183    1.1       mrg  *
   3184    1.1       mrg  * => map must be unlocked
   3185    1.1       mrg  * => note that the inherit code is used during a "fork".  see fork
   3186    1.1       mrg  *	code for details.
   3187    1.1       mrg  */
   3188    1.1       mrg 
   3189   1.10       mrg int
   3190  1.138     enami uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end,
   3191  1.138     enami     vm_inherit_t new_inheritance)
   3192   1.10       mrg {
   3193   1.99       chs 	struct vm_map_entry *entry, *temp_entry;
   3194  1.385     skrll 	UVMHIST_FUNC(__func__);
   3195  1.385     skrll 	UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_inh=%#jx)",
   3196  1.353  pgoyette 	    (uintptr_t)map, start, end, new_inheritance);
   3197   1.10       mrg 
   3198   1.10       mrg 	switch (new_inheritance) {
   3199   1.80       wiz 	case MAP_INHERIT_NONE:
   3200   1.80       wiz 	case MAP_INHERIT_COPY:
   3201   1.80       wiz 	case MAP_INHERIT_SHARE:
   3202  1.330  christos 	case MAP_INHERIT_ZERO:
   3203   1.10       mrg 		break;
   3204   1.10       mrg 	default:
   3205   1.10       mrg 		UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
   3206   1.94       chs 		return EINVAL;
   3207   1.10       mrg 	}
   3208    1.1       mrg 
   3209   1.10       mrg 	vm_map_lock(map);
   3210   1.10       mrg 	VM_MAP_RANGE_CHECK(map, start, end);
   3211   1.10       mrg 	if (uvm_map_lookup_entry(map, start, &temp_entry)) {
   3212   1.10       mrg 		entry = temp_entry;
   3213  1.311      para 		UVM_MAP_CLIP_START(map, entry, start);
   3214   1.10       mrg 	}  else {
   3215   1.10       mrg 		entry = temp_entry->next;
   3216   1.10       mrg 	}
   3217   1.10       mrg 	while ((entry != &map->header) && (entry->start < end)) {
   3218  1.311      para 		UVM_MAP_CLIP_END(map, entry, end);
   3219   1.10       mrg 		entry->inheritance = new_inheritance;
   3220   1.10       mrg 		entry = entry->next;
   3221   1.10       mrg 	}
   3222   1.10       mrg 	vm_map_unlock(map);
   3223   1.10       mrg 	UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
   3224   1.94       chs 	return 0;
   3225   1.41       mrg }
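                          
                          /*
                           * Illustrative fragment (#if 0), minherit(2)-style: keep a range
                           * shared with children across fork.  Placeholders throughout.
                           */
                          #if 0
                          	error = uvm_map_inherit(&p->p_vmspace->vm_map, va, va + len,
                          	    MAP_INHERIT_SHARE);
                          #endif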
   3226   1.41       mrg 
   3227   1.98       chs /*
   3228   1.41       mrg  * uvm_map_advice: set advice code for range of addrs in map.
   3229   1.41       mrg  *
   3230   1.41       mrg  * => map must be unlocked
   3231   1.41       mrg  */
   3232   1.41       mrg 
   3233   1.41       mrg int
   3234  1.138     enami uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
   3235   1.41       mrg {
   3236   1.99       chs 	struct vm_map_entry *entry, *temp_entry;
   3237  1.385     skrll 	UVMHIST_FUNC(__func__);
   3238  1.385     skrll 	UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_adv=%#jx)",
   3239  1.353  pgoyette 	    (uintptr_t)map, start, end, new_advice);
   3240   1.41       mrg 
   3241   1.41       mrg 	vm_map_lock(map);
   3242   1.41       mrg 	VM_MAP_RANGE_CHECK(map, start, end);
   3243   1.41       mrg 	if (uvm_map_lookup_entry(map, start, &temp_entry)) {
   3244   1.41       mrg 		entry = temp_entry;
   3245  1.311      para 		UVM_MAP_CLIP_START(map, entry, start);
   3246   1.41       mrg 	} else {
   3247   1.41       mrg 		entry = temp_entry->next;
   3248   1.41       mrg 	}
   3249   1.61   thorpej 
   3250   1.61   thorpej 	/*
   3251   1.61   thorpej 	 * XXXJRT: disallow holes?
   3252   1.61   thorpej 	 */
   3253   1.61   thorpej 
   3254   1.41       mrg 	while ((entry != &map->header) && (entry->start < end)) {
   3255  1.311      para 		UVM_MAP_CLIP_END(map, entry, end);
   3256   1.41       mrg 
   3257   1.41       mrg 		switch (new_advice) {
   3258   1.41       mrg 		case MADV_NORMAL:
   3259   1.41       mrg 		case MADV_RANDOM:
   3260   1.41       mrg 		case MADV_SEQUENTIAL:
   3261   1.41       mrg 			/* nothing special here */
   3262   1.41       mrg 			break;
   3263   1.41       mrg 
   3264   1.41       mrg 		default:
   3265   1.50       mrg 			vm_map_unlock(map);
   3266   1.41       mrg 			UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
   3267   1.94       chs 			return EINVAL;
   3268   1.41       mrg 		}
   3269   1.41       mrg 		entry->advice = new_advice;
   3270   1.41       mrg 		entry = entry->next;
   3271   1.41       mrg 	}
   3272   1.41       mrg 
   3273   1.41       mrg 	vm_map_unlock(map);
   3274   1.41       mrg 	UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
   3275   1.94       chs 	return 0;
   3276    1.1       mrg }
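                          
                          /*
                           * Illustrative fragment (#if 0), madvise(2)-style: hint that a
                           * range will be read sequentially.  Placeholders throughout.
                           */
                          #if 0
                          	error = uvm_map_advice(&p->p_vmspace->vm_map, va, va + len,
                          	    MADV_SEQUENTIAL);
                          #endif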
   3277    1.1       mrg 
   3278    1.1       mrg /*
   3279  1.271      yamt  * uvm_map_willneed: apply MADV_WILLNEED
   3280  1.271      yamt  */
   3281  1.271      yamt 
   3282  1.271      yamt int
   3283  1.271      yamt uvm_map_willneed(struct vm_map *map, vaddr_t start, vaddr_t end)
   3284  1.271      yamt {
   3285  1.271      yamt 	struct vm_map_entry *entry;
   3286  1.385     skrll 	UVMHIST_FUNC(__func__);
   3287  1.385     skrll 	UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx)",
   3288  1.353  pgoyette 	    (uintptr_t)map, start, end, 0);
   3289  1.271      yamt 
   3290  1.271      yamt 	vm_map_lock_read(map);
   3291  1.271      yamt 	VM_MAP_RANGE_CHECK(map, start, end);
   3292  1.271      yamt 	if (!uvm_map_lookup_entry(map, start, &entry)) {
   3293  1.271      yamt 		entry = entry->next;
   3294  1.271      yamt 	}
   3295  1.271      yamt 	while (entry->start < end) {
   3296  1.271      yamt 		struct vm_amap * const amap = entry->aref.ar_amap;
   3297  1.271      yamt 		struct uvm_object * const uobj = entry->object.uvm_obj;
   3298  1.271      yamt 
   3299  1.271      yamt 		KASSERT(entry != &map->header);
   3300  1.271      yamt 		KASSERT(start < entry->end);
   3301  1.271      yamt 		/*
   3302  1.296      yamt 		 * For now, we handle only the easy but commonly-requested case.
    3303  1.296      yamt 		 * i.e. start prefetching of backing uobj pages.
   3304  1.271      yamt 		 *
   3305  1.296      yamt 		 * XXX It might be useful to pmap_enter() the already-in-core
   3306  1.296      yamt 		 * pages by inventing a "weak" mode for uvm_fault() which would
   3307  1.296      yamt 		 * only do the PGO_LOCKED pgo_get().
   3308  1.271      yamt 		 */
   3309  1.271      yamt 		if (UVM_ET_ISOBJ(entry) && amap == NULL && uobj != NULL) {
   3310  1.271      yamt 			off_t offset;
   3311  1.271      yamt 			off_t size;
   3312  1.271      yamt 
    3313  1.271      yamt 			/*
    3314  1.271      yamt 			 * prefetch only the part of this entry that
    3315  1.271      yamt 			 * overlaps the requested [start, end) range.
    3316  1.271      yamt 			 */
    3317  1.271      yamt 			offset = entry->offset;
    3318  1.271      yamt 			if (entry->start < start)
    3319  1.271      yamt 				offset += start - entry->start;
    3320  1.271      yamt 			size = MIN(entry->end, end) - MAX(entry->start, start);
    3321  1.271      yamt 			uvm_readahead(uobj, offset, size);
   3322  1.271      yamt 		}
   3323  1.271      yamt 		entry = entry->next;
   3324  1.271      yamt 	}
   3325  1.271      yamt 	vm_map_unlock_read(map);
   3326  1.271      yamt 	UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
   3327  1.271      yamt 	return 0;
   3328  1.271      yamt }
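                          
                          /*
                           * Worked example (illustrative, with hypothetical numbers): for an
                           * entry mapping object offset 0x2000 at VA [0x100000, 0x110000), a
                           * willneed request for [0x101000, 0x103000) overlaps the entry by
                           * 0x2000 bytes starting 0x1000 into it, so the loop above issues
                           * uvm_readahead(uobj, 0x3000 (= 0x2000 + 0x1000), 0x2000).
                           */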
   3329  1.271      yamt 
   3330  1.271      yamt /*
   3331    1.1       mrg  * uvm_map_pageable: sets the pageability of a range in a map.
   3332    1.1       mrg  *
   3333   1.56   thorpej  * => wires map entries.  should not be used for transient page locking.
   3334   1.56   thorpej  *	for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()).
   3335  1.216  drochner  * => regions specified as not pageable require lock-down (wired) memory
   3336    1.1       mrg  *	and page tables.
   3337   1.59   thorpej  * => map must never be read-locked
   3338  1.234   thorpej  * => if islocked is true, map is already write-locked
   3339   1.59   thorpej  * => we always unlock the map, since we must downgrade to a read-lock
   3340   1.59   thorpej  *	to call uvm_fault_wire()
    3341    1.1       mrg  * => XXXCDC: check this and try to clean it up.
   3342    1.1       mrg  */
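                          
                          /*
                           * Example (an illustrative sketch): mlock(2)-style wiring and the
                           * matching munlock(2)-style unwiring; "map", "start" and "end" are
                           * assumed page-aligned and validated by the caller.
                           *
                           *	wire:	error = uvm_map_pageable(map, start, end, false, 0);
                           *	unwire:	error = uvm_map_pageable(map, start, end, true, 0);
                           */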
   3343    1.1       mrg 
   3344   1.19    kleink int
   3345  1.138     enami uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
   3346  1.233   thorpej     bool new_pageable, int lockflags)
   3347    1.1       mrg {
   3348   1.99       chs 	struct vm_map_entry *entry, *start_entry, *failed_entry;
   3349   1.10       mrg 	int rv;
   3350   1.60   thorpej #ifdef DIAGNOSTIC
   3351   1.60   thorpej 	u_int timestamp_save;
   3352   1.60   thorpej #endif
   3353  1.385     skrll 	UVMHIST_FUNC(__func__);
   3354  1.385     skrll 	UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_pageable=%ju)",
   3355  1.353  pgoyette 	    (uintptr_t)map, start, end, new_pageable);
   3356   1.85       chs 	KASSERT(map->flags & VM_MAP_PAGEABLE);
   3357   1.45   thorpej 
   3358   1.64   thorpej 	if ((lockflags & UVM_LK_ENTER) == 0)
   3359   1.59   thorpej 		vm_map_lock(map);
   3360   1.10       mrg 	VM_MAP_RANGE_CHECK(map, start, end);
   3361   1.10       mrg 
   3362   1.98       chs 	/*
   3363   1.10       mrg 	 * only one pageability change may take place at one time, since
   3364   1.10       mrg 	 * uvm_fault_wire assumes it will be called only once for each
   3365   1.10       mrg 	 * wiring/unwiring.  therefore, we have to make sure we're actually
   3366   1.10       mrg 	 * changing the pageability for the entire region.  we do so before
   3367   1.98       chs 	 * making any changes.
   3368   1.10       mrg 	 */
   3369   1.10       mrg 
   3370  1.234   thorpej 	if (uvm_map_lookup_entry(map, start, &start_entry) == false) {
   3371   1.64   thorpej 		if ((lockflags & UVM_LK_EXIT) == 0)
   3372   1.64   thorpej 			vm_map_unlock(map);
   3373   1.85       chs 
   3374   1.94       chs 		UVMHIST_LOG(maphist,"<- done (fault)",0,0,0,0);
   3375   1.94       chs 		return EFAULT;
   3376   1.10       mrg 	}
   3377   1.10       mrg 	entry = start_entry;
   3378   1.10       mrg 
   3379  1.359       kre 	if (start == end) {		/* nothing required */
   3380  1.359       kre 		if ((lockflags & UVM_LK_EXIT) == 0)
   3381  1.359       kre 			vm_map_unlock(map);
   3382  1.359       kre 
   3383  1.359       kre 		UVMHIST_LOG(maphist,"<- done (nothing)",0,0,0,0);
   3384  1.359       kre 		return 0;
   3385  1.359       kre 	}
   3386  1.359       kre 
   3387   1.98       chs 	/*
   3388  1.100       wiz 	 * handle wiring and unwiring separately.
   3389   1.10       mrg 	 */
   3390    1.1       mrg 
   3391   1.56   thorpej 	if (new_pageable) {		/* unwire */
   3392  1.311      para 		UVM_MAP_CLIP_START(map, entry, start);
   3393   1.85       chs 
   3394   1.10       mrg 		/*
   3395   1.10       mrg 		 * unwiring.  first ensure that the range to be unwired is
   3396   1.98       chs 		 * really wired down and that there are no holes.
   3397   1.10       mrg 		 */
   3398   1.85       chs 
   3399   1.10       mrg 		while ((entry != &map->header) && (entry->start < end)) {
   3400   1.10       mrg 			if (entry->wired_count == 0 ||
   3401   1.10       mrg 			    (entry->end < end &&
   3402   1.55   thorpej 			     (entry->next == &map->header ||
   3403   1.55   thorpej 			      entry->next->start > entry->end))) {
   3404   1.64   thorpej 				if ((lockflags & UVM_LK_EXIT) == 0)
   3405   1.64   thorpej 					vm_map_unlock(map);
   3406   1.94       chs 				UVMHIST_LOG(maphist, "<- done (INVAL)",0,0,0,0);
   3407   1.94       chs 				return EINVAL;
   3408   1.10       mrg 			}
   3409   1.10       mrg 			entry = entry->next;
   3410   1.10       mrg 		}
   3411   1.10       mrg 
   3412   1.98       chs 		/*
   3413   1.56   thorpej 		 * POSIX 1003.1b - a single munlock call unlocks a region,
   3414   1.56   thorpej 		 * regardless of the number of mlock calls made on that
   3415   1.56   thorpej 		 * region.
   3416   1.10       mrg 		 */
   3417   1.85       chs 
   3418   1.10       mrg 		entry = start_entry;
   3419   1.10       mrg 		while ((entry != &map->header) && (entry->start < end)) {
   3420  1.311      para 			UVM_MAP_CLIP_END(map, entry, end);
   3421   1.56   thorpej 			if (VM_MAPENT_ISWIRED(entry))
   3422   1.10       mrg 				uvm_map_entry_unwire(map, entry);
   3423   1.10       mrg 			entry = entry->next;
   3424   1.10       mrg 		}
   3425   1.64   thorpej 		if ((lockflags & UVM_LK_EXIT) == 0)
   3426   1.64   thorpej 			vm_map_unlock(map);
   3427   1.10       mrg 		UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
   3428   1.94       chs 		return 0;
   3429   1.10       mrg 	}
   3430   1.10       mrg 
   3431   1.10       mrg 	/*
   3432   1.10       mrg 	 * wire case: in two passes [XXXCDC: ugly block of code here]
   3433   1.10       mrg 	 *
   3434   1.10       mrg 	 * 1: holding the write lock, we create any anonymous maps that need
   3435   1.10       mrg 	 *    to be created.  then we clip each map entry to the region to
   3436   1.98       chs 	 *    be wired and increment its wiring count.
   3437   1.10       mrg 	 *
   3438   1.10       mrg 	 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
   3439   1.56   thorpej 	 *    in the pages for any newly wired area (wired_count == 1).
   3440   1.10       mrg 	 *
   3441   1.10       mrg 	 *    downgrading to a read lock for uvm_fault_wire avoids a possible
   3442   1.10       mrg 	 *    deadlock with another thread that may have faulted on one of
   3443   1.10       mrg 	 *    the pages to be wired (it would mark the page busy, blocking
   3444   1.10       mrg 	 *    us, then in turn block on the map lock that we hold).  because
   3445   1.10       mrg 	 *    of problems in the recursive lock package, we cannot upgrade
   3446   1.10       mrg 	 *    to a write lock in vm_map_lookup.  thus, any actions that
   3447   1.10       mrg 	 *    require the write lock must be done beforehand.  because we
   3448   1.10       mrg 	 *    keep the read lock on the map, the copy-on-write status of the
   3449   1.10       mrg 	 *    entries we modify here cannot change.
   3450   1.10       mrg 	 */
   3451   1.10       mrg 
   3452   1.10       mrg 	while ((entry != &map->header) && (entry->start < end)) {
   3453   1.55   thorpej 		if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
   3454   1.85       chs 
   3455   1.85       chs 			/*
   3456   1.10       mrg 			 * perform actions of vm_map_lookup that need the
   3457   1.10       mrg 			 * write lock on the map: create an anonymous map
   3458   1.10       mrg 			 * for a copy-on-write region, or an anonymous map
   3459   1.29     chuck 			 * for a zero-fill region.  (XXXCDC: submap case
   3460   1.29     chuck 			 * ok?)
   3461   1.10       mrg 			 */
   3462   1.85       chs 
   3463   1.29     chuck 			if (!UVM_ET_ISSUBMAP(entry)) {  /* not submap */
   3464   1.98       chs 				if (UVM_ET_ISNEEDSCOPY(entry) &&
   3465  1.117       chs 				    ((entry->max_protection & VM_PROT_WRITE) ||
   3466   1.54   thorpej 				     (entry->object.uvm_obj == NULL))) {
   3467  1.212      yamt 					amap_copy(map, entry, 0, start, end);
   3468   1.10       mrg 					/* XXXCDC: wait OK? */
   3469   1.10       mrg 				}
   3470   1.10       mrg 			}
   3471   1.55   thorpej 		}
   3472  1.311      para 		UVM_MAP_CLIP_START(map, entry, start);
   3473  1.311      para 		UVM_MAP_CLIP_END(map, entry, end);
   3474   1.10       mrg 		entry->wired_count++;
   3475   1.10       mrg 
   3476   1.10       mrg 		/*
   3477   1.98       chs 		 * Check for holes
   3478   1.10       mrg 		 */
   3479   1.85       chs 
   3480   1.54   thorpej 		if (entry->protection == VM_PROT_NONE ||
   3481   1.54   thorpej 		    (entry->end < end &&
   3482   1.54   thorpej 		     (entry->next == &map->header ||
   3483   1.54   thorpej 		      entry->next->start > entry->end))) {
   3484   1.85       chs 
   3485   1.10       mrg 			/*
   3486   1.10       mrg 			 * found one.  amap creation actions do not need to
   3487   1.98       chs 			 * be undone, but the wired counts need to be restored.
   3488   1.10       mrg 			 */
   3489   1.85       chs 
   3490   1.10       mrg 			while (entry != &map->header && entry->end > start) {
   3491   1.10       mrg 				entry->wired_count--;
   3492   1.10       mrg 				entry = entry->prev;
   3493   1.10       mrg 			}
   3494   1.64   thorpej 			if ((lockflags & UVM_LK_EXIT) == 0)
   3495   1.64   thorpej 				vm_map_unlock(map);
   3496   1.10       mrg 			UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
   3497   1.94       chs 			return EINVAL;
   3498   1.10       mrg 		}
   3499   1.10       mrg 		entry = entry->next;
   3500   1.10       mrg 	}
   3501   1.10       mrg 
   3502   1.10       mrg 	/*
   3503   1.10       mrg 	 * Pass 2.
   3504   1.10       mrg 	 */
   3505   1.51   thorpej 
   3506   1.60   thorpej #ifdef DIAGNOSTIC
   3507   1.60   thorpej 	timestamp_save = map->timestamp;
   3508   1.60   thorpej #endif
   3509   1.60   thorpej 	vm_map_busy(map);
   3510  1.249      yamt 	vm_map_unlock(map);
   3511   1.10       mrg 
   3512   1.10       mrg 	rv = 0;
   3513   1.10       mrg 	entry = start_entry;
   3514   1.10       mrg 	while (entry != &map->header && entry->start < end) {
   3515   1.51   thorpej 		if (entry->wired_count == 1) {
   3516   1.44   thorpej 			rv = uvm_fault_wire(map, entry->start, entry->end,
   3517  1.216  drochner 			    entry->max_protection, 1);
   3518   1.10       mrg 			if (rv) {
   3519   1.94       chs 
   3520   1.51   thorpej 				/*
   3521   1.51   thorpej 				 * wiring failed.  break out of the loop.
   3522   1.51   thorpej 				 * we'll clean up the map below, once we
   3523   1.51   thorpej 				 * have a write lock again.
   3524   1.51   thorpej 				 */
   3525   1.94       chs 
   3526   1.51   thorpej 				break;
   3527   1.10       mrg 			}
   3528   1.10       mrg 		}
   3529   1.10       mrg 		entry = entry->next;
   3530   1.10       mrg 	}
   3531   1.10       mrg 
   3532  1.139     enami 	if (rv) {	/* failed? */
   3533   1.85       chs 
   3534   1.52   thorpej 		/*
   3535   1.52   thorpej 		 * Get back to an exclusive (write) lock.
   3536   1.52   thorpej 		 */
   3537   1.85       chs 
   3538  1.249      yamt 		vm_map_lock(map);
   3539   1.60   thorpej 		vm_map_unbusy(map);
   3540   1.60   thorpej 
   3541   1.60   thorpej #ifdef DIAGNOSTIC
   3542  1.252      yamt 		if (timestamp_save + 1 != map->timestamp)
   3543   1.60   thorpej 			panic("uvm_map_pageable: stale map");
   3544   1.60   thorpej #endif
   3545   1.10       mrg 
   3546   1.51   thorpej 		/*
   3547   1.51   thorpej 		 * first drop the wiring count on all the entries
   3548   1.51   thorpej 		 * which haven't actually been wired yet.
   3549   1.51   thorpej 		 */
   3550   1.85       chs 
   3551   1.54   thorpej 		failed_entry = entry;
   3552   1.54   thorpej 		while (entry != &map->header && entry->start < end) {
   3553   1.51   thorpej 			entry->wired_count--;
   3554   1.54   thorpej 			entry = entry->next;
   3555   1.54   thorpej 		}
   3556   1.51   thorpej 
   3557   1.51   thorpej 		/*
   3558   1.54   thorpej 		 * now, unwire all the entries that were successfully
   3559   1.54   thorpej 		 * wired above.
   3560   1.51   thorpej 		 */
   3561   1.85       chs 
   3562   1.54   thorpej 		entry = start_entry;
   3563   1.54   thorpej 		while (entry != failed_entry) {
   3564   1.54   thorpej 			entry->wired_count--;
   3565   1.55   thorpej 			if (VM_MAPENT_ISWIRED(entry) == 0)
   3566   1.54   thorpej 				uvm_map_entry_unwire(map, entry);
   3567   1.54   thorpej 			entry = entry->next;
   3568   1.54   thorpej 		}
   3569   1.64   thorpej 		if ((lockflags & UVM_LK_EXIT) == 0)
   3570   1.64   thorpej 			vm_map_unlock(map);
   3571  1.353  pgoyette 		UVMHIST_LOG(maphist, "<- done (RV=%jd)", rv,0,0,0);
   3572  1.139     enami 		return (rv);
   3573   1.10       mrg 	}
   3574   1.51   thorpej 
   3575   1.64   thorpej 	if ((lockflags & UVM_LK_EXIT) == 0) {
   3576   1.64   thorpej 		vm_map_unbusy(map);
   3577   1.64   thorpej 	} else {
   3578   1.85       chs 
   3579   1.64   thorpej 		/*
   3580   1.64   thorpej 		 * Get back to an exclusive (write) lock.
   3581   1.64   thorpej 		 */
   3582   1.85       chs 
   3583  1.249      yamt 		vm_map_lock(map);
   3584   1.64   thorpej 		vm_map_unbusy(map);
   3585   1.64   thorpej 	}
   3586   1.64   thorpej 
   3587   1.10       mrg 	UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
   3588   1.94       chs 	return 0;
   3589    1.1       mrg }
   3590    1.1       mrg 
   3591    1.1       mrg /*
   3592   1.54   thorpej  * uvm_map_pageable_all: special case of uvm_map_pageable - affects
   3593   1.54   thorpej  * all mapped regions.
   3594   1.54   thorpej  *
   3595   1.54   thorpej  * => map must not be locked.
   3596   1.54   thorpej  * => if no flags are specified, all regions are unwired.
   3597   1.54   thorpej  * => XXXJRT: has some of the same problems as uvm_map_pageable() above.
   3598   1.54   thorpej  */
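                          
                          /*
                           * Example (an illustrative sketch): the mlockall(2)/munlockall(2)
                           * system calls reduce to calls like the following; the rlimit
                           * argument shown is an assumption made for the example.
                           *
                           *	mlockall:   error = uvm_map_pageable_all(map,
                           *			MCL_CURRENT | MCL_FUTURE,
                           *			p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
                           *	munlockall: (void) uvm_map_pageable_all(map, 0, 0);
                           */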
   3599   1.54   thorpej 
   3600   1.54   thorpej int
   3601  1.138     enami uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit)
   3602   1.54   thorpej {
   3603   1.99       chs 	struct vm_map_entry *entry, *failed_entry;
   3604   1.54   thorpej 	vsize_t size;
   3605   1.54   thorpej 	int rv;
   3606   1.60   thorpej #ifdef DIAGNOSTIC
   3607   1.60   thorpej 	u_int timestamp_save;
   3608   1.60   thorpej #endif
   3609  1.385     skrll 	UVMHIST_FUNC(__func__);
   3610  1.385     skrll 	UVMHIST_CALLARGS(maphist,"(map=%#jx,flags=%#jx)", (uintptr_t)map, flags,
   3611  1.353  pgoyette 	    0, 0);
   3612   1.54   thorpej 
   3613   1.85       chs 	KASSERT(map->flags & VM_MAP_PAGEABLE);
   3614   1.54   thorpej 
   3615   1.54   thorpej 	vm_map_lock(map);
   3616   1.54   thorpej 
   3617   1.54   thorpej 	/*
   3618   1.54   thorpej 	 * handle wiring and unwiring separately.
   3619   1.54   thorpej 	 */
   3620   1.54   thorpej 
   3621   1.54   thorpej 	if (flags == 0) {			/* unwire */
   3622   1.99       chs 
   3623   1.54   thorpej 		/*
   3624   1.56   thorpej 		 * POSIX 1003.1b -- munlockall unlocks all regions,
   3625   1.56   thorpej 		 * regardless of how many times mlockall has been called.
   3626   1.54   thorpej 		 */
   3627   1.99       chs 
   3628   1.54   thorpej 		for (entry = map->header.next; entry != &map->header;
   3629   1.54   thorpej 		     entry = entry->next) {
   3630   1.56   thorpej 			if (VM_MAPENT_ISWIRED(entry))
   3631   1.56   thorpej 				uvm_map_entry_unwire(map, entry);
   3632   1.54   thorpej 		}
   3633  1.238        ad 		map->flags &= ~VM_MAP_WIREFUTURE;
   3634   1.54   thorpej 		vm_map_unlock(map);
   3635   1.54   thorpej 		UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
   3636   1.94       chs 		return 0;
   3637   1.54   thorpej 	}
   3638   1.54   thorpej 
   3639   1.54   thorpej 	if (flags & MCL_FUTURE) {
   3640   1.99       chs 
   3641   1.54   thorpej 		/*
   3642   1.54   thorpej 		 * must wire all future mappings; remember this.
   3643   1.54   thorpej 		 */
   3644   1.99       chs 
   3645  1.238        ad 		map->flags |= VM_MAP_WIREFUTURE;
   3646   1.54   thorpej 	}
   3647   1.54   thorpej 
   3648   1.54   thorpej 	if ((flags & MCL_CURRENT) == 0) {
   3649   1.99       chs 
   3650   1.54   thorpej 		/*
   3651   1.54   thorpej 		 * no more work to do!
   3652   1.54   thorpej 		 */
   3653   1.99       chs 
   3654   1.54   thorpej 		UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0);
   3655   1.54   thorpej 		vm_map_unlock(map);
   3656   1.94       chs 		return 0;
   3657   1.54   thorpej 	}
   3658   1.54   thorpej 
   3659   1.54   thorpej 	/*
   3660   1.54   thorpej 	 * wire case: in three passes [XXXCDC: ugly block of code here]
   3661   1.54   thorpej 	 *
   3662   1.54   thorpej 	 * 1: holding the write lock, count all pages mapped by non-wired
   3663   1.54   thorpej 	 *    entries.  if this would cause us to go over our limit, we fail.
   3664   1.54   thorpej 	 *
   3665   1.54   thorpej 	 * 2: still holding the write lock, we create any anonymous maps that
   3666   1.54   thorpej 	 *    need to be created.  then we increment its wiring count.
   3667   1.54   thorpej 	 *
   3668   1.54   thorpej 	 * 3: we downgrade to a read lock, and call uvm_fault_wire to fault
   3669   1.56   thorpej 	 *    in the pages for any newly wired area (wired_count == 1).
   3670   1.54   thorpej 	 *
   3671   1.54   thorpej 	 *    downgrading to a read lock for uvm_fault_wire avoids a possible
   3672   1.54   thorpej 	 *    deadlock with another thread that may have faulted on one of
   3673   1.54   thorpej 	 *    the pages to be wired (it would mark the page busy, blocking
   3674   1.54   thorpej 	 *    us, then in turn block on the map lock that we hold).  because
   3675   1.54   thorpej 	 *    of problems in the recursive lock package, we cannot upgrade
   3676   1.54   thorpej 	 *    to a write lock in vm_map_lookup.  thus, any actions that
   3677   1.54   thorpej 	 *    require the write lock must be done beforehand.  because we
   3678   1.54   thorpej 	 *    keep the read lock on the map, the copy-on-write status of the
   3679   1.54   thorpej 	 *    entries we modify here cannot change.
   3680   1.54   thorpej 	 */
   3681   1.54   thorpej 
   3682   1.54   thorpej 	for (size = 0, entry = map->header.next; entry != &map->header;
   3683   1.54   thorpej 	     entry = entry->next) {
   3684   1.54   thorpej 		if (entry->protection != VM_PROT_NONE &&
   3685   1.55   thorpej 		    VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
   3686   1.54   thorpej 			size += entry->end - entry->start;
   3687   1.54   thorpej 		}
   3688   1.54   thorpej 	}
   3689   1.54   thorpej 
   3690   1.54   thorpej 	if (atop(size) + uvmexp.wired > uvmexp.wiredmax) {
   3691   1.54   thorpej 		vm_map_unlock(map);
   3692   1.94       chs 		return ENOMEM;
   3693   1.54   thorpej 	}
   3694   1.54   thorpej 
   3695   1.54   thorpej 	if (limit != 0 &&
   3696   1.54   thorpej 	    (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) {
   3697   1.54   thorpej 		vm_map_unlock(map);
   3698   1.94       chs 		return ENOMEM;
   3699   1.54   thorpej 	}
   3700   1.54   thorpej 
   3701   1.54   thorpej 	/*
   3702   1.54   thorpej 	 * Pass 2.
   3703   1.54   thorpej 	 */
   3704   1.54   thorpej 
   3705   1.54   thorpej 	for (entry = map->header.next; entry != &map->header;
   3706   1.54   thorpej 	     entry = entry->next) {
   3707   1.54   thorpej 		if (entry->protection == VM_PROT_NONE)
   3708   1.54   thorpej 			continue;
   3709   1.55   thorpej 		if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
   3710   1.99       chs 
   3711   1.54   thorpej 			/*
   3712   1.54   thorpej 			 * perform actions of vm_map_lookup that need the
   3713   1.54   thorpej 			 * write lock on the map: create an anonymous map
   3714   1.54   thorpej 			 * for a copy-on-write region, or an anonymous map
   3715   1.54   thorpej 			 * for a zero-fill region.  (XXXCDC: submap case
   3716   1.54   thorpej 			 * ok?)
   3717   1.54   thorpej 			 */
   3718   1.99       chs 
   3719   1.54   thorpej 			if (!UVM_ET_ISSUBMAP(entry)) {	/* not submap */
   3720   1.98       chs 				if (UVM_ET_ISNEEDSCOPY(entry) &&
   3721  1.117       chs 				    ((entry->max_protection & VM_PROT_WRITE) ||
   3722   1.54   thorpej 				     (entry->object.uvm_obj == NULL))) {
   3723  1.212      yamt 					amap_copy(map, entry, 0, entry->start,
   3724  1.212      yamt 					    entry->end);
   3725   1.54   thorpej 					/* XXXCDC: wait OK? */
   3726   1.54   thorpej 				}
   3727   1.54   thorpej 			}
   3728   1.55   thorpej 		}
   3729   1.54   thorpej 		entry->wired_count++;
   3730   1.54   thorpej 	}
   3731   1.54   thorpej 
   3732   1.54   thorpej 	/*
   3733   1.54   thorpej 	 * Pass 3.
   3734   1.54   thorpej 	 */
   3735   1.54   thorpej 
   3736   1.60   thorpej #ifdef DIAGNOSTIC
   3737   1.60   thorpej 	timestamp_save = map->timestamp;
   3738   1.60   thorpej #endif
   3739   1.60   thorpej 	vm_map_busy(map);
   3740  1.249      yamt 	vm_map_unlock(map);
   3741   1.54   thorpej 
   3742   1.94       chs 	rv = 0;
   3743   1.54   thorpej 	for (entry = map->header.next; entry != &map->header;
   3744   1.54   thorpej 	     entry = entry->next) {
   3745   1.54   thorpej 		if (entry->wired_count == 1) {
   3746   1.54   thorpej 			rv = uvm_fault_wire(map, entry->start, entry->end,
   3747  1.216  drochner 			    entry->max_protection, 1);
   3748   1.54   thorpej 			if (rv) {
   3749   1.99       chs 
   3750   1.54   thorpej 				/*
   3751   1.54   thorpej 				 * wiring failed.  break out of the loop.
   3752   1.54   thorpej 				 * we'll clean up the map below, once we
   3753   1.54   thorpej 				 * have a write lock again.
   3754   1.54   thorpej 				 */
   3755   1.99       chs 
   3756   1.54   thorpej 				break;
   3757   1.54   thorpej 			}
   3758   1.54   thorpej 		}
   3759   1.54   thorpej 	}
   3760   1.54   thorpej 
   3761   1.99       chs 	if (rv) {
   3762   1.99       chs 
   3763   1.54   thorpej 		/*
   3764   1.54   thorpej 		 * Get back an exclusive (write) lock.
   3765   1.54   thorpej 		 */
   3766   1.99       chs 
   3767  1.249      yamt 		vm_map_lock(map);
   3768   1.60   thorpej 		vm_map_unbusy(map);
   3769   1.60   thorpej 
   3770   1.60   thorpej #ifdef DIAGNOSTIC
   3771  1.252      yamt 		if (timestamp_save + 1 != map->timestamp)
   3772   1.60   thorpej 			panic("uvm_map_pageable_all: stale map");
   3773   1.60   thorpej #endif
   3774   1.54   thorpej 
   3775   1.54   thorpej 		/*
   3776   1.54   thorpej 		 * first drop the wiring count on all the entries
   3777   1.54   thorpej 		 * which haven't actually been wired yet.
   3778   1.67   thorpej 		 *
   3779   1.67   thorpej 		 * Skip VM_PROT_NONE entries like we did above.
   3780   1.54   thorpej 		 */
   3781   1.99       chs 
   3782   1.54   thorpej 		failed_entry = entry;
   3783   1.54   thorpej 		for (/* nothing */; entry != &map->header;
   3784   1.67   thorpej 		     entry = entry->next) {
   3785   1.67   thorpej 			if (entry->protection == VM_PROT_NONE)
   3786   1.67   thorpej 				continue;
   3787   1.54   thorpej 			entry->wired_count--;
   3788   1.67   thorpej 		}
   3789   1.54   thorpej 
   3790   1.54   thorpej 		/*
   3791   1.54   thorpej 		 * now, unwire all the entries that were successfully
   3792   1.54   thorpej 		 * wired above.
   3793   1.67   thorpej 		 *
   3794   1.67   thorpej 		 * Skip VM_PROT_NONE entries like we did above.
   3795   1.54   thorpej 		 */
   3796   1.99       chs 
   3797   1.54   thorpej 		for (entry = map->header.next; entry != failed_entry;
   3798   1.54   thorpej 		     entry = entry->next) {
   3799   1.67   thorpej 			if (entry->protection == VM_PROT_NONE)
   3800   1.67   thorpej 				continue;
   3801   1.54   thorpej 			entry->wired_count--;
   3802   1.67   thorpej 			if (VM_MAPENT_ISWIRED(entry))
   3803   1.54   thorpej 				uvm_map_entry_unwire(map, entry);
   3804   1.54   thorpej 		}
   3805   1.54   thorpej 		vm_map_unlock(map);
   3806  1.353  pgoyette 		UVMHIST_LOG(maphist,"<- done (RV=%jd)", rv,0,0,0);
   3807   1.54   thorpej 		return (rv);
   3808   1.54   thorpej 	}
   3809   1.54   thorpej 
   3810   1.60   thorpej 	vm_map_unbusy(map);
   3811   1.54   thorpej 
   3812   1.54   thorpej 	UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
   3813   1.94       chs 	return 0;
   3814   1.54   thorpej }
   3815   1.54   thorpej 
   3816   1.54   thorpej /*
   3817   1.61   thorpej  * uvm_map_clean: clean out a map range
   3818    1.1       mrg  *
   3819    1.1       mrg  * => valid flags:
   3820   1.61   thorpej  *   if (flags & PGO_CLEANIT): dirty pages are cleaned first
   3821    1.1       mrg  *   if (flags & PGO_SYNCIO): dirty pages are written synchronously
   3822    1.1       mrg  *   if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
   3823    1.1       mrg  *   if (flags & PGO_FREE): any cached pages are freed after clean
   3824    1.1       mrg  * => returns an error if any part of the specified range isn't mapped
   3825   1.98       chs  * => never a need to flush amap layer since the anonymous memory has
   3826   1.61   thorpej  *	no permanent home, but may deactivate pages there
   3827   1.61   thorpej  * => called from sys_msync() and sys_madvise()
   3828    1.1       mrg  * => caller must not write-lock map (read OK).
   3829    1.1       mrg  * => we may sleep while cleaning if SYNCIO [with map read-locked]
   3830    1.1       mrg  */
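                          
                          /*
                           * Example (an illustrative sketch): one plausible msync(2)-style
                           * translation of MS_* flags into the PGO_* flags above; the precise
                           * mapping used by the msync system call may differ in detail.
                           *
                           *	flags = PGO_CLEANIT;
                           *	if (msflags & MS_INVALIDATE)
                           *		flags |= PGO_FREE;
                           *	if (msflags & MS_SYNC)
                           *		flags |= PGO_SYNCIO;
                           *	error = uvm_map_clean(map, start, end, flags);
                           */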
   3831    1.1       mrg 
   3832   1.10       mrg int
   3833  1.138     enami uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
   3834   1.10       mrg {
   3835   1.99       chs 	struct vm_map_entry *current, *entry;
   3836   1.61   thorpej 	struct uvm_object *uobj;
   3837   1.61   thorpej 	struct vm_amap *amap;
   3838  1.375        ad 	struct vm_anon *anon;
   3839   1.61   thorpej 	struct vm_page *pg;
   3840   1.61   thorpej 	vaddr_t offset;
   3841   1.24       eeh 	vsize_t size;
   3842  1.188       dbj 	voff_t uoff;
   3843  1.106       chs 	int error, refs;
   3844  1.385     skrll 	UVMHIST_FUNC(__func__);
   3845  1.385     skrll 	UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,flags=%#jx)",
   3846  1.385     skrll 	    (uintptr_t)map, start, end, flags);
   3847   1.85       chs 
   3848   1.85       chs 	KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) !=
   3849   1.85       chs 		(PGO_FREE|PGO_DEACTIVATE));
   3850   1.61   thorpej 
   3851   1.10       mrg 	vm_map_lock_read(map);
   3852   1.10       mrg 	VM_MAP_RANGE_CHECK(map, start, end);
   3853  1.234   thorpej 	if (uvm_map_lookup_entry(map, start, &entry) == false) {
   3854   1.10       mrg 		vm_map_unlock_read(map);
   3855   1.94       chs 		return EFAULT;
   3856   1.10       mrg 	}
   3857   1.10       mrg 
   3858   1.10       mrg 	/*
   3859  1.186       chs 	 * Make a first pass to check for holes and wiring problems.
   3860   1.10       mrg 	 */
   3861   1.85       chs 
   3862   1.10       mrg 	for (current = entry; current->start < end; current = current->next) {
   3863   1.10       mrg 		if (UVM_ET_ISSUBMAP(current)) {
   3864   1.10       mrg 			vm_map_unlock_read(map);
   3865   1.94       chs 			return EINVAL;
   3866   1.10       mrg 		}
   3867  1.186       chs 		if ((flags & PGO_FREE) != 0 && VM_MAPENT_ISWIRED(entry)) {
   3868  1.186       chs 			vm_map_unlock_read(map);
   3869  1.186       chs 			return EBUSY;
   3870  1.186       chs 		}
   3871   1.90       chs 		if (end <= current->end) {
   3872   1.90       chs 			break;
   3873   1.90       chs 		}
   3874   1.90       chs 		if (current->end != current->next->start) {
   3875   1.10       mrg 			vm_map_unlock_read(map);
   3876   1.94       chs 			return EFAULT;
   3877   1.10       mrg 		}
   3878   1.10       mrg 	}
   3879   1.10       mrg 
   3880   1.94       chs 	error = 0;
   3881   1.90       chs 	for (current = entry; start < end; current = current->next) {
   3882  1.283  uebayasi 		amap = current->aref.ar_amap;	/* upper layer */
   3883  1.283  uebayasi 		uobj = current->object.uvm_obj;	/* lower layer */
   3884   1.85       chs 		KASSERT(start >= current->start);
   3885    1.1       mrg 
   3886   1.10       mrg 		/*
   3887   1.61   thorpej 		 * No amap cleaning necessary if:
   3888   1.61   thorpej 		 *
   3889   1.61   thorpej 		 *	(1) There's no amap.
   3890   1.61   thorpej 		 *
   3891   1.61   thorpej 		 *	(2) We're not deactivating or freeing pages.
   3892   1.10       mrg 		 */
   3893   1.85       chs 
   3894   1.90       chs 		if (amap == NULL || (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
   3895   1.61   thorpej 			goto flush_object;
   3896   1.61   thorpej 
   3897   1.61   thorpej 		offset = start - current->start;
   3898   1.90       chs 		size = MIN(end, current->end) - start;
   3899  1.303     rmind 
   3900  1.372        ad 		amap_lock(amap, RW_WRITER);
   3901   1.90       chs 		for ( ; size != 0; size -= PAGE_SIZE, offset += PAGE_SIZE) {
   3902   1.61   thorpej 			anon = amap_lookup(&current->aref, offset);
   3903   1.61   thorpej 			if (anon == NULL)
   3904   1.61   thorpej 				continue;
   3905   1.61   thorpej 
   3906  1.298     rmind 			KASSERT(anon->an_lock == amap->am_lock);
   3907  1.192      yamt 			pg = anon->an_page;
   3908   1.63   thorpej 			if (pg == NULL) {
   3909   1.63   thorpej 				continue;
   3910   1.63   thorpej 			}
   3911  1.332       chs 			if (pg->flags & PG_BUSY) {
   3912  1.332       chs 				continue;
   3913  1.332       chs 			}
   3914   1.63   thorpej 
   3915   1.61   thorpej 			switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
   3916   1.85       chs 
   3917   1.61   thorpej 			/*
   3918  1.115       chs 			 * In these first 3 cases, we just deactivate the page.
   3919   1.61   thorpej 			 */
   3920   1.85       chs 
   3921   1.61   thorpej 			case PGO_CLEANIT|PGO_FREE:
   3922   1.61   thorpej 			case PGO_CLEANIT|PGO_DEACTIVATE:
   3923   1.61   thorpej 			case PGO_DEACTIVATE:
   3924   1.68   thorpej  deactivate_it:
   3925   1.61   thorpej 				/*
   3926  1.115       chs 				 * skip the page if it's loaned or wired,
   3927  1.115       chs 				 * since it shouldn't be on a paging queue
   3928  1.115       chs 				 * at all in these cases.
   3929   1.61   thorpej 				 */
   3930   1.85       chs 
   3931  1.115       chs 				if (pg->loan_count != 0 ||
   3932  1.115       chs 				    pg->wire_count != 0) {
   3933   1.61   thorpej 					continue;
   3934   1.61   thorpej 				}
   3935   1.85       chs 				KASSERT(pg->uanon == anon);
   3936  1.369        ad 				uvm_pagelock(pg);
   3937   1.61   thorpej 				uvm_pagedeactivate(pg);
   3938  1.369        ad 				uvm_pageunlock(pg);
   3939   1.61   thorpej 				continue;
   3940   1.61   thorpej 
   3941   1.61   thorpej 			case PGO_FREE:
   3942   1.85       chs 
   3943   1.68   thorpej 				/*
   3944   1.68   thorpej 				 * If there are multiple references to
   3945   1.68   thorpej 				 * the amap, just deactivate the page.
   3946   1.68   thorpej 				 */
   3947   1.85       chs 
   3948   1.68   thorpej 				if (amap_refs(amap) > 1)
   3949   1.68   thorpej 					goto deactivate_it;
   3950   1.68   thorpej 
   3951  1.115       chs 				/* skip the page if it's wired */
   3952   1.62   thorpej 				if (pg->wire_count != 0) {
   3953   1.62   thorpej 					continue;
   3954   1.62   thorpej 				}
   3955   1.66   thorpej 				amap_unadd(&current->aref, offset);
   3956   1.61   thorpej 				refs = --anon->an_ref;
   3957  1.298     rmind 				if (refs == 0) {
   3958  1.375        ad 					uvm_anfree(anon);
   3959  1.298     rmind 				}
   3960   1.61   thorpej 				continue;
   3961   1.61   thorpej 			}
   3962   1.61   thorpej 		}
   3963  1.375        ad 		amap_unlock(amap);
   3964    1.1       mrg 
   3965   1.61   thorpej  flush_object:
   3966   1.10       mrg 		/*
   3967   1.33     chuck 		 * flush pages if we've got a valid backing object.
   3968  1.116       chs 		 * note that we must always clean object pages before
   3969  1.116       chs 		 * freeing them since otherwise we could reveal stale
   3970  1.116       chs 		 * data from files.
   3971   1.10       mrg 		 */
   3972    1.1       mrg 
   3973  1.188       dbj 		uoff = current->offset + (start - current->start);
   3974   1.90       chs 		size = MIN(end, current->end) - start;
   3975   1.61   thorpej 		if (uobj != NULL) {
   3976  1.372        ad 			rw_enter(uobj->vmobjlock, RW_WRITER);
   3977  1.136   thorpej 			if (uobj->pgops->pgo_put != NULL)
   3978  1.188       dbj 				error = (uobj->pgops->pgo_put)(uobj, uoff,
   3979  1.188       dbj 				    uoff + size, flags | PGO_CLEANIT);
   3980  1.136   thorpej 			else
   3981  1.136   thorpej 				error = 0;
   3982   1.10       mrg 		}
   3983   1.10       mrg 		start += size;
   3984   1.10       mrg 	}
   3985    1.1       mrg 	vm_map_unlock_read(map);
   3986   1.98       chs 	return (error);
   3987    1.1       mrg }
   3988    1.1       mrg 
   3989    1.1       mrg 
   3990    1.1       mrg /*
   3991    1.1       mrg  * uvm_map_checkprot: check protection in map
   3992    1.1       mrg  *
   3993    1.1       mrg  * => must allow specified protection in a fully allocated region.
   3994    1.1       mrg  * => map must be read or write locked by caller.
   3995    1.1       mrg  */
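                          
                          /*
                           * Example (an illustrative sketch): verifying that a user-supplied
                           * range is readable before operating on it, taking the read lock
                           * the function requires of its caller.
                           *
                           *	vm_map_lock_read(map);
                           *	if (!uvm_map_checkprot(map, start, end, VM_PROT_READ)) {
                           *		vm_map_unlock_read(map);
                           *		return EFAULT;
                           *	}
                           */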
   3996    1.1       mrg 
   3997  1.233   thorpej bool
   3998  1.138     enami uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
   3999  1.138     enami     vm_prot_t protection)
   4000   1.10       mrg {
   4001   1.99       chs 	struct vm_map_entry *entry;
   4002   1.99       chs 	struct vm_map_entry *tmp_entry;
   4003   1.10       mrg 
   4004   1.94       chs 	if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
   4005  1.234   thorpej 		return (false);
   4006   1.94       chs 	}
   4007   1.94       chs 	entry = tmp_entry;
   4008   1.94       chs 	while (start < end) {
   4009   1.94       chs 		if (entry == &map->header) {
   4010  1.234   thorpej 			return (false);
   4011   1.94       chs 		}
   4012   1.85       chs 
   4013   1.10       mrg 		/*
   4014   1.10       mrg 		 * no holes allowed
   4015   1.10       mrg 		 */
   4016   1.10       mrg 
   4017   1.94       chs 		if (start < entry->start) {
   4018  1.234   thorpej 			return (false);
   4019   1.94       chs 		}
   4020   1.10       mrg 
   4021   1.10       mrg 		/*
   4022   1.10       mrg 		 * check protection associated with entry
   4023   1.10       mrg 		 */
   4024    1.1       mrg 
   4025   1.94       chs 		if ((entry->protection & protection) != protection) {
   4026  1.234   thorpej 			return (false);
   4027   1.94       chs 		}
   4028   1.94       chs 		start = entry->end;
   4029   1.94       chs 		entry = entry->next;
   4030   1.94       chs 	}
   4031  1.234   thorpej 	return (true);
   4032    1.1       mrg }
   4033    1.1       mrg 
   4034    1.1       mrg /*
   4035    1.1       mrg  * uvmspace_alloc: allocate a vmspace structure.
   4036    1.1       mrg  *
   4037    1.1       mrg  * - structure includes vm_map and pmap
   4038    1.1       mrg  * - XXX: no locking on this structure
   4039    1.1       mrg  * - refcnt set to 1, rest must be init'd by caller
   4040    1.1       mrg  */
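                          
                          /*
                           * Example (an illustrative sketch): a fresh user address space
                           * might be created with
                           *
                           *	vm = uvmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS, true);
                           *
                           * where the machine-dependent limits and the topdown choice stand
                           * in for whatever the real caller (e.g. exec) computes.
                           */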
   4041   1.10       mrg struct vmspace *
   4042  1.327    martin uvmspace_alloc(vaddr_t vmin, vaddr_t vmax, bool topdown)
   4043   1.10       mrg {
   4044   1.10       mrg 	struct vmspace *vm;
   4045  1.385     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
   4046   1.10       mrg 
   4047  1.248        ad 	vm = pool_cache_get(&uvm_vmspace_cache, PR_WAITOK);
   4048  1.327    martin 	uvmspace_init(vm, NULL, vmin, vmax, topdown);
   4049  1.353  pgoyette 	UVMHIST_LOG(maphist,"<- done (vm=%#jx)", (uintptr_t)vm, 0, 0, 0);
   4050   1.15   thorpej 	return (vm);
   4051   1.15   thorpej }
   4052   1.15   thorpej 
   4053   1.15   thorpej /*
   4054   1.15   thorpej  * uvmspace_init: initialize a vmspace structure.
   4055   1.15   thorpej  *
   4056   1.15   thorpej  * - XXX: no locking on this structure
   4057  1.132      matt  * - refcnt set to 1, rest must be init'd by caller
   4058   1.15   thorpej  */
   4059   1.15   thorpej void
   4060  1.327    martin uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin,
   4061  1.327    martin     vaddr_t vmax, bool topdown)
   4062   1.15   thorpej {
   4063  1.385     skrll 	UVMHIST_FUNC(__func__);
   4064  1.385     skrll 	UVMHIST_CALLARGS(maphist, "(vm=%#jx, pmap=%#jx, vmin=%#jx, vmax=%#jx",
   4065  1.353  pgoyette 	    (uintptr_t)vm, (uintptr_t)pmap, vmin, vmax);
   4066  1.353  pgoyette 	UVMHIST_LOG(maphist, "   topdown=%ju)", topdown, 0, 0, 0);
   4067  1.334      matt 
   4068   1.23     perry 	memset(vm, 0, sizeof(*vm));
    4069  1.199  christos 	uvm_map_setup(&vm->vm_map, vmin, vmax, VM_MAP_PAGEABLE
    4070  1.327    martin 	    | (topdown ? VM_MAP_TOPDOWN : 0));
   4072   1.15   thorpej 	if (pmap)
   4073   1.15   thorpej 		pmap_reference(pmap);
   4074   1.15   thorpej 	else
   4075   1.15   thorpej 		pmap = pmap_create();
   4076   1.15   thorpej 	vm->vm_map.pmap = pmap;
   4077   1.10       mrg 	vm->vm_refcnt = 1;
   4078   1.15   thorpej 	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
   4079    1.1       mrg }
   4080    1.1       mrg 
   4081    1.1       mrg /*
   4082  1.168  junyoung  * uvmspace_share: share a vmspace between two processes
   4083    1.1       mrg  *
   4084    1.1       mrg  * - used for vfork, threads(?)
   4085    1.1       mrg  */
   4086    1.1       mrg 
   4087   1.10       mrg void
   4088  1.138     enami uvmspace_share(struct proc *p1, struct proc *p2)
   4089    1.1       mrg {
   4090  1.139     enami 
   4091  1.215      yamt 	uvmspace_addref(p1->p_vmspace);
   4092   1.10       mrg 	p2->p_vmspace = p1->p_vmspace;
   4093    1.1       mrg }
   4094    1.1       mrg 
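                          /*
                           * Example (an illustrative sketch): fork1() uses this for
                           * vfork(2)-style address-space sharing, roughly
                           *
                           *	if (flags & FORK_SHAREVM)
                           *		uvmspace_share(parent, child);
                           */
                          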
   4095  1.282     rmind #if 0
   4096  1.282     rmind 
   4097    1.1       mrg /*
   4098    1.1       mrg  * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace
   4099    1.1       mrg  *
   4100    1.1       mrg  * - XXX: no locking on vmspace
   4101    1.1       mrg  */
   4102    1.1       mrg 
   4103   1.10       mrg void
   4104  1.138     enami uvmspace_unshare(struct lwp *l)
   4105   1.10       mrg {
   4106  1.128   thorpej 	struct proc *p = l->l_proc;
   4107   1.10       mrg 	struct vmspace *nvm, *ovm = p->p_vmspace;
   4108   1.85       chs 
   4109   1.10       mrg 	if (ovm->vm_refcnt == 1)
   4110   1.10       mrg 		/* nothing to do: vmspace isn't shared in the first place */
   4111   1.10       mrg 		return;
   4112   1.85       chs 
   4113   1.10       mrg 	/* make a new vmspace, still holding old one */
   4114   1.10       mrg 	nvm = uvmspace_fork(ovm);
   4115   1.10       mrg 
   4116  1.254        ad 	kpreempt_disable();
   4117  1.128   thorpej 	pmap_deactivate(l);		/* unbind old vmspace */
   4118   1.98       chs 	p->p_vmspace = nvm;
   4119  1.128   thorpej 	pmap_activate(l);		/* switch to new vmspace */
   4120  1.254        ad 	kpreempt_enable();
   4121   1.13   thorpej 
   4122   1.10       mrg 	uvmspace_free(ovm);		/* drop reference to old vmspace */
   4123    1.1       mrg }
   4124    1.1       mrg 
   4125  1.282     rmind #endif
   4126  1.282     rmind 
   4127  1.317    martin 
   4128  1.317    martin /*
   4129  1.317    martin  * uvmspace_spawn: a new process has been spawned and needs a vmspace
   4130  1.317    martin  */
   4131  1.317    martin 
   4132  1.317    martin void
   4133  1.327    martin uvmspace_spawn(struct lwp *l, vaddr_t start, vaddr_t end, bool topdown)
   4134  1.317    martin {
   4135  1.317    martin 	struct proc *p = l->l_proc;
   4136  1.317    martin 	struct vmspace *nvm;
   4137  1.317    martin 
   4138  1.317    martin #ifdef __HAVE_CPU_VMSPACE_EXEC
   4139  1.317    martin 	cpu_vmspace_exec(l, start, end);
   4140  1.317    martin #endif
   4141  1.317    martin 
   4142  1.327    martin 	nvm = uvmspace_alloc(start, end, topdown);
   4143  1.317    martin 	kpreempt_disable();
   4144  1.317    martin 	p->p_vmspace = nvm;
   4145  1.317    martin 	pmap_activate(l);
   4146  1.317    martin 	kpreempt_enable();
   4147  1.317    martin }
   4148  1.317    martin 
   4149    1.1       mrg /*
   4150    1.1       mrg  * uvmspace_exec: the process wants to exec a new program
   4151    1.1       mrg  */
   4152    1.1       mrg 
   4153   1.10       mrg void
   4154  1.327    martin uvmspace_exec(struct lwp *l, vaddr_t start, vaddr_t end, bool topdown)
   4155    1.1       mrg {
   4156  1.128   thorpej 	struct proc *p = l->l_proc;
   4157   1.10       mrg 	struct vmspace *nvm, *ovm = p->p_vmspace;
   4158  1.302    martin 	struct vm_map *map;
   4159  1.373        ad 	int flags;
   4160    1.1       mrg 
   4161  1.317    martin 	KASSERT(ovm != NULL);
   4162  1.294      matt #ifdef __HAVE_CPU_VMSPACE_EXEC
   4163  1.294      matt 	cpu_vmspace_exec(l, start, end);
   4164  1.294      matt #endif
   4165    1.1       mrg 
   4166  1.302    martin 	map = &ovm->vm_map;
   4167  1.302    martin 	/*
   4168   1.10       mrg 	 * see if more than one process is using this vmspace...
   4169   1.10       mrg 	 */
   4170    1.1       mrg 
   4171  1.327    martin 	if (ovm->vm_refcnt == 1
   4172  1.327    martin 	    && topdown == ((ovm->vm_map.flags & VM_MAP_TOPDOWN) != 0)) {
   4173    1.1       mrg 
   4174   1.10       mrg 		/*
    4175   1.10       mrg 		 * if p is the only process using its vmspace then we can safely
    4176   1.10       mrg 		 * recycle that vmspace for the program that is being exec'd,
    4177  1.327    martin 		 * but only if the map's TOPDOWN flag matches the value
    4178  1.327    martin 		 * requested for the new vm space.
   4179   1.10       mrg 		 */
   4180    1.1       mrg 
   4181   1.10       mrg 		/*
   4182   1.10       mrg 		 * SYSV SHM semantics require us to kill all segments on an exec
   4183   1.10       mrg 		 */
   4184  1.336  pgoyette 		if (uvm_shmexit && ovm->vm_shm)
   4185  1.336  pgoyette 			(*uvm_shmexit)(ovm);
   4186   1.54   thorpej 
   4187   1.54   thorpej 		/*
   4188   1.54   thorpej 		 * POSIX 1003.1b -- "lock future mappings" is revoked
   4189   1.54   thorpej 		 * when a process execs another program image.
   4190   1.54   thorpej 		 */
   4191   1.99       chs 
   4192  1.238        ad 		map->flags &= ~VM_MAP_WIREFUTURE;
   4193   1.10       mrg 
   4194   1.10       mrg 		/*
   4195  1.378        ad 		 * now unmap the old program.
   4196  1.380  riastrad 		 *
   4197  1.378        ad 		 * XXX set VM_MAP_DYING for the duration, so pmap_update()
   4198  1.378        ad 		 * is not called until the pmap has been totally cleared out
   4199  1.378        ad 		 * after pmap_remove_all(), or it can confuse some pmap
   4200  1.378        ad 		 * implementations.  it would be nice to handle this by
   4201  1.378        ad 		 * deferring the pmap_update() while it is known the address
   4202  1.378        ad 		 * space is not visible to any user LWP other than curlwp,
   4203  1.378        ad 		 * but there isn't an elegant way of inferring that right
   4204  1.378        ad 		 * now.
   4205   1.10       mrg 		 */
   4206   1.99       chs 
   4207  1.373        ad 		flags = pmap_remove_all(map->pmap) ? UVM_FLAG_VAONLY : 0;
   4208  1.378        ad 		map->flags |= VM_MAP_DYING;
   4209  1.373        ad 		uvm_unmap1(map, vm_map_min(map), vm_map_max(map), flags);
   4210  1.378        ad 		map->flags &= ~VM_MAP_DYING;
   4211  1.378        ad 		pmap_update(map->pmap);
   4212  1.144      yamt 		KASSERT(map->header.prev == &map->header);
   4213  1.144      yamt 		KASSERT(map->nentries == 0);
   4214   1.93       eeh 
   4215   1.93       eeh 		/*
   4216   1.93       eeh 		 * resize the map
   4217   1.93       eeh 		 */
   4218   1.99       chs 
   4219  1.184       chs 		vm_map_setmin(map, start);
   4220  1.184       chs 		vm_map_setmax(map, end);
   4221   1.10       mrg 	} else {
   4222   1.10       mrg 
   4223   1.10       mrg 		/*
   4224   1.10       mrg 		 * p's vmspace is being shared, so we can't reuse it for p since
   4225   1.10       mrg 		 * it is still being used for others.   allocate a new vmspace
   4226   1.10       mrg 		 * for p
   4227   1.10       mrg 		 */
   4228   1.99       chs 
   4229  1.327    martin 		nvm = uvmspace_alloc(start, end, topdown);
   4230    1.1       mrg 
   4231   1.10       mrg 		/*
   4232   1.10       mrg 		 * install new vmspace and drop our ref to the old one.
   4233   1.10       mrg 		 */
   4234   1.10       mrg 
   4235  1.254        ad 		kpreempt_disable();
   4236  1.128   thorpej 		pmap_deactivate(l);
   4237   1.10       mrg 		p->p_vmspace = nvm;
   4238  1.128   thorpej 		pmap_activate(l);
   4239  1.254        ad 		kpreempt_enable();
   4240   1.13   thorpej 
   4241   1.10       mrg 		uvmspace_free(ovm);
   4242   1.10       mrg 	}
   4243    1.1       mrg }
   4244    1.1       mrg 
   4245    1.1       mrg /*
   4246  1.368   msaitoh  * uvmspace_addref: add a reference to a vmspace.
   4247  1.215      yamt  */
   4248  1.215      yamt 
   4249  1.215      yamt void
   4250  1.215      yamt uvmspace_addref(struct vmspace *vm)
   4251  1.215      yamt {
   4252  1.215      yamt 
   4253  1.371        ad 	KASSERT((vm->vm_map.flags & VM_MAP_DYING) == 0);
   4254  1.215      yamt 	KASSERT(vm->vm_refcnt > 0);
   4255  1.371        ad 	atomic_inc_uint(&vm->vm_refcnt);
   4256  1.215      yamt }
   4257  1.215      yamt 
   4258  1.215      yamt /*
   4259    1.1       mrg  * uvmspace_free: free a vmspace data structure
   4260    1.1       mrg  */
   4261    1.1       mrg 
   4262   1.10       mrg void
   4263  1.138     enami uvmspace_free(struct vmspace *vm)
   4264    1.1       mrg {
   4265   1.99       chs 	struct vm_map_entry *dead_entries;
   4266  1.171        pk 	struct vm_map *map = &vm->vm_map;
   4267  1.373        ad 	int flags;
   4268  1.172        he 
   4269  1.385     skrll 	UVMHIST_FUNC(__func__);
   4270  1.385     skrll 	UVMHIST_CALLARGS(maphist,"(vm=%#jx) ref=%jd", (uintptr_t)vm,
   4271  1.385     skrll 	    vm->vm_refcnt, 0, 0);
   4272  1.392  riastrad 
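                          	/*
                          	 * Drop our reference.  The membar_release() before the atomic
                          	 * decrement orders this thread's prior stores before the count
                          	 * can be observed to reach zero; the membar_acquire() below
                          	 * pairs with it, so the final releaser also sees every other
                          	 * releaser's stores before tearing the vmspace down.
                          	 */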
   4273  1.393  riastrad 	membar_release();
   4274  1.371        ad 	if (atomic_dec_uint_nv(&vm->vm_refcnt) > 0)
   4275  1.120       chs 		return;
   4276  1.393  riastrad 	membar_acquire();
   4277   1.99       chs 
   4278  1.120       chs 	/*
   4279  1.120       chs 	 * at this point, there should be no other references to the map.
   4280  1.120       chs 	 * delete all of the mappings, then destroy the pmap.
   4281  1.120       chs 	 */
   4282   1.99       chs 
   4283  1.120       chs 	map->flags |= VM_MAP_DYING;
   4284  1.373        ad 	flags = pmap_remove_all(map->pmap) ? UVM_FLAG_VAONLY : 0;
   4285  1.336  pgoyette 
   4286  1.120       chs 	/* Get rid of any SYSV shared memory segments. */
   4287  1.336  pgoyette 	if (uvm_shmexit && vm->vm_shm != NULL)
   4288  1.336  pgoyette 		(*uvm_shmexit)(vm);
   4289  1.314     rmind 
   4290  1.120       chs 	if (map->nentries) {
   4291  1.184       chs 		uvm_unmap_remove(map, vm_map_min(map), vm_map_max(map),
   4292  1.373        ad 		    &dead_entries, flags);
   4293  1.120       chs 		if (dead_entries != NULL)
   4294  1.120       chs 			uvm_unmap_detach(dead_entries, 0);
   4295   1.10       mrg 	}
   4296  1.146      yamt 	KASSERT(map->nentries == 0);
   4297  1.146      yamt 	KASSERT(map->size == 0);
   4298  1.314     rmind 
   4299  1.239        ad 	mutex_destroy(&map->misc_lock);
   4300  1.239        ad 	rw_destroy(&map->lock);
   4301  1.255        ad 	cv_destroy(&map->cv);
   4302  1.120       chs 	pmap_destroy(map->pmap);
   4303  1.248        ad 	pool_cache_put(&uvm_vmspace_cache, vm);
   4304    1.1       mrg }
   4305    1.1       mrg 
   4306  1.329  christos static struct vm_map_entry *
   4307  1.329  christos uvm_mapent_clone(struct vm_map *new_map, struct vm_map_entry *old_entry,
   4308  1.329  christos     int flags)
   4309  1.329  christos {
   4310  1.329  christos 	struct vm_map_entry *new_entry;
   4311  1.329  christos 
   4312  1.329  christos 	new_entry = uvm_mapent_alloc(new_map, 0);
   4313  1.329  christos 	/* old_entry -> new_entry */
   4314  1.329  christos 	uvm_mapent_copy(old_entry, new_entry);
   4315  1.329  christos 
   4316  1.329  christos 	/* new pmap has nothing wired in it */
   4317  1.329  christos 	new_entry->wired_count = 0;
   4318  1.329  christos 
   4319  1.329  christos 	/*
   4320  1.329  christos 	 * gain reference to object backing the map (can't
   4321  1.329  christos 	 * be a submap, already checked this case).
   4322  1.329  christos 	 */
   4323  1.329  christos 
   4324  1.329  christos 	if (new_entry->aref.ar_amap)
   4325  1.329  christos 		uvm_map_reference_amap(new_entry, flags);
   4326  1.329  christos 
   4327  1.329  christos 	if (new_entry->object.uvm_obj &&
   4328  1.329  christos 	    new_entry->object.uvm_obj->pgops->pgo_reference)
   4329  1.329  christos 		new_entry->object.uvm_obj->pgops->pgo_reference(
   4330  1.329  christos 			new_entry->object.uvm_obj);
   4331  1.329  christos 
   4332  1.329  christos 	/* insert entry at end of new_map's entry list */
   4333  1.329  christos 	uvm_map_entry_link(new_map, new_map->header.prev,
   4334  1.329  christos 	    new_entry);
   4335  1.329  christos 
   4336  1.329  christos 	return new_entry;
   4337  1.329  christos }
   4338  1.329  christos 
   4339  1.329  christos /*
   4340  1.329  christos  * share the mapping: this means we want the old and
   4341  1.329  christos  * new entries to share amaps and backing objects.
   4342  1.329  christos  */
   4343  1.329  christos static void
   4344  1.329  christos uvm_mapent_forkshared(struct vm_map *new_map, struct vm_map *old_map,
   4345  1.329  christos     struct vm_map_entry *old_entry)
   4346  1.329  christos {
   4347  1.329  christos 	/*
    4348  1.329  christos 	 * if the old_entry needs a new amap (due to a previous fork)
   4349  1.329  christos 	 * then we need to allocate it now so that we have
   4350  1.329  christos 	 * something we own to share with the new_entry.   [in
   4351  1.329  christos 	 * other words, we need to clear needs_copy]
   4352  1.329  christos 	 */
   4353  1.329  christos 
   4354  1.329  christos 	if (UVM_ET_ISNEEDSCOPY(old_entry)) {
   4355  1.329  christos 		/* get our own amap, clears needs_copy */
   4356  1.329  christos 		amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK,
   4357  1.329  christos 		    0, 0);
   4358  1.329  christos 		/* XXXCDC: WAITOK??? */
   4359  1.329  christos 	}
   4360  1.329  christos 
   4361  1.329  christos 	uvm_mapent_clone(new_map, old_entry, AMAP_SHARED);
   4362  1.329  christos }
   4363  1.329  christos 
   4364  1.329  christos 
   4365  1.329  christos static void
   4366  1.329  christos uvm_mapent_forkcopy(struct vm_map *new_map, struct vm_map *old_map,
   4367  1.329  christos     struct vm_map_entry *old_entry)
   4368  1.329  christos {
   4369  1.329  christos 	struct vm_map_entry *new_entry;
   4370  1.329  christos 
   4371  1.329  christos 	/*
   4372  1.329  christos 	 * copy-on-write the mapping (using mmap's
   4373  1.329  christos 	 * MAP_PRIVATE semantics)
   4374  1.329  christos 	 *
   4375  1.329  christos 	 * allocate new_entry, adjust reference counts.
   4376  1.329  christos 	 * (note that new references are read-only).
   4377  1.329  christos 	 */
   4378  1.329  christos 
   4379  1.329  christos 	new_entry = uvm_mapent_clone(new_map, old_entry, 0);
   4380  1.329  christos 
   4381  1.329  christos 	new_entry->etype |=
   4382  1.329  christos 	    (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
   4383  1.329  christos 
   4384  1.329  christos 	/*
   4385  1.329  christos 	 * the new entry will need an amap.  it will either
   4386  1.329  christos 	 * need to be copied from the old entry or created
   4387  1.329  christos 	 * from scratch (if the old entry does not have an
   4388  1.329  christos 	 * amap).  can we defer this process until later
   4389  1.329  christos 	 * (by setting "needs_copy") or do we need to copy
   4390  1.329  christos 	 * the amap now?
   4391  1.329  christos 	 *
   4392  1.329  christos 	 * we must copy the amap now if any of the following
   4393  1.329  christos 	 * conditions hold:
   4394  1.329  christos 	 * 1. the old entry has an amap and that amap is
   4395  1.329  christos 	 *    being shared.  this means that the old (parent)
   4396  1.329  christos 	 *    process is sharing the amap with another
   4397  1.329  christos 	 *    process.  if we do not clear needs_copy here
   4398  1.329  christos 	 *    we will end up in a situation where both the
   4399  1.394    andvar 	 *    parent and child process are referring to the
   4400  1.329  christos 	 *    same amap with "needs_copy" set.  if the
   4401  1.329  christos 	 *    parent write-faults, the fault routine will
   4402  1.329  christos 	 *    clear "needs_copy" in the parent by allocating
   4403  1.329  christos 	 *    a new amap.   this is wrong because the
   4404  1.329  christos 	 *    parent is supposed to be sharing the old amap
   4405  1.329  christos 	 *    and the new amap will break that.
   4406  1.329  christos 	 *
   4407  1.329  christos 	 * 2. if the old entry has an amap and a non-zero
   4408  1.329  christos 	 *    wire count then we are going to have to call
   4409  1.329  christos 	 *    amap_cow_now to avoid page faults in the
   4410  1.329  christos 	 *    parent process.   since amap_cow_now requires
   4411  1.329  christos 	 *    "needs_copy" to be clear we might as well
   4412  1.329  christos 	 *    clear it here as well.
   4413  1.329  christos 	 *
   4414  1.329  christos 	 */
   4415  1.329  christos 
   4416  1.329  christos 	if (old_entry->aref.ar_amap != NULL) {
   4417  1.329  christos 		if ((amap_flags(old_entry->aref.ar_amap) & AMAP_SHARED) != 0 ||
   4418  1.329  christos 		    VM_MAPENT_ISWIRED(old_entry)) {
   4419  1.329  christos 
   4420  1.329  christos 			amap_copy(new_map, new_entry,
   4421  1.329  christos 			    AMAP_COPY_NOCHUNK, 0, 0);
   4422  1.329  christos 			/* XXXCDC: M_WAITOK ... ok? */
   4423  1.329  christos 		}
   4424  1.329  christos 	}
   4425  1.329  christos 
   4426  1.329  christos 	/*
   4427  1.329  christos 	 * if the parent's entry is wired down, then the
   4428  1.329  christos 	 * parent process does not want page faults on
   4429  1.329  christos 	 * access to that memory.  this means that we
   4430  1.329  christos 	 * cannot do copy-on-write because we can't write
   4431  1.329  christos 	 * protect the old entry.   in this case we
   4432  1.329  christos 	 * resolve all copy-on-write faults now, using
   4433  1.329  christos 	 * amap_cow_now.   note that we have already
   4434  1.329  christos 	 * allocated any needed amap (above).
   4435  1.329  christos 	 */
   4436  1.329  christos 
   4437  1.329  christos 	if (VM_MAPENT_ISWIRED(old_entry)) {
   4438  1.329  christos 
   4439  1.329  christos 		/*
   4440  1.329  christos 		 * resolve all copy-on-write faults now
   4441  1.329  christos 		 * (note that there is nothing to do if
   4442  1.329  christos 		 * the old mapping does not have an amap).
   4443  1.329  christos 		 */
   4444  1.329  christos 		if (old_entry->aref.ar_amap)
   4445  1.329  christos 			amap_cow_now(new_map, new_entry);
   4446  1.329  christos 
   4447  1.329  christos 	} else {
   4448  1.329  christos 		/*
   4449  1.329  christos 		 * setup mappings to trigger copy-on-write faults
   4450  1.329  christos 		 * we must write-protect the parent if it has
   4451  1.329  christos 		 * an amap and it is not already "needs_copy"...
   4452  1.329  christos 		 * if it is already "needs_copy" then the parent
   4453  1.329  christos 		 * has already been write-protected by a previous
   4454  1.329  christos 		 * fork operation.
   4455  1.329  christos 		 */
   4456  1.329  christos 		if (old_entry->aref.ar_amap &&
   4457  1.329  christos 		    !UVM_ET_ISNEEDSCOPY(old_entry)) {
   4458  1.329  christos 			if (old_entry->max_protection & VM_PROT_WRITE) {
   4459  1.376        ad #ifdef __HAVE_UNLOCKED_PMAP /* XXX temporary */
   4460  1.372        ad 				uvm_map_lock_entry(old_entry, RW_WRITER);
   4461  1.376        ad #else
   4462  1.376        ad 				uvm_map_lock_entry(old_entry, RW_READER);
   4463  1.376        ad #endif
   4464  1.329  christos 				pmap_protect(old_map->pmap,
   4465  1.329  christos 				    old_entry->start, old_entry->end,
   4466  1.329  christos 				    old_entry->protection & ~VM_PROT_WRITE);
   4467  1.362   mlelstv 				uvm_map_unlock_entry(old_entry);
   4468  1.329  christos 			}
   4469  1.329  christos 			old_entry->etype |= UVM_ET_NEEDSCOPY;
   4470  1.329  christos 		}
   4471  1.329  christos 	}
   4472  1.329  christos }
   4473  1.329  christos 
   4474    1.1       mrg /*
    4475  1.330  christos  * zero the mapping: the new entry will be zero-initialized
   4476  1.330  christos  */
   4477  1.330  christos static void
   4478  1.330  christos uvm_mapent_forkzero(struct vm_map *new_map, struct vm_map *old_map,
   4479  1.330  christos     struct vm_map_entry *old_entry)
   4480  1.330  christos {
   4481  1.330  christos 	struct vm_map_entry *new_entry;
   4482  1.330  christos 
   4483  1.330  christos 	new_entry = uvm_mapent_clone(new_map, old_entry, 0);
   4484  1.330  christos 
   4485  1.330  christos 	new_entry->etype |=
   4486  1.330  christos 	    (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
   4487  1.330  christos 
   4488  1.330  christos 	if (new_entry->aref.ar_amap) {
   4489  1.330  christos 		uvm_map_unreference_amap(new_entry, 0);
   4490  1.330  christos 		new_entry->aref.ar_pageoff = 0;
   4491  1.330  christos 		new_entry->aref.ar_amap = NULL;
   4492  1.330  christos 	}
   4493  1.330  christos 
   4494  1.330  christos 	if (UVM_ET_ISOBJ(new_entry)) {
   4495  1.330  christos 		if (new_entry->object.uvm_obj->pgops->pgo_detach)
   4496  1.330  christos 			new_entry->object.uvm_obj->pgops->pgo_detach(
   4497  1.330  christos 			    new_entry->object.uvm_obj);
   4498  1.330  christos 		new_entry->object.uvm_obj = NULL;
   4499  1.390       chs 		new_entry->offset = 0;
   4500  1.330  christos 		new_entry->etype &= ~UVM_ET_OBJ;
   4501  1.330  christos 	}
   4502  1.330  christos }
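
                          /*
                           * For example: userland can request this behaviour for a
                           * region with minherit(2) (a sketch; "addr" and "len" name
                           * an existing mapping):
                           *
                           *	minherit(addr, len, MAP_INHERIT_ZERO);
                           *
                           * after which a child created by fork() sees zero-filled
                           * memory at "addr" instead of a copy of the parent's pages.
                           */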
   4503  1.330  christos 
   4504  1.330  christos /*
   4505    1.1       mrg  *   F O R K   -   m a i n   e n t r y   p o i n t
   4506    1.1       mrg  */
   4507    1.1       mrg /*
   4508    1.1       mrg  * uvmspace_fork: fork a process' main map
   4509    1.1       mrg  *
   4510    1.1       mrg  * => create a new vmspace for child process from parent.
   4511    1.1       mrg  * => parent's map must not be locked.
   4512    1.1       mrg  */
   4513    1.1       mrg 
   4514   1.10       mrg struct vmspace *
   4515  1.138     enami uvmspace_fork(struct vmspace *vm1)
   4516   1.10       mrg {
   4517   1.10       mrg 	struct vmspace *vm2;
   4518   1.99       chs 	struct vm_map *old_map = &vm1->vm_map;
   4519   1.99       chs 	struct vm_map *new_map;
   4520   1.99       chs 	struct vm_map_entry *old_entry;
   4521  1.385     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
   4522    1.1       mrg 
   4523   1.10       mrg 	vm_map_lock(old_map);
   4524    1.1       mrg 
   4525  1.327    martin 	vm2 = uvmspace_alloc(vm_map_min(old_map), vm_map_max(old_map),
   4526  1.327    martin 	    vm1->vm_map.flags & VM_MAP_TOPDOWN);
   4527   1.23     perry 	memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
   4528  1.235  christos 	    (char *) (vm1 + 1) - (char *) &vm1->vm_startcopy);
   4529   1.10       mrg 	new_map = &vm2->vm_map;		  /* XXX */
   4530   1.10       mrg 
   4531   1.10       mrg 	old_entry = old_map->header.next;
   4532  1.162     pooka 	new_map->size = old_map->size;
   4533   1.10       mrg 
   4534   1.10       mrg 	/*
   4535   1.10       mrg 	 * go entry-by-entry
   4536   1.10       mrg 	 */
   4537    1.1       mrg 
   4538   1.10       mrg 	while (old_entry != &old_map->header) {
   4539    1.1       mrg 
   4540   1.10       mrg 		/*
   4541   1.10       mrg 		 * first, some sanity checks on the old entry
   4542   1.10       mrg 		 */
   4543   1.99       chs 
   4544   1.94       chs 		KASSERT(!UVM_ET_ISSUBMAP(old_entry));
   4545   1.94       chs 		KASSERT(UVM_ET_ISCOPYONWRITE(old_entry) ||
   4546   1.94       chs 			!UVM_ET_ISNEEDSCOPY(old_entry));
   4547    1.1       mrg 
   4548   1.10       mrg 		switch (old_entry->inheritance) {
   4549   1.80       wiz 		case MAP_INHERIT_NONE:
   4550   1.10       mrg 			/*
   4551  1.162     pooka 			 * drop the mapping, modify size
   4552   1.10       mrg 			 */
   4553  1.162     pooka 			new_map->size -= old_entry->end - old_entry->start;
   4554   1.10       mrg 			break;
   4555   1.10       mrg 
   4556   1.80       wiz 		case MAP_INHERIT_SHARE:
   4557  1.329  christos 			uvm_mapent_forkshared(new_map, old_map, old_entry);
   4558   1.10       mrg 			break;
   4559   1.10       mrg 
   4560   1.80       wiz 		case MAP_INHERIT_COPY:
   4561  1.329  christos 			uvm_mapent_forkcopy(new_map, old_map, old_entry);
   4562  1.329  christos 			break;
   4563   1.10       mrg 
   4564  1.330  christos 		case MAP_INHERIT_ZERO:
   4565  1.330  christos 			uvm_mapent_forkzero(new_map, old_map, old_entry);
   4566  1.330  christos 			break;
   4567  1.329  christos 		default:
   4568  1.329  christos 			KASSERT(0);
   4569   1.10       mrg 			break;
   4570  1.329  christos 		}
   4571   1.10       mrg 		old_entry = old_entry->next;
   4572    1.1       mrg 	}
   4573    1.1       mrg 
   4574  1.268        ad 	pmap_update(old_map->pmap);
   4575   1.98       chs 	vm_map_unlock(old_map);
   4576    1.1       mrg 
   4577  1.336  pgoyette 	if (uvm_shmfork && vm1->vm_shm)
   4578  1.336  pgoyette 		(*uvm_shmfork)(vm1, vm2);
   4579   1.39   thorpej 
   4580   1.39   thorpej #ifdef PMAP_FORK
   4581   1.39   thorpej 	pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap);
   4582    1.1       mrg #endif
   4583    1.1       mrg 
   4584   1.10       mrg 	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
   4585  1.139     enami 	return (vm2);
   4586    1.1       mrg }
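
                          /*
                           * A sketch of the caller's side (roughly what uvm_proc_fork()
                           * does at fork time; p1 is the parent, p2 the child):
                           *
                           *	if (shared)
                           *		uvmspace_share(p1, p2);
                           *	else
                           *		p2->p_vmspace = uvmspace_fork(p1->p_vmspace);
                           */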
   4587    1.1       mrg 
   4588    1.1       mrg 
   4589  1.174      yamt /*
   4590  1.194      yamt  * uvm_mapent_trymerge: try to merge an entry with its neighbors.
   4591  1.194      yamt  *
   4592  1.194      yamt  * => called with map locked.
    4593  1.194      yamt  * => return non-zero if successfully merged.
   4594  1.194      yamt  */
   4595  1.194      yamt 
   4596  1.194      yamt int
   4597  1.194      yamt uvm_mapent_trymerge(struct vm_map *map, struct vm_map_entry *entry, int flags)
   4598  1.194      yamt {
   4599  1.194      yamt 	struct uvm_object *uobj;
   4600  1.194      yamt 	struct vm_map_entry *next;
   4601  1.194      yamt 	struct vm_map_entry *prev;
   4602  1.195      yamt 	vsize_t size;
   4603  1.194      yamt 	int merged = 0;
   4604  1.233   thorpej 	bool copying;
   4605  1.194      yamt 	int newetype;
   4606  1.194      yamt 
   4607  1.194      yamt 	if (entry->aref.ar_amap != NULL) {
   4608  1.194      yamt 		return 0;
   4609  1.194      yamt 	}
   4610  1.194      yamt 	if ((entry->flags & UVM_MAP_NOMERGE) != 0) {
   4611  1.194      yamt 		return 0;
   4612  1.194      yamt 	}
   4613  1.194      yamt 
   4614  1.194      yamt 	uobj = entry->object.uvm_obj;
   4615  1.195      yamt 	size = entry->end - entry->start;
   4616  1.194      yamt 	copying = (flags & UVM_MERGE_COPYING) != 0;
   4617  1.194      yamt 	newetype = copying ? (entry->etype & ~UVM_ET_NEEDSCOPY) : entry->etype;
   4618  1.194      yamt 
   4619  1.194      yamt 	next = entry->next;
   4620  1.194      yamt 	if (next != &map->header &&
   4621  1.194      yamt 	    next->start == entry->end &&
   4622  1.194      yamt 	    ((copying && next->aref.ar_amap != NULL &&
   4623  1.194      yamt 	    amap_refs(next->aref.ar_amap) == 1) ||
   4624  1.194      yamt 	    (!copying && next->aref.ar_amap == NULL)) &&
   4625  1.194      yamt 	    UVM_ET_ISCOMPATIBLE(next, newetype,
   4626  1.194      yamt 	    uobj, entry->flags, entry->protection,
   4627  1.194      yamt 	    entry->max_protection, entry->inheritance, entry->advice,
   4628  1.195      yamt 	    entry->wired_count) &&
   4629  1.195      yamt 	    (uobj == NULL || entry->offset + size == next->offset)) {
   4630  1.194      yamt 		int error;
   4631  1.194      yamt 
   4632  1.194      yamt 		if (copying) {
   4633  1.195      yamt 			error = amap_extend(next, size,
   4634  1.194      yamt 			    AMAP_EXTEND_NOWAIT|AMAP_EXTEND_BACKWARDS);
   4635  1.194      yamt 		} else {
   4636  1.194      yamt 			error = 0;
   4637  1.194      yamt 		}
   4638  1.194      yamt 		if (error == 0) {
   4639  1.197      yamt 			if (uobj) {
   4640  1.197      yamt 				if (uobj->pgops->pgo_detach) {
   4641  1.197      yamt 					uobj->pgops->pgo_detach(uobj);
   4642  1.197      yamt 				}
   4643  1.194      yamt 			}
   4644  1.194      yamt 
   4645  1.194      yamt 			entry->end = next->end;
   4646  1.221      yamt 			clear_hints(map, next);
   4647  1.194      yamt 			uvm_map_entry_unlink(map, next);
   4648  1.194      yamt 			if (copying) {
   4649  1.194      yamt 				entry->aref = next->aref;
   4650  1.194      yamt 				entry->etype &= ~UVM_ET_NEEDSCOPY;
   4651  1.194      yamt 			}
   4652  1.222      yamt 			uvm_map_check(map, "trymerge forwardmerge");
   4653  1.311      para 			uvm_mapent_free(next);
   4654  1.194      yamt 			merged++;
   4655  1.194      yamt 		}
   4656  1.194      yamt 	}
   4657  1.194      yamt 
   4658  1.194      yamt 	prev = entry->prev;
   4659  1.194      yamt 	if (prev != &map->header &&
   4660  1.194      yamt 	    prev->end == entry->start &&
   4661  1.194      yamt 	    ((copying && !merged && prev->aref.ar_amap != NULL &&
   4662  1.194      yamt 	    amap_refs(prev->aref.ar_amap) == 1) ||
   4663  1.194      yamt 	    (!copying && prev->aref.ar_amap == NULL)) &&
   4664  1.194      yamt 	    UVM_ET_ISCOMPATIBLE(prev, newetype,
   4665  1.194      yamt 	    uobj, entry->flags, entry->protection,
   4666  1.194      yamt 	    entry->max_protection, entry->inheritance, entry->advice,
   4667  1.195      yamt 	    entry->wired_count) &&
   4668  1.196      yamt 	    (uobj == NULL ||
   4669  1.196      yamt 	    prev->offset + prev->end - prev->start == entry->offset)) {
   4670  1.194      yamt 		int error;
   4671  1.194      yamt 
   4672  1.194      yamt 		if (copying) {
   4673  1.195      yamt 			error = amap_extend(prev, size,
   4674  1.194      yamt 			    AMAP_EXTEND_NOWAIT|AMAP_EXTEND_FORWARDS);
   4675  1.194      yamt 		} else {
   4676  1.194      yamt 			error = 0;
   4677  1.194      yamt 		}
   4678  1.194      yamt 		if (error == 0) {
   4679  1.197      yamt 			if (uobj) {
   4680  1.197      yamt 				if (uobj->pgops->pgo_detach) {
   4681  1.197      yamt 					uobj->pgops->pgo_detach(uobj);
   4682  1.197      yamt 				}
   4683  1.197      yamt 				entry->offset = prev->offset;
   4684  1.194      yamt 			}
   4685  1.194      yamt 
   4686  1.194      yamt 			entry->start = prev->start;
   4687  1.221      yamt 			clear_hints(map, prev);
   4688  1.194      yamt 			uvm_map_entry_unlink(map, prev);
   4689  1.194      yamt 			if (copying) {
   4690  1.194      yamt 				entry->aref = prev->aref;
   4691  1.194      yamt 				entry->etype &= ~UVM_ET_NEEDSCOPY;
   4692  1.194      yamt 			}
   4693  1.222      yamt 			uvm_map_check(map, "trymerge backmerge");
   4694  1.311      para 			uvm_mapent_free(prev);
   4695  1.194      yamt 			merged++;
   4696  1.194      yamt 		}
   4697  1.194      yamt 	}
   4698  1.194      yamt 
   4699  1.194      yamt 	return merged;
   4700  1.194      yamt }
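
                          /*
                           * For example: adjacent entries [A,B) and [B,C) backed by the
                           * same uobj merge only when the second entry's offset equals
                           * the first entry's offset plus (B - A), i.e. the object
                           * window is contiguous, and when the UVM_ET_ISCOMPATIBLE()
                           * checks (protection, inheritance, advice, wire count, ...)
                           * all pass.
                           */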
   4701  1.194      yamt 
   4702  1.211      yamt /*
   4703  1.211      yamt  * uvm_map_setup: init map
   4704  1.211      yamt  *
   4705  1.211      yamt  * => map must not be in service yet.
   4706  1.211      yamt  */
   4707  1.211      yamt 
   4708  1.211      yamt void
   4709  1.211      yamt uvm_map_setup(struct vm_map *map, vaddr_t vmin, vaddr_t vmax, int flags)
   4710  1.211      yamt {
   4711  1.211      yamt 
   4712  1.263      matt 	rb_tree_init(&map->rb_tree, &uvm_map_tree_ops);
   4713  1.211      yamt 	map->header.next = map->header.prev = &map->header;
   4714  1.211      yamt 	map->nentries = 0;
   4715  1.211      yamt 	map->size = 0;
   4716  1.211      yamt 	map->ref_count = 1;
   4717  1.211      yamt 	vm_map_setmin(map, vmin);
   4718  1.211      yamt 	vm_map_setmax(map, vmax);
   4719  1.211      yamt 	map->flags = flags;
   4720  1.211      yamt 	map->first_free = &map->header;
   4721  1.211      yamt 	map->hint = &map->header;
   4722  1.211      yamt 	map->timestamp = 0;
   4723  1.238        ad 	map->busy = NULL;
   4724  1.238        ad 
   4725  1.240        ad 	rw_init(&map->lock);
   4726  1.238        ad 	cv_init(&map->cv, "vm_map");
   4727  1.314     rmind 	mutex_init(&map->misc_lock, MUTEX_DRIVER, IPL_NONE);
   4728  1.211      yamt }
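
                          /*
                           * A sketch of typical use; note that uvm_map_setup() does not
                           * touch map->pmap, so the caller installs that itself:
                           *
                           *	map = kmem_alloc(sizeof(*map), KM_SLEEP);
                           *	uvm_map_setup(map, vmin, vmax, VM_MAP_PAGEABLE);
                           *	map->pmap = pmap;
                           */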
   4729  1.211      yamt 
   4730  1.211      yamt /*
   4731  1.211      yamt  *   U N M A P   -   m a i n   e n t r y   p o i n t
   4732  1.211      yamt  */
   4733  1.211      yamt 
   4734  1.211      yamt /*
    4735  1.211      yamt  * uvm_unmap1: remove mappings from a vm_map (from "start" up to "end")
   4736  1.211      yamt  *
   4737  1.211      yamt  * => caller must check alignment and size
   4738  1.211      yamt  * => map must be unlocked (we will lock it)
    4739  1.211      yamt  * => flags are passed through to uvm_unmap_remove().
   4740  1.211      yamt  */
   4741  1.211      yamt 
   4742  1.211      yamt void
   4743  1.211      yamt uvm_unmap1(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
   4744  1.211      yamt {
   4745  1.211      yamt 	struct vm_map_entry *dead_entries;
   4746  1.385     skrll 	UVMHIST_FUNC(__func__);
   4747  1.385     skrll 	UVMHIST_CALLARGS(maphist, "  (map=%#jx, start=%#jx, end=%#jx)",
   4748  1.385     skrll 	    (uintptr_t)map, start, end, 0);
   4749  1.211      yamt 
   4750  1.364       mrg 	KASSERTMSG(start < end,
   4751  1.364       mrg 	    "%s: map %p: start %#jx < end %#jx", __func__, map,
   4752  1.364       mrg 	    (uintmax_t)start, (uintmax_t)end);
   4753  1.246   xtraeme 	if (map == kernel_map) {
   4754  1.244      yamt 		LOCKDEBUG_MEM_CHECK((void *)start, end - start);
   4755  1.246   xtraeme 	}
   4756  1.351       chs 
   4757  1.211      yamt 	/*
    4758  1.211      yamt 	 * work now done by helper functions.   wipe the pmap and then
   4759  1.211      yamt 	 * detach from the dead entries...
   4760  1.211      yamt 	 */
   4761  1.211      yamt 	vm_map_lock(map);
   4762  1.311      para 	uvm_unmap_remove(map, start, end, &dead_entries, flags);
   4763  1.211      yamt 	vm_map_unlock(map);
   4764  1.211      yamt 
   4765  1.211      yamt 	if (dead_entries != NULL)
   4766  1.211      yamt 		uvm_unmap_detach(dead_entries, 0);
   4767  1.211      yamt 
   4768  1.211      yamt 	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
   4769  1.211      yamt }
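
                          /*
                           * Most callers reach this through the uvm_unmap() macro in
                           * uvm_map.h, which supplies a flags argument of 0:
                           *
                           *	uvm_unmap(map, start, end);
                           */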
   4770  1.211      yamt 
   4771  1.211      yamt 
   4772  1.211      yamt /*
   4773  1.211      yamt  * uvm_map_reference: add reference to a map
   4774  1.211      yamt  *
   4775  1.371        ad  * => map need not be locked
   4776  1.211      yamt  */
   4777  1.211      yamt 
   4778  1.211      yamt void
   4779  1.211      yamt uvm_map_reference(struct vm_map *map)
   4780  1.211      yamt {
   4781  1.371        ad 
   4782  1.371        ad 	atomic_inc_uint(&map->ref_count);
   4783  1.211      yamt }
   4784  1.211      yamt 
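                          /*
                           * uvm_map_lock_entry: lock the amap and the backing uvm_object
                           * (if any) of a map entry, with the given op (RW_READER or
                           * RW_WRITER).
                           */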
   4785  1.298     rmind void
   4786  1.372        ad uvm_map_lock_entry(struct vm_map_entry *entry, krw_t op)
   4787  1.298     rmind {
   4788  1.298     rmind 
   4789  1.299     rmind 	if (entry->aref.ar_amap != NULL) {
   4790  1.372        ad 		amap_lock(entry->aref.ar_amap, op);
   4791  1.299     rmind 	}
   4792  1.298     rmind 	if (UVM_ET_ISOBJ(entry)) {
   4793  1.372        ad 		rw_enter(entry->object.uvm_obj->vmobjlock, op);
   4794  1.298     rmind 	}
   4795  1.298     rmind }
   4796  1.298     rmind 
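                          /*
                           * uvm_map_unlock_entry: unlock a map entry's object and amap,
                           * in the reverse of the order taken by uvm_map_lock_entry.
                           */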
   4797  1.298     rmind void
   4798  1.298     rmind uvm_map_unlock_entry(struct vm_map_entry *entry)
   4799  1.298     rmind {
   4800  1.298     rmind 
   4801  1.299     rmind 	if (UVM_ET_ISOBJ(entry)) {
   4802  1.372        ad 		rw_exit(entry->object.uvm_obj->vmobjlock);
   4803  1.299     rmind 	}
   4804  1.298     rmind 	if (entry->aref.ar_amap != NULL) {
   4805  1.298     rmind 		amap_unlock(entry->aref.ar_amap);
   4806  1.298     rmind 	}
   4807  1.298     rmind }
   4808  1.298     rmind 
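                          /*
                           * The "object" member of a uvm_voaddr points to either a
                           * uvm_object or a vm_anon.  Both are at least 4-byte aligned,
                           * so the two low bits of the pointer can be borrowed to record
                           * which of the two it is.
                           */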
   4809  1.383   thorpej #define	UVM_VOADDR_TYPE_MASK	0x3UL
   4810  1.383   thorpej #define	UVM_VOADDR_TYPE_UOBJ	0x1UL
   4811  1.383   thorpej #define	UVM_VOADDR_TYPE_ANON	0x2UL
   4812  1.383   thorpej #define	UVM_VOADDR_OBJECT_MASK	~UVM_VOADDR_TYPE_MASK
   4813  1.383   thorpej 
   4814  1.383   thorpej #define	UVM_VOADDR_GET_TYPE(voa)					\
   4815  1.383   thorpej 	((voa)->object & UVM_VOADDR_TYPE_MASK)
   4816  1.383   thorpej #define	UVM_VOADDR_GET_OBJECT(voa)					\
   4817  1.383   thorpej 	((voa)->object & UVM_VOADDR_OBJECT_MASK)
   4818  1.383   thorpej #define	UVM_VOADDR_SET_OBJECT(voa, obj, type)				\
   4819  1.383   thorpej do {									\
   4820  1.383   thorpej 	KASSERT(((uintptr_t)(obj) & UVM_VOADDR_TYPE_MASK) == 0);	\
   4821  1.383   thorpej 	(voa)->object = ((uintptr_t)(obj)) | (type);			\
   4822  1.383   thorpej } while (/*CONSTCOND*/0)
   4823  1.383   thorpej 
   4824  1.383   thorpej #define	UVM_VOADDR_GET_UOBJ(voa)					\
   4825  1.383   thorpej 	((struct uvm_object *)UVM_VOADDR_GET_OBJECT(voa))
   4826  1.383   thorpej #define	UVM_VOADDR_SET_UOBJ(voa, uobj)					\
   4827  1.383   thorpej 	UVM_VOADDR_SET_OBJECT(voa, uobj, UVM_VOADDR_TYPE_UOBJ)
   4828  1.383   thorpej 
   4829  1.383   thorpej #define	UVM_VOADDR_GET_ANON(voa)					\
   4830  1.383   thorpej 	((struct vm_anon *)UVM_VOADDR_GET_OBJECT(voa))
   4831  1.383   thorpej #define	UVM_VOADDR_SET_ANON(voa, anon)					\
   4832  1.383   thorpej 	UVM_VOADDR_SET_OBJECT(voa, anon, UVM_VOADDR_TYPE_ANON)
   4833  1.383   thorpej 
   4834  1.379   thorpej /*
   4835  1.379   thorpej  * uvm_voaddr_acquire: returns the virtual object address corresponding
   4836  1.379   thorpej  * to the specified virtual address.
   4837  1.379   thorpej  *
   4838  1.379   thorpej  * => resolves COW so the true page identity is tracked.
   4839  1.379   thorpej  *
   4840  1.379   thorpej  * => acquires a reference on the page's owner (uvm_object or vm_anon)
   4841  1.379   thorpej  */
   4842  1.379   thorpej bool
   4843  1.379   thorpej uvm_voaddr_acquire(struct vm_map * const map, vaddr_t const va,
   4844  1.379   thorpej     struct uvm_voaddr * const voaddr)
   4845  1.379   thorpej {
   4846  1.379   thorpej 	struct vm_map_entry *entry;
   4847  1.379   thorpej 	struct vm_anon *anon = NULL;
   4848  1.379   thorpej 	bool result = false;
   4849  1.379   thorpej 	bool exclusive = false;
   4850  1.379   thorpej 	void (*unlock_fn)(struct vm_map *);
   4851  1.379   thorpej 
   4852  1.385     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
   4853  1.386     skrll 	UVMHIST_LOG(maphist,"(map=%#jx,va=%#jx)", (uintptr_t)map, va, 0, 0);
   4854  1.379   thorpej 
   4855  1.379   thorpej 	const vaddr_t start = trunc_page(va);
   4856  1.379   thorpej 	const vaddr_t end = round_page(va+1);
   4857  1.379   thorpej 
   4858  1.379   thorpej  lookup_again:
   4859  1.379   thorpej 	if (__predict_false(exclusive)) {
   4860  1.379   thorpej 		vm_map_lock(map);
   4861  1.379   thorpej 		unlock_fn = vm_map_unlock;
   4862  1.379   thorpej 	} else {
   4863  1.379   thorpej 		vm_map_lock_read(map);
   4864  1.379   thorpej 		unlock_fn = vm_map_unlock_read;
   4865  1.379   thorpej 	}
   4866  1.379   thorpej 
   4867  1.379   thorpej 	if (__predict_false(!uvm_map_lookup_entry(map, start, &entry))) {
   4868  1.379   thorpej 		unlock_fn(map);
   4869  1.379   thorpej 		UVMHIST_LOG(maphist,"<- done (no entry)",0,0,0,0);
   4870  1.379   thorpej 		return false;
   4871  1.379   thorpej 	}
   4872  1.379   thorpej 
   4873  1.379   thorpej 	if (__predict_false(entry->protection == VM_PROT_NONE)) {
   4874  1.379   thorpej 		unlock_fn(map);
   4875  1.379   thorpej 		UVMHIST_LOG(maphist,"<- done (PROT_NONE)",0,0,0,0);
   4876  1.379   thorpej 		return false;
   4877  1.379   thorpej 	}
   4878  1.379   thorpej 
   4879  1.379   thorpej 	/*
   4880  1.379   thorpej 	 * We have a fast path for the common case of "no COW resolution
   4881  1.379   thorpej 	 * needed" whereby we have taken a read lock on the map and if
   4882  1.379   thorpej 	 * we don't encounter any need to create a vm_anon then great!
   4883  1.379   thorpej 	 * But if we do, we loop around again, instead taking an exclusive
   4884  1.379   thorpej 	 * lock so that we can perform the fault.
   4885  1.379   thorpej 	 *
   4886  1.379   thorpej 	 * In the event that we have to resolve the fault, we do nearly the
   4887  1.379   thorpej 	 * same work as uvm_map_pageable() does:
   4888  1.379   thorpej 	 *
   4889  1.379   thorpej 	 * 1: holding the write lock, we create any anonymous maps that need
   4890  1.379   thorpej 	 *    to be created.  however, we do NOT need to clip the map entries
   4891  1.379   thorpej 	 *    in this case.
   4892  1.379   thorpej 	 *
   4893  1.379   thorpej 	 * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
   4894  1.379   thorpej 	 *    in the page (assuming the entry is not already wired).  this
   4895  1.379   thorpej 	 *    is done because we need the vm_anon to be present.
   4896  1.379   thorpej 	 */
   4897  1.379   thorpej 	if (__predict_true(!VM_MAPENT_ISWIRED(entry))) {
   4898  1.379   thorpej 
   4899  1.379   thorpej 		bool need_fault = false;
   4900  1.379   thorpej 
   4901  1.379   thorpej 		/*
    4902  1.379   thorpej 		 * perform the actions of vm_map_lookup that need the
   4903  1.379   thorpej 		 * write lock on the map: create an anonymous map for
   4904  1.379   thorpej 		 * a copy-on-write region, or an anonymous map for
   4905  1.379   thorpej 		 * a zero-fill region.
   4906  1.379   thorpej 		 */
   4907  1.379   thorpej 		if (__predict_false(UVM_ET_ISSUBMAP(entry))) {
   4908  1.379   thorpej 			unlock_fn(map);
   4909  1.379   thorpej 			UVMHIST_LOG(maphist,"<- done (submap)",0,0,0,0);
   4910  1.379   thorpej 			return false;
   4911  1.379   thorpej 		}
   4912  1.379   thorpej 		if (__predict_false(UVM_ET_ISNEEDSCOPY(entry) &&
   4913  1.379   thorpej 		    ((entry->max_protection & VM_PROT_WRITE) ||
   4914  1.379   thorpej 		     (entry->object.uvm_obj == NULL)))) {
   4915  1.379   thorpej 			if (!exclusive) {
   4916  1.379   thorpej 				/* need to take the slow path */
   4917  1.379   thorpej 				KASSERT(unlock_fn == vm_map_unlock_read);
   4918  1.379   thorpej 				vm_map_unlock_read(map);
   4919  1.379   thorpej 				exclusive = true;
   4920  1.379   thorpej 				goto lookup_again;
   4921  1.379   thorpej 			}
   4922  1.379   thorpej 			need_fault = true;
   4923  1.379   thorpej 			amap_copy(map, entry, 0, start, end);
   4924  1.379   thorpej 			/* XXXCDC: wait OK? */
   4925  1.379   thorpej 		}
   4926  1.379   thorpej 
   4927  1.379   thorpej 		/*
   4928  1.379   thorpej 		 * do a quick check to see if the fault has already
    4929  1.379   thorpej 		 * been resolved into the upper layer.
   4930  1.379   thorpej 		 */
   4931  1.379   thorpej 		if (__predict_true(entry->aref.ar_amap != NULL &&
   4932  1.379   thorpej 				   need_fault == false)) {
   4933  1.379   thorpej 			amap_lock(entry->aref.ar_amap, RW_WRITER);
   4934  1.379   thorpej 			anon = amap_lookup(&entry->aref, start - entry->start);
   4935  1.379   thorpej 			if (__predict_true(anon != NULL)) {
   4936  1.379   thorpej 				/* amap unlocked below */
   4937  1.379   thorpej 				goto found_anon;
   4938  1.379   thorpej 			}
   4939  1.379   thorpej 			amap_unlock(entry->aref.ar_amap);
   4940  1.379   thorpej 			need_fault = true;
   4941  1.379   thorpej 		}
   4942  1.379   thorpej 
   4943  1.379   thorpej 		/*
   4944  1.379   thorpej 		 * we predict this test as false because if we reach
   4945  1.379   thorpej 		 * this point, then we are likely dealing with a
   4946  1.379   thorpej 		 * shared memory region backed by a uvm_object, in
   4947  1.379   thorpej 		 * which case a fault to create the vm_anon is not
   4948  1.379   thorpej 		 * necessary.
   4949  1.379   thorpej 		 */
   4950  1.379   thorpej 		if (__predict_false(need_fault)) {
   4951  1.379   thorpej 			if (exclusive) {
   4952  1.379   thorpej 				vm_map_busy(map);
   4953  1.379   thorpej 				vm_map_unlock(map);
   4954  1.379   thorpej 				unlock_fn = vm_map_unbusy;
   4955  1.379   thorpej 			}
   4956  1.379   thorpej 
   4957  1.379   thorpej 			if (uvm_fault_wire(map, start, end,
   4958  1.379   thorpej 					   entry->max_protection, 1)) {
   4959  1.379   thorpej 				/* wiring failed */
   4960  1.379   thorpej 				unlock_fn(map);
   4961  1.379   thorpej 				UVMHIST_LOG(maphist,"<- done (wire failed)",
   4962  1.379   thorpej 					    0,0,0,0);
   4963  1.379   thorpej 				return false;
   4964  1.379   thorpej 			}
   4965  1.379   thorpej 
   4966  1.379   thorpej 			/*
   4967  1.379   thorpej 			 * now that we have resolved the fault, we can unwire
   4968  1.379   thorpej 			 * the page.
   4969  1.379   thorpej 			 */
   4970  1.379   thorpej 			if (exclusive) {
   4971  1.379   thorpej 				vm_map_lock(map);
   4972  1.379   thorpej 				vm_map_unbusy(map);
   4973  1.379   thorpej 				unlock_fn = vm_map_unlock;
   4974  1.379   thorpej 			}
   4975  1.379   thorpej 
   4976  1.379   thorpej 			uvm_fault_unwire_locked(map, start, end);
   4977  1.379   thorpej 		}
   4978  1.379   thorpej 	}
   4979  1.379   thorpej 
   4980  1.379   thorpej 	/* check the upper layer */
   4981  1.379   thorpej 	if (entry->aref.ar_amap) {
   4982  1.379   thorpej 		amap_lock(entry->aref.ar_amap, RW_WRITER);
   4983  1.379   thorpej 		anon = amap_lookup(&entry->aref, start - entry->start);
   4984  1.379   thorpej 		if (anon) {
   4985  1.379   thorpej  found_anon:		KASSERT(anon->an_lock == entry->aref.ar_amap->am_lock);
   4986  1.379   thorpej 			anon->an_ref++;
   4987  1.382   thorpej 			rw_obj_hold(anon->an_lock);
   4988  1.379   thorpej 			KASSERT(anon->an_ref != 0);
   4989  1.383   thorpej 			UVM_VOADDR_SET_ANON(voaddr, anon);
   4990  1.379   thorpej 			voaddr->offset = va & PAGE_MASK;
   4991  1.379   thorpej 			result = true;
   4992  1.379   thorpej 		}
   4993  1.379   thorpej 		amap_unlock(entry->aref.ar_amap);
   4994  1.379   thorpej 	}
   4995  1.379   thorpej 
   4996  1.379   thorpej 	/* check the lower layer */
   4997  1.379   thorpej 	if (!result && UVM_ET_ISOBJ(entry)) {
   4998  1.379   thorpej 		struct uvm_object *uobj = entry->object.uvm_obj;
   4999  1.379   thorpej 
   5000  1.379   thorpej 		KASSERT(uobj != NULL);
   5001  1.379   thorpej 		(*uobj->pgops->pgo_reference)(uobj);
   5002  1.383   thorpej 		UVM_VOADDR_SET_UOBJ(voaddr, uobj);
   5003  1.379   thorpej 		voaddr->offset = entry->offset + (va - entry->start);
   5004  1.379   thorpej 		result = true;
   5005  1.379   thorpej 	}
   5006  1.379   thorpej 
   5007  1.379   thorpej 	unlock_fn(map);
   5008  1.379   thorpej 
   5009  1.379   thorpej 	if (result) {
   5010  1.379   thorpej 		UVMHIST_LOG(maphist,
   5011  1.386     skrll 		    "<- done OK (type=%jd,owner=%#jx,offset=%#jx)",
   5012  1.383   thorpej 		    UVM_VOADDR_GET_TYPE(voaddr),
   5013  1.383   thorpej 		    UVM_VOADDR_GET_OBJECT(voaddr),
   5014  1.383   thorpej 		    voaddr->offset, 0);
   5015  1.379   thorpej 	} else {
   5016  1.379   thorpej 		UVMHIST_LOG(maphist,"<- done (failed)",0,0,0,0);
   5017  1.379   thorpej 	}
   5018  1.379   thorpej 
   5019  1.379   thorpej 	return result;
   5020  1.379   thorpej }
   5021  1.379   thorpej 
   5022  1.379   thorpej /*
   5023  1.379   thorpej  * uvm_voaddr_release: release the references held by the
    5024  1.379   thorpej  * virtual object address.
   5025  1.379   thorpej  */
   5026  1.379   thorpej void
   5027  1.379   thorpej uvm_voaddr_release(struct uvm_voaddr * const voaddr)
   5028  1.379   thorpej {
   5029  1.379   thorpej 
   5030  1.383   thorpej 	switch (UVM_VOADDR_GET_TYPE(voaddr)) {
   5031  1.383   thorpej 	case UVM_VOADDR_TYPE_UOBJ: {
   5032  1.383   thorpej 		struct uvm_object * const uobj = UVM_VOADDR_GET_UOBJ(voaddr);
   5033  1.379   thorpej 
   5034  1.379   thorpej 		KASSERT(uobj != NULL);
   5035  1.379   thorpej 		KASSERT(uobj->pgops->pgo_detach != NULL);
   5036  1.379   thorpej 		(*uobj->pgops->pgo_detach)(uobj);
   5037  1.379   thorpej 		break;
   5038  1.379   thorpej 	    }
   5039  1.379   thorpej 	case UVM_VOADDR_TYPE_ANON: {
   5040  1.383   thorpej 		struct vm_anon * const anon = UVM_VOADDR_GET_ANON(voaddr);
   5041  1.382   thorpej 		krwlock_t *lock;
   5042  1.379   thorpej 
   5043  1.379   thorpej 		KASSERT(anon != NULL);
   5044  1.382   thorpej 		rw_enter((lock = anon->an_lock), RW_WRITER);
    5045  1.379   thorpej 		KASSERT(anon->an_ref > 0);
   5046  1.382   thorpej 		if (--anon->an_ref == 0) {
   5047  1.382   thorpej 			uvm_anfree(anon);
   5048  1.379   thorpej 		}
   5049  1.382   thorpej 		rw_exit(lock);
   5050  1.382   thorpej 		rw_obj_free(lock);
    5051  1.379   thorpej 		break;
   5052  1.379   thorpej 	    }
   5053  1.379   thorpej 	default:
   5054  1.379   thorpej 		panic("uvm_voaddr_release: bad type");
   5055  1.379   thorpej 	}
   5056  1.379   thorpej 	memset(voaddr, 0, sizeof(*voaddr));
   5057  1.379   thorpej }
   5058  1.379   thorpej 
   5059  1.379   thorpej /*
   5060  1.379   thorpej  * uvm_voaddr_compare: compare two uvm_voaddr objects.
   5061  1.379   thorpej  *
   5062  1.379   thorpej  * => memcmp() semantics
   5063  1.379   thorpej  */
   5064  1.379   thorpej int
   5065  1.379   thorpej uvm_voaddr_compare(const struct uvm_voaddr * const voaddr1,
   5066  1.379   thorpej     const struct uvm_voaddr * const voaddr2)
   5067  1.379   thorpej {
   5068  1.383   thorpej 	const uintptr_t type1 = UVM_VOADDR_GET_TYPE(voaddr1);
   5069  1.383   thorpej 	const uintptr_t type2 = UVM_VOADDR_GET_TYPE(voaddr2);
   5070  1.379   thorpej 
   5071  1.383   thorpej 	KASSERT(type1 == UVM_VOADDR_TYPE_UOBJ ||
   5072  1.383   thorpej 		type1 == UVM_VOADDR_TYPE_ANON);
   5073  1.379   thorpej 
   5074  1.383   thorpej 	KASSERT(type2 == UVM_VOADDR_TYPE_UOBJ ||
   5075  1.383   thorpej 		type2 == UVM_VOADDR_TYPE_ANON);
   5076  1.380  riastrad 
   5077  1.383   thorpej 	if (type1 < type2)
   5078  1.379   thorpej 		return -1;
   5079  1.383   thorpej 	if (type1 > type2)
   5080  1.379   thorpej 		return 1;
   5081  1.380  riastrad 
   5082  1.383   thorpej 	const uintptr_t addr1 = UVM_VOADDR_GET_OBJECT(voaddr1);
   5083  1.383   thorpej 	const uintptr_t addr2 = UVM_VOADDR_GET_OBJECT(voaddr2);
   5084  1.379   thorpej 
   5085  1.379   thorpej 	if (addr1 < addr2)
   5086  1.379   thorpej 		return -1;
   5087  1.379   thorpej 	if (addr1 > addr2)
   5088  1.379   thorpej 		return 1;
   5089  1.380  riastrad 
   5090  1.379   thorpej 	if (voaddr1->offset < voaddr2->offset)
   5091  1.379   thorpej 		return -1;
   5092  1.379   thorpej 	if (voaddr1->offset > voaddr2->offset)
   5093  1.379   thorpej 		return 1;
   5094  1.380  riastrad 
   5095  1.379   thorpej 	return 0;
   5096  1.379   thorpej }
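
                          /*
                           * A sketch of how a consumer (e.g. the futex code) can use the
                           * functions above to obtain a stable identity for a page of
                           * user memory ("uaddr" is a hypothetical user address):
                           *
                           *	struct uvm_voaddr voa;
                           *
                           *	if (!uvm_voaddr_acquire(&curproc->p_vmspace->vm_map,
                           *	    (vaddr_t)uaddr, &voa))
                           *		return EFAULT;
                           *	... match against known keys with uvm_voaddr_compare() ...
                           *	uvm_voaddr_release(&voa);
                           */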
   5097  1.379   thorpej 
   5098  1.270     pooka #if defined(DDB) || defined(DEBUGPRINT)
   5099  1.280   thorpej 
   5100  1.280   thorpej /*
   5101  1.280   thorpej  * uvm_map_printit: actually prints the map
   5102  1.280   thorpej  */
   5103  1.280   thorpej 
   5104  1.280   thorpej void
   5105  1.280   thorpej uvm_map_printit(struct vm_map *map, bool full,
   5106  1.280   thorpej     void (*pr)(const char *, ...))
   5107  1.280   thorpej {
   5108  1.280   thorpej 	struct vm_map_entry *entry;
   5109  1.280   thorpej 
   5110  1.334      matt 	(*pr)("MAP %p: [%#lx->%#lx]\n", map, vm_map_min(map),
   5111  1.280   thorpej 	    vm_map_max(map));
   5112  1.334      matt 	(*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=%#x\n",
   5113  1.280   thorpej 	    map->nentries, map->size, map->ref_count, map->timestamp,
   5114  1.280   thorpej 	    map->flags);
   5115  1.280   thorpej 	(*pr)("\tpmap=%p(resident=%ld, wired=%ld)\n", map->pmap,
   5116  1.280   thorpej 	    pmap_resident_count(map->pmap), pmap_wired_count(map->pmap));
   5117  1.280   thorpej 	if (!full)
   5118  1.280   thorpej 		return;
   5119  1.397  riastrad 	(*pr)("\tmin=%"PRIxVADDR", max=%"PRIxVADDR"\n",
   5120  1.397  riastrad 	    vm_map_min(map), vm_map_max(map));
   5121  1.280   thorpej 	for (entry = map->header.next; entry != &map->header;
   5122  1.280   thorpej 	    entry = entry->next) {
   5123  1.334      matt 		(*pr)(" - %p: %#lx->%#lx: obj=%p/%#llx, amap=%p/%d\n",
   5124  1.280   thorpej 		    entry, entry->start, entry->end, entry->object.uvm_obj,
   5125  1.280   thorpej 		    (long long)entry->offset, entry->aref.ar_amap,
   5126  1.280   thorpej 		    entry->aref.ar_pageoff);
   5127  1.280   thorpej 		(*pr)(
   5128  1.280   thorpej 		    "\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
   5129  1.397  riastrad 		    "wc=%d, adv=%d%s\n",
   5130  1.280   thorpej 		    (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
   5131  1.280   thorpej 		    (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
   5132  1.280   thorpej 		    (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
   5133  1.280   thorpej 		    entry->protection, entry->max_protection,
   5134  1.397  riastrad 		    entry->inheritance, entry->wired_count, entry->advice,
   5135  1.397  riastrad 		    entry == map->first_free ? " (first_free)" : "");
   5136  1.280   thorpej 	}
   5137  1.280   thorpej }
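
                          /*
                           * uvm_map_printit() is normally reached from ddb(4) (e.g. the
                           * "show map" command); uvm_whatis() below backs "whatis".
                           */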
   5138  1.280   thorpej 
   5139  1.247      yamt void
   5140  1.247      yamt uvm_whatis(uintptr_t addr, void (*pr)(const char *, ...))
   5141  1.247      yamt {
   5142  1.247      yamt 	struct vm_map *map;
   5143  1.247      yamt 
   5144  1.247      yamt 	for (map = kernel_map;;) {
   5145  1.247      yamt 		struct vm_map_entry *entry;
   5146  1.247      yamt 
   5147  1.247      yamt 		if (!uvm_map_lookup_entry_bytree(map, (vaddr_t)addr, &entry)) {
   5148  1.247      yamt 			break;
   5149  1.247      yamt 		}
   5150  1.247      yamt 		(*pr)("%p is %p+%zu from VMMAP %p\n",
   5151  1.247      yamt 		    (void *)addr, (void *)entry->start,
   5152  1.247      yamt 		    (size_t)(addr - (uintptr_t)entry->start), map);
   5153  1.247      yamt 		if (!UVM_ET_ISSUBMAP(entry)) {
   5154  1.247      yamt 			break;
   5155  1.247      yamt 		}
   5156  1.247      yamt 		map = entry->object.sub_map;
   5157  1.247      yamt 	}
   5158  1.247      yamt }
   5159  1.280   thorpej 
   5160  1.280   thorpej #endif /* DDB || DEBUGPRINT */
   5161  1.288  drochner 
   5162  1.288  drochner #ifndef __USER_VA0_IS_SAFE
   5163  1.288  drochner static int
   5164  1.290  drochner sysctl_user_va0_disable(SYSCTLFN_ARGS)
   5165  1.288  drochner {
   5166  1.288  drochner 	struct sysctlnode node;
   5167  1.288  drochner 	int t, error;
   5168  1.288  drochner 
   5169  1.288  drochner 	node = *rnode;
   5170  1.288  drochner 	node.sysctl_data = &t;
   5171  1.290  drochner 	t = user_va0_disable;
   5172  1.288  drochner 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   5173  1.288  drochner 	if (error || newp == NULL)
   5174  1.288  drochner 		return (error);
   5175  1.288  drochner 
   5176  1.290  drochner 	if (!t && user_va0_disable &&
   5177  1.316      elad 	    kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MAP_VA_ZERO, 0,
   5178  1.316      elad 	    NULL, NULL, NULL))
   5179  1.288  drochner 		return EPERM;
   5180  1.288  drochner 
   5181  1.290  drochner 	user_va0_disable = !!t;
   5182  1.288  drochner 	return 0;
   5183  1.288  drochner }
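
                          /*
                           * This handler backs the writable "vm.user_va0_disable" node
                           * created in sysctl_uvmmap_setup() below, e.g.:
                           *
                           *	sysctl -w vm.user_va0_disable=1
                           *
                           * Re-enabling mappings at VA 0 requires the
                           * KAUTH_SYSTEM_MAP_VA_ZERO privilege.
                           */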
   5184  1.335  christos #endif
   5185  1.335  christos 
   5186  1.335  christos static int
   5187  1.335  christos fill_vmentry(struct lwp *l, struct proc *p, struct kinfo_vmentry *kve,
   5188  1.335  christos     struct vm_map *m, struct vm_map_entry *e)
   5189  1.335  christos {
   5190  1.335  christos #ifndef _RUMPKERNEL
   5191  1.335  christos 	int error;
   5192  1.335  christos 
   5193  1.335  christos 	memset(kve, 0, sizeof(*kve));
   5194  1.335  christos 	KASSERT(e != NULL);
   5195  1.335  christos 	if (UVM_ET_ISOBJ(e)) {
   5196  1.335  christos 		struct uvm_object *uobj = e->object.uvm_obj;
   5197  1.335  christos 		KASSERT(uobj != NULL);
   5198  1.335  christos 		kve->kve_ref_count = uobj->uo_refs;
   5199  1.335  christos 		kve->kve_count = uobj->uo_npages;
   5200  1.335  christos 		if (UVM_OBJ_IS_VNODE(uobj)) {
   5201  1.335  christos 			struct vattr va;
   5202  1.335  christos 			struct vnode *vp = (struct vnode *)uobj;
   5203  1.335  christos 			vn_lock(vp, LK_SHARED | LK_RETRY);
   5204  1.335  christos 			error = VOP_GETATTR(vp, &va, l->l_cred);
   5205  1.335  christos 			VOP_UNLOCK(vp);
   5206  1.335  christos 			kve->kve_type = KVME_TYPE_VNODE;
   5207  1.335  christos 			if (error == 0) {
   5208  1.335  christos 				kve->kve_vn_size = vp->v_size;
   5209  1.335  christos 				kve->kve_vn_type = (int)vp->v_type;
   5210  1.335  christos 				kve->kve_vn_mode = va.va_mode;
   5211  1.335  christos 				kve->kve_vn_rdev = va.va_rdev;
   5212  1.335  christos 				kve->kve_vn_fileid = va.va_fileid;
   5213  1.335  christos 				kve->kve_vn_fsid = va.va_fsid;
   5214  1.335  christos 				error = vnode_to_path(kve->kve_path,
   5215  1.335  christos 				    sizeof(kve->kve_path) / 2, vp, l, p);
   5216  1.335  christos 			}
   5217  1.335  christos 		} else if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
   5218  1.335  christos 			kve->kve_type = KVME_TYPE_KERN;
   5219  1.335  christos 		} else if (UVM_OBJ_IS_DEVICE(uobj)) {
   5220  1.335  christos 			kve->kve_type = KVME_TYPE_DEVICE;
   5221  1.335  christos 		} else if (UVM_OBJ_IS_AOBJ(uobj)) {
   5222  1.335  christos 			kve->kve_type = KVME_TYPE_ANON;
   5223  1.335  christos 		} else {
   5224  1.335  christos 			kve->kve_type = KVME_TYPE_OBJECT;
   5225  1.335  christos 		}
   5226  1.335  christos 	} else if (UVM_ET_ISSUBMAP(e)) {
   5227  1.335  christos 		struct vm_map *map = e->object.sub_map;
   5228  1.335  christos 		KASSERT(map != NULL);
   5229  1.335  christos 		kve->kve_ref_count = map->ref_count;
   5230  1.335  christos 		kve->kve_count = map->nentries;
   5231  1.335  christos 		kve->kve_type = KVME_TYPE_SUBMAP;
   5232  1.335  christos 	} else
   5233  1.335  christos 		kve->kve_type = KVME_TYPE_UNKNOWN;
   5234  1.335  christos 
   5235  1.335  christos 	kve->kve_start = e->start;
   5236  1.335  christos 	kve->kve_end = e->end;
   5237  1.335  christos 	kve->kve_offset = e->offset;
   5238  1.335  christos 	kve->kve_wired_count = e->wired_count;
   5239  1.335  christos 	kve->kve_inheritance = e->inheritance;
   5240  1.363  riastrad 	kve->kve_attributes = 0; /* unused */
   5241  1.335  christos 	kve->kve_advice = e->advice;
   5242  1.335  christos #define PROT(p) (((p) & VM_PROT_READ) ? KVME_PROT_READ : 0) | \
   5243  1.335  christos 	(((p) & VM_PROT_WRITE) ? KVME_PROT_WRITE : 0) | \
   5244  1.335  christos 	(((p) & VM_PROT_EXECUTE) ? KVME_PROT_EXEC : 0)
   5245  1.335  christos 	kve->kve_protection = PROT(e->protection);
   5246  1.335  christos 	kve->kve_max_protection = PROT(e->max_protection);
   5247  1.335  christos 	kve->kve_flags |= (e->etype & UVM_ET_COPYONWRITE)
   5248  1.335  christos 	    ? KVME_FLAG_COW : 0;
   5249  1.335  christos 	kve->kve_flags |= (e->etype & UVM_ET_NEEDSCOPY)
   5250  1.335  christos 	    ? KVME_FLAG_NEEDS_COPY : 0;
   5251  1.335  christos 	kve->kve_flags |= (m->flags & VM_MAP_TOPDOWN)
   5252  1.335  christos 	    ? KVME_FLAG_GROWS_DOWN : KVME_FLAG_GROWS_UP;
   5253  1.335  christos 	kve->kve_flags |= (m->flags & VM_MAP_PAGEABLE)
   5254  1.335  christos 	    ? KVME_FLAG_PAGEABLE : 0;
   5255  1.335  christos #endif
   5256  1.335  christos 	return 0;
   5257  1.335  christos }
   5258  1.335  christos 
   5259  1.335  christos static int
   5260  1.335  christos fill_vmentries(struct lwp *l, pid_t pid, u_int elem_size, void *oldp,
   5261  1.335  christos     size_t *oldlenp)
   5262  1.335  christos {
   5263  1.335  christos 	int error;
   5264  1.335  christos 	struct proc *p;
   5265  1.338  christos 	struct kinfo_vmentry *vme;
   5266  1.335  christos 	struct vmspace *vm;
   5267  1.335  christos 	struct vm_map *map;
   5268  1.335  christos 	struct vm_map_entry *entry;
   5269  1.335  christos 	char *dp;
   5270  1.338  christos 	size_t count, vmesize;
   5271  1.335  christos 
   5272  1.343  christos 	if (elem_size == 0 || elem_size > 2 * sizeof(*vme))
   5273  1.343  christos 		return EINVAL;
   5274  1.343  christos 
   5275  1.343  christos 	if (oldp) {
   5276  1.357     kamil 		if (*oldlenp > 10UL * 1024UL * 1024UL)
   5277  1.343  christos 			return E2BIG;
   5278  1.343  christos 		count = *oldlenp / elem_size;
   5279  1.343  christos 		if (count == 0)
   5280  1.343  christos 			return ENOMEM;
   5281  1.343  christos 		vmesize = count * sizeof(*vme);
   5282  1.343  christos 	} else
   5283  1.343  christos 		vmesize = 0;
   5284  1.335  christos 
   5285  1.335  christos 	if ((error = proc_find_locked(l, &p, pid)) != 0)
   5286  1.335  christos 		return error;
   5287  1.335  christos 
   5288  1.343  christos 	vme = NULL;
   5289  1.343  christos 	count = 0;
   5290  1.343  christos 
   5291  1.335  christos 	if ((error = proc_vmspace_getref(p, &vm)) != 0)
   5292  1.335  christos 		goto out;
   5293  1.335  christos 
   5294  1.335  christos 	map = &vm->vm_map;
   5295  1.335  christos 	vm_map_lock_read(map);
   5296  1.335  christos 
   5297  1.335  christos 	dp = oldp;
   5298  1.338  christos 	if (oldp)
   5299  1.338  christos 		vme = kmem_alloc(vmesize, KM_SLEEP);
   5300  1.335  christos 	for (entry = map->header.next; entry != &map->header;
   5301  1.335  christos 	    entry = entry->next) {
   5302  1.352  pgoyette 		if (oldp && (dp - (char *)oldp) < vmesize) {
   5303  1.338  christos 			error = fill_vmentry(l, p, &vme[count], map, entry);
   5304  1.335  christos 			if (error)
   5305  1.338  christos 				goto out;
   5306  1.335  christos 			dp += elem_size;
   5307  1.335  christos 		}
   5308  1.335  christos 		count++;
   5309  1.335  christos 	}
   5310  1.335  christos 	vm_map_unlock_read(map);
   5311  1.335  christos 	uvmspace_free(vm);
   5312  1.338  christos 
   5313  1.335  christos out:
   5314  1.335  christos 	if (pid != -1)
   5315  1.335  christos 		mutex_exit(p->p_lock);
   5316  1.335  christos 	if (error == 0) {
   5317  1.355  riastrad 		const u_int esize = uimin(sizeof(*vme), elem_size);
   5318  1.338  christos 		dp = oldp;
   5319  1.338  christos 		for (size_t i = 0; i < count; i++) {
   5320  1.352  pgoyette 			if (oldp && (dp - (char *)oldp) < vmesize) {
   5321  1.338  christos 				error = sysctl_copyout(l, &vme[i], dp, esize);
   5322  1.338  christos 				if (error)
   5323  1.338  christos 					break;
   5324  1.338  christos 				dp += elem_size;
   5325  1.338  christos 			} else
   5326  1.338  christos 				break;
   5327  1.338  christos 		}
   5328  1.335  christos 		count *= elem_size;
   5329  1.335  christos 		if (oldp != NULL && *oldlenp < count)
   5330  1.335  christos 			error = ENOSPC;
   5331  1.335  christos 		*oldlenp = count;
   5332  1.335  christos 	}
   5333  1.338  christos 	if (vme)
   5334  1.338  christos 		kmem_free(vme, vmesize);
   5335  1.335  christos 	return error;
   5336  1.335  christos }
   5337  1.335  christos 
   5338  1.335  christos static int
   5339  1.335  christos sysctl_vmproc(SYSCTLFN_ARGS)
   5340  1.335  christos {
   5341  1.335  christos 	int error;
   5342  1.335  christos 
   5343  1.335  christos 	if (namelen == 1 && name[0] == CTL_QUERY)
   5344  1.335  christos 		return (sysctl_query(SYSCTLFN_CALL(rnode)));
   5345  1.335  christos 
   5346  1.335  christos 	if (namelen == 0)
   5347  1.335  christos 		return EINVAL;
   5348  1.335  christos 
   5349  1.335  christos 	switch (name[0]) {
   5350  1.335  christos 	case VM_PROC_MAP:
   5351  1.335  christos 		if (namelen != 3)
   5352  1.335  christos 			return EINVAL;
   5353  1.335  christos 		sysctl_unlock();
   5354  1.343  christos 		error = fill_vmentries(l, name[1], name[2], oldp, oldlenp);
   5355  1.335  christos 		sysctl_relock();
   5356  1.335  christos 		return error;
   5357  1.335  christos 	default:
   5358  1.335  christos 		return EINVAL;
   5359  1.335  christos 	}
   5360  1.335  christos }
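
                          /*
                           * A sketch of the userland side of VM_PROC_MAP: probe for the
                           * required size with a NULL buffer, then fetch ("pid" and
                           * "buf" are the caller's):
                           *
                           *	int mib[5] = { CTL_VM, VM_PROC, VM_PROC_MAP, pid,
                           *	    (int)sizeof(struct kinfo_vmentry) };
                           *	size_t len = 0;
                           *
                           *	sysctl(mib, 5, NULL, &len, NULL, 0);
                           *	buf = malloc(len);
                           *	sysctl(mib, 5, buf, &len, NULL, 0);
                           */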
   5361  1.288  drochner 
   5362  1.288  drochner SYSCTL_SETUP(sysctl_uvmmap_setup, "sysctl uvmmap setup")
   5363  1.288  drochner {
   5364  1.288  drochner 
   5365  1.335  christos 	sysctl_createv(clog, 0, NULL, NULL,
   5366  1.335  christos 		       CTLFLAG_PERMANENT,
   5367  1.335  christos 		       CTLTYPE_STRUCT, "proc",
   5368  1.335  christos 		       SYSCTL_DESCR("Process vm information"),
   5369  1.335  christos 		       sysctl_vmproc, 0, NULL, 0,
   5370  1.335  christos 		       CTL_VM, VM_PROC, CTL_EOL);
   5371  1.335  christos #ifndef __USER_VA0_IS_SAFE
   5372  1.288  drochner         sysctl_createv(clog, 0, NULL, NULL,
   5373  1.288  drochner                        CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   5374  1.289  drochner                        CTLTYPE_INT, "user_va0_disable",
   5375  1.288  drochner                        SYSCTL_DESCR("Disable VA 0"),
   5376  1.290  drochner                        sysctl_user_va0_disable, 0, &user_va0_disable, 0,
   5377  1.288  drochner                        CTL_VM, CTL_CREATE, CTL_EOL);
   5378  1.335  christos #endif
   5379  1.288  drochner }
   5380