/*	$NetBSD: uvm_fault.c,v 1.237 2024/03/15 07:09:37 andvar Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_fault.c,v 1.1.2.23 1998/02/06 05:29:05 chs Exp
 */

/*
 * uvm_fault.c: fault handler
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.237 2024/03/15 07:09:37 andvar Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/kernel.h>
#include <sys/mman.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_rndsource.h>

/*
 *
 * a word on page faults:
 *
 * types of page faults we handle:
 *
 * CASE 1: upper layer faults                   CASE 2: lower layer faults
 *
 *    CASE 1A         CASE 1B                  CASE 2A        CASE 2B
 *    read/write1     write>1                  read/write   +-cow_write/zero
 *         |             |                         |        |
 *      +--|--+       +--|--+     +-----+       +  |  +     | +-----+
 * amap |  V  |       |  ---------> new |          |        | |  ^  |
 *      +-----+       +-----+     +-----+       +  |  +     | +--|--+
 *                                                 |        |    |
 *      +-----+       +-----+                   +--|--+     | +--|--+
 * uobj | d/c |       | d/c |                   |  V  |     +----+  |
 *      +-----+       +-----+                   +-----+       +-----+
 *
 * d/c = don't care
 *
 *   case [0]: layerless fault
 *	no amap or uobj is present.   this is an error.
 *
 *   case [1]: upper layer fault [anon active]
 *     1A: [read] or [write with anon->an_ref == 1]
 *		I/O takes place in upper level anon and uobj is not touched.
 *     1B: [write with anon->an_ref > 1]
 *		new anon is alloc'd and data is copied off ["COW"]
 *
 *   case [2]: lower layer fault [uobj]
 *     2A: [read on non-NULL uobj] or [write to non-copy_on_write area]
 *		I/O takes place directly in object.
 *     2B: [write to copy_on_write] or [read on NULL uobj]
 *		data is "promoted" from uobj to a new anon.
 *		if uobj is null, then we zero fill.
 *
 * we follow the standard UVM locking protocol ordering:
 *
 * MAPS => AMAP => UOBJ => ANON => PAGE QUEUES (PQ)
 * we hold a PG_BUSY page if we unlock for I/O
 *
 *
 * the code is structured as follows:
 *
 *     - init the "IN" params in the ufi structure
 *   ReFault: (ERESTART returned to the loop in uvm_fault_internal)
 *     - do lookups [locks maps], check protection, handle needs_copy
 *     - check for case 0 fault (error)
 *     - establish "range" of fault
 *     - if we have an amap lock it and extract the anons
 *     - if sequential advice deactivate pages behind us
 *     - at the same time check pmap for unmapped areas and anon for pages
 *	 that we could map in (and map them in if found)
 *     - check object for resident pages that we could map in
 *     - if (case 2) goto Case2
 *     - >>> handle case 1
 *           - ensure source anon is resident in RAM
 *           - if case 1B alloc new anon and copy from source
 *           - map the correct page in
 *   Case2:
 *     - >>> handle case 2
 *           - ensure source page is resident (if uobj)
 *           - if case 2B alloc new anon and copy from source (could be zero
 *		fill if uobj == NULL)
 *           - map the correct page in
 *     - done!
 *
 * note on paging:
 *   if we have to do I/O we place a PG_BUSY page in the correct object,
 * unlock everything, and do the I/O.   when I/O is done we must reverify
 * the state of the world before assuming that our data structures are
 * valid.   [because mappings could change while the map is unlocked]
 *
 *  alternative 1: unbusy the page in question and restart the page fault
 *    from the top (ReFault).   this is easy but does not take advantage
 *    of the information that we already have from our previous lookup,
 *    although it is possible that the "hints" in the vm_map will help here.
 *
 * alternative 2: the system already keeps track of a "version" number of
 *    a map.   [i.e. every time you write-lock a map (e.g. to change a
 *    mapping) you bump the version number up by one...]   so, we can save
 *    the version number of the map before we release the lock and start I/O.
 *    then when I/O is done we can relock and check the version numbers
 *    to see if anything changed.    this might save us something over
 *    alternative 1 because we don't have to unbusy the page and it may
 *    mean fewer compares(?).
 *
 * alternative 3: put in backpointers or a way to "hold" part of a map
 *    in place while I/O is in progress.   this could be complex to
 *    implement (especially with structures like amap that can be referenced
 *    by multiple map entries, and figuring out what should wait could be
 *    complex as well...).
 *
 * we use alternative 2.  given that we are multi-threaded now we may want
 * to reconsider the choice.
 */
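
/*
 * The control flow above, as a sketch of the retry loop that drives it
 * (an illustration of uvm_fault_internal() below, not extra code in the
 * fault path):
 *
 *	error = ERESTART;
 *	while (error == ERESTART) {			// ReFault:
 *		error = uvm_fault_check(...);		// lookups, prot
 *		if (error != 0)
 *			continue;
 *		error = uvm_fault_upper_lookup(...);	// scan upper layer
 *		if (error != 0)
 *			continue;
 *		if (pages[flt.centeridx] == PGO_DONTCARE)
 *			error = uvm_fault_upper(...);	// case 1
 *		else
 *			error = uvm_fault_lower(...);	// case 2
 *	}
 */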

/*
 * local data structures
 */

struct uvm_advice {
	int advice;
	int nback;
	int nforw;
};

/*
 * page range array:
 * note: index in array must match "advice" value
 * XXX: borrowed numbers from freebsd.   do they work well for us?
 */

static const struct uvm_advice uvmadvice[] = {
	{ UVM_ADV_NORMAL, 3, 4 },
	{ UVM_ADV_RANDOM, 0, 0 },
	{ UVM_ADV_SEQUENTIAL, 8, 7 },
};

#define UVM_MAXRANGE 16	/* must be MAX() of nback+nforw+1 */
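
/*
 * The two invariants noted above lend themselves to compile-time checks.
 * An illustrative sketch (not in the original file), assuming __CTASSERT
 * from <sys/cdefs.h> and the UVM_ADV_* values from <uvm/uvm_extern.h>:
 */
__CTASSERT(UVM_ADV_NORMAL == 0);
__CTASSERT(UVM_ADV_RANDOM == 1);
__CTASSERT(UVM_ADV_SEQUENTIAL == 2);
__CTASSERT(UVM_MAXRANGE >= 8 + 7 + 1);	/* UVM_ADV_SEQUENTIAL: nback+nforw+1 */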

/*
 * private prototypes
 */

/*
 * inline functions
 */

/*
 * uvmfault_anonflush: try and deactivate pages in specified anons
 *
 * => does not have to deactivate page if it is busy
 */

static inline void
uvmfault_anonflush(struct vm_anon **anons, int n)
{
	int lcv;
	struct vm_page *pg;

	for (lcv = 0; lcv < n; lcv++) {
		if (anons[lcv] == NULL)
			continue;
		KASSERT(rw_lock_held(anons[lcv]->an_lock));
		pg = anons[lcv]->an_page;
		if (pg && (pg->flags & PG_BUSY) == 0) {
			uvm_pagelock(pg);
			uvm_pagedeactivate(pg);
			uvm_pageunlock(pg);
		}
	}
}
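
/*
 * Usage note: this backs step 7 of uvm_fault_check() below ("flush pages
 * if MADV_SEQUENTIAL"): with sequential advice the anon pages behind the
 * faulting address are deactivated, since a sequential reader is unlikely
 * to touch them again.
 */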

/*
 * normal functions
 */

/*
 * uvmfault_amapcopy: clear "needs_copy" in a map.
 *
 * => called with VM data structures unlocked (usually, see below)
 * => we get a write lock on the maps and clear needs_copy for a VA
 * => if we are out of RAM we sleep (waiting for more)
 */

static void
uvmfault_amapcopy(struct uvm_faultinfo *ufi)
{
	for (;;) {

		/*
		 * no mapping?  give up.
		 */

		if (uvmfault_lookup(ufi, true) == false)
			return;

		/*
		 * copy if needed.
		 */

		if (UVM_ET_ISNEEDSCOPY(ufi->entry))
			amap_copy(ufi->map, ufi->entry, AMAP_COPY_NOWAIT,
				ufi->orig_rvaddr, ufi->orig_rvaddr + 1);

		/*
		 * didn't work?  must be out of RAM.   unlock and sleep.
		 */

		if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
			uvmfault_unlockmaps(ufi, true);
			uvm_wait("fltamapcopy");
			continue;
		}

		/*
		 * got it!   unlock and return.
		 */

		uvmfault_unlockmaps(ufi, true);
		return;
	}
	/*NOTREACHED*/
}
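
/*
 * Usage note: uvm_fault_check() calls this with everything unlocked when
 * a write fault hits a needs_copy entry, and then returns ERESTART so
 * that the fault is retried against the freshly copied amap.
 */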

/*
 * uvmfault_anonget: get data in an anon into a non-busy, non-released
 * page in that anon.
 *
 * => Map, amap and thus anon should be locked by caller.
 * => If we fail, we unlock everything and error is returned.
 * => If we are successful, return with everything still locked.
 * => We do not move the page on the queues [gets moved later].  If we
 *    allocate a new page [we_own], it gets put on the queues.  Either way,
 *    the result is that the page is on the queues at return time
 * => For pages which are on loan from a uvm_object (and thus are not owned
 *    by the anon): if successful, return with the owning object locked.
 *    The caller must unlock this object when it unlocks everything else.
 */

int
uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
    struct vm_anon *anon)
{
	struct vm_page *pg;
	krw_t lock_type;
	int error __unused; /* used for VMSWAP */

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
	KASSERT(rw_lock_held(anon->an_lock));
	KASSERT(anon->an_lock == amap->am_lock);

	/* Increment the counters. */
	cpu_count(CPU_COUNT_FLTANGET, 1);
	if (anon->an_page) {
		curlwp->l_ru.ru_minflt++;
	} else {
		curlwp->l_ru.ru_majflt++;
	}
	error = 0;

	/*
	 * Loop until we get the anon data, or fail.
	 */

	for (;;) {
		bool we_own, locked;
		/*
		 * Note: 'we_own' will become true if we set PG_BUSY on a page.
		 */
		we_own = false;
		pg = anon->an_page;

		/*
		 * If there is a resident page and it is loaned, then anon
		 * may not own it.  Call out to uvm_anon_lockloanpg() to
		 * identify and lock the real owner of the page.
		 */

		if (pg && pg->loan_count)
			pg = uvm_anon_lockloanpg(anon);

		/*
		 * Is page resident?  Make sure it is not busy/released.
		 */

		lock_type = rw_lock_op(anon->an_lock);
		if (pg) {

			/*
			 * at this point, if the page has a uobject [meaning
			 * we have it on loan], then that uobject is locked
			 * by us!   if the page is busy, we drop all the
			 * locks (including uobject) and try again.
			 */

			if ((pg->flags & PG_BUSY) == 0) {
				UVMHIST_LOG(maphist, "<- OK",0,0,0,0);
				return 0;
			}
			cpu_count(CPU_COUNT_FLTPGWAIT, 1);

			/*
			 * The last unlock must be an atomic unlock and wait
			 * on the owner of page.
			 */

			if (pg->uobject) {
				/* Owner of page is UVM object. */
				uvmfault_unlockall(ufi, amap, NULL);
				UVMHIST_LOG(maphist, " unlock+wait on uobj",0,
				    0,0,0);
				uvm_pagewait(pg, pg->uobject->vmobjlock, "anonget1");
			} else {
				/* Owner of page is anon. */
				uvmfault_unlockall(ufi, NULL, NULL);
				UVMHIST_LOG(maphist, " unlock+wait on anon",0,
				    0,0,0);
				uvm_pagewait(pg, anon->an_lock, "anonget2");
			}
		} else {
#if defined(VMSWAP)
			/*
			 * No page, therefore allocate one.  A write lock is
			 * required for this.  If the caller didn't supply
			 * one, fail now and have them retry.
			 */

			if (lock_type == RW_READER) {
				return ENOLCK;
			}
			pg = uvm_pagealloc(NULL,
			    ufi != NULL ? ufi->orig_rvaddr : 0,
			    anon, ufi != NULL ? UVM_FLAG_COLORMATCH : 0);
			if (pg == NULL) {
				/* Out of memory.  Wait a little. */
				uvmfault_unlockall(ufi, amap, NULL);
				cpu_count(CPU_COUNT_FLTNORAM, 1);
				UVMHIST_LOG(maphist, "  noram -- UVM_WAIT",0,
				    0,0,0);
				if (!uvm_reclaimable()) {
					return ENOMEM;
				}
				uvm_wait("flt_noram1");
			} else {
				/* PG_BUSY bit is set. */
				we_own = true;
				uvmfault_unlockall(ufi, amap, NULL);

				/*
				 * Pass a PG_BUSY+PG_FAKE clean page into
				 * the uvm_swap_get() function with all data
				 * structures unlocked.  Note that it is OK
				 * to read an_swslot here, because we hold
				 * PG_BUSY on the page.
				 */
				cpu_count(CPU_COUNT_PAGEINS, 1);
				error = uvm_swap_get(pg, anon->an_swslot,
				    PGO_SYNCIO);

				/*
				 * We clean up after the I/O below in the
				 * 'we_own' case.
				 */
			}
#else
			panic("%s: no page", __func__);
#endif /* defined(VMSWAP) */
		}

		/*
		 * Re-lock the map and anon.
		 */

		locked = uvmfault_relock(ufi);
		if (locked || we_own) {
			rw_enter(anon->an_lock, lock_type);
		}

		/*
		 * If we own the page (i.e. we set PG_BUSY), then we need
		 * to clean up after the I/O.  There are three cases to
		 * consider:
		 *
		 * 1) Page was released during I/O: free anon and ReFault.
		 * 2) I/O not OK.  Free the page and cause the fault to fail.
		 * 3) I/O OK!  Activate the page and sync with the non-we_own
		 *    case (i.e. drop anon lock if not locked).
		 */

		if (we_own) {
			KASSERT(lock_type == RW_WRITER);
#if defined(VMSWAP)
			if (error) {

				/*
				 * Remove the swap slot from the anon and
				 * mark the anon as having no real slot.
				 * Do not free the swap slot, thus preventing
				 * it from being used again.
				 */

				if (anon->an_swslot > 0) {
					uvm_swap_markbad(anon->an_swslot, 1);
				}
				anon->an_swslot = SWSLOT_BAD;

				if ((pg->flags & PG_RELEASED) != 0) {
					goto released;
				}

				/*
				 * Note: page was never !PG_BUSY, so it
				 * cannot be mapped and thus no need to
				 * pmap_page_protect() it.
				 */

				uvm_pagefree(pg);

				if (locked) {
					uvmfault_unlockall(ufi, NULL, NULL);
				}
				rw_exit(anon->an_lock);
				UVMHIST_LOG(maphist, "<- ERROR", 0,0,0,0);
				return error;
			}

			if ((pg->flags & PG_RELEASED) != 0) {
released:
				KASSERT(anon->an_ref == 0);

				/*
				 * Released while we had unlocked amap.
				 */

				if (locked) {
					uvmfault_unlockall(ufi, NULL, NULL);
				}
				uvm_anon_release(anon);

				if (error) {
					UVMHIST_LOG(maphist,
					    "<- ERROR/RELEASED", 0,0,0,0);
					return error;
				}

				UVMHIST_LOG(maphist, "<- RELEASED", 0,0,0,0);
				return ERESTART;
			}

			/*
			 * We have successfully read the page, activate it.
			 */

			uvm_pagelock(pg);
			uvm_pageactivate(pg);
			uvm_pagewakeup(pg);
			uvm_pageunlock(pg);
			pg->flags &= ~(PG_BUSY|PG_FAKE);
			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
			UVM_PAGE_OWN(pg, NULL);
#else
			panic("%s: we_own", __func__);
#endif /* defined(VMSWAP) */
		}

		/*
		 * We were not able to re-lock the map - restart the fault.
		 */

		if (!locked) {
			if (we_own) {
				rw_exit(anon->an_lock);
			}
			UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0);
			return ERESTART;
		}

		/*
		 * Verify that no one has touched the amap and moved
		 * the anon on us.
		 */

		if (ufi != NULL && amap_lookup(&ufi->entry->aref,
		    ufi->orig_rvaddr - ufi->entry->start) != anon) {

			uvmfault_unlockall(ufi, amap, NULL);
			UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0);
			return ERESTART;
		}

		/*
		 * Retry..
		 */

		cpu_count(CPU_COUNT_FLTANRETRY, 1);
		continue;
	}
	/*NOTREACHED*/
}
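
/*
 * Sketch of the caller-side contract (see uvm_fault_upper() later in the
 * file for the real handling; this is an illustration only):
 *
 *	error = uvmfault_anonget(ufi, amap, anon);
 *	switch (error) {
 *	case 0:
 *		break;			// page resident, all still locked
 *	case ENOLCK:
 *		// paging was needed but only a read lock was held:
 *		// upgrade to a write lock and retry the fault
 *	case ERESTART:
 *		return ERESTART;	// locks dropped, refault from top
 *	default:
 *		return error;		// e.g. ENOMEM or an I/O error
 *	}
 */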

/*
 * uvmfault_promote: promote data to a new anon.  used for 1B and 2B.
 *
 *	1. allocate an anon and a page.
 *	2. fill its contents.
 *	3. put it into amap.
 *
 * => if we fail (result != 0) we unlock everything.
 * => on success, return a new locked anon via 'nanon'.
 *    (*nanon)->an_page will be a resident, locked, dirty page.
 * => it's the caller's responsibility to put the promoted nanon->an_page
 *    on the page queue.
 */

static int
uvmfault_promote(struct uvm_faultinfo *ufi,
    struct vm_anon *oanon,
    struct vm_page *uobjpage,
    struct vm_anon **nanon, /* OUT: allocated anon */
    struct vm_anon **spare)
{
	struct vm_amap *amap = ufi->entry->aref.ar_amap;
	struct uvm_object *uobj;
	struct vm_anon *anon;
	struct vm_page *pg;
	struct vm_page *opg;
	int error;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	if (oanon) {
		/* anon COW */
		opg = oanon->an_page;
		KASSERT(opg != NULL);
		KASSERT(opg->uobject == NULL || opg->loan_count > 0);
	} else if (uobjpage != PGO_DONTCARE) {
		/* object-backed COW */
		opg = uobjpage;
		KASSERT(rw_lock_held(opg->uobject->vmobjlock));
	} else {
		/* ZFOD */
		opg = NULL;
	}
	if (opg != NULL) {
		uobj = opg->uobject;
	} else {
		uobj = NULL;
	}

	KASSERT(amap != NULL);
	KASSERT(uobjpage != NULL);
	KASSERT(rw_write_held(amap->am_lock));
	KASSERT(oanon == NULL || amap->am_lock == oanon->an_lock);
	KASSERT(uobj == NULL || rw_lock_held(uobj->vmobjlock));

	if (*spare != NULL) {
		anon = *spare;
		*spare = NULL;
	} else {
		anon = uvm_analloc();
	}
	if (anon) {

		/*
		 * The new anon is locked.
		 *
		 * if opg == NULL, we want a zero'd, dirty page,
		 * so have uvm_pagealloc() do that for us.
		 */

		KASSERT(anon->an_lock == NULL);
		anon->an_lock = amap->am_lock;
		pg = uvm_pagealloc(NULL, ufi->orig_rvaddr, anon,
		    UVM_FLAG_COLORMATCH | (opg == NULL ? UVM_PGA_ZERO : 0));
		if (pg == NULL) {
			anon->an_lock = NULL;
		}
	} else {
		pg = NULL;
	}

	/*
	 * out of memory resources?
	 */

	if (pg == NULL) {
		/* save anon for the next try. */
		if (anon != NULL) {
			*spare = anon;
		}

		/* unlock and fail ... */
		uvmfault_unlockall(ufi, amap, uobj);
		if (!uvm_reclaimable()) {
			UVMHIST_LOG(maphist, "out of VM", 0,0,0,0);
			cpu_count(CPU_COUNT_FLTNOANON, 1);
			error = ENOMEM;
			goto done;
		}

		UVMHIST_LOG(maphist, "out of RAM, waiting for more", 0,0,0,0);
		cpu_count(CPU_COUNT_FLTNORAM, 1);
		uvm_wait("flt_noram5");
		error = ERESTART;
		goto done;
	}

	/*
	 * copy the page [pg now dirty]
	 *
	 * Remove the pmap entry now for the old page at this address
	 * so that no thread can modify the new page while any thread
	 * might still see the old page.
	 */
	if (opg) {
		pmap_remove(vm_map_pmap(ufi->orig_map), ufi->orig_rvaddr,
			     ufi->orig_rvaddr + PAGE_SIZE);
		pmap_update(vm_map_pmap(ufi->orig_map));
		uvm_pagecopy(opg, pg);
	}
	KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_DIRTY);

	amap_add(&ufi->entry->aref, ufi->orig_rvaddr - ufi->entry->start, anon,
	    oanon != NULL);

	/*
	 * from this point on am_lock won't be dropped until the page is
	 * entered, so it's safe to unbusy the page up front.
	 *
	 * uvm_fault_{upper,lower}_done will activate or enqueue the page.
	 */

	pg = anon->an_page;
	pg->flags &= ~(PG_BUSY|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);

	*nanon = anon;
	error = 0;
done:
	return error;
}
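
/*
 * Illustration of the 'spare' protocol above (a sketch; in the real code
 * the spare anon lives in struct uvm_faultctx and is disposed of at the
 * end of uvm_fault_internal()):
 *
 *	struct vm_anon *spare = NULL;
 *
 *	do {
 *		error = uvmfault_promote(ufi, oanon, uobjpage,
 *		    &nanon, &spare);
 *		// on ERESTART a freshly allocated anon may be parked in
 *		// 'spare' and is reused on the next pass instead of
 *		// calling uvm_analloc() again
 *	} while (error == ERESTART);
 *
 *	if (spare != NULL) {
 *		spare->an_ref--;
 *		uvm_anfree(spare);	// dropped once the fault is done
 *	}
 */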

/*
 * Update statistics after fault resolution.
 * - maxrss
 */
void
uvmfault_update_stats(struct uvm_faultinfo *ufi)
{
	struct vm_map		*map;
	struct vmspace		*vm;
	struct proc		*p;
	vsize_t			 res;

	map = ufi->orig_map;

	p = curproc;
	KASSERT(p != NULL);
	vm = p->p_vmspace;

	if (&vm->vm_map != map)
		return;

	res = pmap_resident_count(map->pmap);
	if (vm->vm_rssmax < res)
		vm->vm_rssmax = res;
}

/*
 *   F A U L T   -   m a i n   e n t r y   p o i n t
 */

/*
 * uvm_fault: page fault handler
 *
 * => called from MD code to resolve a page fault
 * => VM data structures usually should be unlocked.   however, it is
 *	possible to call here with the main map locked if the caller
 *	gets a write lock, sets it recursive, and then calls us (cf.
 *	uvm_map_pageable).   this should be avoided because it keeps
 *	the map locked during I/O.
 * => MUST NEVER BE CALLED IN INTERRUPT CONTEXT
 */

#define MASK(entry)     (UVM_ET_ISCOPYONWRITE(entry) ? \
			 ~VM_PROT_WRITE : VM_PROT_ALL)

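/*
 * Worked example: for a copy-on-write entry MASK(entry) evaluates to
 * ~VM_PROT_WRITE, so ANDing a protection with MASK(entry) enters the
 * page read-only and leaves the promotion to a later write fault; for
 * any other entry MASK(entry) is VM_PROT_ALL and the protection passes
 * through unchanged.
 */
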
/* fault_flag values passed from uvm_fault_wire to uvm_fault_internal */
#define UVM_FAULT_WIRE		(1 << 0)
#define UVM_FAULT_MAXPROT	(1 << 1)

struct uvm_faultctx {

	/*
	 * the following members are set up by uvm_fault_check() and
	 * read-only after that.
	 *
	 * note that narrow is used by uvm_fault_check() to change
	 * the behaviour after ERESTART.
	 *
	 * most of them might change after ERESTART if the underlying
	 * map entry has been changed behind us.  an exception is
	 * wire_paging, which never changes.
	 */
	vm_prot_t access_type;
	vaddr_t startva;
	int npages;
	int centeridx;
	bool narrow;		/* work on a single requested page only */
	bool wire_mapping;	/* request a PMAP_WIRED mapping
				   (UVM_FAULT_WIRE or VM_MAPENT_ISWIRED) */
	bool wire_paging;	/* request uvm_pagewire
				   (true for UVM_FAULT_WIRE) */
	bool cow_now;		/* VM_PROT_WRITE is actually requested
				   (ie. should break COW and page loaning) */

	/*
	 * enter_prot is set up by uvm_fault_check() and clamped
	 * (ie. drop the VM_PROT_WRITE bit) in various places in case
	 * of !cow_now.
	 */
	vm_prot_t enter_prot;	/* prot at which we want to enter pages in */

	/*
	 * the following member is for uvmfault_promote() and ERESTART.
	 */
	struct vm_anon *anon_spare;

	/*
	 * the following is actually a uvm_fault_lower() internal.
	 * it's here merely for debugging.
	 * (or due to the mechanical separation of the function?)
	 */
	bool promote;

	/*
	 * type of lock to acquire on objects in both layers.
	 */
	krw_t lower_lock_type;
	krw_t upper_lock_type;
};

static inline int	uvm_fault_check(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct vm_anon ***, bool);

static int		uvm_fault_upper(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct vm_anon **);
static inline int	uvm_fault_upper_lookup(
			    struct uvm_faultinfo *, const struct uvm_faultctx *,
			    struct vm_anon **, struct vm_page **);
static inline void	uvm_fault_upper_neighbor(
			    struct uvm_faultinfo *, const struct uvm_faultctx *,
			    vaddr_t, struct vm_page *, bool);
static inline int	uvm_fault_upper_loan(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct vm_anon *, struct uvm_object **);
static inline int	uvm_fault_upper_promote(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct uvm_object *, struct vm_anon *);
static inline int	uvm_fault_upper_direct(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct uvm_object *, struct vm_anon *);
static int		uvm_fault_upper_enter(
			    struct uvm_faultinfo *, const struct uvm_faultctx *,
			    struct uvm_object *, struct vm_anon *,
			    struct vm_page *, struct vm_anon *);
static inline void	uvm_fault_upper_done(
			    struct uvm_faultinfo *, const struct uvm_faultctx *,
			    struct vm_anon *, struct vm_page *);

static int		uvm_fault_lower(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct vm_page **);
static inline void	uvm_fault_lower_lookup(
			    struct uvm_faultinfo *, const struct uvm_faultctx *,
			    struct vm_page **);
static inline void	uvm_fault_lower_neighbor(
			    struct uvm_faultinfo *, const struct uvm_faultctx *,
			    vaddr_t, struct vm_page *);
static inline int	uvm_fault_lower_io(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct uvm_object **, struct vm_page **);
static inline int	uvm_fault_lower_direct(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct uvm_object *, struct vm_page *);
static inline int	uvm_fault_lower_direct_loan(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct uvm_object *, struct vm_page **,
			    struct vm_page **);
static inline int	uvm_fault_lower_promote(
			    struct uvm_faultinfo *, struct uvm_faultctx *,
			    struct uvm_object *, struct vm_page *);
static int		uvm_fault_lower_enter(
			    struct uvm_faultinfo *, const struct uvm_faultctx *,
			    struct uvm_object *,
			    struct vm_anon *, struct vm_page *);
static inline void	uvm_fault_lower_done(
			    struct uvm_faultinfo *, const struct uvm_faultctx *,
			    struct uvm_object *, struct vm_page *);

int
uvm_fault_internal(struct vm_map *orig_map, vaddr_t vaddr,
    vm_prot_t access_type, int fault_flag)
{
	struct uvm_faultinfo ufi;
	struct uvm_faultctx flt = {
		.access_type = access_type,

		/* don't look for neighborhood pages on "wire" fault */
		.narrow = (fault_flag & UVM_FAULT_WIRE) != 0,

		/* "wire" fault causes wiring of both mapping and paging */
		.wire_mapping = (fault_flag & UVM_FAULT_WIRE) != 0,
		.wire_paging = (fault_flag & UVM_FAULT_WIRE) != 0,

		/*
		 * default lock type to acquire on upper & lower layer
		 * objects: reader.  this can be upgraded at any point
		 * during the fault from read -> write and uvm_faultctx
		 * changed to match, but is never downgraded write -> read.
		 */
#ifdef __HAVE_UNLOCKED_PMAP /* XXX temporary */
		.upper_lock_type = RW_WRITER,
		.lower_lock_type = RW_WRITER,
#else
		.upper_lock_type = RW_READER,
		.lower_lock_type = RW_READER,
#endif
	};
	const bool maxprot = (fault_flag & UVM_FAULT_MAXPROT) != 0;
	struct vm_anon *anons_store[UVM_MAXRANGE], **anons;
	struct vm_page *pages_store[UVM_MAXRANGE], **pages;
	int error;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "(map=%#jx, vaddr=%#jx, at=%jd, ff=%jd)",
	      (uintptr_t)orig_map, vaddr, access_type, fault_flag);

	/* Don't count anything until user interaction is possible */
	kpreempt_disable();
	if (__predict_true(start_init_exec)) {
		struct cpu_info *ci = curcpu();
		CPU_COUNT(CPU_COUNT_NFAULT, 1);
		/* Don't flood RNG subsystem with samples. */
		if (++(ci->ci_faultrng) == 503) {
			ci->ci_faultrng = 0;
			rnd_add_uint32(&uvm_fault_rndsource,
			    sizeof(vaddr_t) == sizeof(uint32_t) ?
			    (uint32_t)vaddr : sizeof(vaddr_t) ==
			    sizeof(uint64_t) ?
			    (uint32_t)vaddr :
			    (uint32_t)ci->ci_counts[CPU_COUNT_NFAULT]);
		}
	}
	kpreempt_enable();

	/*
	 * init the IN parameters in the ufi
	 */

	ufi.orig_map = orig_map;
	ufi.orig_rvaddr = trunc_page(vaddr);
	ufi.orig_size = PAGE_SIZE;	/* can't get any smaller than this */

	error = ERESTART;
	while (error == ERESTART) { /* ReFault: */
		anons = anons_store;
		pages = pages_store;

		error = uvm_fault_check(&ufi, &flt, &anons, maxprot);
		if (error != 0)
			continue;

		error = uvm_fault_upper_lookup(&ufi, &flt, anons, pages);
		if (error != 0)
			continue;

		if (pages[flt.centeridx] == PGO_DONTCARE)
			error = uvm_fault_upper(&ufi, &flt, anons);
		else {
			struct uvm_object * const uobj =
			    ufi.entry->object.uvm_obj;

			if (uobj && uobj->pgops->pgo_fault != NULL) {
				/*
				 * invoke "special" fault routine.
				 */
				rw_enter(uobj->vmobjlock, RW_WRITER);
				/* locked: maps(read), amap(if there), uobj */
				error = uobj->pgops->pgo_fault(&ufi,
				    flt.startva, pages, flt.npages,
				    flt.centeridx, flt.access_type,
				    PGO_LOCKED|PGO_SYNCIO);

				/*
				 * locked: nothing, pgo_fault has unlocked
				 * everything
				 */

				/*
				 * object fault routine responsible for
				 * pmap_update().
				 */

				/*
				 * Wake up the pagedaemon if the fault method
				 * failed for lack of memory but some can be
				 * reclaimed.
				 */
				if (error == ENOMEM && uvm_reclaimable()) {
					uvm_wait("pgo_fault");
					error = ERESTART;
				}
			} else {
				error = uvm_fault_lower(&ufi, &flt, pages);
			}
		}
	}

	if (flt.anon_spare != NULL) {
		flt.anon_spare->an_ref--;
		KASSERT(flt.anon_spare->an_ref == 0);
		KASSERT(flt.anon_spare->an_lock == NULL);
		uvm_anfree(flt.anon_spare);
	}
	return error;
}
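
/*
 * Note: MD trap code reaches this function through thin wrappers; a
 * sketch of the usual call (uvm_fault() is defined in <uvm/uvm_extern.h>
 * as uvm_fault_internal() with fault_flag 0):
 *
 *	error = uvm_fault(map, trunc_page(va), atype);
 *	if (error == 0)
 *		return;		// fault resolved, retry the access
 *	// otherwise deliver SIGSEGV/SIGBUS, or handle a kernel fault
 */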
    959  1.138  uebayasi 
    960  1.173  uebayasi /*
    961  1.173  uebayasi  * uvm_fault_check: check prot, handle needs-copy, etc.
    962  1.173  uebayasi  *
    963  1.173  uebayasi  *	1. lookup entry.
    964  1.173  uebayasi  *	2. check protection.
    965  1.173  uebayasi  *	3. adjust fault condition (mainly for simulated fault).
    966  1.173  uebayasi  *	4. handle needs-copy (lazy amap copy).
    967  1.173  uebayasi  *	5. establish range of interest for neighbor fault (aka pre-fault).
    968  1.173  uebayasi  *	6. look up anons (if amap exists).
    969  1.173  uebayasi  *	7. flush pages (if MADV_SEQUENTIAL)
    970  1.173  uebayasi  *
    971  1.173  uebayasi  * => called with nothing locked.
    972  1.173  uebayasi  * => if we fail (result != 0) we unlock everything.
    973  1.177      yamt  * => initialize/adjust many members of flt.
    974  1.173  uebayasi  */
    975  1.173  uebayasi 
    976  1.144  uebayasi static int
    977  1.141  uebayasi uvm_fault_check(
    978  1.141  uebayasi 	struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
    979  1.177      yamt 	struct vm_anon ***ranons, bool maxprot)
    980  1.141  uebayasi {
    981  1.141  uebayasi 	struct vm_amap *amap;
    982  1.141  uebayasi 	struct uvm_object *uobj;
    983  1.137  uebayasi 	vm_prot_t check_prot;
    984  1.137  uebayasi 	int nback, nforw;
    985  1.228     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    986  1.137  uebayasi 
    987    1.7       mrg 	/*
    988    1.7       mrg 	 * lookup and lock the maps
    989    1.7       mrg 	 */
    990    1.7       mrg 
    991  1.141  uebayasi 	if (uvmfault_lookup(ufi, false) == false) {
    992  1.217       rin 		UVMHIST_LOG(maphist, "<- no mapping @ %#jx", ufi->orig_rvaddr,
    993  1.177      yamt 		    0,0,0);
    994  1.141  uebayasi 		return EFAULT;
    995    1.7       mrg 	}
    996    1.7       mrg 	/* locked: maps(read) */
    997    1.7       mrg 
    998   1.61   thorpej #ifdef DIAGNOSTIC
    999  1.141  uebayasi 	if ((ufi->map->flags & VM_MAP_PAGEABLE) == 0) {
   1000   1.61   thorpej 		printf("Page fault on non-pageable map:\n");
   1001  1.141  uebayasi 		printf("ufi->map = %p\n", ufi->map);
   1002  1.141  uebayasi 		printf("ufi->orig_map = %p\n", ufi->orig_map);
   1003  1.217       rin 		printf("ufi->orig_rvaddr = %#lx\n", (u_long) ufi->orig_rvaddr);
   1004  1.141  uebayasi 		panic("uvm_fault: (ufi->map->flags & VM_MAP_PAGEABLE) == 0");
   1005   1.61   thorpej 	}
   1006   1.61   thorpej #endif
   1007   1.58       chs 
   1008    1.7       mrg 	/*
   1009    1.7       mrg 	 * check protection
   1010    1.7       mrg 	 */
   1011    1.7       mrg 
   1012  1.177      yamt 	check_prot = maxprot ?
   1013  1.141  uebayasi 	    ufi->entry->max_protection : ufi->entry->protection;
   1014  1.141  uebayasi 	if ((check_prot & flt->access_type) != flt->access_type) {
   1015    1.7       mrg 		UVMHIST_LOG(maphist,
   1016  1.201  pgoyette 		    "<- protection failure (prot=%#jx, access=%#jx)",
   1017  1.141  uebayasi 		    ufi->entry->protection, flt->access_type, 0, 0);
   1018  1.141  uebayasi 		uvmfault_unlockmaps(ufi, false);
   1019  1.200  christos 		return EFAULT;
   1020    1.7       mrg 	}
   1021    1.7       mrg 
   1022    1.7       mrg 	/*
   1023    1.7       mrg 	 * "enter_prot" is the protection we want to enter the page in at.
   1024    1.7       mrg 	 * for certain pages (e.g. copy-on-write pages) this protection can
   1025  1.141  uebayasi 	 * be more strict than ufi->entry->protection.  "wired" means either
   1026    1.7       mrg 	 * the entry is wired or we are fault-wiring the pg.
   1027    1.7       mrg 	 */
   1028    1.7       mrg 
   1029  1.141  uebayasi 	flt->enter_prot = ufi->entry->protection;
   1030  1.207       chs 	if (VM_MAPENT_ISWIRED(ufi->entry)) {
   1031  1.146  uebayasi 		flt->wire_mapping = true;
   1032  1.207       chs 		flt->wire_paging = true;
   1033  1.207       chs 		flt->narrow = true;
   1034  1.207       chs 	}
   1035  1.146  uebayasi 
   1036  1.146  uebayasi 	if (flt->wire_mapping) {
   1037  1.141  uebayasi 		flt->access_type = flt->enter_prot; /* full access for wired */
   1038  1.141  uebayasi 		flt->cow_now = (check_prot & VM_PROT_WRITE) != 0;
   1039   1.73       chs 	} else {
   1040  1.141  uebayasi 		flt->cow_now = (flt->access_type & VM_PROT_WRITE) != 0;
   1041   1.73       chs 	}
   1042    1.7       mrg 
   1043  1.222        ad 	if (flt->wire_paging) {
   1044  1.222        ad 		/* wiring pages requires a write lock. */
   1045  1.222        ad 		flt->upper_lock_type = RW_WRITER;
   1046  1.222        ad 		flt->lower_lock_type = RW_WRITER;
   1047  1.222        ad 	}
   1048  1.222        ad 
   1049  1.168  uebayasi 	flt->promote = false;
   1050  1.168  uebayasi 
   1051    1.7       mrg 	/*
   1052    1.7       mrg 	 * handle "needs_copy" case.   if we need to copy the amap we will
   1053    1.7       mrg 	 * have to drop our readlock and relock it with a write lock.  (we
   1054    1.7       mrg 	 * need a write lock to change anything in a map entry [e.g.
   1055    1.7       mrg 	 * needs_copy]).
   1056    1.7       mrg 	 */
   1057    1.7       mrg 
   1058  1.141  uebayasi 	if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
   1059  1.141  uebayasi 		if (flt->cow_now || (ufi->entry->object.uvm_obj == NULL)) {
   1060  1.177      yamt 			KASSERT(!maxprot);
   1061    1.7       mrg 			/* need to clear */
   1062    1.7       mrg 			UVMHIST_LOG(maphist,
   1063    1.7       mrg 			    "  need to clear needs_copy and refault",0,0,0,0);
   1064  1.141  uebayasi 			uvmfault_unlockmaps(ufi, false);
   1065  1.141  uebayasi 			uvmfault_amapcopy(ufi);
   1066  1.213        ad 			cpu_count(CPU_COUNT_FLTAMCOPY, 1);
   1067  1.141  uebayasi 			return ERESTART;
   1068    1.7       mrg 
   1069    1.7       mrg 		} else {
   1070    1.7       mrg 
   1071    1.7       mrg 			/*
   1072    1.7       mrg 			 * ensure that we pmap_enter page R/O since
   1073    1.7       mrg 			 * needs_copy is still true
   1074    1.7       mrg 			 */
   1075   1.72       chs 
   1076  1.141  uebayasi 			flt->enter_prot &= ~VM_PROT_WRITE;
   1077    1.7       mrg 		}
   1078    1.7       mrg 	}
   1079    1.7       mrg 
   1080    1.7       mrg 	/*
   1081    1.7       mrg 	 * identify the players
   1082    1.7       mrg 	 */
   1083    1.7       mrg 
   1084  1.141  uebayasi 	amap = ufi->entry->aref.ar_amap;	/* upper layer */
   1085  1.141  uebayasi 	uobj = ufi->entry->object.uvm_obj;	/* lower layer */
   1086    1.7       mrg 
   1087    1.7       mrg 	/*
   1088    1.7       mrg 	 * check for a case 0 fault.  if nothing backing the entry then
   1089    1.7       mrg 	 * error now.
   1090    1.7       mrg 	 */
   1091    1.7       mrg 
   1092    1.7       mrg 	if (amap == NULL && uobj == NULL) {
   1093  1.141  uebayasi 		uvmfault_unlockmaps(ufi, false);
   1094    1.7       mrg 		UVMHIST_LOG(maphist,"<- no backing store, no overlay",0,0,0,0);
   1095  1.141  uebayasi 		return EFAULT;
   1096    1.7       mrg 	}
   1097    1.1       mrg 
   1098    1.7       mrg 	/*
    1099  1.227        ad 	 * for a case 2B fault, waste no time on adjacent pages because
   1100  1.227        ad 	 * they are likely already entered.
   1101  1.227        ad 	 */
   1102  1.227        ad 
   1103  1.227        ad 	if (uobj != NULL && amap != NULL &&
   1104  1.227        ad 	    (flt->access_type & VM_PROT_WRITE) != 0) {
    1105  1.227        ad 		/* narrow fault: don't map in adjacent pages */
   1106  1.227        ad 		flt->narrow = true;
   1107  1.227        ad 	}
   1108  1.227        ad 
   1109  1.227        ad 	/*
   1110    1.7       mrg 	 * establish range of interest based on advice from mapper
   1111    1.7       mrg 	 * and then clip to fit map entry.   note that we only want
   1112   1.63       chs 	 * to do this the first time through the fault.   if we
   1113    1.7       mrg 	 * ReFault we will disable this by setting "narrow" to true.
   1114    1.7       mrg 	 */
   1115    1.1       mrg 
   1116  1.141  uebayasi 	if (flt->narrow == false) {
   1117    1.7       mrg 
   1118    1.7       mrg 		/* wide fault (!narrow) */
   1119  1.141  uebayasi 		KASSERT(uvmadvice[ufi->entry->advice].advice ==
   1120  1.141  uebayasi 			 ufi->entry->advice);
   1121  1.141  uebayasi 		nback = MIN(uvmadvice[ufi->entry->advice].nback,
   1122  1.177      yamt 		    (ufi->orig_rvaddr - ufi->entry->start) >> PAGE_SHIFT);
   1123  1.141  uebayasi 		flt->startva = ufi->orig_rvaddr - (nback << PAGE_SHIFT);
   1124    1.7       mrg 		/*
   1125    1.7       mrg 		 * note: "-1" because we don't want to count the
   1126    1.7       mrg 		 * faulting page as forw
   1127    1.7       mrg 		 */
   1128  1.177      yamt 		nforw = MIN(uvmadvice[ufi->entry->advice].nforw,
   1129  1.177      yamt 			    ((ufi->entry->end - ufi->orig_rvaddr) >>
   1130  1.177      yamt 			     PAGE_SHIFT) - 1);
   1131  1.141  uebayasi 		flt->npages = nback + nforw + 1;
   1132  1.141  uebayasi 		flt->centeridx = nback;
   1133    1.7       mrg 
   1134  1.141  uebayasi 		flt->narrow = true;	/* ensure only once per-fault */
   1135    1.7       mrg 
   1136    1.7       mrg 	} else {
   1137   1.63       chs 
   1138    1.7       mrg 		/* narrow fault! */
   1139    1.7       mrg 		nback = nforw = 0;
   1140  1.141  uebayasi 		flt->startva = ufi->orig_rvaddr;
   1141  1.141  uebayasi 		flt->npages = 1;
   1142  1.141  uebayasi 		flt->centeridx = 0;
   1143    1.1       mrg 
   1144    1.7       mrg 	}
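                          	/*
                          	 * illustrative example (the real counts come from the
                          	 * uvmadvice table): with advice allowing 3 back and 4 forward
                          	 * pages, and a fault at least 3 pages past entry->start and at
                          	 * least 5 pages before entry->end, we get nback = 3, nforw = 4,
                          	 * npages = 8 and centeridx = 3, with startva three pages below
                          	 * the faulting address.
                          	 */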
   1145  1.131  uebayasi 	/* offset from entry's start to pgs' start */
   1146  1.141  uebayasi 	const voff_t eoff = flt->startva - ufi->entry->start;
   1147    1.1       mrg 
   1148    1.7       mrg 	/* locked: maps(read) */
   1149  1.201  pgoyette 	UVMHIST_LOG(maphist, "  narrow=%jd, back=%jd, forw=%jd, startva=%#jx",
   1150  1.141  uebayasi 		    flt->narrow, nback, nforw, flt->startva);
   1151  1.201  pgoyette 	UVMHIST_LOG(maphist, "  entry=%#jx, amap=%#jx, obj=%#jx",
   1152  1.201  pgoyette 	    (uintptr_t)ufi->entry, (uintptr_t)amap, (uintptr_t)uobj, 0);
   1153    1.1       mrg 
   1154    1.7       mrg 	/*
   1155  1.222        ad 	 * guess at the most suitable lock types to acquire.
   1156  1.222        ad 	 * if we've got an amap then lock it and extract current anons.
   1157    1.7       mrg 	 */
   1158    1.7       mrg 
   1159    1.7       mrg 	if (amap) {
   1160  1.222        ad 		if ((amap_flags(amap) & AMAP_SHARED) == 0) {
   1161  1.222        ad 			/*
   1162  1.222        ad 			 * the amap isn't shared.  get a writer lock to
   1163  1.222        ad 			 * avoid the cost of upgrading the lock later if
   1164  1.222        ad 			 * needed.
   1165  1.222        ad 			 *
   1166  1.222        ad 			 * XXX nice for PostgreSQL, but consider threads.
   1167  1.222        ad 			 */
   1168  1.222        ad 			flt->upper_lock_type = RW_WRITER;
   1169  1.222        ad 		} else if ((flt->access_type & VM_PROT_WRITE) != 0) {
   1170  1.222        ad 			/*
   1171  1.222        ad 			 * assume we're about to COW.
   1172  1.222        ad 			 */
   1173  1.222        ad 			flt->upper_lock_type = RW_WRITER;
   1174  1.222        ad 		}
   1175  1.222        ad 		amap_lock(amap, flt->upper_lock_type);
   1176  1.141  uebayasi 		amap_lookups(&ufi->entry->aref, eoff, *ranons, flt->npages);
   1177    1.7       mrg 	} else {
   1178  1.222        ad 		if ((flt->access_type & VM_PROT_WRITE) != 0) {
   1179  1.222        ad 			/*
   1180  1.222        ad 			 * we are about to dirty the object and that
   1181  1.222        ad 			 * requires a write lock.
   1182  1.222        ad 			 */
   1183  1.222        ad 			flt->lower_lock_type = RW_WRITER;
   1184  1.222        ad 		}
   1185  1.141  uebayasi 		*ranons = NULL;	/* to be safe */
   1186    1.7       mrg 	}
   1187    1.7       mrg 
   1188    1.7       mrg 	/* locked: maps(read), amap(if there) */
   1189  1.222        ad 	KASSERT(amap == NULL ||
   1190  1.222        ad 	    rw_lock_op(amap->am_lock) == flt->upper_lock_type);
   1191    1.7       mrg 
   1192    1.7       mrg 	/*
   1193    1.7       mrg 	 * for MADV_SEQUENTIAL mappings we want to deactivate the back pages
   1194    1.7       mrg 	 * now and then forget about them (for the rest of the fault).
   1195    1.7       mrg 	 */
   1196    1.7       mrg 
   1197  1.141  uebayasi 	if (ufi->entry->advice == MADV_SEQUENTIAL && nback != 0) {
   1198    1.7       mrg 
   1199    1.7       mrg 		UVMHIST_LOG(maphist, "  MADV_SEQUENTIAL: flushing backpages",
   1200    1.7       mrg 		    0,0,0,0);
   1201    1.7       mrg 		/* flush back-page anons? */
   1202   1.63       chs 		if (amap)
   1203  1.141  uebayasi 			uvmfault_anonflush(*ranons, nback);
   1204    1.7       mrg 
   1205  1.225        ad 		/*
   1206  1.225        ad 		 * flush object?  change lock type to RW_WRITER, to avoid
   1207  1.225        ad 		 * excessive competition between read/write locks if many
    1208  1.225        ad 	 * threads are doing "sequential access".
   1209  1.225        ad 		 */
   1210    1.7       mrg 		if (uobj) {
   1211  1.137  uebayasi 			voff_t uoff;
   1212  1.137  uebayasi 
   1213  1.225        ad 			flt->lower_lock_type = RW_WRITER;
   1214  1.141  uebayasi 			uoff = ufi->entry->offset + eoff;
   1215  1.216        ad 			rw_enter(uobj->vmobjlock, RW_WRITER);
   1216   1.90      yamt 			(void) (uobj->pgops->pgo_put)(uobj, uoff, uoff +
   1217   1.15       chs 				    (nback << PAGE_SHIFT), PGO_DEACTIVATE);
   1218    1.7       mrg 		}
   1219    1.7       mrg 
   1220    1.7       mrg 		/* now forget about the backpages */
   1221    1.7       mrg 		if (amap)
   1222  1.141  uebayasi 			*ranons += nback;
   1223  1.141  uebayasi 		flt->startva += (nback << PAGE_SHIFT);
   1224  1.141  uebayasi 		flt->npages -= nback;
   1225  1.141  uebayasi 		flt->centeridx = 0;
   1226    1.7       mrg 	}
   1227  1.137  uebayasi 	/*
   1228  1.137  uebayasi 	 * => startva is fixed
   1229  1.137  uebayasi 	 * => npages is fixed
   1230  1.137  uebayasi 	 */
   1231  1.177      yamt 	KASSERT(flt->startva <= ufi->orig_rvaddr);
   1232  1.177      yamt 	KASSERT(ufi->orig_rvaddr + ufi->orig_size <=
   1233  1.177      yamt 	    flt->startva + (flt->npages << PAGE_SHIFT));
   1234  1.141  uebayasi 	return 0;
   1235  1.141  uebayasi }
   1236  1.141  uebayasi 
   1237  1.173  uebayasi /*
   1238  1.222        ad  * uvm_fault_upper_upgrade: upgrade upper lock, reader -> writer
   1239  1.222        ad  */
   1240  1.222        ad 
   1241  1.222        ad static inline int
   1242  1.222        ad uvm_fault_upper_upgrade(struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
   1243  1.222        ad     struct vm_amap *amap, struct uvm_object *uobj)
   1244  1.222        ad {
   1245  1.224     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
   1246  1.222        ad 
   1247  1.222        ad 	KASSERT(amap != NULL);
   1248  1.222        ad 	KASSERT(flt->upper_lock_type == rw_lock_op(amap->am_lock));
   1249  1.222        ad 
   1250  1.222        ad 	/*
   1251  1.222        ad 	 * fast path.
   1252  1.222        ad 	 */
   1253  1.223     skrll 
   1254  1.222        ad 	if (__predict_true(flt->upper_lock_type == RW_WRITER)) {
   1255  1.222        ad 		return 0;
   1256  1.222        ad 	}
   1257  1.222        ad 
   1258  1.222        ad 	/*
   1259  1.222        ad 	 * otherwise try for the upgrade.  if we don't get it, unlock
   1260  1.222        ad 	 * everything, restart the fault and next time around get a writer
   1261  1.222        ad 	 * lock.
   1262  1.222        ad 	 */
   1263  1.222        ad 
   1264  1.222        ad 	flt->upper_lock_type = RW_WRITER;
   1265  1.222        ad 	if (__predict_false(!rw_tryupgrade(amap->am_lock))) {
   1266  1.222        ad 		uvmfault_unlockall(ufi, amap, uobj);
   1267  1.222        ad 		cpu_count(CPU_COUNT_FLTNOUP, 1);
   1268  1.222        ad 		UVMHIST_LOG(maphist, "  !upgrade upper", 0, 0,0,0);
   1269  1.222        ad 		return ERESTART;
   1270  1.222        ad 	}
   1271  1.222        ad 	cpu_count(CPU_COUNT_FLTUP, 1);
   1272  1.222        ad 	KASSERT(flt->upper_lock_type == rw_lock_op(amap->am_lock));
   1273  1.222        ad 	return 0;
   1274  1.222        ad }
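                          /*
                           * a sketch of the calling convention, mirroring the callers below:
                           *
                           *	error = uvm_fault_upper_upgrade(ufi, flt, amap, NULL);
                           *	if (error != 0)
                           *		return error;	(ERESTART: the whole fault is redone)
                           *	KASSERT(rw_write_held(amap->am_lock));
                           */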
   1275  1.222        ad 
   1276  1.222        ad /*
   1277  1.173  uebayasi  * uvm_fault_upper_lookup: look up existing h/w mapping and amap.
   1278  1.173  uebayasi  *
   1279  1.173  uebayasi  * iterate range of interest:
   1280  1.173  uebayasi  *	1. check if h/w mapping exists.  if yes, we don't care
   1281  1.173  uebayasi  *	2. check if anon exists.  if not, page is lower.
   1282  1.173  uebayasi  *	3. if anon exists, enter h/w mapping for neighbors.
   1283  1.173  uebayasi  *
   1284  1.173  uebayasi  * => called with amap locked (if exists).
   1285  1.173  uebayasi  */
   1286  1.173  uebayasi 
   1287  1.144  uebayasi static int
   1288  1.141  uebayasi uvm_fault_upper_lookup(
   1289  1.177      yamt 	struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
   1290  1.141  uebayasi 	struct vm_anon **anons, struct vm_page **pages)
   1291  1.141  uebayasi {
   1292  1.141  uebayasi 	struct vm_amap *amap = ufi->entry->aref.ar_amap;
   1293  1.137  uebayasi 	int lcv;
   1294  1.137  uebayasi 	vaddr_t currva;
   1295  1.195    martin 	bool shadowed __unused;
   1296  1.220        ad 	bool entered;
   1297  1.228     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
   1298    1.7       mrg 
   1299    1.7       mrg 	/* locked: maps(read), amap(if there) */
   1300  1.222        ad 	KASSERT(amap == NULL ||
   1301  1.222        ad 	    rw_lock_op(amap->am_lock) == flt->upper_lock_type);
   1302    1.1       mrg 
   1303    1.7       mrg 	/*
   1304    1.7       mrg 	 * map in the backpages and frontpages we found in the amap in hopes
   1305    1.7       mrg 	 * of preventing future faults.    we also init the pages[] array as
   1306    1.7       mrg 	 * we go.
   1307    1.7       mrg 	 */
   1308    1.7       mrg 
   1309  1.141  uebayasi 	currva = flt->startva;
   1310  1.144  uebayasi 	shadowed = false;
   1311  1.220        ad 	entered = false;
   1312  1.163  uebayasi 	for (lcv = 0; lcv < flt->npages; lcv++, currva += PAGE_SIZE) {
   1313    1.7       mrg 		/*
   1314    1.7       mrg 		 * unmapped or center page.   check if any anon at this level.
   1315    1.7       mrg 		 */
   1316    1.7       mrg 		if (amap == NULL || anons[lcv] == NULL) {
   1317    1.7       mrg 			pages[lcv] = NULL;
   1318    1.7       mrg 			continue;
   1319    1.7       mrg 		}
   1320    1.7       mrg 
   1321    1.7       mrg 		/*
   1322  1.222        ad 		 * check for present page and map if possible.
   1323    1.7       mrg 		 */
   1324    1.7       mrg 
   1325    1.7       mrg 		pages[lcv] = PGO_DONTCARE;
   1326  1.177      yamt 		if (lcv == flt->centeridx) {	/* save center for later! */
   1327  1.144  uebayasi 			shadowed = true;
   1328  1.186     rmind 			continue;
   1329  1.186     rmind 		}
   1330  1.186     rmind 
   1331  1.186     rmind 		struct vm_anon *anon = anons[lcv];
   1332  1.186     rmind 		struct vm_page *pg = anon->an_page;
   1333  1.161  uebayasi 
   1334  1.186     rmind 		KASSERT(anon->an_lock == amap->am_lock);
   1335  1.172  uebayasi 
   1336  1.220        ad 		/*
   1337  1.220        ad 		 * ignore loaned and busy pages.
   1338  1.220        ad 		 * don't play with VAs that are already mapped.
   1339  1.220        ad 		 */
   1340  1.220        ad 
   1341  1.220        ad 		if (pg && pg->loan_count == 0 && (pg->flags & PG_BUSY) == 0 &&
   1342  1.220        ad 		    !pmap_extract(ufi->orig_map->pmap, currva, NULL)) {
   1343  1.186     rmind 			uvm_fault_upper_neighbor(ufi, flt, currva,
   1344  1.186     rmind 			    pg, anon->an_ref > 1);
   1345  1.220        ad 			entered = true;
   1346    1.7       mrg 		}
   1347  1.151  uebayasi 	}
   1348  1.220        ad 	if (entered) {
   1349  1.220        ad 		pmap_update(ufi->orig_map->pmap);
   1350  1.220        ad 	}
   1351  1.151  uebayasi 
   1352  1.160  uebayasi 	/* locked: maps(read), amap(if there) */
   1353  1.222        ad 	KASSERT(amap == NULL ||
   1354  1.222        ad 	    rw_lock_op(amap->am_lock) == flt->upper_lock_type);
   1355  1.160  uebayasi 	/* (shadowed == true) if there is an anon at the faulting address */
   1356  1.201  pgoyette 	UVMHIST_LOG(maphist, "  shadowed=%jd, will_get=%jd", shadowed,
   1357  1.164   mlelstv 	    (ufi->entry->object.uvm_obj && shadowed != false),0,0);
   1358  1.160  uebayasi 
   1359  1.151  uebayasi 	return 0;
   1360  1.151  uebayasi }
   1361  1.151  uebayasi 
   1362  1.173  uebayasi /*
   1363  1.202       chs  * uvm_fault_upper_neighbor: enter single upper neighbor page.
   1364  1.173  uebayasi  *
   1365  1.173  uebayasi  * => called with amap and anon locked.
   1366  1.173  uebayasi  */
   1367  1.173  uebayasi 
   1368  1.151  uebayasi static void
   1369  1.163  uebayasi uvm_fault_upper_neighbor(
   1370  1.177      yamt 	struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
   1371  1.161  uebayasi 	vaddr_t currva, struct vm_page *pg, bool readonly)
   1372  1.151  uebayasi {
   1373  1.228     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
   1374  1.151  uebayasi 
   1375  1.173  uebayasi 	/* locked: amap, anon */
   1376  1.173  uebayasi 
   1377  1.215        ad 	KASSERT(pg->uobject == NULL);
   1378  1.215        ad 	KASSERT(pg->uanon != NULL);
   1379  1.222        ad 	KASSERT(rw_lock_op(pg->uanon->an_lock) == flt->upper_lock_type);
   1380  1.215        ad 	KASSERT(uvm_pagegetdirty(pg) != UVM_PAGE_STATUS_CLEAN);
   1381  1.215        ad 
   1382  1.222        ad 	/*
   1383  1.227        ad 	 * there wasn't a direct fault on the page, so avoid the cost of
   1384  1.227        ad 	 * activating it.
   1385  1.222        ad 	 */
   1386  1.222        ad 
   1387  1.227        ad 	if (!uvmpdpol_pageisqueued_p(pg) && pg->wire_count == 0) {
   1388  1.222        ad 		uvm_pagelock(pg);
   1389  1.222        ad 		uvm_pageenqueue(pg);
   1390  1.222        ad 		uvm_pageunlock(pg);
   1391  1.222        ad 	}
   1392  1.227        ad 
   1393  1.152  uebayasi 	UVMHIST_LOG(maphist,
   1394  1.201  pgoyette 	    "  MAPPING: n anon: pm=%#jx, va=%#jx, pg=%#jx",
   1395  1.201  pgoyette 	    (uintptr_t)ufi->orig_map->pmap, currva, (uintptr_t)pg, 0);
   1396  1.213        ad 	cpu_count(CPU_COUNT_FLTNAMAP, 1);
   1397  1.152  uebayasi 
   1398  1.152  uebayasi 	/*
   1399  1.161  uebayasi 	 * Since this page isn't the page that's actually faulting,
   1400  1.161  uebayasi 	 * ignore pmap_enter() failures; it's not critical that we
   1401  1.161  uebayasi 	 * enter these right now.
   1402  1.152  uebayasi 	 */
   1403  1.152  uebayasi 
   1404  1.152  uebayasi 	(void) pmap_enter(ufi->orig_map->pmap, currva,
   1405  1.161  uebayasi 	    VM_PAGE_TO_PHYS(pg),
   1406  1.161  uebayasi 	    readonly ? (flt->enter_prot & ~VM_PROT_WRITE) :
   1407  1.152  uebayasi 	    flt->enter_prot,
   1408  1.154  uebayasi 	    PMAP_CANFAIL | (flt->wire_mapping ? PMAP_WIRED : 0));
   1409  1.151  uebayasi }
   1410  1.151  uebayasi 
   1411  1.173  uebayasi /*
   1412  1.173  uebayasi  * uvm_fault_upper: handle upper fault.
   1413  1.173  uebayasi  *
   1414  1.173  uebayasi  *	1. acquire anon lock.
   1415  1.173  uebayasi  *	2. get anon.  let uvmfault_anonget do the dirty work.
   1416  1.173  uebayasi  *	3. handle loan.
   1417  1.173  uebayasi  *	4. dispatch direct or promote handlers.
   1418  1.173  uebayasi  */
   1419  1.134  uebayasi 
   1420  1.138  uebayasi static int
   1421  1.138  uebayasi uvm_fault_upper(
   1422  1.140  uebayasi 	struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
   1423  1.148  uebayasi 	struct vm_anon **anons)
   1424  1.138  uebayasi {
   1425  1.148  uebayasi 	struct vm_amap * const amap = ufi->entry->aref.ar_amap;
   1426  1.148  uebayasi 	struct vm_anon * const anon = anons[flt->centeridx];
   1427  1.148  uebayasi 	struct uvm_object *uobj;
   1428  1.138  uebayasi 	int error;
   1429  1.228     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
   1430  1.137  uebayasi 
   1431  1.186     rmind 	/* locked: maps(read), amap, anon */
   1432  1.222        ad 	KASSERT(rw_lock_op(amap->am_lock) == flt->upper_lock_type);
   1433  1.186     rmind 	KASSERT(anon->an_lock == amap->am_lock);
   1434    1.7       mrg 
   1435    1.7       mrg 	/*
   1436    1.7       mrg 	 * handle case 1: fault on an anon in our amap
   1437    1.7       mrg 	 */
   1438    1.7       mrg 
   1439  1.201  pgoyette 	UVMHIST_LOG(maphist, "  case 1 fault: anon=%#jx",
   1440  1.201  pgoyette 	    (uintptr_t)anon, 0, 0, 0);
   1441    1.7       mrg 
   1442    1.7       mrg 	/*
   1443    1.7       mrg 	 * no matter if we have case 1A or case 1B we are going to need to
   1444    1.7       mrg 	 * have the anon's memory resident.   ensure that now.
   1445    1.7       mrg 	 */
   1446    1.7       mrg 
   1447    1.7       mrg 	/*
   1448   1.47       chs 	 * let uvmfault_anonget do the dirty work.
   1449   1.51   thorpej 	 * if it fails (!OK) it will unlock everything for us.
   1450   1.47       chs 	 * if it succeeds, locks are still valid and locked.
   1451    1.7       mrg 	 * also, if it is OK, then the anon's page is on the queues.
   1452    1.7       mrg 	 * if the page is on loan from a uvm_object, then anonget will
   1453    1.7       mrg 	 * lock that object for us if it does not fail.
   1454    1.7       mrg 	 */
   1455  1.222        ad  retry:
   1456  1.138  uebayasi 	error = uvmfault_anonget(ufi, amap, anon);
   1457   1.58       chs 	switch (error) {
   1458   1.57       chs 	case 0:
   1459   1.63       chs 		break;
   1460    1.7       mrg 
   1461   1.57       chs 	case ERESTART:
   1462  1.139  uebayasi 		return ERESTART;
   1463    1.7       mrg 
   1464   1.57       chs 	case EAGAIN:
   1465  1.128     pooka 		kpause("fltagain1", false, hz/2, NULL);
   1466  1.139  uebayasi 		return ERESTART;
   1467   1.51   thorpej 
   1468  1.222        ad 	case ENOLCK:
   1469  1.222        ad 		/* it needs a write lock: retry */
   1470  1.222        ad 		error = uvm_fault_upper_upgrade(ufi, flt, amap, NULL);
   1471  1.222        ad 		if (error != 0) {
   1472  1.222        ad 			return error;
   1473  1.222        ad 		}
   1474  1.222        ad 		KASSERT(rw_write_held(amap->am_lock));
   1475  1.222        ad 		goto retry;
   1476  1.222        ad 
   1477   1.51   thorpej 	default:
   1478  1.138  uebayasi 		return error;
   1479    1.1       mrg 	}
   1480    1.7       mrg 
   1481    1.7       mrg 	/*
   1482    1.7       mrg 	 * uobj is non null if the page is on loan from an object (i.e. uobj)
   1483    1.7       mrg 	 */
   1484    1.7       mrg 
   1485   1.94      yamt 	uobj = anon->an_page->uobject;	/* locked by anonget if !NULL */
   1486    1.7       mrg 
   1487    1.7       mrg 	/* locked: maps(read), amap, anon, uobj(if one) */
   1488  1.222        ad 	KASSERT(rw_lock_op(amap->am_lock) == flt->upper_lock_type);
   1489  1.186     rmind 	KASSERT(anon->an_lock == amap->am_lock);
   1490  1.222        ad 	KASSERT(uobj == NULL ||
   1491  1.222        ad 	    rw_lock_op(uobj->vmobjlock) == flt->lower_lock_type);
   1492    1.7       mrg 
   1493    1.7       mrg 	/*
   1494   1.63       chs 	 * special handling for loaned pages
   1495    1.7       mrg 	 */
   1496   1.52       chs 
   1497   1.94      yamt 	if (anon->an_page->loan_count) {
   1498  1.148  uebayasi 		error = uvm_fault_upper_loan(ufi, flt, anon, &uobj);
   1499  1.148  uebayasi 		if (error != 0)
   1500  1.148  uebayasi 			return error;
   1501  1.148  uebayasi 	}
   1502  1.160  uebayasi 
   1503  1.160  uebayasi 	/*
   1504  1.160  uebayasi 	 * if we are case 1B then we will need to allocate a new blank
   1505  1.160  uebayasi 	 * anon to transfer the data into.   note that we have a lock
   1506  1.160  uebayasi 	 * on anon, so no one can busy or release the page until we are done.
   1507  1.160  uebayasi 	 * also note that the ref count can't drop to zero here because
   1508  1.160  uebayasi 	 * it is > 1 and we are only dropping one ref.
   1509  1.160  uebayasi 	 *
   1510  1.160  uebayasi 	 * in the (hopefully very rare) case that we are out of RAM we
   1511  1.160  uebayasi 	 * will unlock, wait for more RAM, and refault.
   1512  1.160  uebayasi 	 *
   1513  1.160  uebayasi 	 * if we are out of anon VM we kill the process (XXX: could wait?).
   1514  1.160  uebayasi 	 */
   1515  1.160  uebayasi 
   1516  1.160  uebayasi 	if (flt->cow_now && anon->an_ref > 1) {
   1517  1.168  uebayasi 		flt->promote = true;
   1518  1.160  uebayasi 		error = uvm_fault_upper_promote(ufi, flt, uobj, anon);
   1519  1.160  uebayasi 	} else {
   1520  1.160  uebayasi 		error = uvm_fault_upper_direct(ufi, flt, uobj, anon);
   1521  1.160  uebayasi 	}
   1522  1.160  uebayasi 	return error;
   1523  1.148  uebayasi }
   1524  1.148  uebayasi 
   1525  1.173  uebayasi /*
   1526  1.173  uebayasi  * uvm_fault_upper_loan: handle loaned upper page.
   1527  1.173  uebayasi  *
   1528  1.177      yamt  *	1. if not cow'ing now, simply adjust flt->enter_prot.
   1529  1.173  uebayasi  *	2. if cow'ing now, and if ref count is 1, break loan.
   1530  1.173  uebayasi  */
   1531  1.173  uebayasi 
   1532  1.148  uebayasi static int
   1533  1.148  uebayasi uvm_fault_upper_loan(
   1534  1.148  uebayasi 	struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
   1535  1.148  uebayasi 	struct vm_anon *anon, struct uvm_object **ruobj)
   1536  1.148  uebayasi {
   1537  1.149  uebayasi 	struct vm_amap * const amap = ufi->entry->aref.ar_amap;
   1538  1.151  uebayasi 	int error = 0;
   1539  1.228     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
   1540  1.149  uebayasi 
   1541  1.149  uebayasi 	if (!flt->cow_now) {
   1542    1.7       mrg 
   1543  1.149  uebayasi 		/*
   1544  1.149  uebayasi 		 * for read faults on loaned pages we just cap the
   1545  1.149  uebayasi 		 * protection at read-only.
   1546  1.149  uebayasi 		 */
   1547   1.63       chs 
   1548  1.149  uebayasi 		flt->enter_prot = flt->enter_prot & ~VM_PROT_WRITE;
   1549    1.7       mrg 
   1550  1.149  uebayasi 	} else {
   1551  1.149  uebayasi 		/*
   1552  1.149  uebayasi 		 * note that we can't allow writes into a loaned page!
   1553  1.149  uebayasi 		 *
   1554  1.149  uebayasi 		 * if we have a write fault on a loaned page in an
   1555  1.149  uebayasi 		 * anon then we need to look at the anon's ref count.
   1556  1.149  uebayasi 		 * if it is greater than one then we are going to do
   1557  1.149  uebayasi 		 * a normal copy-on-write fault into a new anon (this
   1558  1.149  uebayasi 		 * is not a problem).  however, if the reference count
   1559  1.149  uebayasi 		 * is one (a case where we would normally allow a
   1560  1.149  uebayasi 		 * write directly to the page) then we need to kill
   1561  1.149  uebayasi 		 * the loan before we continue.
   1562  1.149  uebayasi 		 */
   1563  1.149  uebayasi 
   1564  1.149  uebayasi 		/* >1 case is already ok */
   1565  1.149  uebayasi 		if (anon->an_ref == 1) {
   1566  1.222        ad 			/* breaking loan requires a write lock. */
   1567  1.222        ad 			error = uvm_fault_upper_upgrade(ufi, flt, amap, NULL);
   1568  1.222        ad 			if (error != 0) {
   1569  1.222        ad 				return error;
   1570  1.222        ad 			}
   1571  1.222        ad 			KASSERT(rw_write_held(amap->am_lock));
   1572  1.222        ad 
   1573  1.155  uebayasi 			error = uvm_loanbreak_anon(anon, *ruobj);
   1574  1.151  uebayasi 			if (error != 0) {
   1575  1.186     rmind 				uvmfault_unlockall(ufi, amap, *ruobj);
   1576  1.151  uebayasi 				uvm_wait("flt_noram2");
   1577  1.151  uebayasi 				return ERESTART;
   1578  1.151  uebayasi 			}
    1579  1.206   msaitoh 			/* if we were a loan receiver, uobj is gone */
   1580  1.155  uebayasi 			if (*ruobj)
   1581  1.155  uebayasi 				*ruobj = NULL;
   1582  1.151  uebayasi 		}
   1583  1.151  uebayasi 	}
   1584  1.151  uebayasi 	return error;
   1585  1.151  uebayasi }
   1586  1.151  uebayasi 
   1587  1.173  uebayasi /*
   1588  1.173  uebayasi  * uvm_fault_upper_promote: promote upper page.
   1589  1.173  uebayasi  *
   1590  1.173  uebayasi  *	1. call uvmfault_promote.
   1591  1.173  uebayasi  *	2. enqueue page.
   1592  1.173  uebayasi  *	3. deref.
   1593  1.173  uebayasi  *	4. pass page to uvm_fault_upper_enter.
   1594  1.173  uebayasi  */
   1595  1.173  uebayasi 
   1596  1.148  uebayasi static int
   1597  1.148  uebayasi uvm_fault_upper_promote(
   1598  1.148  uebayasi 	struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
   1599  1.148  uebayasi 	struct uvm_object *uobj, struct vm_anon *anon)
   1600  1.148  uebayasi {
   1601  1.222        ad 	struct vm_amap * const amap = ufi->entry->aref.ar_amap;
   1602  1.149  uebayasi 	struct vm_anon * const oanon = anon;
   1603  1.149  uebayasi 	struct vm_page *pg;
   1604  1.149  uebayasi 	int error;
   1605  1.228     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
   1606  1.149  uebayasi 
   1607  1.149  uebayasi 	UVMHIST_LOG(maphist, "  case 1B: COW fault",0,0,0,0);
   1608  1.149  uebayasi 
   1609  1.222        ad 	/* promoting requires a write lock. */
   1610  1.222        ad 	error = uvm_fault_upper_upgrade(ufi, flt, amap, NULL);
   1611  1.222        ad 	if (error != 0) {
   1612  1.222        ad 		return error;
   1613  1.222        ad 	}
   1614  1.222        ad 	KASSERT(rw_write_held(amap->am_lock));
   1615  1.222        ad 
   1616  1.236        ad 	cpu_count(CPU_COUNT_FLT_ACOW, 1);
   1617  1.236        ad 
   1618  1.177      yamt 	error = uvmfault_promote(ufi, oanon, PGO_DONTCARE, &anon,
   1619  1.177      yamt 	    &flt->anon_spare);
   1620  1.149  uebayasi 	switch (error) {
   1621  1.149  uebayasi 	case 0:
   1622  1.149  uebayasi 		break;
   1623  1.149  uebayasi 	case ERESTART:
   1624  1.149  uebayasi 		return ERESTART;
   1625  1.149  uebayasi 	default:
   1626  1.149  uebayasi 		return error;
   1627  1.149  uebayasi 	}
   1628  1.227        ad 	pg = anon->an_page;
   1629    1.7       mrg 
   1630  1.222        ad 	KASSERT(anon->an_lock == oanon->an_lock);
   1631  1.227        ad 	KASSERT((pg->flags & (PG_BUSY | PG_FAKE)) == 0);
   1632    1.7       mrg 
   1633  1.149  uebayasi 	/* deref: can not drop to zero here by defn! */
   1634  1.183      yamt 	KASSERT(oanon->an_ref > 1);
   1635  1.149  uebayasi 	oanon->an_ref--;
   1636   1.53   thorpej 
   1637  1.149  uebayasi 	/*
   1638  1.149  uebayasi 	 * note: oanon is still locked, as is the new anon.  we
   1639  1.149  uebayasi 	 * need to check for this later when we unlock oanon; if
   1640  1.149  uebayasi 	 * oanon != anon, we'll have to unlock anon, too.
   1641  1.149  uebayasi 	 */
   1642    1.7       mrg 
   1643  1.149  uebayasi 	return uvm_fault_upper_enter(ufi, flt, uobj, anon, pg, oanon);
   1644  1.148  uebayasi }
   1645  1.148  uebayasi 
   1646  1.173  uebayasi /*
   1647  1.173  uebayasi  * uvm_fault_upper_direct: handle direct fault.
   1648  1.173  uebayasi  */
   1649  1.173  uebayasi 
   1650  1.148  uebayasi static int
   1651  1.148  uebayasi uvm_fault_upper_direct(
   1652  1.148  uebayasi 	struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
   1653  1.148  uebayasi 	struct uvm_object *uobj, struct vm_anon *anon)
   1654  1.148  uebayasi {
   1655  1.149  uebayasi 	struct vm_anon * const oanon = anon;
   1656  1.149  uebayasi 	struct vm_page *pg;
   1657  1.228     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
   1658   1.52       chs 
   1659  1.213        ad 	cpu_count(CPU_COUNT_FLT_ANON, 1);
   1660  1.149  uebayasi 	pg = anon->an_page;
   1661  1.149  uebayasi 	if (anon->an_ref > 1)     /* disallow writes to ref > 1 anons */
   1662  1.149  uebayasi 		flt->enter_prot = flt->enter_prot & ~VM_PROT_WRITE;
   1663    1.7       mrg 
   1664  1.149  uebayasi 	return uvm_fault_upper_enter(ufi, flt, uobj, anon, pg, oanon);
   1665  1.148  uebayasi }
   1666  1.148  uebayasi 
   1667  1.173  uebayasi /*
   1668  1.173  uebayasi  * uvm_fault_upper_enter: enter h/w mapping of upper page.
   1669  1.173  uebayasi  */
   1670  1.173  uebayasi 
   1671  1.148  uebayasi static int
   1672  1.148  uebayasi uvm_fault_upper_enter(
   1673  1.177      yamt 	struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
   1674  1.148  uebayasi 	struct uvm_object *uobj, struct vm_anon *anon, struct vm_page *pg,
   1675  1.148  uebayasi 	struct vm_anon *oanon)
   1676  1.148  uebayasi {
   1677  1.202       chs 	struct pmap *pmap = ufi->orig_map->pmap;
   1678  1.202       chs 	vaddr_t va = ufi->orig_rvaddr;
   1679  1.148  uebayasi 	struct vm_amap * const amap = ufi->entry->aref.ar_amap;
   1680  1.228     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
   1681    1.7       mrg 
   1682  1.173  uebayasi 	/* locked: maps(read), amap, oanon, anon(if different from oanon) */
   1683  1.222        ad 	KASSERT(rw_lock_op(amap->am_lock) == flt->upper_lock_type);
   1684  1.186     rmind 	KASSERT(anon->an_lock == amap->am_lock);
   1685  1.186     rmind 	KASSERT(oanon->an_lock == amap->am_lock);
   1686  1.222        ad 	KASSERT(uobj == NULL ||
   1687  1.222        ad 	    rw_lock_op(uobj->vmobjlock) == flt->lower_lock_type);
   1688  1.215        ad 	KASSERT(uvm_pagegetdirty(pg) != UVM_PAGE_STATUS_CLEAN);
   1689    1.7       mrg 
   1690    1.7       mrg 	/*
   1691   1.69       chs 	 * now map the page in.
   1692    1.7       mrg 	 */
   1693    1.7       mrg 
   1694  1.177      yamt 	UVMHIST_LOG(maphist,
   1695  1.201  pgoyette 	    "  MAPPING: anon: pm=%#jx, va=%#jx, pg=%#jx, promote=%jd",
   1696  1.202       chs 	    (uintptr_t)pmap, va, (uintptr_t)pg, flt->promote);
   1697  1.202       chs 	if (pmap_enter(pmap, va, VM_PAGE_TO_PHYS(pg),
   1698  1.177      yamt 	    flt->enter_prot, flt->access_type | PMAP_CANFAIL |
   1699  1.177      yamt 	    (flt->wire_mapping ? PMAP_WIRED : 0)) != 0) {
   1700   1.69       chs 
   1701   1.46   thorpej 		/*
   1702  1.202       chs 		 * If pmap_enter() fails, it must not leave behind an existing
   1703  1.202       chs 		 * pmap entry.  In particular, a now-stale entry for a different
   1704  1.202       chs 		 * page would leave the pmap inconsistent with the vm_map.
   1705  1.202       chs 		 * This is not to imply that pmap_enter() should remove an
   1706  1.202       chs 		 * existing mapping in such a situation (since that could create
   1707  1.202       chs 		 * different problems, eg. if the existing mapping is wired),
   1708  1.202       chs 		 * but rather that the pmap should be designed such that it
   1709  1.202       chs 		 * never needs to fail when the new mapping is replacing an
   1710  1.202       chs 		 * existing mapping and the new page has no existing mappings.
   1711  1.226        ad 		 *
   1712  1.226        ad 		 * XXX This can't be asserted safely any more because many
   1713  1.226        ad 		 * LWPs and/or many processes could simultaneously fault on
   1714  1.226        ad 		 * the same VA and some might succeed.
   1715  1.202       chs 		 */
   1716  1.202       chs 
   1717  1.226        ad 		/* KASSERT(!pmap_extract(pmap, va, NULL)); */
   1718  1.202       chs 
   1719  1.202       chs 		/*
   1720  1.222        ad 		 * ensure that the page is queued in the case that
   1721  1.222        ad 		 * we just promoted.
   1722  1.222        ad 		 */
   1723  1.222        ad 
   1724  1.227        ad 		uvm_pagelock(pg);
   1725  1.227        ad 		uvm_pageenqueue(pg);
   1726  1.227        ad 		uvm_pageunlock(pg);
   1727  1.222        ad 
   1728  1.222        ad 		/*
   1729   1.46   thorpej 		 * No need to undo what we did; we can simply think of
   1730   1.46   thorpej 		 * this as the pmap throwing away the mapping information.
   1731   1.46   thorpej 		 *
   1732   1.46   thorpej 		 * We do, however, have to go through the ReFault path,
   1733   1.46   thorpej 		 * as the map may change while we're asleep.
   1734   1.46   thorpej 		 */
   1735   1.69       chs 
   1736  1.186     rmind 		uvmfault_unlockall(ufi, amap, uobj);
   1737   1.92      yamt 		if (!uvm_reclaimable()) {
   1738   1.46   thorpej 			UVMHIST_LOG(maphist,
   1739   1.46   thorpej 			    "<- failed.  out of VM",0,0,0,0);
   1740   1.46   thorpej 			/* XXX instrumentation */
   1741  1.148  uebayasi 			return ENOMEM;
   1742   1.46   thorpej 		}
   1743   1.46   thorpej 		/* XXX instrumentation */
   1744   1.46   thorpej 		uvm_wait("flt_pmfail1");
   1745  1.139  uebayasi 		return ERESTART;
   1746   1.46   thorpej 	}
   1747    1.7       mrg 
   1748  1.177      yamt 	uvm_fault_upper_done(ufi, flt, anon, pg);
   1749  1.169  uebayasi 
   1750  1.169  uebayasi 	/*
   1751  1.169  uebayasi 	 * done case 1!  finish up by unlocking everything and returning success
   1752  1.169  uebayasi 	 */
   1753  1.169  uebayasi 
   1754  1.202       chs 	pmap_update(pmap);
   1755  1.186     rmind 	uvmfault_unlockall(ufi, amap, uobj);
   1756  1.169  uebayasi 	return 0;
   1757  1.148  uebayasi }
   1758  1.148  uebayasi 
   1759  1.173  uebayasi /*
   1760  1.173  uebayasi  * uvm_fault_upper_done: queue upper center page.
   1761  1.173  uebayasi  */
   1762  1.173  uebayasi 
   1763  1.169  uebayasi static void
   1764  1.148  uebayasi uvm_fault_upper_done(
   1765  1.177      yamt 	struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
   1766  1.177      yamt 	struct vm_anon *anon, struct vm_page *pg)
   1767  1.148  uebayasi {
   1768  1.174     rmind 	const bool wire_paging = flt->wire_paging;
   1769  1.174     rmind 
   1770  1.228     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
   1771  1.148  uebayasi 
   1772    1.7       mrg 	/*
   1773   1.46   thorpej 	 * ... update the page queues.
   1774    1.7       mrg 	 */
   1775    1.7       mrg 
   1776  1.174     rmind 	if (wire_paging) {
   1777  1.227        ad 		uvm_pagelock(pg);
   1778    1.8     chuck 		uvm_pagewire(pg);
   1779  1.227        ad 		uvm_pageunlock(pg);
   1780   1.29       chs 
   1781   1.29       chs 		/*
   1782   1.29       chs 		 * since the now-wired page cannot be paged out,
   1783   1.29       chs 		 * release its swap resources for others to use.
   1784  1.215        ad 		 * and since an anon with no swap cannot be clean,
   1785  1.215        ad 		 * mark it dirty now.
   1786   1.29       chs 		 */
   1787   1.29       chs 
   1788  1.215        ad 		uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
   1789  1.174     rmind 		uvm_anon_dropswap(anon);
   1790  1.227        ad 	} else if (uvmpdpol_pageactivate_p(pg)) {
   1791  1.227        ad 		/*
   1792  1.227        ad 		 * avoid re-activating the page unless needed,
   1793  1.227        ad 		 * to avoid false sharing on multiprocessor.
   1794  1.227        ad 		 */
   1795  1.227        ad 
   1796  1.227        ad 		uvm_pagelock(pg);
   1797  1.227        ad 		uvm_pageactivate(pg);
   1798  1.227        ad 		uvm_pageunlock(pg);
   1799  1.174     rmind 	}
   1800  1.138  uebayasi }
   1801    1.1       mrg 
   1802  1.173  uebayasi /*
   1803  1.222        ad  * uvm_fault_lower_upgrade: upgrade lower lock, reader -> writer
   1804  1.222        ad  */
   1805  1.222        ad 
   1806  1.222        ad static inline int
   1807  1.222        ad uvm_fault_lower_upgrade(struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
   1808  1.222        ad     struct vm_amap *amap, struct uvm_object *uobj, struct vm_page *uobjpage)
   1809  1.222        ad {
   1810  1.222        ad 
   1811  1.224     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
   1812  1.224     skrll 
   1813  1.222        ad 	KASSERT(uobj != NULL);
   1814  1.222        ad 	KASSERT(flt->lower_lock_type == rw_lock_op(uobj->vmobjlock));
   1815  1.222        ad 
   1816  1.222        ad 	/*
   1817  1.222        ad 	 * fast path.
   1818  1.222        ad 	 */
   1819  1.223     skrll 
   1820  1.222        ad 	if (__predict_true(flt->lower_lock_type == RW_WRITER)) {
   1821  1.222        ad 		return 0;
   1822  1.222        ad 	}
   1823  1.222        ad 
   1824  1.222        ad 	/*
   1825  1.222        ad 	 * otherwise try for the upgrade.  if we don't get it, unlock
   1826  1.222        ad 	 * everything, restart the fault and next time around get a writer
   1827  1.222        ad 	 * lock.
   1828  1.222        ad 	 */
   1829  1.222        ad 
   1830  1.222        ad 	flt->lower_lock_type = RW_WRITER;
   1831  1.222        ad 	if (__predict_false(!rw_tryupgrade(uobj->vmobjlock))) {
   1832  1.222        ad 		uvmfault_unlockall(ufi, amap, uobj);
   1833  1.222        ad 		cpu_count(CPU_COUNT_FLTNOUP, 1);
   1834  1.222        ad 		UVMHIST_LOG(maphist, "  !upgrade lower", 0, 0,0,0);
   1835  1.222        ad 		return ERESTART;
   1836  1.222        ad 	}
   1837  1.222        ad 	cpu_count(CPU_COUNT_FLTUP, 1);
   1838  1.222        ad 	KASSERT(flt->lower_lock_type == rw_lock_op(uobj->vmobjlock));
   1839  1.222        ad 	return 0;
   1840  1.222        ad }
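                          /*
                           * (same calling convention as uvm_fault_upper_upgrade: on a failed
                           * upgrade everything is unlocked and ERESTART is returned, and the
                           * retried fault takes the writer lock from the start since
                           * flt->lower_lock_type has already been set to RW_WRITER.)
                           */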
   1841  1.222        ad 
   1842  1.222        ad /*
   1843  1.173  uebayasi  * uvm_fault_lower: handle lower fault.
   1844  1.173  uebayasi  *
   1845  1.173  uebayasi  *	1. check uobj
   1846  1.173  uebayasi  *	1.1. if null, ZFOD.
   1847  1.235    andvar  *	1.2. if not null, look up unmapped neighbor pages.
   1848  1.173  uebayasi  *	2. for center page, check if promote.
   1849  1.173  uebayasi  *	2.1. ZFOD always needs promotion.
   1850  1.173  uebayasi  *	2.2. other uobjs, when entry is marked COW (usually MAP_PRIVATE vnode).
   1851  1.173  uebayasi  *	3. if uobj is not ZFOD and page is not found, do i/o.
   1852  1.173  uebayasi  *	4. dispatch either direct / promote fault.
   1853  1.173  uebayasi  */
   1854  1.173  uebayasi 
   1855  1.138  uebayasi static int
   1856  1.173  uebayasi uvm_fault_lower(
   1857  1.140  uebayasi 	struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
   1858  1.173  uebayasi 	struct vm_page **pages)
   1859  1.138  uebayasi {
   1860  1.198  riastrad 	struct vm_amap *amap __diagused = ufi->entry->aref.ar_amap;
   1861  1.173  uebayasi 	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
   1862  1.173  uebayasi 	struct vm_page *uobjpage;
   1863  1.138  uebayasi 	int error;
   1864  1.228     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
   1865  1.173  uebayasi 
   1866    1.7       mrg 	/*
   1867  1.173  uebayasi 	 * now, if the desired page is not shadowed by the amap and we have
   1868  1.173  uebayasi 	 * a backing object that does not have a special fault routine, then
   1869  1.173  uebayasi 	 * we ask (with pgo_get) the object for resident pages that we care
   1870  1.173  uebayasi 	 * about and attempt to map them in.  we do not let pgo_get block
   1871  1.173  uebayasi 	 * (PGO_LOCKED).
   1872  1.173  uebayasi 	 */
   1873  1.173  uebayasi 
   1874  1.173  uebayasi 	if (uobj == NULL) {
   1875  1.173  uebayasi 		/* zero fill; don't care neighbor pages */
   1876  1.173  uebayasi 		uobjpage = NULL;
   1877  1.173  uebayasi 	} else {
   1878  1.173  uebayasi 		uvm_fault_lower_lookup(ufi, flt, pages);
   1879  1.173  uebayasi 		uobjpage = pages[flt->centeridx];
   1880  1.173  uebayasi 	}
   1881  1.173  uebayasi 
   1882  1.173  uebayasi 	/*
   1883  1.173  uebayasi 	 * note that at this point we are done with any front or back pages.
   1884  1.173  uebayasi 	 * we are now going to focus on the center page (i.e. the one we've
   1885  1.173  uebayasi 	 * faulted on).  if we have faulted on the upper (anon) layer
   1886  1.173  uebayasi 	 * [i.e. case 1], then the anon we want is anons[centeridx] (we have
   1887  1.173  uebayasi 	 * not touched it yet).  if we have faulted on the bottom (uobj)
   1888  1.173  uebayasi 	 * layer [i.e. case 2] and the page was both present and available,
    1889  1.173  uebayasi 	 * then we've got a pointer to it as "uobjpage" (no longer marked
    1890  1.173  uebayasi 	 * BUSY: see the PG_BUSY KASSERTs in uvm_fault_lower_lookup).
   1891    1.7       mrg 	 */
   1892    1.7       mrg 
   1893    1.7       mrg 	/*
   1894    1.7       mrg 	 * locked:
   1895    1.7       mrg 	 * maps(read), amap(if there), uobj(if !null), uobjpage(if !null)
   1896    1.7       mrg 	 */
   1897  1.222        ad 	KASSERT(amap == NULL ||
   1898  1.222        ad 	    rw_lock_op(amap->am_lock) == flt->upper_lock_type);
   1899  1.227        ad 	KASSERT(uobj == NULL ||
   1900  1.227        ad 	    rw_lock_op(uobj->vmobjlock) == flt->lower_lock_type);
   1901    1.7       mrg 
   1902    1.7       mrg 	/*
   1903    1.7       mrg 	 * note that uobjpage can not be PGO_DONTCARE at this point.  we now
   1904    1.7       mrg 	 * set uobjpage to PGO_DONTCARE if we are doing a zero fill.  if we
   1905    1.7       mrg 	 * have a backing object, check and see if we are going to promote
   1906    1.7       mrg 	 * the data up to an anon during the fault.
   1907    1.7       mrg 	 */
   1908    1.7       mrg 
   1909    1.7       mrg 	if (uobj == NULL) {
   1910   1.63       chs 		uobjpage = PGO_DONTCARE;
   1911  1.168  uebayasi 		flt->promote = true;		/* always need anon here */
   1912    1.7       mrg 	} else {
   1913   1.52       chs 		KASSERT(uobjpage != PGO_DONTCARE);
   1914  1.168  uebayasi 		flt->promote = flt->cow_now && UVM_ET_ISCOPYONWRITE(ufi->entry);
   1915    1.7       mrg 	}
   1916  1.201  pgoyette 	UVMHIST_LOG(maphist, "  case 2 fault: promote=%jd, zfill=%jd",
   1917  1.168  uebayasi 	    flt->promote, (uobj == NULL), 0,0);
   1918    1.1       mrg 
   1919    1.7       mrg 	/*
   1920    1.9     chuck 	 * if uobjpage is not null then we do not need to do I/O to get the
   1921    1.9     chuck 	 * uobjpage.
   1922    1.9     chuck 	 *
   1923   1.63       chs 	 * if uobjpage is null, then we need to unlock and ask the pager to
   1924    1.7       mrg 	 * get the data for us.   once we have the data, we need to reverify
    1925    1.7       mrg 	 * the state of the world.   we are currently not holding any resources.
   1926    1.7       mrg 	 */
   1927    1.1       mrg 
   1928    1.9     chuck 	if (uobjpage) {
   1929    1.9     chuck 		/* update rusage counters */
   1930  1.124        ad 		curlwp->l_ru.ru_minflt++;
   1931    1.9     chuck 	} else {
   1932  1.163  uebayasi 		error = uvm_fault_lower_io(ufi, flt, &uobj, &uobjpage);
   1933  1.148  uebayasi 		if (error != 0)
   1934  1.148  uebayasi 			return error;
   1935  1.148  uebayasi 	}
   1936  1.160  uebayasi 
   1937  1.160  uebayasi 	/*
   1938  1.160  uebayasi 	 * locked:
   1939  1.160  uebayasi 	 * maps(read), amap(if !null), uobj(if !null), uobjpage(if uobj)
   1940  1.160  uebayasi 	 */
   1941  1.222        ad 	KASSERT(amap == NULL ||
   1942  1.222        ad 	    rw_lock_op(amap->am_lock) == flt->upper_lock_type);
   1943  1.227        ad 	KASSERT(uobj == NULL ||
   1944  1.227        ad 	    rw_lock_op(uobj->vmobjlock) == flt->lower_lock_type);
   1945  1.160  uebayasi 
   1946  1.160  uebayasi 	/*
   1947  1.160  uebayasi 	 * notes:
   1948  1.160  uebayasi 	 *  - at this point uobjpage can not be NULL
   1949  1.160  uebayasi 	 *  - at this point uobjpage can not be PG_RELEASED (since we checked
   1950  1.160  uebayasi 	 *  for it above)
   1951  1.218        ad 	 *  - at this point uobjpage could be waited on (handle later)
   1952  1.227        ad 	 *  - uobjpage can be from a different object if tmpfs (vnode vs UAO)
   1953  1.160  uebayasi 	 */
   1954  1.160  uebayasi 
   1955  1.177      yamt 	KASSERT(uobjpage != NULL);
   1956  1.227        ad 	KASSERT(uobj == NULL ||
   1957  1.227        ad 	    uobjpage->uobject->vmobjlock == uobj->vmobjlock);
   1958  1.160  uebayasi 	KASSERT(uobj == NULL || !UVM_OBJ_IS_CLEAN(uobjpage->uobject) ||
   1959  1.215        ad 	    uvm_pagegetdirty(uobjpage) == UVM_PAGE_STATUS_CLEAN);
   1960  1.160  uebayasi 
   1961  1.177      yamt 	if (!flt->promote) {
   1962  1.163  uebayasi 		error = uvm_fault_lower_direct(ufi, flt, uobj, uobjpage);
   1963  1.160  uebayasi 	} else {
   1964  1.163  uebayasi 		error = uvm_fault_lower_promote(ufi, flt, uobj, uobjpage);
   1965  1.160  uebayasi 	}
   1966  1.160  uebayasi 	return error;
   1967  1.148  uebayasi }
   1968  1.148  uebayasi 
   1969  1.173  uebayasi /*
   1970  1.173  uebayasi  * uvm_fault_lower_lookup: look up on-memory uobj pages.
   1971  1.173  uebayasi  *
   1972  1.173  uebayasi  *	1. get on-memory pages.
   1973  1.173  uebayasi  *	2. if failed, give up (get only center page later).
   1974  1.173  uebayasi  *	3. if succeeded, enter h/w mapping of neighbor pages.
   1975  1.173  uebayasi  */
   1976  1.173  uebayasi 
   1977  1.173  uebayasi static void
   1978  1.173  uebayasi uvm_fault_lower_lookup(
   1979  1.177      yamt 	struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
   1980  1.173  uebayasi 	struct vm_page **pages)
   1981  1.173  uebayasi {
   1982  1.173  uebayasi 	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
   1983  1.173  uebayasi 	int lcv, gotpages;
   1984  1.173  uebayasi 	vaddr_t currva;
   1985  1.227        ad 	bool entered;
   1986  1.228     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
   1987  1.173  uebayasi 
   1988  1.222        ad 	rw_enter(uobj->vmobjlock, flt->lower_lock_type);
   1989  1.222        ad 
   1990  1.222        ad 	/*
   1991  1.222        ad 	 * Locked: maps(read), amap(if there), uobj
   1992  1.222        ad 	 */
   1993  1.173  uebayasi 
   1994  1.213        ad 	cpu_count(CPU_COUNT_FLTLGET, 1);
   1995  1.173  uebayasi 	gotpages = flt->npages;
   1996  1.173  uebayasi 	(void) uobj->pgops->pgo_get(uobj,
   1997  1.173  uebayasi 	    ufi->entry->offset + flt->startva - ufi->entry->start,
   1998  1.173  uebayasi 	    pages, &gotpages, flt->centeridx,
   1999  1.222        ad 	    flt->access_type & MASK(ufi->entry), ufi->entry->advice,
   2000  1.227        ad 	    PGO_LOCKED);
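                          	/*
                          	 * (MASK(entry), defined earlier in this file, evaluates to
                          	 * ~VM_PROT_WRITE for copy-on-write entries and VM_PROT_ALL
                          	 * otherwise, so write access is never requested through a COW
                          	 * mapping here.)
                          	 */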
   2001  1.173  uebayasi 
   2002  1.222        ad 	KASSERT(rw_lock_op(uobj->vmobjlock) == flt->lower_lock_type);
   2003  1.186     rmind 
   2004  1.173  uebayasi 	/*
   2005  1.173  uebayasi 	 * check for pages to map, if we got any
   2006  1.173  uebayasi 	 */
   2007  1.173  uebayasi 
   2008  1.173  uebayasi 	if (gotpages == 0) {
   2009  1.173  uebayasi 		pages[flt->centeridx] = NULL;
   2010  1.173  uebayasi 		return;
   2011  1.173  uebayasi 	}
   2012  1.173  uebayasi 
   2013  1.227        ad 	entered = false;
   2014  1.173  uebayasi 	currva = flt->startva;
   2015  1.173  uebayasi 	for (lcv = 0; lcv < flt->npages; lcv++, currva += PAGE_SIZE) {
   2016  1.173  uebayasi 		struct vm_page *curpg;
   2017  1.173  uebayasi 
   2018  1.173  uebayasi 		curpg = pages[lcv];
   2019  1.173  uebayasi 		if (curpg == NULL || curpg == PGO_DONTCARE) {
   2020  1.173  uebayasi 			continue;
   2021  1.173  uebayasi 		}
   2022  1.173  uebayasi 
   2023  1.227        ad 		/*
   2024  1.227        ad 		 * in the case of tmpfs, the pages might be from a different
   2025  1.227        ad 		 * uvm_object.  just make sure that they have the same lock.
   2026  1.227        ad 		 */
   2027  1.227        ad 
   2028  1.227        ad 		KASSERT(curpg->uobject->vmobjlock == uobj->vmobjlock);
   2029  1.227        ad 		KASSERT((curpg->flags & PG_BUSY) == 0);
   2030  1.222        ad 
   2031  1.173  uebayasi 		/*
    2032  1.227        ad 		 * leave the center page for later.  don't screw with
   2033  1.227        ad 		 * existing mappings (needless & expensive).
   2034  1.173  uebayasi 		 */
   2035  1.173  uebayasi 
   2036  1.173  uebayasi 		if (lcv == flt->centeridx) {
   2037  1.217       rin 			UVMHIST_LOG(maphist, "  got uobjpage (%#jx) "
   2038  1.201  pgoyette 			    "with locked get", (uintptr_t)curpg, 0, 0, 0);
   2039  1.227        ad 		} else if (!pmap_extract(ufi->orig_map->pmap, currva, NULL)) {
   2040  1.215        ad 			uvm_fault_lower_neighbor(ufi, flt, currva, curpg);
   2041  1.227        ad 			entered = true;
   2042  1.173  uebayasi 		}
   2043  1.173  uebayasi 	}
   2044  1.227        ad 	if (entered) {
   2045  1.227        ad 		pmap_update(ufi->orig_map->pmap);
   2046  1.227        ad 	}
   2047  1.173  uebayasi }
   2048  1.173  uebayasi 
   2049  1.173  uebayasi /*
   2050  1.173  uebayasi  * uvm_fault_lower_neighbor: enter h/w mapping of lower neighbor page.
   2051  1.173  uebayasi  */
   2052  1.173  uebayasi 
   2053  1.173  uebayasi static void
   2054  1.173  uebayasi uvm_fault_lower_neighbor(
   2055  1.177      yamt 	struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
   2056  1.215        ad 	vaddr_t currva, struct vm_page *pg)
   2057  1.173  uebayasi {
   2058  1.215        ad 	const bool readonly = uvm_pagereadonly_p(pg) || pg->loan_count > 0;
   2059  1.182     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
   2060  1.173  uebayasi 
   2061  1.173  uebayasi 	/* locked: maps(read), amap(if there), uobj */
   2062  1.173  uebayasi 
   2063  1.173  uebayasi 	/*
   2064  1.173  uebayasi 	 * calling pgo_get with PGO_LOCKED returns us pages which
   2065  1.173  uebayasi 	 * are neither busy nor released, so we don't need to check
   2066  1.173  uebayasi 	 * for this.  we can just directly enter the pages.
   2067  1.227        ad 	 *
   2068  1.222        ad 	 * there wasn't a direct fault on the page, so avoid the cost of
   2069  1.227        ad 	 * activating it.
   2070  1.222        ad 	 */
   2071  1.222        ad 
   2072  1.227        ad 	if (!uvmpdpol_pageisqueued_p(pg) && pg->wire_count == 0) {
   2073  1.222        ad 		uvm_pagelock(pg);
   2074  1.222        ad 		uvm_pageenqueue(pg);
   2075  1.222        ad 		uvm_pageunlock(pg);
   2076  1.222        ad 	}
   2077  1.227        ad 
   2078  1.173  uebayasi 	UVMHIST_LOG(maphist,
   2079  1.201  pgoyette 	    "  MAPPING: n obj: pm=%#jx, va=%#jx, pg=%#jx",
   2080  1.201  pgoyette 	    (uintptr_t)ufi->orig_map->pmap, currva, (uintptr_t)pg, 0);
   2081  1.213        ad 	cpu_count(CPU_COUNT_FLTNOMAP, 1);
   2082  1.173  uebayasi 
   2083  1.173  uebayasi 	/*
   2084  1.173  uebayasi 	 * Since this page isn't the page that's actually faulting,
   2085  1.173  uebayasi 	 * ignore pmap_enter() failures; it's not critical that we
   2086  1.173  uebayasi 	 * enter these right now.
   2087  1.219        ad 	 * NOTE: page can't be waited on or PG_RELEASED because we've
   2088  1.173  uebayasi 	 * held the lock the whole time we've had the handle.
   2089  1.173  uebayasi 	 */
   2090  1.173  uebayasi 	KASSERT((pg->flags & PG_PAGEOUT) == 0);
   2091  1.173  uebayasi 	KASSERT((pg->flags & PG_RELEASED) == 0);
   2092  1.215        ad 	KASSERT(!UVM_OBJ_IS_CLEAN(pg->uobject) ||
   2093  1.215        ad 	    uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN);
   2094  1.227        ad 	KASSERT((pg->flags & PG_BUSY) == 0);
   2095  1.222        ad 	KASSERT(rw_lock_op(pg->uobject->vmobjlock) == flt->lower_lock_type);
   2096  1.199     skrll 
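                          	/*
                          	 * for a wired mapping the access-type bits handed to
                          	 * pmap_enter() in the flags are widened to the full mapping
                          	 * protection, presumably so the wired mapping never needs to
                          	 * take another fault to set mod/ref state.
                          	 */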
   2097  1.223     skrll 	const vm_prot_t mapprot =
   2098  1.199     skrll 	    readonly ? (flt->enter_prot & ~VM_PROT_WRITE) :
   2099  1.199     skrll 	    flt->enter_prot & MASK(ufi->entry);
   2100  1.223     skrll 	const u_int mapflags =
   2101  1.199     skrll 	    PMAP_CANFAIL | (flt->wire_mapping ? (mapprot | PMAP_WIRED) : 0);
   2102  1.173  uebayasi 	(void) pmap_enter(ufi->orig_map->pmap, currva,
   2103  1.199     skrll 	    VM_PAGE_TO_PHYS(pg), mapprot, mapflags);
   2104  1.173  uebayasi }
   2105  1.173  uebayasi 
   2106  1.173  uebayasi /*
   2107  1.173  uebayasi  * uvm_fault_lower_io: get lower page from backing store.
   2108  1.173  uebayasi  *
   2109  1.173  uebayasi  *	1. unlock everything, because i/o will block.
   2110  1.173  uebayasi  *	2. call pgo_get.
   2111  1.173  uebayasi  *	3. if failed, recover.
   2112  1.173  uebayasi  *	4. if succeeded, relock everything and verify things.
   2113  1.173  uebayasi  */
   2114  1.173  uebayasi 
   2115  1.148  uebayasi static int
   2116  1.163  uebayasi uvm_fault_lower_io(
   2117  1.222        ad 	struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
   2118  1.156  uebayasi 	struct uvm_object **ruobj, struct vm_page **ruobjpage)
   2119  1.148  uebayasi {
   2120  1.149  uebayasi 	struct vm_amap * const amap = ufi->entry->aref.ar_amap;
   2121  1.156  uebayasi 	struct uvm_object *uobj = *ruobj;
   2122  1.158  uebayasi 	struct vm_page *pg;
   2123  1.149  uebayasi 	bool locked;
   2124  1.149  uebayasi 	int gotpages;
   2125  1.149  uebayasi 	int error;
   2126  1.149  uebayasi 	voff_t uoff;
   2127  1.208       chs 	vm_prot_t access_type;
   2128  1.208       chs 	int advice;
   2129  1.228     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
   2130  1.149  uebayasi 
   2131  1.208       chs 	/* grab everything we need from the entry before we unlock */
   2132  1.208       chs 	uoff = (ufi->orig_rvaddr - ufi->entry->start) + ufi->entry->offset;
   2133  1.208       chs 	access_type = flt->access_type & MASK(ufi->entry);
   2134  1.208       chs 	advice = ufi->entry->advice;
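                          	/*
                          	 * (the snapshot above matters: once the maps are unlocked for
                          	 * i/o the entry can change or go away, and it is only
                          	 * re-verified after pgo_get returns.)
                          	 */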
   2135  1.208       chs 
   2136  1.186     rmind 	/* Locked: maps(read), amap(if there), uobj */
   2137  1.222        ad 	KASSERT(rw_lock_op(uobj->vmobjlock) == flt->lower_lock_type);
   2138  1.222        ad 
   2139  1.222        ad 	/* Upgrade to a write lock if needed. */
   2140  1.222        ad 	error = uvm_fault_lower_upgrade(ufi, flt, amap, uobj, NULL);
   2141  1.222        ad 	if (error != 0) {
   2142  1.222        ad 		return error;
   2143  1.222        ad 	}
   2144  1.186     rmind 	uvmfault_unlockall(ufi, amap, NULL);
   2145  1.186     rmind 
   2146  1.236        ad 	/* update rusage counters */
   2147  1.236        ad 	curlwp->l_ru.ru_majflt++;
   2148  1.236        ad 
   2149  1.222        ad 	/* Locked: uobj(write) */
   2150  1.222        ad 	KASSERT(rw_write_held(uobj->vmobjlock));
   2151   1.63       chs 
   2152  1.213        ad 	cpu_count(CPU_COUNT_FLTGET, 1);
   2153  1.149  uebayasi 	gotpages = 1;
   2154  1.166   mlelstv 	pg = NULL;
   2155  1.158  uebayasi 	error = uobj->pgops->pgo_get(uobj, uoff, &pg, &gotpages,
   2156  1.208       chs 	    0, access_type, advice, PGO_SYNCIO);
   2157  1.158  uebayasi 	/* locked: pg(if no error) */
   2158   1.52       chs 
   2159  1.149  uebayasi 	/*
   2160  1.149  uebayasi 	 * recover from I/O
   2161  1.149  uebayasi 	 */
   2162    1.1       mrg 
   2163  1.149  uebayasi 	if (error) {
   2164  1.149  uebayasi 		if (error == EAGAIN) {
   2165  1.149  uebayasi 			UVMHIST_LOG(maphist,
   2166  1.149  uebayasi 			    "  pgo_get says TRY AGAIN!",0,0,0,0);
   2167  1.149  uebayasi 			kpause("fltagain2", false, hz/2, NULL);
   2168  1.149  uebayasi 			return ERESTART;
   2169  1.149  uebayasi 		}
   2170    1.1       mrg 
   2171  1.139  uebayasi #if 0
   2172  1.149  uebayasi 		KASSERT(error != ERESTART);
   2173  1.139  uebayasi #else
   2174  1.149  uebayasi 		/* XXXUEBS don't re-fault? */
   2175  1.149  uebayasi 		if (error == ERESTART)
   2176  1.149  uebayasi 			error = EIO;
   2177  1.139  uebayasi #endif
   2178  1.139  uebayasi 
   2179  1.201  pgoyette 		UVMHIST_LOG(maphist, "<- pgo_get failed (code %jd)",
   2180  1.149  uebayasi 		    error, 0,0,0);
   2181  1.149  uebayasi 		return error;
   2182  1.149  uebayasi 	}
   2183    1.7       mrg 
   2184  1.149  uebayasi 	/*
   2185  1.149  uebayasi 	 * re-verify the state of the world by first trying to relock
   2186  1.149  uebayasi 	 * the maps.  always relock the object.
   2187  1.149  uebayasi 	 */
   2188    1.7       mrg 
   2189  1.149  uebayasi 	locked = uvmfault_relock(ufi);
   2190  1.149  uebayasi 	if (locked && amap)
   2191  1.222        ad 		amap_lock(amap, flt->upper_lock_type);
   2192  1.156  uebayasi 
    2193  1.156  uebayasi 	/* the object owning the page might have changed while we slept */
   2194  1.158  uebayasi 	uobj = pg->uobject;
   2195  1.156  uebayasi 
   2196  1.222        ad 	rw_enter(uobj->vmobjlock, flt->lower_lock_type);
   2197  1.186     rmind 	KASSERT((pg->flags & PG_BUSY) != 0);
   2198  1.222        ad 	KASSERT(flt->lower_lock_type == RW_WRITER);
   2199  1.186     rmind 
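                          	/* the page was just fetched from backing store: activate it */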
   2200  1.214        ad 	uvm_pagelock(pg);
   2201  1.186     rmind 	uvm_pageactivate(pg);
   2202  1.214        ad 	uvm_pageunlock(pg);
   2203   1.63       chs 
   2204  1.158  uebayasi 	/* locked(locked): maps(read), amap(if !null), uobj, pg */
   2205  1.158  uebayasi 	/* locked(!locked): uobj, pg */
   2206    1.7       mrg 
   2207  1.149  uebayasi 	/*
    2208  1.149  uebayasi 	 * verify that the page has not been released and re-verify
    2209  1.149  uebayasi 	 * that the amap slot is still free.   if there is a problem,
   2210  1.149  uebayasi 	 * we unlock and clean up.
   2211  1.149  uebayasi 	 */
   2212    1.7       mrg 
   2213  1.158  uebayasi 	if ((pg->flags & PG_RELEASED) != 0 ||
   2214  1.158  uebayasi 	    (locked && amap && amap_lookup(&ufi->entry->aref,
   2215  1.149  uebayasi 	      ufi->orig_rvaddr - ufi->entry->start))) {
   2216  1.149  uebayasi 		if (locked)
   2217  1.186     rmind 			uvmfault_unlockall(ufi, amap, NULL);
   2218  1.149  uebayasi 		locked = false;
   2219  1.149  uebayasi 	}
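                          	/* "locked == false" from here on means the fault is retried */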
   2220    1.7       mrg 
   2221  1.149  uebayasi 	/*
   2222  1.227        ad 	 * unbusy/release the page.
   2223  1.227        ad 	 */
   2224  1.227        ad 
   2225  1.227        ad 	if ((pg->flags & PG_RELEASED) == 0) {
   2226  1.227        ad 		pg->flags &= ~PG_BUSY;
   2227  1.227        ad 		uvm_pagelock(pg);
   2228  1.227        ad 		uvm_pagewakeup(pg);
   2229  1.227        ad 		uvm_pageunlock(pg);
   2230  1.227        ad 		UVM_PAGE_OWN(pg, NULL);
   2231  1.227        ad 	} else {
   2232  1.227        ad 		cpu_count(CPU_COUNT_FLTPGRELE, 1);
   2233  1.227        ad 		uvm_pagefree(pg);
   2234  1.227        ad 	}
   2235  1.227        ad 
   2236  1.227        ad 	/*
   2237  1.227        ad 	 * didn't get the lock?   retry.
   2238  1.149  uebayasi 	 */
   2239    1.7       mrg 
   2240  1.149  uebayasi 	if (locked == false) {
   2241  1.149  uebayasi 		UVMHIST_LOG(maphist,
   2242  1.149  uebayasi 		    "  wasn't able to relock after fault: retry",
   2243  1.149  uebayasi 		    0,0,0,0);
   2244  1.216        ad 		rw_exit(uobj->vmobjlock);
   2245  1.149  uebayasi 		return ERESTART;
   2246  1.149  uebayasi 	}
   2247    1.7       mrg 
   2248  1.149  uebayasi 	/*
    2249  1.227        ad 	 * we have the data in pg.  we are holding the object lock (so the page
   2250  1.149  uebayasi 	 * can't be released on us).
   2251  1.149  uebayasi 	 */
   2252    1.7       mrg 
   2253  1.227        ad 	/* locked: maps(read), amap(if !null), uobj */
   2254  1.148  uebayasi 
   2255  1.156  uebayasi 	*ruobj = uobj;
   2256  1.158  uebayasi 	*ruobjpage = pg;
   2257  1.148  uebayasi 	return 0;
   2258  1.148  uebayasi }
   2259  1.148  uebayasi 
   2260  1.173  uebayasi /*
   2261  1.173  uebayasi  * uvm_fault_lower_direct: fault lower center page
   2262  1.173  uebayasi  *
   2263  1.177      yamt  *	1. adjust flt->enter_prot.
   2264  1.173  uebayasi  *	2. if page is loaned, resolve.
   2265  1.173  uebayasi  */
   2266  1.173  uebayasi 
   2267  1.148  uebayasi int
   2268  1.163  uebayasi uvm_fault_lower_direct(
   2269  1.148  uebayasi 	struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
   2270  1.156  uebayasi 	struct uvm_object *uobj, struct vm_page *uobjpage)
   2271  1.148  uebayasi {
   2272  1.149  uebayasi 	struct vm_page *pg;
   2273  1.228     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
   2274  1.149  uebayasi 
   2275  1.149  uebayasi 	/*
    2276  1.149  uebayasi 	 * we are not promoting.   if the mapping is COW, ensure that we
   2277  1.149  uebayasi 	 * don't give more access than we should (e.g. when doing a read
   2278  1.149  uebayasi 	 * fault on a COPYONWRITE mapping we want to map the COW page in
   2279  1.149  uebayasi 	 * R/O even though the entry protection could be R/W).
   2280  1.149  uebayasi 	 *
   2281  1.149  uebayasi 	 * set "pg" to the page we want to map in (uobjpage, usually)
   2282  1.149  uebayasi 	 */
   2283    1.1       mrg 
   2284  1.213        ad 	cpu_count(CPU_COUNT_FLT_OBJ, 1);
   2285  1.149  uebayasi 	if (UVM_ET_ISCOPYONWRITE(ufi->entry) ||
   2286  1.149  uebayasi 	    UVM_OBJ_NEEDS_WRITEFAULT(uobjpage->uobject))
   2287  1.149  uebayasi 		flt->enter_prot &= ~VM_PROT_WRITE;
   2288  1.149  uebayasi 	pg = uobjpage;		/* map in the actual object */
   2289    1.7       mrg 
   2290  1.149  uebayasi 	KASSERT(uobjpage != PGO_DONTCARE);
   2291    1.7       mrg 
   2292  1.149  uebayasi 	/*
   2293  1.149  uebayasi 	 * we are faulting directly on the page.   be careful
   2294  1.149  uebayasi 	 * about writing to loaned pages...
   2295  1.149  uebayasi 	 */
   2296  1.149  uebayasi 
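                          	/* a non-zero loan_count means the page is loaned out */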
   2297  1.149  uebayasi 	if (uobjpage->loan_count) {
   2298  1.163  uebayasi 		uvm_fault_lower_direct_loan(ufi, flt, uobj, &pg, &uobjpage);
   2299  1.151  uebayasi 	}
   2300  1.151  uebayasi 	KASSERT(pg == uobjpage);
   2301  1.227        ad 	KASSERT((pg->flags & PG_BUSY) == 0);
   2302  1.183      yamt 	return uvm_fault_lower_enter(ufi, flt, uobj, NULL, pg);
   2303  1.151  uebayasi }
   2304  1.151  uebayasi 
   2305  1.173  uebayasi /*
   2306  1.173  uebayasi  * uvm_fault_lower_direct_loan: resolve loaned page.
   2307  1.173  uebayasi  *
   2308  1.177      yamt  *	1. if not cow'ing, adjust flt->enter_prot.
   2309  1.173  uebayasi  *	2. if cow'ing, break loan.
   2310  1.173  uebayasi  */
   2311  1.173  uebayasi 
   2312  1.151  uebayasi static int
   2313  1.163  uebayasi uvm_fault_lower_direct_loan(
   2314  1.151  uebayasi 	struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
   2315  1.177      yamt 	struct uvm_object *uobj, struct vm_page **rpg,
   2316  1.177      yamt 	struct vm_page **ruobjpage)
   2317  1.151  uebayasi {
   2318  1.152  uebayasi 	struct vm_amap * const amap = ufi->entry->aref.ar_amap;
   2319  1.152  uebayasi 	struct vm_page *pg;
   2320  1.152  uebayasi 	struct vm_page *uobjpage = *ruobjpage;
   2321  1.222        ad 	int error;
   2322  1.228     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
   2323  1.152  uebayasi 
   2324  1.152  uebayasi 	if (!flt->cow_now) {
    2325  1.152  uebayasi 		/* read fault: cap the protection at readonly */
   2327  1.152  uebayasi 		flt->enter_prot = flt->enter_prot & ~VM_PROT_WRITE;
   2328  1.152  uebayasi 	} else {
   2329  1.222        ad 		/*
   2330  1.222        ad 		 * write fault: must break the loan here.  to do this
   2331  1.222        ad 		 * we need a write lock on the object.
   2332  1.222        ad 		 */
   2333  1.222        ad 
   2334  1.222        ad 		error = uvm_fault_lower_upgrade(ufi, flt, amap, uobj, uobjpage);
   2335  1.222        ad 		if (error != 0) {
   2336  1.222        ad 			return error;
   2337  1.222        ad 		}
   2338  1.222        ad 		KASSERT(rw_write_held(uobj->vmobjlock));
   2339  1.152  uebayasi 
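                          		/*
                          		 * uvm_loanbreak() allocates a fresh page, copies the
                          		 * loaned page into it and installs it in the object;
                          		 * it returns NULL if no page could be allocated.
                          		 */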
   2340  1.152  uebayasi 		pg = uvm_loanbreak(uobjpage);
   2341  1.152  uebayasi 		if (pg == NULL) {
   2342  1.152  uebayasi 
   2343  1.186     rmind 			uvmfault_unlockall(ufi, amap, uobj);
   2344  1.152  uebayasi 			UVMHIST_LOG(maphist,
   2345  1.152  uebayasi 			  "  out of RAM breaking loan, waiting",
   2346  1.152  uebayasi 			  0,0,0,0);
   2347  1.213        ad 			cpu_count(CPU_COUNT_FLTNORAM, 1);
   2348  1.152  uebayasi 			uvm_wait("flt_noram4");
   2349  1.152  uebayasi 			return ERESTART;
   2350   1.69       chs 		}
   2351  1.152  uebayasi 		*rpg = pg;
   2352  1.152  uebayasi 		*ruobjpage = pg;
   2353  1.227        ad 
   2354  1.227        ad 		/*
   2355  1.227        ad 		 * drop ownership of page while still holding object lock,
   2356  1.227        ad 		 * which won't be dropped until the page is entered.
   2357  1.227        ad 		 */
   2358  1.227        ad 
   2359  1.227        ad 		uvm_pagelock(pg);
   2360  1.227        ad 		uvm_pagewakeup(pg);
   2361  1.227        ad 		uvm_pageunlock(pg);
   2362  1.227        ad 		pg->flags &= ~PG_BUSY;
   2363  1.227        ad 		UVM_PAGE_OWN(pg, NULL);
   2364  1.152  uebayasi 	}
   2365  1.152  uebayasi 	return 0;
   2366  1.148  uebayasi }
   2367  1.148  uebayasi 
   2368  1.173  uebayasi /*
   2369  1.173  uebayasi  * uvm_fault_lower_promote: promote lower page.
   2370  1.173  uebayasi  *
   2371  1.173  uebayasi  *	1. call uvmfault_promote.
   2372  1.173  uebayasi  *	2. fill in data.
   2373  1.173  uebayasi  *	3. if not ZFOD, dispose old page.
   2374  1.173  uebayasi  */
   2375  1.173  uebayasi 
   2376  1.148  uebayasi int
   2377  1.163  uebayasi uvm_fault_lower_promote(
   2378  1.148  uebayasi 	struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
   2379  1.156  uebayasi 	struct uvm_object *uobj, struct vm_page *uobjpage)
   2380  1.148  uebayasi {
   2381  1.149  uebayasi 	struct vm_amap * const amap = ufi->entry->aref.ar_amap;
   2382  1.149  uebayasi 	struct vm_anon *anon;
   2383  1.149  uebayasi 	struct vm_page *pg;
   2384  1.149  uebayasi 	int error;
   2385  1.228     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
   2386   1.63       chs 
   2387  1.186     rmind 	KASSERT(amap != NULL);
   2388  1.186     rmind 
   2389  1.222        ad 	/* promoting requires a write lock. */
   2390  1.222        ad 	error = uvm_fault_upper_upgrade(ufi, flt, amap, uobj);
   2391  1.222        ad 	if (error != 0) {
   2392  1.222        ad 		return error;
   2393  1.222        ad 	}
   2394  1.222        ad 	KASSERT(rw_write_held(amap->am_lock));
   2395  1.227        ad 	KASSERT(uobj == NULL ||
   2396  1.227        ad 	    rw_lock_op(uobj->vmobjlock) == flt->lower_lock_type);
   2397  1.222        ad 
   2398  1.149  uebayasi 	/*
    2399  1.186     rmind 	 * If we are going to promote the data to an anon, we
   2400  1.149  uebayasi 	 * allocate a blank anon here and plug it into our amap.
   2401  1.149  uebayasi 	 */
   2402  1.222        ad 	error = uvmfault_promote(ufi, NULL, uobjpage, &anon, &flt->anon_spare);
   2403  1.149  uebayasi 	switch (error) {
   2404  1.149  uebayasi 	case 0:
   2405  1.149  uebayasi 		break;
   2406  1.149  uebayasi 	case ERESTART:
   2407  1.149  uebayasi 		return ERESTART;
   2408  1.149  uebayasi 	default:
   2409  1.149  uebayasi 		return error;
   2410  1.149  uebayasi 	}
   2411  1.149  uebayasi 
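                          	/* uvmfault_promote() returned a new anon with a page attached */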
   2412  1.149  uebayasi 	pg = anon->an_page;
   2413  1.149  uebayasi 
   2414  1.149  uebayasi 	/*
   2415  1.186     rmind 	 * Fill in the data.
   2416  1.149  uebayasi 	 */
   2417  1.105      yamt 
   2418  1.149  uebayasi 	if (uobjpage != PGO_DONTCARE) {
   2419  1.213        ad 		cpu_count(CPU_COUNT_FLT_PRCOPY, 1);
   2420    1.1       mrg 
   2421    1.7       mrg 		/*
   2422  1.149  uebayasi 		 * promote to shared amap?  make sure all sharing
   2423  1.149  uebayasi 		 * procs see it
   2424    1.7       mrg 		 */
   2425    1.7       mrg 
   2426  1.149  uebayasi 		if ((amap_flags(amap) & AMAP_SHARED) != 0) {
   2427  1.149  uebayasi 			pmap_page_protect(uobjpage, VM_PROT_NONE);
   2428    1.7       mrg 			/*
   2429  1.149  uebayasi 			 * XXX: PAGE MIGHT BE WIRED!
   2430    1.7       mrg 			 */
   2431  1.149  uebayasi 		}
   2432   1.69       chs 
   2433  1.149  uebayasi 		UVMHIST_LOG(maphist,
   2434  1.217       rin 		    "  promote uobjpage %#jx to anon/page %#jx/%#jx",
   2435  1.201  pgoyette 		    (uintptr_t)uobjpage, (uintptr_t)anon, (uintptr_t)pg, 0);
   2436   1.63       chs 
   2437  1.149  uebayasi 	} else {
   2438  1.213        ad 		cpu_count(CPU_COUNT_FLT_PRZERO, 1);
   2439    1.7       mrg 
   2440  1.149  uebayasi 		/*
   2441  1.149  uebayasi 		 * Page is zero'd and marked dirty by
   2442  1.149  uebayasi 		 * uvmfault_promote().
   2443  1.149  uebayasi 		 */
   2444   1.52       chs 
   2445  1.217       rin 		UVMHIST_LOG(maphist,"  zero fill anon/page %#jx/%#jx",
   2446  1.201  pgoyette 		    (uintptr_t)anon, (uintptr_t)pg, 0, 0);
   2447  1.149  uebayasi 	}
   2448  1.148  uebayasi 
   2449  1.183      yamt 	return uvm_fault_lower_enter(ufi, flt, uobj, anon, pg);
   2450  1.148  uebayasi }
   2451  1.148  uebayasi 
   2452  1.173  uebayasi /*
   2453  1.183      yamt  * uvm_fault_lower_enter: enter h/w mapping of lower page or anon page promoted
   2454  1.183      yamt  * from the lower page.
   2455  1.173  uebayasi  */
   2456  1.173  uebayasi 
   2457  1.148  uebayasi int
   2458  1.163  uebayasi uvm_fault_lower_enter(
   2459  1.177      yamt 	struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
   2460  1.148  uebayasi 	struct uvm_object *uobj,
   2461  1.183      yamt 	struct vm_anon *anon, struct vm_page *pg)
   2462  1.148  uebayasi {
   2463  1.148  uebayasi 	struct vm_amap * const amap = ufi->entry->aref.ar_amap;
   2464  1.215        ad 	const bool readonly = uvm_pagereadonly_p(pg);
   2465  1.148  uebayasi 	int error;
   2466  1.228     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
   2467    1.7       mrg 
   2468    1.7       mrg 	/*
   2469  1.186     rmind 	 * Locked:
   2470  1.186     rmind 	 *
   2471  1.186     rmind 	 *	maps(read), amap(if !null), uobj(if !null),
   2472  1.186     rmind 	 *	anon(if !null), pg(if anon), unlock_uobj(if !null)
   2473    1.7       mrg 	 *
   2474  1.222        ad 	 * anon must be write locked (promotion).  uobj can be either.
   2475  1.222        ad 	 *
   2476  1.186     rmind 	 * Note: pg is either the uobjpage or the new page in the new anon.
   2477    1.7       mrg 	 */
   2478  1.227        ad 
   2479  1.222        ad 	KASSERT(amap == NULL ||
   2480  1.222        ad 	    rw_lock_op(amap->am_lock) == flt->upper_lock_type);
   2481  1.227        ad 	KASSERT(uobj == NULL ||
   2482  1.227        ad 	    rw_lock_op(uobj->vmobjlock) == flt->lower_lock_type);
   2483  1.186     rmind 	KASSERT(anon == NULL || anon->an_lock == amap->am_lock);
   2484  1.227        ad 
   2485  1.227        ad 	/*
    2486  1.227        ad 	 * note that pg can't be PG_RELEASED or PG_BUSY because we have
    2487  1.227        ad 	 * not dropped the object lock since the last time we checked.
   2488  1.227        ad 	 */
   2489  1.227        ad 
   2490  1.227        ad 	KASSERT((pg->flags & PG_RELEASED) == 0);
   2491  1.227        ad 	KASSERT((pg->flags & PG_BUSY) == 0);
   2492    1.7       mrg 
   2493    1.7       mrg 	/*
   2494    1.7       mrg 	 * all resources are present.   we can now map it in and free our
   2495    1.7       mrg 	 * resources.
   2496    1.7       mrg 	 */
   2497    1.7       mrg 
   2498    1.7       mrg 	UVMHIST_LOG(maphist,
   2499  1.201  pgoyette 	    "  MAPPING: case2: pm=%#jx, va=%#jx, pg=%#jx, promote=%jd",
   2500  1.201  pgoyette 	    (uintptr_t)ufi->orig_map->pmap, ufi->orig_rvaddr,
   2501  1.201  pgoyette 	    (uintptr_t)pg, flt->promote);
   2502  1.215        ad 	KASSERTMSG((flt->access_type & VM_PROT_WRITE) == 0 || !readonly,
   2503  1.215        ad 	    "promote=%u cow_now=%u access_type=%x enter_prot=%x cow=%u "
   2504  1.215        ad 	    "entry=%p map=%p orig_rvaddr=%p pg=%p",
   2505  1.215        ad 	    flt->promote, flt->cow_now, flt->access_type, flt->enter_prot,
   2506  1.215        ad 	    UVM_ET_ISCOPYONWRITE(ufi->entry), ufi->entry, ufi->orig_map,
   2507  1.215        ad 	    (void *)ufi->orig_rvaddr, pg);
   2508  1.215        ad 	KASSERT((flt->access_type & VM_PROT_WRITE) == 0 || !readonly);
   2509  1.177      yamt 	if (pmap_enter(ufi->orig_map->pmap, ufi->orig_rvaddr,
   2510  1.177      yamt 	    VM_PAGE_TO_PHYS(pg),
   2511  1.215        ad 	    readonly ? flt->enter_prot & ~VM_PROT_WRITE : flt->enter_prot,
   2512  1.177      yamt 	    flt->access_type | PMAP_CANFAIL |
   2513  1.177      yamt 	    (flt->wire_mapping ? PMAP_WIRED : 0)) != 0) {
   2514   1.52       chs 
   2515   1.46   thorpej 		/*
   2516   1.46   thorpej 		 * No need to undo what we did; we can simply think of
   2517   1.46   thorpej 		 * this as the pmap throwing away the mapping information.
   2518   1.46   thorpej 		 *
   2519   1.46   thorpej 		 * We do, however, have to go through the ReFault path,
   2520   1.46   thorpej 		 * as the map may change while we're asleep.
   2521   1.46   thorpej 		 */
   2522   1.52       chs 
   2523  1.183      yamt 		/*
   2524  1.183      yamt 		 * ensure that the page is queued in the case that
   2525  1.183      yamt 		 * we just promoted the page.
   2526  1.183      yamt 		 */
   2527  1.183      yamt 
   2528  1.227        ad 		if (anon != NULL) {
   2529  1.222        ad 			uvm_pagelock(pg);
   2530  1.222        ad 			uvm_pageenqueue(pg);
   2531  1.222        ad 			uvm_pagewakeup(pg);
   2532  1.222        ad 			uvm_pageunlock(pg);
   2533  1.222        ad 		}
   2534  1.171  uebayasi 
   2535  1.186     rmind 		uvmfault_unlockall(ufi, amap, uobj);
   2536   1.92      yamt 		if (!uvm_reclaimable()) {
   2537   1.46   thorpej 			UVMHIST_LOG(maphist,
   2538   1.46   thorpej 			    "<- failed.  out of VM",0,0,0,0);
   2539   1.46   thorpej 			/* XXX instrumentation */
   2540  1.106      yamt 			error = ENOMEM;
   2541  1.138  uebayasi 			return error;
   2542   1.46   thorpej 		}
   2543   1.46   thorpej 		/* XXX instrumentation */
   2544   1.46   thorpej 		uvm_wait("flt_pmfail2");
   2545  1.139  uebayasi 		return ERESTART;
   2546   1.46   thorpej 	}
   2547    1.1       mrg 
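                          	/*
                          	 * success: update the page queues, flush deferred pmap
                          	 * operations and drop all locks.
                          	 */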
   2548  1.177      yamt 	uvm_fault_lower_done(ufi, flt, uobj, pg);
   2549  1.175     rmind 	pmap_update(ufi->orig_map->pmap);
   2550  1.186     rmind 	uvmfault_unlockall(ufi, amap, uobj);
   2551  1.175     rmind 
   2552  1.169  uebayasi 	UVMHIST_LOG(maphist, "<- done (SUCCESS!)",0,0,0,0);
   2553  1.169  uebayasi 	return 0;
   2554  1.148  uebayasi }
   2555  1.148  uebayasi 
   2556  1.173  uebayasi /*
   2557  1.173  uebayasi  * uvm_fault_lower_done: queue lower center page.
   2558  1.173  uebayasi  */
   2559  1.173  uebayasi 
   2560  1.169  uebayasi void
   2561  1.163  uebayasi uvm_fault_lower_done(
   2562  1.177      yamt 	struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
   2563  1.177      yamt 	struct uvm_object *uobj, struct vm_page *pg)
   2564  1.148  uebayasi {
   2565  1.174     rmind 
   2566  1.228     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
   2567  1.148  uebayasi 
   2568  1.146  uebayasi 	if (flt->wire_paging) {
   2569  1.227        ad 		uvm_pagelock(pg);
   2570    1.8     chuck 		uvm_pagewire(pg);
   2571  1.227        ad 		uvm_pageunlock(pg);
   2572  1.212        ad 		if (pg->flags & PG_AOBJ) {
   2573   1.29       chs 
   2574   1.29       chs 			/*
   2575   1.29       chs 			 * since the now-wired page cannot be paged out,
   2576   1.29       chs 			 * release its swap resources for others to use.
   2577  1.215        ad 			 * since an aobj page with no swap cannot be clean,
   2578  1.215        ad 			 * mark it dirty now.
   2579  1.227        ad 			 *
   2580  1.227        ad 			 * use pg->uobject here.  if the page is from a
   2581  1.227        ad 			 * tmpfs vnode, the pages are backed by its UAO and
   2582  1.227        ad 			 * not the vnode.
   2583   1.29       chs 			 */
   2584   1.29       chs 
   2585  1.113  christos 			KASSERT(uobj != NULL);
   2586  1.227        ad 			KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
   2587  1.215        ad 			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
   2588  1.227        ad 			uao_dropswap(pg->uobject, pg->offset >> PAGE_SHIFT);
   2589   1.22       chs 		}
   2590  1.227        ad 	} else if (uvmpdpol_pageactivate_p(pg)) {
   2591  1.227        ad 		/*
    2592  1.227        ad 		 * don't re-activate the page unless needed, to avoid
    2593  1.227        ad 		 * false sharing on multiprocessor systems.
   2594  1.227        ad 		 */
   2595  1.227        ad 
   2596  1.227        ad 		uvm_pagelock(pg);
   2597    1.7       mrg 		uvm_pageactivate(pg);
   2598  1.227        ad 		uvm_pageunlock(pg);
   2599  1.174     rmind 	}
   2600    1.1       mrg }
   2601    1.1       mrg 
   2602  1.110  drochner 
   2603    1.1       mrg /*
   2604    1.1       mrg  * uvm_fault_wire: wire down a range of virtual addresses in a map.
   2605    1.1       mrg  *
   2606   1.36   thorpej  * => map may be read-locked by caller, but MUST NOT be write-locked.
   2607   1.36   thorpej  * => if map is read-locked, any operations which may cause map to
   2608   1.36   thorpej  *	be write-locked in uvm_fault() must be taken care of by
   2609   1.36   thorpej  *	the caller.  See uvm_map_pageable().
   2610    1.1       mrg  */
   2611    1.1       mrg 
   2612    1.7       mrg int
   2613   1.95   thorpej uvm_fault_wire(struct vm_map *map, vaddr_t start, vaddr_t end,
   2614  1.130  uebayasi     vm_prot_t access_type, int maxprot)
   2615    1.7       mrg {
   2616   1.12       eeh 	vaddr_t va;
   2617   1.58       chs 	int error;
   2618    1.7       mrg 
   2619    1.7       mrg 	/*
    2620   1.47       chs 	 * now fault it in, one page at a time.   if a fault fails we have
    2621   1.63       chs 	 * to undo what we have done.   note that VM_PROT_NONE is replaced
    2622   1.47       chs 	 * with the entry's max protection when UVM_FAULT_MAXPROT is set.
   2623    1.7       mrg 	 */
   2624    1.1       mrg 
   2625   1.65       chs 	/*
   2626   1.65       chs 	 * XXX work around overflowing a vaddr_t.  this prevents us from
   2627   1.65       chs 	 * wiring the last page in the address space, though.
   2628   1.65       chs 	 */
   2629   1.65       chs 	if (start > end) {
   2630   1.65       chs 		return EFAULT;
   2631   1.65       chs 	}
   2632   1.65       chs 
   2633  1.163  uebayasi 	for (va = start; va < end; va += PAGE_SIZE) {
   2634  1.110  drochner 		error = uvm_fault_internal(map, va, access_type,
   2635  1.177      yamt 		    (maxprot ? UVM_FAULT_MAXPROT : 0) | UVM_FAULT_WIRE);
   2636   1.58       chs 		if (error) {
   2637    1.7       mrg 			if (va != start) {
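                          				/* unwind: unwire what we wired so far */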
   2638   1.31   thorpej 				uvm_fault_unwire(map, start, va);
   2639    1.7       mrg 			}
   2640   1.58       chs 			return error;
   2641    1.7       mrg 		}
   2642    1.7       mrg 	}
   2643   1.58       chs 	return 0;
   2644    1.1       mrg }
   2645    1.1       mrg 
   2646    1.1       mrg /*
   2647    1.1       mrg  * uvm_fault_unwire(): unwire range of virtual space.
   2648    1.1       mrg  */
   2649    1.1       mrg 
   2650    1.7       mrg void
   2651   1.95   thorpej uvm_fault_unwire(struct vm_map *map, vaddr_t start, vaddr_t end)
   2652   1.36   thorpej {
   2653   1.36   thorpej 	vm_map_lock_read(map);
   2654   1.36   thorpej 	uvm_fault_unwire_locked(map, start, end);
   2655   1.36   thorpej 	vm_map_unlock_read(map);
   2656   1.36   thorpej }
   2657   1.36   thorpej 
   2658   1.36   thorpej /*
   2659   1.36   thorpej  * uvm_fault_unwire_locked(): the guts of uvm_fault_unwire().
   2660   1.36   thorpej  *
   2661   1.36   thorpej  * => map must be at least read-locked.
   2662   1.36   thorpej  */
   2663   1.36   thorpej 
   2664   1.36   thorpej void
   2665   1.95   thorpej uvm_fault_unwire_locked(struct vm_map *map, vaddr_t start, vaddr_t end)
   2666    1.7       mrg {
   2667  1.186     rmind 	struct vm_map_entry *entry, *oentry;
   2668   1.31   thorpej 	pmap_t pmap = vm_map_pmap(map);
   2669   1.42   thorpej 	vaddr_t va;
   2670   1.12       eeh 	paddr_t pa;
   2671   1.42   thorpej 	struct vm_page *pg;
   2672   1.31   thorpej 
   2673    1.7       mrg 	/*
   2674    1.7       mrg 	 * we assume that the area we are unwiring has actually been wired
   2675    1.7       mrg 	 * in the first place.   this means that we should be able to extract
   2676    1.7       mrg 	 * the PAs from the pmap.   we also lock out the page daemon so that
   2677    1.7       mrg 	 * we can call uvm_pageunwire.
   2678    1.7       mrg 	 */
   2679   1.37   thorpej 
   2680   1.37   thorpej 	/*
   2681   1.37   thorpej 	 * find the beginning map entry for the region.
   2682   1.37   thorpej 	 */
   2683   1.74       chs 
   2684  1.232  riastrad 	KASSERT(start >= vm_map_min(map));
   2685  1.232  riastrad 	KASSERT(end <= vm_map_max(map));
   2686  1.119   thorpej 	if (uvm_map_lookup_entry(map, start, &entry) == false)
   2687   1.37   thorpej 		panic("uvm_fault_unwire_locked: address not in map");
   2688   1.37   thorpej 
   2689  1.186     rmind 	oentry = NULL;
   2690   1.69       chs 	for (va = start; va < end; va += PAGE_SIZE) {
   2691   1.42   thorpej 
   2692   1.42   thorpej 		/*
   2693   1.74       chs 		 * find the map entry for the current address.
   2694   1.42   thorpej 		 */
   2695   1.56       chs 
   2696   1.56       chs 		KASSERT(va >= entry->start);
   2697   1.74       chs 		while (va >= entry->end) {
   2698  1.232  riastrad 			KASSERT(entry->next != &map->header);
   2699  1.232  riastrad 			KASSERT(entry->next->start <= entry->end);
   2700   1.42   thorpej 			entry = entry->next;
   2701   1.42   thorpej 		}
   2702   1.37   thorpej 
   2703   1.42   thorpej 		/*
   2704  1.186     rmind 		 * lock it.
   2705  1.186     rmind 		 */
   2706  1.186     rmind 
   2707  1.186     rmind 		if (entry != oentry) {
   2708  1.186     rmind 			if (oentry != NULL) {
   2709  1.186     rmind 				uvm_map_unlock_entry(oentry);
   2710  1.186     rmind 			}
   2711  1.216        ad 			uvm_map_lock_entry(entry, RW_WRITER);
   2712  1.186     rmind 			oentry = entry;
   2713  1.186     rmind 		}
   2714  1.186     rmind 
   2715  1.186     rmind 		/*
    2716   1.42   thorpej 		 * skip unmapped addresses; if no longer wired, tell the pmap.
   2717   1.42   thorpej 		 */
   2718   1.74       chs 
   2719  1.207       chs 		if (!pmap_extract(pmap, va, &pa))
   2720  1.207       chs 			continue;
   2721  1.207       chs 
   2722   1.42   thorpej 		if (VM_MAPENT_ISWIRED(entry) == 0)
   2723   1.42   thorpej 			pmap_unwire(pmap, va);
   2724   1.42   thorpej 
   2725   1.42   thorpej 		pg = PHYS_TO_VM_PAGE(pa);
   2726  1.214        ad 		if (pg) {
   2727  1.214        ad 			uvm_pagelock(pg);
   2728   1.42   thorpej 			uvm_pageunwire(pg);
   2729  1.214        ad 			uvm_pageunlock(pg);
   2730  1.214        ad 		}
   2731    1.7       mrg 	}
   2732    1.1       mrg 
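                          	/* entry == oentry here, so this unlocks the last locked entry */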
   2733  1.186     rmind 	if (oentry != NULL) {
   2734  1.186     rmind 		uvm_map_unlock_entry(entry);
   2735  1.186     rmind 	}
   2736    1.1       mrg }
   2737