/*	$NetBSD: uvm_pager.c,v 1.4 1998/02/08 06:15:59 thorpej Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *	   >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <sys/syscallargs.h>

#define UVM_PAGER
#include <uvm/uvm.h>

UVMHIST_DECL(maphist);

/*
 * list of uvm pagers in the system
 */

extern struct uvm_pagerops uvm_deviceops;
extern struct uvm_pagerops uvm_vnodeops;

struct uvm_pagerops *uvmpagerops[] = {
  &uvm_deviceops,
  &uvm_vnodeops,
};
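
/*
 * Example (illustrative sketch only, never compiled): a new pager would
 * be made known to UVM by adding its ops to the array above, after which
 * uvm_pager_init() below calls its pgo_init() hook at boot.
 * "uvm_mypagerops" is a hypothetical name.
 */
#if 0
extern struct uvm_pagerops uvm_mypagerops;

struct uvm_pagerops *uvmpagerops[] = {
  &uvm_deviceops,
  &uvm_vnodeops,
  &uvm_mypagerops,		/* new pager enrolled here */
};
#endif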

/*
 * the pager map: provides KVA for I/O
 */

#define PAGER_MAP_SIZE       (4 * 1024 * 1024)
vm_map_t pager_map;		/* XXX */
simple_lock_data_t pager_map_wanted_lock;
boolean_t pager_map_wanted;	/* locked by pager map */


/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init()
{
  int lcv;

  /*
   * init pager map
   */

  pager_map = uvm_km_suballoc(kernel_map, &uvm.pager_sva, &uvm.pager_eva,
			PAGER_MAP_SIZE, FALSE, FALSE, NULL);
  simple_lock_init(&pager_map_wanted_lock);
  pager_map_wanted = FALSE;

  /*
   * init ASYNC I/O queue
   */

  TAILQ_INIT(&uvm.aio_done);

  /*
   * call pager init functions
   */
  for (lcv = 0 ;
       lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *) ; lcv++) {
    if (uvmpagerops[lcv]->pgo_init)
      uvmpagerops[lcv]->pgo_init();
  }
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_enter() to put the mappings in by hand.
 */

vm_offset_t
uvm_pagermapin(pps, npages, aiop, waitf)
	struct vm_page **pps;
	int npages;
	struct uvm_aiodesc **aiop;	/* OUT */
	int waitf;
{
  vm_size_t size;
  vm_offset_t kva;
  struct uvm_aiodesc *aio;
#if !defined(PMAP_NEW)
  vm_offset_t cva;
  struct vm_page *pp;
#endif
  UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

  UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d, aiop=0x%x, waitf=%d)",
	      pps, npages, aiop, waitf);

ReStart:
  if (aiop) {
    MALLOC(aio, struct uvm_aiodesc *, sizeof(*aio), M_TEMP, waitf);
    if (aio == NULL)
      return(0);
    *aiop = aio;
  } else {
    aio = NULL;
  }

  size = npages * PAGE_SIZE;
  kva = 0;			/* let system choose VA */

  if (uvm_map(pager_map, &kva, size, NULL,
	      UVM_UNKNOWN_OFFSET, UVM_FLAG_NOMERGE) != KERN_SUCCESS) {
    if (waitf == M_NOWAIT) {
      if (aio)
	FREE(aio, M_TEMP);
      UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
      return(0);
    }
    simple_lock(&pager_map_wanted_lock);
    pager_map_wanted = TRUE;
    UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
    UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
				"pager_map",0);
    goto ReStart;
  }

#if defined(PMAP_NEW)
  /*
   * XXX: (ab)using the pmap module to store state info for us.
   * (pmap stores the PAs... we fetch them back later and convert back
   * to pages with PHYS_TO_VM_PAGE).
   */
  pmap_kenter_pgs(kva, pps, npages);

#else /* PMAP_NEW */

  /* got it */
  for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
    pp = *pps++;
#ifdef DEBUG
    if ((pp->flags & PG_BUSY) == 0)
      panic("uvm_pagermapin: page not busy");
#endif

    pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),
	       VM_PROT_DEFAULT, TRUE);
  }

#endif /* PMAP_NEW */

  UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
  return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove our KVA mappings by hand and then remove the map entry
 * (waking up anyone waiting for pager_map space).
 */

void
uvm_pagermapout(kva, npages)
	vm_offset_t kva;
	int npages;
{
  vm_size_t size = npages * PAGE_SIZE;
  vm_map_entry_t entries;
  UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

  UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

  /*
   * duplicate uvm_unmap, but add in pager_map_wanted handling.
   */

  vm_map_lock(pager_map);
  (void) uvm_unmap_remove(pager_map, kva, kva + size, 0, &entries);
  simple_lock(&pager_map_wanted_lock);
  if (pager_map_wanted) {
    pager_map_wanted = FALSE;
    wakeup(pager_map);
  }
  simple_unlock(&pager_map_wanted_lock);
  vm_map_unlock(pager_map);
  if (entries)
    uvm_unmap_detach(entries, 0);

  UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}
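
/*
 * Example (illustrative sketch only, never compiled): how a pager's
 * synchronous put routine might use the uvm_pagermapin/uvm_pagermapout
 * pair above.  "mypager_device_rw" is a hypothetical device I/O routine;
 * error handling is schematic.
 */
#if 0
static int
mypager_example_put(pps, npages)
	struct vm_page **pps;
	int npages;
{
  vm_offset_t kva;
  int error;

  kva = uvm_pagermapin(pps, npages, NULL, M_WAITOK);	/* sync: no aio */
  if (kva == 0)
    return(VM_PAGER_AGAIN);		/* paranoia: no KVA available */
  error = mypager_device_rw(kva, npages * PAGE_SIZE);
  uvm_pagermapout(kva, npages);		/* frees KVA, wakes waiters */
  return(error ? VM_PAGER_ERROR : VM_PAGER_OK);
}
#endif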

/*
 * uvm_mk_pcluster
 *
 * generic "make 'pager put' cluster" function.  a pager can either
 * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this
 * generic function, or [3] set it to a pager-specific function.
 *
 * => caller must lock object _and_ pagequeues (since we need to look
 *    at active vs. inactive bits, etc.)
 * => caller must make center page busy and write-protect it
 * => we mark all cluster pages busy for the caller
 * => the caller must unbusy all pages (and check wanted/released
 *    status if it drops the object lock)
 * => flags:
 *      PGO_ALLPAGES:  all pages in object are valid targets
 *      !PGO_ALLPAGES: use "lo" and "hi" to limit range of cluster
 *      PGO_DOACTCLUST: include active pages in cluster.
 *        NOTE: the caller should clear PG_CLEANCHK bits if PGO_DOACTCLUST.
 *              PG_CLEANCHK is only a hint, but clearing will help reduce
 *		the number of calls we make to the pmap layer.
 */

struct vm_page **
uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
	struct uvm_object *uobj;	/* IN */
	struct vm_page **pps, *center;  /* IN/OUT, IN */
	int *npages, flags;		/* IN/OUT, IN */
	vm_offset_t mlo, mhi;		/* IN (if !PGO_ALLPAGES) */
{
  struct vm_page **ppsp, *pclust;
  vm_offset_t lo, hi, curoff;
  int center_idx, forward;
  UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist);

  /*
   * center page should already be busy and write protected.  XXX:
   * suppose page is wired?  if we lock, then a process could
   * fault/block on it.  if we don't lock, a process could write the
   * pages in the middle of an I/O.  (consider an msync()).  let's
   * lock it for now (better to delay than corrupt data?).
   */

  /*
   * get cluster boundaries, check sanity, and apply our limits as well.
   */

  uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
  if ((flags & PGO_ALLPAGES) == 0) {
    if (lo < mlo)
      lo = mlo;
    if (hi > mhi)
      hi = mhi;
  }
  if ((hi - lo) / PAGE_SIZE > *npages) {	/* pps too small, bail out! */
#ifdef DIAGNOSTIC
    printf("uvm_mk_pcluster: provided page array too small (fixed)\n");
#endif
    pps[0] = center;
    *npages = 1;
    return(pps);
  }

  /*
   * now determine the center and attempt to cluster around the
   * edges
   */

  center_idx = (center->offset - lo) / PAGE_SIZE;
  pps[center_idx] = center;	/* plug in the center page */
  ppsp = &pps[center_idx];
  *npages = 1;

  /*
   * attempt to cluster around the left [backward], and then
   * the right side [forward].
   *
   * note that for inactive pages (pages that have been deactivated)
   * there are no valid mappings and PG_CLEAN should be up to date.
   * [i.e. there is no need to query the pmap with pmap_is_modified
   * since there are no mappings].
   */

  for (forward  = 0 ; forward <= 1 ; forward++) {

    /* step one page back [forward == 0] or ahead [forward == 1] */
    curoff = center->offset + (forward ? PAGE_SIZE : -PAGE_SIZE);
    for ( ; (forward == 0 && curoff >= lo) || (forward && curoff < hi) ;
	  curoff += (forward ? PAGE_SIZE : -PAGE_SIZE)) {

      pclust = uvm_pagelookup(uobj, curoff);	/* lookup page */
      if (pclust == NULL)
	break;			/* no page */
      /* handle active pages */
      /* NOTE: inactive pages don't have pmap mappings */
      if ((pclust->pqflags & PQ_INACTIVE) == 0) {
	if ((flags & PGO_DOACTCLUST) == 0)
	  break;		/* don't want mapped pages at all */
	/* make sure "clean" bit is sync'd */
	if ((pclust->flags & PG_CLEANCHK) == 0) {
	  if ((pclust->flags & (PG_CLEAN|PG_BUSY)) == PG_CLEAN &&
	      pmap_is_modified(PMAP_PGARG(pclust)))
	    pclust->flags &= ~PG_CLEAN;
	  pclust->flags |= PG_CLEANCHK;		/* now checked */
	}
      }
      /* is page available for cleaning and does it need it? */
      if ((pclust->flags & (PG_CLEAN|PG_BUSY)) != 0)
	break;		/* page is already clean or is busy */
      /* yes!   enroll the page in our array */
      pclust->flags |= PG_BUSY;		/* busy! */
      UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");
      /* XXX: protect wired page?   see above comment. */
      pmap_page_protect(PMAP_PGARG(pclust), VM_PROT_READ);
      if (!forward) {
	ppsp--;			/* back up one page */
	*ppsp = pclust;
      } else {
	ppsp[*npages] = pclust; /* move forward one page */
      }
      *npages = *npages + 1;
    }

  }

  /*
   * done!  return the cluster array to the caller!!!
   */

  UVMHIST_LOG(maphist, "<- done",0,0,0,0);
  return(ppsp);
}
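
/*
 * Example (illustrative sketch only, never compiled): caller-side use of
 * a pager's pgo_mk_pcluster hook, mirroring what uvm_pager_put() does
 * below.  The array size 32 is arbitrary for the sketch; "uobj", "pg",
 * "lo", and "hi" are assumed set up per the locking rules above.
 */
#if 0
  struct vm_page *pps[32], **ppsp;
  int npages;

  /* caller: uobj and page queues locked, pg busy + write-protected */
  npages = 32;				/* capacity of pps[] */
  if (uobj->pgops->pgo_mk_pcluster) {
    ppsp = uobj->pgops->pgo_mk_pcluster(uobj, pps, &npages, pg,
					PGO_DOACTCLUST, lo, hi);
    /* on return ppsp[0..npages-1] are busy and include pg */
  } else {
    pps[0] = pg;			/* this pager never clusters */
    ppsp = pps;
    npages = 1;
  }
#endif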


/*
 * uvm_shareprot: generic share protect routine
 *
 * => caller must lock map entry's map
 * => caller must lock object pointed to by map entry
 */

void
uvm_shareprot(entry, prot)
	vm_map_entry_t entry;
	vm_prot_t prot;
{
  struct uvm_object *uobj = entry->object.uvm_obj;
  struct vm_page *pp;
  vm_offset_t start, stop;
  UVMHIST_FUNC("uvm_shareprot"); UVMHIST_CALLED(maphist);

  if (UVM_ET_ISMAP(entry))
    panic("uvm_shareprot: non-object attached");

  start = entry->offset;
  stop = start + (entry->end - entry->start);

  /*
   * traverse list of pages in object.   if a page is in range,
   * reduce its protection with pmap_page_protect.
   */

  for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = pp->listq.tqe_next) {
    if (pp->offset >= start && pp->offset < stop)
      pmap_page_protect(PMAP_PGARG(pp), prot);
  }
  UVMHIST_LOG(maphist, "<- done",0,0,0,0);
}

/*
 * uvm_pager_put: high level pageout routine
 *
 * we want to pageout page "pg" to backing store, clustering if
 * possible.
 *
 * => page queues must be locked by caller
 * => if page is not swap-backed, then "uobj" points to the object
 *	backing it.   this object should be locked by the caller.
 * => if page is swap-backed, then "uobj" should be NULL.
 * => "pg" should be PG_BUSY (by caller), and !PG_CLEAN
 *    for swap-backed memory, "pg" can be NULL if there is no page
 *    of interest [sometimes the case for the pagedaemon]
 * => "ppsp_ptr" should point to an array of npages vm_page pointers
 *	for possible cluster building
 * => flags (first two for non-swap-backed pages)
 *	PGO_ALLPAGES: all pages in uobj are valid targets
 *	PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets
 *	PGO_SYNCIO: do SYNC I/O (no async)
 *	PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O
 * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range
 *		  if (!uobj) start is the (daddr_t) of the starting swapblk
 * => return state:
 *	1. we return the VM_PAGER status code of the pageout
 *	2. we return with the page queues unlocked
 *	3. if (uobj != NULL) [!swap_backed] we return with
 *		uobj locked _only_ if PGO_PDFREECLUST is set
 *		AND result != VM_PAGER_PEND.   in all other cases
 *		we return with uobj unlocked.   [this is a hack
 *		that allows the pagedaemon to save one lock/unlock
 *		pair in the !swap_backed case since we have to
 *		lock the uobj to drop the cluster anyway]
 *	4. on errors we always drop the cluster.   thus, if we return
 *		!PEND, !OK, then the caller only has to worry about
 *		un-busying the main page (not the cluster pages).
 *	5. on success, if !PGO_PDFREECLUST, we return the cluster
 *		with all pages busy (caller must un-busy and check
 *		wanted/released flags).
 */

int
uvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, ***ppsp_ptr;/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;			/* IN */
	vm_offset_t start, stop;	/* IN, IN */
{
  int result;
  daddr_t swblk;
  struct vm_page **ppsp = *ppsp_ptr;

  /*
   * note that uobj is null if we are doing a swap-backed pageout.
   * note that uobj is !null if we are doing normal object pageout.
   * note that the page queues must be locked to cluster.
   */

  if (uobj) {	/* if !swap-backed */

    /*
     * attempt to build a cluster for pageout using the object's
     * make-put-cluster function (if it has one).
     */

    if (uobj->pgops->pgo_mk_pcluster) {
      ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp, npages, pg, flags,
					  start, stop);
      *ppsp_ptr = ppsp;  /* update caller's pointer */
    } else {
      ppsp[0] = pg;
      *npages = 1;
    }

    swblk = 0;		/* XXX: keep gcc happy */

  } else {

    /*
     * for swap-backed pageout, the caller (the pagedaemon) has already
     * built the cluster for us.   the starting swap block we are writing
     * to has been passed in as "start."   "pg" could be NULL if there
     * is no page we are especially interested in (in which case the
     * whole cluster gets dropped in the event of an error or a sync
     * "done").
     */
    swblk = (daddr_t) start;
    /* ppsp and npages should be ok */
  }

  /* now that we've clustered we can unlock the page queues */
  uvm_unlock_pageq();

  /*
   * now attempt the I/O.   if we have a failure and we are
   * clustered, we will drop the cluster and try again.
   */

ReTry:
  if (uobj) {
    /* object is locked */
    result = uobj->pgops->pgo_put(uobj, ppsp, *npages, flags & PGO_SYNCIO);
    /* object is now unlocked */
  } else {
    /* nothing locked */
    result = uvm_swap_put(swblk, ppsp, *npages, flags & PGO_SYNCIO);
    /* nothing locked */
  }

  /*
   * we have attempted the I/O.
   *
   * if the I/O was a success then:
   * 	if !PGO_PDFREECLUST, we return the cluster to the
   *		caller (who must un-busy all pages)
   *	else we un-busy cluster pages for the pagedaemon
   *
   * if I/O is pending (async i/o) then we return the pending code.
   * [in this case the async i/o done function must clean up when
   *  i/o is done...]
   */

  if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
    if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
      /*
       * drop cluster and relock object (only if I/O is not pending)
       */
      if (uobj)
	simple_lock(&uobj->vmobjlock);	/* required for dropcluster */
      if (*npages > 1 || pg == NULL)
	uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_PDFREECLUST, 0);
      /* if (uobj): object still locked, as per return-state item #3 */
    }
    return(result);
  }

  /*
   * a pager error occurred.    if we have clustered, we drop the
   * cluster and try again.
   */

  if (*npages > 1 || pg == NULL) {
    if (uobj)
      simple_lock(&uobj->vmobjlock);
    uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP, swblk);
    if (pg != NULL)
      goto ReTry;
  }

  /*
   * a pager error occurred (even after dropping the cluster, if there
   * was one).    give up!   the caller only has one page ("pg")
   * to worry about.
   */

  if (uobj && (flags & PGO_PDFREECLUST) != 0)
    simple_lock(&uobj->vmobjlock);
  return(result);
}
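
/*
 * Example (illustrative sketch only, never compiled): a pagedaemon-style
 * pageout of an object-backed page via uvm_pager_put().  "uobj", "pg",
 * "lo", and "hi" are assumed set up per the rules above; the array size
 * 16 is arbitrary for the sketch.
 */
#if 0
  struct vm_page *pps[16], **ppsp;
  int npages, result;

  /* caller: uobj locked, page queues locked, pg PG_BUSY + !PG_CLEAN */
  ppsp = pps;
  npages = 16;				/* capacity of pps[] */
  result = uvm_pager_put(uobj, pg, &ppsp, &npages,
			 PGO_DOACTCLUST|PGO_PDFREECLUST, lo, hi);
  /*
   * page queues are now unlocked.  on VM_PAGER_OK the cluster was
   * already dropped for us (PGO_PDFREECLUST) and uobj is still locked,
   * so we need only un-busy "pg" itself.
   */
#endif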

/*
 * uvm_pager_dropcluster: drop a cluster we have built (because we
 * got an error, or, if PGO_PDFREECLUST we are un-busying the
 * cluster pages on behalf of the pagedaemon).
 *
 * => uobj, if non-null, is a non-swap-backed object that is
 *	locked by the caller.   we return with this object still
 *	locked.
 * => page queues are not locked
 * => pg is our page of interest (the one we clustered around, can be null)
 * => ppsp/npages is our current cluster
 * => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster
 *	pages on behalf of the pagedaemon.
 *           PGO_REALLOCSWAP: drop previously allocated swap slots for
 *		clustered swap-backed pages (except for "pg" if !NULL)
 *		"swblk" is the start of swap alloc (e.g. for ppsp[0])
 *		[only meaningful if swap-backed (uobj == NULL)]
 */


void
uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags, swblk)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, **ppsp;	/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;
	int swblk;		/* valid if (uobj == NULL && PGO_REALLOCSWAP) */
{
  int lcv;
  boolean_t obj_is_alive;

  /*
   * if we need to reallocate swap space for the cluster we are dropping
   * (true if swap-backed and PGO_REALLOCSWAP) then free the old allocation
   * now.   save a block for "pg" if it is non-NULL.
   *
   * note that we will zap the object's pointer to swap in the "for" loop
   * below...
   */

  if (uobj == NULL && (flags & PGO_REALLOCSWAP)) {
    if (pg)
      uvm_swap_free(swblk + 1, *npages - 1);
    else
      uvm_swap_free(swblk, *npages);
  }

  /*
   * drop all pages but "pg"
   */

  for (lcv = 0 ; lcv < *npages ; lcv++) {

    if (ppsp[lcv] == pg)		/* skip "pg" */
      continue;

    /*
     * if swap-backed, gain lock on object that owns page.  note
     * that PQ_ANON bit can't change as long as we are holding
     * the PG_BUSY bit (so there is no need to lock the page
     * queues to test it).
     *
     * once we have the lock, dispose of the pointer to swap, if requested
     */
    if (!uobj) {
      if (ppsp[lcv]->pqflags & PQ_ANON) {
	simple_lock(&ppsp[lcv]->uanon->an_lock);
        if (flags & PGO_REALLOCSWAP)
	  ppsp[lcv]->uanon->an_swslot = 0;	/* zap swap block */
      } else {
	simple_lock(&ppsp[lcv]->uobject->vmobjlock);
        if (flags & PGO_REALLOCSWAP)
          uao_set_swslot(ppsp[lcv]->uobject, ppsp[lcv]->offset / PAGE_SIZE, 0);
      }
    }

    /* did someone want the page while we had it busy-locked? */
    if (ppsp[lcv]->flags & PG_WANTED)
      thread_wakeup(ppsp[lcv]);		/* still holding obj lock */

    /* if page was released, release it.  otherwise un-busy it */
    if (ppsp[lcv]->flags & PG_RELEASED) {

      if (ppsp[lcv]->pqflags & PQ_ANON) {
	ppsp[lcv]->flags &= ~(PG_BUSY);		/* so that anfree will free */
        UVM_PAGE_OWN(ppsp[lcv], NULL);
	pmap_page_protect(PMAP_PGARG(ppsp[lcv]), VM_PROT_NONE); /* be safe */
	uvm_anfree(ppsp[lcv]->uanon);		/* kills anon and frees pg */
	continue;
      }

      /*
       * pgo_releasepg will dump the page for us
       */

#ifdef DIAGNOSTIC
      if (ppsp[lcv]->uobject->pgops->pgo_releasepg == NULL)
        panic("uvm_pager_dropcluster: no releasepg function");
#endif
      /* release this cluster page (not "pg" -- we skipped that above) */
      obj_is_alive =
	  ppsp[lcv]->uobject->pgops->pgo_releasepg(ppsp[lcv], NULL);

#ifdef DIAGNOSTIC
      /* for normal objects, "pg" is still PG_BUSY by us, so obj can't die */
      if (uobj && !obj_is_alive)
	panic("uvm_pager_dropcluster: object died with active page");
#endif
      if (!obj_is_alive)
	continue;

    } else {
      ppsp[lcv]->flags &= ~(PG_BUSY|PG_WANTED);
      UVM_PAGE_OWN(ppsp[lcv], NULL);
    }

    /*
     * if we are operating on behalf of the pagedaemon and we
     * had a successful pageout update the page!
     */
    if (flags & PGO_PDFREECLUST) {
      /* XXX: with PMAP_NEW ref should already be clear, but don't trust! */
      pmap_clear_reference(PMAP_PGARG(ppsp[lcv]));
      pmap_clear_modify(PMAP_PGARG(ppsp[lcv]));
      ppsp[lcv]->flags |= PG_CLEAN;
    }

    /* if anonymous cluster, unlock object and move on */
    if (!uobj) {
      if (ppsp[lcv]->pqflags & PQ_ANON)
	simple_unlock(&ppsp[lcv]->uanon->an_lock);
      else
	simple_unlock(&ppsp[lcv]->uobject->vmobjlock);
    }

  }

  /*
   * drop to a cluster of 1 page ("pg") if requested
   */

  if (pg && (flags & PGO_PDFREECLUST) == 0) {
    /*
     * if we are not a successful pageout, we make a 1 page cluster.
     */
    ppsp[0] = pg;
    *npages = 1;

    /*
     * assign new swap block to new cluster, if anon backed
     */
    if (uobj == NULL && (flags & PGO_REALLOCSWAP)) {
      if (pg->pqflags & PQ_ANON) {
        simple_lock(&pg->uanon->an_lock);
	pg->uanon->an_swslot = swblk;		/* reassign */
        simple_unlock(&pg->uanon->an_lock);
      } else {
	simple_lock(&pg->uobject->vmobjlock);
        uao_set_swslot(pg->uobject, pg->offset / PAGE_SIZE, swblk);
	simple_unlock(&pg->uobject->vmobjlock);
      }
    }
  }

}