/*	$NetBSD: uvm_bio.c,v 1.99 2018/12/09 20:45:37 jdolecek Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.99 2018/12/09 20:45:37 jdolecek Exp $");

#include "opt_uvmhist.h"
#include "opt_ubc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

#ifdef PMAP_DIRECT
#  define UBC_USE_PMAP_DIRECT
#endif

/*
 * local functions
 */

static int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
			  int, int, vm_prot_t, int);
static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);
#ifdef UBC_USE_PMAP_DIRECT
static int __noinline ubc_uiomove_direct(struct uvm_object *, struct uio *, vsize_t,
			  int, int);
static void __noinline ubc_zerorange_direct(struct uvm_object *, off_t, size_t, int);

bool ubc_direct = false; /* XXX */
#endif
/*
 * local data structures
 */

#define UBC_HASH(uobj, offset) 						\
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)

#define UBC_QUEUE(offset)						\
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) &	\
			     (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u)						\
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))

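/*
 * A worked example of the window arithmetic above (illustrative only;
 * the actual values are machine-dependent): with 4KB pages and, say,
 * ubc_winshift == 13, each window maps ubc_winsize == 0x2000 bytes
 * (8KB, two pages).  A request for offset 0x12345 in some uobj is then
 * served by the window covering [0x12000, 0x14000): UBC_HASH() buckets
 * the pair by ((uobj >> 8) + (0x12345 >> PAGE_SHIFT)) & hashmask,
 * UBC_QUEUE() picks the inactive queue from the offset bits just above
 * ubc_winshift, and UBC_UMAP_ADDR() recovers a window's kva from the
 * index of its ubc_map in the umap array, scaled by ubc_winsize.
 */
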
#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map {
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */
	int			advice;

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
	LIST_ENTRY(ubc_map)	list;		/* per-object list */
};

TAILQ_HEAD(ubc_inactive_head, ubc_map);
static struct ubc_object {
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	struct ubc_inactive_head *inactive;
					/* inactive queues for ubc_map's */
} ubc_object;

const struct uvm_pagerops ubc_pager = {
	.pgo_fault = ubc_fault,
	/* ... rest are NULL */
};

int ubc_nwins = UBC_NWINS;
int ubc_winshift __read_mostly = UBC_WINSHIFT;
int ubc_winsize __read_mostly;
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif
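
/*
 * Multiple inactive queues exist only in the PMAP_PREFER case: windows
 * are then recycled from the queue selected by the offset bits above
 * ubc_winshift (see UBC_QUEUE()), so a window tends to be reused for
 * file offsets of the same virtual-cache color, which helps avoid
 * cache aliasing on virtually-indexed caches.  ubc_init() below sizes
 * ubc_nqueues from what PMAP_PREFER() reports.
 */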

#if defined(UBC_STATS)

#define	UBC_EVCNT_DEFINE(name) \
struct evcnt ubc_evcnt_##name = \
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "ubc", #name); \
EVCNT_ATTACH_STATIC(ubc_evcnt_##name);
#define	UBC_EVCNT_INCR(name) ubc_evcnt_##name.ev_count++

#else /* defined(UBC_STATS) */

#define	UBC_EVCNT_DEFINE(name)	/* nothing */
#define	UBC_EVCNT_INCR(name)	/* nothing */

#endif /* defined(UBC_STATS) */

UBC_EVCNT_DEFINE(wincachehit)
UBC_EVCNT_DEFINE(wincachemiss)
UBC_EVCNT_DEFINE(faultbusy)

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	/*
	 * Make sure ubc_winshift is sane.
	 */
	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;
	ubc_winsize = 1 << ubc_winshift;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	uvm_obj_init(&ubc_object.uobj, &ubc_pager, true, UVM_OBJ_KERN);

	ubc_object.umap = kmem_zalloc(ubc_nwins * sizeof(struct ubc_map),
	    KM_SLEEP);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");

	vaddr_t va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_object.inactive = kmem_alloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), KM_SLEEP);
	for (int i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (int i = 0; i < ubc_nwins; i++) {
		struct ubc_map *umap;
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
				  umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, true,
	    &ubc_object.hashmask);
	for (int i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
		    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
		    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
}

void
ubchist_init(void)
{

	UVMHIST_INIT(ubchist, 300);
}

/*
 * ubc_fault_page: helper of ubc_fault to handle a single page.
 *
 * => Caller has UVM object locked.
 * => Caller will perform pmap_update().
 */

static inline int
ubc_fault_page(const struct uvm_faultinfo *ufi, const struct ubc_map *umap,
    struct vm_page *pg, vm_prot_t prot, vm_prot_t access_type, vaddr_t va)
{
	struct uvm_object *uobj;
	vm_prot_t mask;
	int error;
	bool rdonly;

	uobj = pg->uobject;
	KASSERT(mutex_owned(uobj->vmobjlock));

	if (pg->flags & PG_WANTED) {
		wakeup(pg);
	}
	KASSERT((pg->flags & PG_FAKE) == 0);
	if (pg->flags & PG_RELEASED) {
		mutex_enter(&uvm_pageqlock);
		uvm_pagefree(pg);
		mutex_exit(&uvm_pageqlock);
		return 0;
	}
	if (pg->loan_count != 0) {

		/*
		 * Avoid unneeded loan break, if possible.
		 */

		if ((access_type & VM_PROT_WRITE) == 0) {
			prot &= ~VM_PROT_WRITE;
		}
		if (prot & VM_PROT_WRITE) {
			struct vm_page *newpg;

			newpg = uvm_loanbreak(pg);
			if (newpg == NULL) {
				uvm_page_unbusy(&pg, 1);
				return ENOMEM;
			}
			pg = newpg;
		}
	}

	/*
	 * Note that a page whose backing store is partially allocated
	 * is marked as PG_RDONLY: it must be mapped read-only, so that
	 * a later write faults and the missing backing blocks can be
	 * allocated before the page is modified.
	 */

	KASSERT((pg->flags & PG_RDONLY) == 0 ||
	    (access_type & VM_PROT_WRITE) == 0 ||
	    pg->offset < umap->writeoff ||
	    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);

	rdonly = ((access_type & VM_PROT_WRITE) == 0 &&
	    (pg->flags & PG_RDONLY) != 0) ||
	    UVM_OBJ_NEEDS_WRITEFAULT(uobj);
	mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;

	error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
	    prot & mask, PMAP_CANFAIL | (access_type & mask));

	mutex_enter(&uvm_pageqlock);
	uvm_pageactivate(pg);
	mutex_exit(&uvm_pageqlock);
	pg->flags &= ~(PG_BUSY|PG_WANTED);
	UVM_PAGE_OWN(pg, NULL);

	return error;
}

/*
 * ubc_fault: fault routine for ubc mapping
 */

static int
ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
    int ign3, int ign4, vm_prot_t access_type, int flags)
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT];
	int i, error, npages;
	vm_prot_t prot;

	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;
	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/*
	 * some platforms cannot write to individual bytes atomically, so
	 * software has to do read/modify/write of larger quantities instead.
	 * this means that the access_type for "write" operations
	 * can be VM_PROT_READ, which confuses us mightily.
	 *
	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
	 */

	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
	UVMHIST_LOG(ubchist, "va 0x%jx ubc_offset 0x%jx access_type %jd",
	    va, ubc_offset, access_type, 0);

	if ((access_type & VM_PROT_WRITE) != 0) {
#ifndef PRIxOFF		/* XXX */
#define PRIxOFF "jx"	/* XXX */
#endif			/* XXX */
		KASSERTMSG((trunc_page(umap->writeoff) <= slot_offset),
		    "out of range write: slot=%#"PRIxVSIZE" off=%#"PRIxOFF,
		    slot_offset, (intmax_t)umap->writeoff);
		KASSERTMSG((slot_offset < umap->writeoff + umap->writelen),
		    "out of range write: slot=%#"PRIxVADDR
		        " off=%#"PRIxOFF" len=%#"PRIxVSIZE,
		    slot_offset, (intmax_t)umap->writeoff, umap->writelen);
	}

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;

	if ((access_type & VM_PROT_WRITE) == 0) {
		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
	} else {
		npages = (round_page(umap->offset + umap->writeoff +
		    umap->writelen) - (umap->offset + slot_offset))
		    >> PAGE_SHIFT;
		flags |= PGO_PASTEOF;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	mutex_enter(uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%jx writeoff 0x%jx writelen 0x%jx ",
	    slot_offset, umap->writeoff, umap->writelen, 0);
	UVMHIST_LOG(ubchist, "getpages uobj %#jx offset 0x%jx npages %jd",
	    (uintptr_t)uobj, umap->offset + slot_offset, npages, 0);

	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
	    &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP);
	UVMHIST_LOG(ubchist, "getpages error %jd npages %jd", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		kpause("ubc_fault", false, hz >> 2, NULL);
		goto again;
	}
	if (error) {
		return error;
	}

	/*
	 * For virtually-indexed, virtually-tagged caches we should avoid
	 * creating writable mappings when we do not absolutely need them,
	 * since the "compatible alias" trick does not work on such caches.
	 * Otherwise, we can always map the pages writable.
	 */

#ifdef PMAP_CACHE_VIVT
	prot = VM_PROT_READ | access_type;
#else
	prot = VM_PROT_READ | VM_PROT_WRITE;
#endif

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%jx eva 0x%jx", va, eva, 0, 0);

	/*
	 * Note: normally all returned pages would have the same UVM object.
	 * However, layered file systems and e.g. tmpfs may return pages
	 * which belong to the underlying UVM object.  In such a case, the
	 * lock is shared amongst the objects.
	 */
	mutex_enter(uobj->vmobjlock);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		struct vm_page *pg;

		UVMHIST_LOG(ubchist, "pgs[%jd] = %#jx", i, (uintptr_t)pgs[i],
		    0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
		error = ubc_fault_page(ufi, umap, pg, prot, access_type, va);
		if (error) {
			/*
			 * Flush (there might be pages entered), drop the lock,
			 * and perform uvm_wait().  Note: page will re-fault.
			 */
			pmap_update(ufi->orig_map->pmap);
			mutex_exit(uobj->vmobjlock);
			uvm_wait("ubc_fault");
			mutex_enter(uobj->vmobjlock);
		}
	}
	/* Must make VA visible before the unlock. */
	pmap_update(ufi->orig_map->pmap);
	mutex_exit(uobj->vmobjlock);

	return 0;
}

/*
 * local functions
 */

static struct ubc_map *
ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

/*
 * ubc_alloc:  allocate a file mapping window
 */

static void * __noinline
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
    int flags)
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %#jx offset 0x%jx len 0x%jx",
	    (uintptr_t)uobj, offset, *lenp, 0);

	KASSERT(*lenp > 0);
	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);

	mutex_enter(ubc_object.uobj.vmobjlock);
again:
	/*
	 * The UVM object is already referenced.
	 * Lock order: UBC object -> ubc_map::uobj.
	 */
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		struct uvm_object *oobj;

		UBC_EVCNT_INCR(wincachemiss);
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			kpause("ubc_alloc", false, hz >> 2,
			    ubc_object.uobj.vmobjlock);
			goto again;
		}

		va = UBC_UMAP_ADDR(umap);
		oobj = umap->uobj;

		/*
		 * Remove from old hash (if any), add to new hash.
		 */

		if (oobj != NULL) {
			/*
			 * Mapping must be removed before the list entry,
			 * since there is a race with ubc_purge().
			 */
			if (umap->flags & UMAP_MAPPING_CACHED) {
				umap->flags &= ~UMAP_MAPPING_CACHED;
				mutex_enter(oobj->vmobjlock);
				pmap_remove(pmap_kernel(), va,
				    va + ubc_winsize);
				pmap_update(pmap_kernel());
				mutex_exit(oobj->vmobjlock);
			}
			LIST_REMOVE(umap, hash);
			LIST_REMOVE(umap, list);
		} else {
			KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		LIST_INSERT_HEAD(&uobj->uo_ubc, umap, list);
	} else {
		UBC_EVCNT_INCR(wincachehit);
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

	if (flags & UBC_WRITE) {
		KASSERTMSG(umap->writeoff == 0 && umap->writelen == 0,
		    "ubc_alloc: concurrent writes to uobj %p", uobj);
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	umap->advice = advice;
	mutex_exit(ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %#jx refs %jd va %#jx flags 0x%jx",
	    (uintptr_t)umap, umap->refcount, (uintptr_t)va, flags);

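	/*
	 * UBC_FAULTBUSY: fault the window in up front instead of relying
	 * on ubc_fault().  The pages come back busy from pgo_get() (with
	 * PGO_OVERWRITE their old contents are not read in), and they are
	 * entered into the kernel pmap directly, so the caller's access
	 * through the window cannot page-fault.  The pages remain busy
	 * until ubc_release().
	 */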
	if (flags & UBC_FAULTBUSY) {
		// XXX add offset from slot_offset?
		int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		int gpflags =
		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
		    PGO_NOTIMESTAMP;
		int i;
		KDASSERT(flags & UBC_WRITE);
		KASSERT(umap->refcount == 1);

		UBC_EVCNT_INCR(faultbusy);
again_faultbusy:
		mutex_enter(uobj->vmobjlock);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
		memset(pgs, 0, sizeof(pgs));

		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %jd", error, 0, 0, 0);
		if (error) {
			/*
			 * Flush: the mapping above might have been removed.
			 */
			pmap_update(pmap_kernel());
			goto out;
		}
		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[i];

			KASSERT(pg->uobject == uobj);
			if (pg->loan_count != 0) {
				mutex_enter(uobj->vmobjlock);
				if (pg->loan_count != 0) {
					pg = uvm_loanbreak(pg);
				}
				if (pg == NULL) {
					pmap_kremove(va, ubc_winsize);
					pmap_update(pmap_kernel());
					uvm_page_unbusy(pgs, npages);
					mutex_exit(uobj->vmobjlock);
					uvm_wait("ubc_alloc");
					goto again_faultbusy;
				}
				mutex_exit(uobj->vmobjlock);
				pgs[i] = pg;
			}
			pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE, 0);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
	} else {
		KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	}

out:
	return (void *)(va + slot_offset);
}

/*
 * ubc_release:  free a file mapping window.
 */

static void __noinline
ubc_release(void *va, int flags)
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	bool unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %#jx", (uintptr_t)va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	if (umap->flags & UMAP_PAGES_LOCKED) {
		const voff_t slot_offset = umap->writeoff;
		const voff_t endoff = umap->writeoff + umap->writelen;
		const voff_t zerolen = round_page(endoff) - endoff;
		const u_int npages = (round_page(endoff) -
		    trunc_page(slot_offset)) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];

		KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		mutex_enter(uobj->vmobjlock);
		mutex_enter(&uvm_pageqlock);
		for (u_int i = 0; i < npages; i++) {
			paddr_t pa;
			bool rv __diagused;

			rv = pmap_extract(pmap_kernel(),
			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
			KASSERT(pgs[i]->loan_count == 0);
			uvm_pageactivate(pgs[i]);
		}
		mutex_exit(&uvm_pageqlock);
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		uvm_page_unbusy(pgs, npages);
		mutex_exit(uobj->vmobjlock);
		unmapped = true;
	} else {
		unmapped = false;
	}

	mutex_enter(ubc_object.uobj.vmobjlock);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {
			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */
			mutex_enter(uobj->vmobjlock);
			pmap_remove(pmap_kernel(), umapva,
				    umapva + ubc_winsize);
			pmap_update(pmap_kernel());
			mutex_exit(uobj->vmobjlock);

			umap->flags &= ~UMAP_MAPPING_CACHED;
			LIST_REMOVE(umap, hash);
			LIST_REMOVE(umap, list);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %#jx refs %jd", (uintptr_t)umap,
	    umap->refcount, 0, 0);
	mutex_exit(ubc_object.uobj.vmobjlock);
}

/*
 * ubc_uiomove: move data to/from an object.
 */

int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
    int flags)
{
	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
	voff_t off;
	int error;

	KASSERT(todo <= uio->uio_resid);
	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
	    ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));

#ifdef UBC_USE_PMAP_DIRECT
	if (ubc_direct) {
		return ubc_uiomove_direct(uobj, uio, todo, advice, flags);
	}
#endif

	off = uio->uio_offset;
	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;
		void *win;

		win = ubc_alloc(uobj, off, &bytelen, advice, flags);
		if (error == 0) {
			error = uiomove(win, bytelen, uio);
		}
		if (error != 0 && overwrite) {
			/*
			 * if we haven't initialized the pages yet,
			 * do it now.  it's safe to use memset here
			 * because we just mapped the pages above.
			 */
			printf("%s: error=%d\n", __func__, error);
			memset(win, 0, bytelen);
		}
		ubc_release(win, flags);
		off += bytelen;
		todo -= bytelen;
		if (error != 0 && (flags & UBC_PARTIALOK) != 0) {
			break;
		}
	}

	return error;
}
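
/*
 * Example usage (an illustrative sketch, not part of this file): a file
 * system's write path would pass its vnode's UVM object and the caller's
 * uio down to ubc_uiomove().  Here "vp", "uio", "bytelen" and "ioflag"
 * are assumed to come from the surrounding VOP_WRITE() implementation:
 *
 *	error = ubc_uiomove(&vp->v_uobj, uio, bytelen,
 *	    IO_ADV_DECODE(ioflag), UBC_WRITE | UBC_UNMAP_FLAG(vp));
 */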

/*
 * ubc_zerorange: set a range of bytes in an object to zero.
 */

void
ubc_zerorange(struct uvm_object *uobj, off_t off, size_t len, int flags)
{

#ifdef UBC_USE_PMAP_DIRECT
	if (ubc_direct) {
		ubc_zerorange_direct(uobj, off, len, flags);
		return;
	}
#endif

	/*
	 * XXXUBC invent kzero() and use it
	 */

	while (len) {
		void *win;
		vsize_t bytelen = len;

		win = ubc_alloc(uobj, off, &bytelen, UVM_ADV_NORMAL, UBC_WRITE);
		memset(win, 0, bytelen);
		ubc_release(win, flags);

		off += bytelen;
		len -= bytelen;
	}
}

#ifdef UBC_USE_PMAP_DIRECT
/* Copy data using direct map */

/*
 * ubc_alloc_direct:  allocate a file mapping window using direct map
 */
static int __noinline
ubc_alloc_direct(struct uvm_object *uobj, voff_t offset, vsize_t *lenp,
    int advice, int flags, struct vm_page **pgs, int *npages)
{
	voff_t pgoff;
	int error;
	int gpflags = flags | PGO_NOTIMESTAMP | PGO_SYNCIO | PGO_ALLPAGES;
	int access_type = VM_PROT_READ;
	UVMHIST_FUNC("ubc_alloc_direct"); UVMHIST_CALLED(ubchist);

	if (flags & UBC_WRITE) {
		if (flags & UBC_FAULTBUSY)
			gpflags |= PGO_OVERWRITE;
#if 0
		KASSERT(!UVM_OBJ_NEEDS_WRITEFAULT(uobj));
#endif

		/*
		 * Tell genfs_getpages() we already have the journal lock,
		 * allow allocation past current EOF.
		 */
		gpflags |= PGO_JOURNALLOCKED | PGO_PASTEOF;
		access_type |= VM_PROT_WRITE;
	} else {
		/* Don't need the empty blocks allocated, PG_RDONLY is okay */
		gpflags |= PGO_NOBLOCKALLOC;
	}

	pgoff = (offset & PAGE_MASK);
	*lenp = MIN(*lenp, ubc_winsize - pgoff);

again:
	*npages = (*lenp + pgoff + PAGE_SIZE - 1) >> PAGE_SHIFT;
	KASSERT((*npages * PAGE_SIZE) <= ubc_winsize);
	KASSERT(*lenp + pgoff <= ubc_winsize);
	memset(pgs, 0, *npages * sizeof(pgs[0]));

	mutex_enter(uobj->vmobjlock);
	error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
	    npages, 0, access_type, advice, gpflags);
	UVMHIST_LOG(ubchist, "alloc_direct getpages %jd", error, 0, 0, 0);
	if (error) {
		if (error == EAGAIN) {
			kpause("ubc_alloc_directg", false, hz >> 2, NULL);
			goto again;
		}
		return error;
	}

	mutex_enter(uobj->vmobjlock);
	for (int i = 0; i < *npages; i++) {
		struct vm_page *pg = pgs[i];

		KASSERT(pg != NULL);
		KASSERT(pg != PGO_DONTCARE);
		KASSERT((pg->flags & PG_FAKE) == 0 || (gpflags & PGO_OVERWRITE));
		KASSERT(pg->uobject->vmobjlock == uobj->vmobjlock);

		/* Avoid breaking loan if possible, only do it on write */
		if ((flags & UBC_WRITE) && pg->loan_count != 0) {
			pg = uvm_loanbreak(pg);
			if (pg == NULL) {
				uvm_page_unbusy(pgs, *npages);
				mutex_exit(uobj->vmobjlock);
				uvm_wait("ubc_alloc_directl");
				goto again;
			}
			pgs[i] = pg;
		}

		/* Page must be writable by now */
		KASSERT((pg->flags & PG_RDONLY) == 0 || (flags & UBC_WRITE) == 0);
	}
	mutex_exit(uobj->vmobjlock);

	return 0;
}

static void __noinline
ubc_direct_release(struct uvm_object *uobj,
	int flags, struct vm_page **pgs, int npages)
{
	mutex_enter(uobj->vmobjlock);
	mutex_enter(&uvm_pageqlock);
	for (int i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];

		uvm_pageactivate(pg);

		/* Page was changed, no longer fake nor clean */
		if (flags & UBC_WRITE)
			pg->flags &= ~(PG_FAKE|PG_CLEAN);
	}
	mutex_exit(&uvm_pageqlock);

	uvm_page_unbusy(pgs, npages);
	mutex_exit(uobj->vmobjlock);
}

static int
ubc_uiomove_process(void *win, size_t len, void *arg)
{
	struct uio *uio = (struct uio *)arg;

	return uiomove(win, len, uio);
}

static int
ubc_zerorange_process(void *win, size_t len, void *arg)
{
	memset(win, 0, len);
	return 0;
}
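
/*
 * The two helpers above are callbacks for uvm_direct_process(), which
 * runs the given function on successive direct-map views of the pages,
 * passing "arg" through.  Any other consumer would follow the same
 * shape; a minimal sketch (hypothetical, not used anywhere in the tree):
 *
 *	static int
 *	ubc_sum_process(void *win, size_t len, void *arg)
 *	{
 *		uint32_t *sum = arg;
 *		const uint8_t *p = win;
 *
 *		while (len--)
 *			*sum += *p++;
 *		return 0;
 *	}
 */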

static int __noinline
ubc_uiomove_direct(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
    int flags)
{
	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
	voff_t off;
	int error, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT];

	KASSERT(todo <= uio->uio_resid);
	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
	    ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));

	off = uio->uio_offset;
	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;

		error = ubc_alloc_direct(uobj, off, &bytelen, advice, flags,
		    pgs, &npages);
		if (error != 0) {
			/* can't do anything, failed to get the pages */
			break;
		}

		error = uvm_direct_process(pgs, npages, off, bytelen,
		    ubc_uiomove_process, uio);
		if (error != 0 && overwrite) {
			/*
			 * if we haven't initialized the pages yet,
			 * do it now.  it's safe to use memset here
			 * because we just mapped the pages above.
			 */
			printf("%s: error=%d\n", __func__, error);
			(void) uvm_direct_process(pgs, npages, off, bytelen,
			    ubc_zerorange_process, NULL);
		}

		ubc_direct_release(uobj, flags, pgs, npages);

		off += bytelen;
		todo -= bytelen;

		if (error != 0 && ISSET(flags, UBC_PARTIALOK)) {
			break;
		}
	}

	return error;
}

static void __noinline
ubc_zerorange_direct(struct uvm_object *uobj, off_t off, size_t todo, int flags)
{
	int error, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT];

	flags |= UBC_WRITE;

	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;

		error = ubc_alloc_direct(uobj, off, &bytelen, UVM_ADV_NORMAL,
		    flags, pgs, &npages);
		if (error != 0) {
			/* can't do anything, failed to get the pages */
			break;
		}

		error = uvm_direct_process(pgs, npages, off, bytelen,
		    ubc_zerorange_process, NULL);

		ubc_direct_release(uobj, flags, pgs, npages);

		off += bytelen;
		todo -= bytelen;
	}
}

#endif /* UBC_USE_PMAP_DIRECT */

/*
 * ubc_purge: disassociate ubc_map structures from an empty uvm_object.
 */

void
ubc_purge(struct uvm_object *uobj)
{
	struct ubc_map *umap;
	vaddr_t va;

	KASSERT(uobj->uo_npages == 0);

	/*
	 * Safe to check without lock held, as ubc_alloc() removes
	 * the mapping and list entry in the correct order.
	 */
	if (__predict_true(LIST_EMPTY(&uobj->uo_ubc))) {
		return;
	}
	mutex_enter(ubc_object.uobj.vmobjlock);
	while ((umap = LIST_FIRST(&uobj->uo_ubc)) != NULL) {
		KASSERT(umap->refcount == 0);
		for (va = 0; va < ubc_winsize; va += PAGE_SIZE) {
			KASSERT(!pmap_extract(pmap_kernel(),
			    va + UBC_UMAP_ADDR(umap), NULL));
		}
		LIST_REMOVE(umap, list);
		LIST_REMOVE(umap, hash);
		umap->flags &= ~UMAP_MAPPING_CACHED;
		umap->uobj = NULL;
	}
	mutex_exit(ubc_object.uobj.vmobjlock);
}
   1042