/src/sys/rump/librump/rumpvfs/

vm_vfs.c
    40   uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
    42   struct uvm_object *uobj = pgs[0]->uobject;
    48   pg = pgs[i];
    62   uvm_page_unbusy(pgs, npages);
    74   struct vm_page **pgs;  local
    83   pgs = kmem_alloc(npages * sizeof(*pgs), KM_SLEEP);
    86   pgs[i] = uvm_pageratop(va);
    89   uobj = pgs[i]->uobject;
    92   KASSERT(uobj == pgs[i]->uobject)
    [all...]

/src/sys/uvm/

uvm_object.c
    135  struct vm_page *pgs[FETCH_PAGECOUNT], *pg = NULL;  local
    146  memset(pgs, 0, sizeof(pgs));
    147  error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
    157  KASSERT(pgs[i] != NULL);
    158  KASSERT(!(pgs[i]->flags & PG_RELEASED));
    163  if (pgs[i]->loan_count) {
    164  while (pgs[i]->loan_count) {
    165  pg = uvm_loanbreak(pgs[i]);
    173  pgs[i] = pg
    [all...]

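The uvm_object.c matches are from uvm_obj_wirepages(): zero the pgs array, fetch busy pages through the object's pgo_get method, then replace any loaned page with a private copy via uvm_loanbreak() before the pages can be wired. Below is a minimal sketch of that idiom, not the real function: the name, the simplified error handling, and the final unbusy are illustrative assumptions.

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/rwlock.h>
    #include <uvm/uvm.h>

    /* Hypothetical helper: fetch npages busy pages, breaking any loans. */
    static int
    fetch_unloaned(struct uvm_object *uobj, voff_t offset,
        struct vm_page **pgs, int npages)
    {
        struct vm_page *pg;
        int error, i;

        memset(pgs, 0, npages * sizeof(pgs[0]));
        rw_enter(uobj->vmobjlock, RW_WRITER);
        /* pgo_get drops vmobjlock; pages come back PG_BUSY */
        error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
            VM_PROT_READ | VM_PROT_WRITE, UVM_ADV_NORMAL,
            PGO_SYNCIO | PGO_ALLPAGES);
        if (error)
            return error;

        rw_enter(uobj->vmobjlock, RW_WRITER);
        for (i = 0; i < npages; i++) {
            KASSERT(pgs[i] != NULL);
            while (pgs[i]->loan_count) {
                pg = uvm_loanbreak(pgs[i]);
                if (pg == NULL) {
                    /* out of memory: sleep unlocked, then retry */
                    rw_exit(uobj->vmobjlock);
                    uvm_wait("loanbrk");
                    rw_enter(uobj->vmobjlock, RW_WRITER);
                    continue;
                }
                pgs[i] = pg;
            }
        }
        /* the real code wires the pages before unbusying them */
        uvm_page_unbusy(pgs, npages);
        rw_exit(uobj->vmobjlock);
        return 0;
    }
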
uvm_bio.c
    314  struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];  local
    378  memset(pgs, 0, sizeof (pgs));
    386  error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
    428  UVMHIST_LOG(ubchist, "pgs[%jd] = %#jx", i, (uintptr_t)pgs[i],
    430  pg = pgs[i];
    483  int flags, struct vm_page **pgs, int *npagesp)
    590  memset(pgs, 0, *npagesp * sizeof(pgs[0]))
    744  struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];  local
    809  struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];  local
    986  struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];  local
    1053 struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)];  local
    [all...]

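uvm_bio.c implements the UBC mapping windows those on-stack arrays serve; they are dimensioned with howmany(ubc_winsize, MIN_PAGE_SIZE) so one array fits a whole window at any supported page size. File systems normally reach this machinery through ubc_uiomove(); a hedged sketch of such a caller follows (the helper name is illustrative):

    #include <sys/param.h>
    #include <sys/vnode.h>
    #include <sys/uio.h>
    #include <uvm/uvm_extern.h>

    /*
     * Hypothetical read helper: move len bytes from a vnode's pages to
     * the caller's uio through a UBC window; uvm_bio.c fills its pgs[]
     * arrays (and maps/unmaps the window) behind this call.
     */
    static int
    read_via_ubc(struct vnode *vp, struct uio *uio, vsize_t len)
    {
        return ubc_uiomove(&vp->v_uobj, uio, len, UVM_ADV_SEQUENTIAL,
            UBC_READ | UBC_PARTIALOK);
    }
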
uvm_physseg.c
    96   struct vm_page *pgs;  /* vm_page structures (from start) */  member in struct:uvm_physseg
    101  struct extent *ext;  /* extent(9) structure to manage pgs[] */
    118  /* returns a pgs array */
    238  struct vm_page *slab = NULL, *pgs = NULL;  local
    287  if (current_ps->pgs != NULL) {
    290  pgs = uvm_physseg_seg_alloc_from_slab(current_ps, pages);
    292  if (pgs != NULL) {
    304  if (pgs == NULL) { /* Brand new */
    307  slab = kmem_zalloc(sizeof *pgs * (long unsigned int)slabpages, KM_NOSLEEP);
    319  pgs = uvm_physseg_seg_alloc_from_slab(ps, pages)
    748  struct vm_page *pgs;  local
    1194 struct vm_page *pgs = NULL;  local
    [all...]

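struct uvm_physseg keeps its vm_page array in the pgs member and manages carve-outs from a shared slab with an extent(9) arena (the ext member). A toy model of that bookkeeping follows; the function name and sizes are illustrative, and the real code sizes the slab from the boot-time page count rather than a parameter.

    #include <sys/param.h>
    #include <sys/extent.h>
    #include <sys/kmem.h>
    #include <uvm/uvm.h>

    static struct vm_page *
    pgs_slab_carve(size_t slabpages, size_t segpages)
    {
        struct extent *ex;
        struct vm_page *slab;
        u_long where;

        slab = kmem_zalloc(sizeof(*slab) * slabpages, KM_SLEEP);
        /* extent(9) ranges are inclusive of both endpoints */
        ex = extent_create("pgslab", (u_long)(uintptr_t)slab,
            (u_long)(uintptr_t)(slab + slabpages) - 1, NULL, 0, EX_WAITOK);
        if (extent_alloc(ex, sizeof(*slab) * segpages, 1, 0,
            EX_BOUNDZERO, &where) != 0)
            return NULL;
        return (struct vm_page *)(uintptr_t)where;
    }
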
uvm_pager.c
    327  uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
    342  pg = pgs[0];
    370  pg = pgs[i];
    478  uvm_page_unbusy(pgs, npages);
    507  struct vm_page *pgs[howmany(MAXPHYS, MIN_PAGE_SIZE)];  local
    514  KASSERT(npages <= __arraycount(pgs));
    520  pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
    521  UVMHIST_LOG(ubchist, "pgs[%jd] = %#jx", i,
    522  (uintptr_t)pgs[i], 0, 0);
    526  uvm_aio_aiodone_pages(pgs, npages, write, error)
    [all...]

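The matches at lines 507-526 are the interesting half: the I/O completion path has only a struct buf, so it recovers the vm_page pointers from the buffer's kernel mapping with uvm_pageratop() and hands them to the file-internal uvm_aio_aiodone_pages(). A condensed sketch of that translation (UVMHIST logging dropped, function name illustrative):

    #include <sys/param.h>
    #include <sys/buf.h>
    #include <uvm/uvm.h>
    #include <uvm/uvm_pager.h>

    static void
    aio_done_sketch(struct buf *bp)
    {
        struct vm_page *pgs[howmany(MAXPHYS, MIN_PAGE_SIZE)];
        int i, npages = bp->b_bufsize >> PAGE_SHIFT; /* pager I/O is page-aligned */
        bool write = (bp->b_flags & B_READ) == 0;

        KASSERT(npages <= __arraycount(pgs));
        for (i = 0; i < npages; i++)
            pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
        uvm_aio_aiodone_pages(pgs, npages, write, bp->b_error);
    }
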
uvm_vnode.c
    226  struct vm_page **pgs, struct uvm_page_array *a, unsigned int flags)
    252  rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,
    263  rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,

uvm_page.c
    191  /* returns a pgs array */
    1581 uvm_page_unbusy(struct vm_page **pgs, int npgs)
    1589 pg = pgs[i];
    2071 uvm_direct_process(struct vm_page **pgs, u_int npages, voff_t off, vsize_t len,
    2084 pg = pgs[i];

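uvm_page_unbusy() is where most of the pgs arrays in this listing end up: it clears PG_BUSY on each page (freeing any marked PG_RELEASED) and wakes waiters. The contract is small but strict; a minimal caller-side sketch:

    #include <sys/param.h>
    #include <sys/rwlock.h>
    #include <uvm/uvm.h>

    static void
    release_pages(struct uvm_object *uobj, struct vm_page **pgs, int npages)
    {
        /* PG_BUSY may only be cleared with the owning object locked */
        rw_enter(uobj->vmobjlock, RW_WRITER);
        uvm_page_unbusy(pgs, npages);
        rw_exit(uobj->vmobjlock);
    }
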
/src/sys/ufs/lfs/

ulfs_inode.c
    174  off_t pagestart;  /* starting offset of range covered by pgs */
    181  struct vm_page **pgs;  local
    203  pgs = kmem_zalloc(pgssize, KM_SLEEP);
    215  error = VOP_GETPAGES(vp, pagestart, pgs, &npages, 0,
    243  KASSERT((pgs[i]->flags & PG_RELEASED) == 0);
    247  pgs[i]->flags &= ~PG_RDONLY;
    249  uvm_pagemarkdirty(pgs[i], UVM_PAGE_STATUS_DIRTY);
    251  uvm_pagelock(pgs[i]);
    252  uvm_pageactivate(pgs[i]);
    253  uvm_pageunlock(pgs[i])
    [all...]

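These matches are ulfs_balloc_range(): fault in every page covering a range with VOP_GETPAGES, then clear PG_RDONLY, mark each page dirty, and activate it so the data stays resident until written. A sketch of the idiom, with the genfs node locking and error unwinding elided (the function name and the abridged flag set are assumptions):

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/kmem.h>
    #include <sys/rwlock.h>
    #include <sys/vnode.h>
    #include <uvm/uvm.h>

    static int
    dirty_range_sketch(struct vnode *vp, off_t off, off_t len)
    {
        struct vm_page **pgs;
        off_t pagestart = trunc_page(off);
        int npages = (int)((round_page(off + len) - pagestart) >> PAGE_SHIFT);
        size_t pgssize = npages * sizeof(struct vm_page *);
        int error, i;

        pgs = kmem_zalloc(pgssize, KM_SLEEP);
        rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
        /* VOP_GETPAGES drops the object lock */
        error = VOP_GETPAGES(vp, pagestart, pgs, &npages, 0,
            VM_PROT_WRITE, 0, PGO_SYNCIO | PGO_PASTEOF | PGO_NOBLOCKALLOC);
        if (error == 0) {
            rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
            for (i = 0; i < npages; i++) {
                KASSERT((pgs[i]->flags & PG_RELEASED) == 0);
                pgs[i]->flags &= ~PG_RDONLY;
                uvm_pagemarkdirty(pgs[i], UVM_PAGE_STATUS_DIRTY);
                uvm_pagelock(pgs[i]);
                uvm_pageactivate(pgs[i]);
                uvm_pageunlock(pgs[i]);
            }
            uvm_page_unbusy(pgs, npages);
            rw_exit(vp->v_uobj.vmobjlock);
        }
        kmem_free(pgs, pgssize);
        return error;
    }
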
lfs_pages.c
    245  struct vm_page *pgs[MAXBSIZE / MIN_PAGE_SIZE], *pg;  local
    273  pgs[i] = pg = uvm_pagelookup(&vp->v_uobj, off);
    289  uvm_page_unbusy(pgs, i);
    301  uvm_page_unbusy(pgs, i);
    333  pg = pgs[i];

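lfs_pages.c gathers already-resident pages with uvm_pagelookup(), and both error paths call uvm_page_unbusy(pgs, i), releasing only the i pages collected so far. A sketch of that back-out pattern (the fixed array bound and the EAGAIN policy are illustrative; the real code also waits on busy pages):

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/errno.h>
    #include <sys/rwlock.h>
    #include <sys/vnode.h>
    #include <uvm/uvm.h>

    static int
    gather_sketch(struct vnode *vp, voff_t start, int maxpages)
    {
        struct vm_page *pgs[16], *pg;   /* hypothetical fixed bound */
        voff_t off = start;
        int i;

        KASSERT(maxpages <= (int)__arraycount(pgs));
        KASSERT(rw_write_held(vp->v_uobj.vmobjlock));
        for (i = 0; i < maxpages; i++, off += PAGE_SIZE) {
            pgs[i] = pg = uvm_pagelookup(&vp->v_uobj, off);
            if (pg == NULL || (pg->flags & PG_BUSY) != 0) {
                /* back out: unbusy only the pages gathered so far */
                uvm_page_unbusy(pgs, i);
                return EAGAIN;
            }
            pg->flags |= PG_BUSY;
        }
        return 0;
    }
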
lfs_vfsops.c
    2125 lfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
    2191 if ((pgs[0]->offset & lfs_sb_getbmask(fs)) != 0) {
    2196 UVMHIST_LOG(ubchist, "vp %#jx pgs %#jx npages %jd flags 0x%jx",
    2197 (uintptr_t)vp, (uintptr_t)pgs, npages, flags);
    2207 pg = pgs[0];
    2222 if (pgs[i]->flags & PG_DELWRI) {
    2223 KASSERT(!(pgs[i]->flags & PG_PAGEOUT));
    2224 pgs[i]->flags &= ~PG_DELWRI;
    2225 pgs[i]->flags |= PG_PAGEOUT;
    2228 uvm_pagelock(pgs[i])
    [all...]

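In lfs_gop_write() (lines 2222-2228) each delayed-write page is retagged as a pageout before being clustered: PG_DELWRI comes off, PG_PAGEOUT goes on, and uvm_pageout_start() accounts for it so completion can credit it back. A sketch of that flag dance; the deactivation under uvm_pagelock() is an assumption based on the truncated line 2228.

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <uvm/uvm.h>

    static void
    retag_delwri_pages(struct vm_page **pgs, int npages)
    {
        int i;

        for (i = 0; i < npages; i++) {
            if ((pgs[i]->flags & PG_DELWRI) == 0)
                continue;
            KASSERT(!(pgs[i]->flags & PG_PAGEOUT));
            pgs[i]->flags &= ~PG_DELWRI;
            pgs[i]->flags |= PG_PAGEOUT;
            uvm_pageout_start(1);   /* balanced by uvm_pageout_done() */
            uvm_pagelock(pgs[i]);   /* assumed: deactivate under page lock */
            uvm_pagedeactivate(pgs[i]);
            uvm_pageunlock(pgs[i]);
        }
    }
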
/src/sys/ufs/ufs/

ufs_inode.c
    207  off_t pagestart;  /* starting offset of range covered by pgs */
    214  struct vm_page **pgs;  local
    236  pgs = kmem_zalloc(pgssize, KM_SLEEP);
    248  error = VOP_GETPAGES(vp, pagestart, pgs, &npages, 0,
    276  KASSERT((pgs[i]->flags & PG_RELEASED) == 0);
    280  pgs[i]->flags &= ~PG_RDONLY;
    282  uvm_pagemarkdirty(pgs[i], UVM_PAGE_STATUS_DIRTY);
    284  uvm_pagelock(pgs[i]);
    285  uvm_pageactivate(pgs[i]);
    286  uvm_pageunlock(pgs[i])
    [all...]

/src/sys/miscfs/genfs/

genfs_io.c
    69   genfs_rel_pages(struct vm_page **pgs, unsigned int npages)
    74   struct vm_page *pg = pgs[i];
    83   uvm_page_unbusy(pgs, npages);
    310  struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES];  local
    313  pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
    314  if (pgs == NULL) {
    315  pgs = pgs_onstack;
    320  pgs = pgs_onstack;
    321  (void)memset(pgs, 0, pgs_size);
    369  if (pgs != pgs_onstack
    875  struct vm_page *pgs[MAXPHYS / MIN_PAGE_SIZE];  local
    1612 struct vm_page *pg, **pgs;  local
    [all...]

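genfs_getpages() (line 310) sizes its pgs array at run time: a request that fits uses the on-stack pgs_onstack[UBC_MAX_PAGES]; anything larger is allocated with kmem_zalloc, and an async caller must use KM_NOSLEEP because it may not sleep. A sketch of that selection, with failure reduced to returning NULL (the real code turns it into an error return):

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/kmem.h>
    #include <uvm/uvm.h>

    static struct vm_page **
    pick_pgs(struct vm_page **pgs_onstack, size_t onstack_npages,
        size_t npages, bool async)
    {
        struct vm_page **pgs;
        const size_t pgs_size = npages * sizeof(struct vm_page *);

        if (npages > onstack_npages) {
            /* async callers may not sleep waiting for memory */
            pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
            if (pgs == NULL)
                return NULL;
        } else {
            pgs = pgs_onstack;
            (void)memset(pgs, 0, pgs_size);
        }
        return pgs;
    }
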
genfs_node.h
    53   #define GOP_WRITE(vp, pgs, npages, flags) \
    54   (*VTOG(vp)->g_op->gop_write)((vp), (pgs), (npages), (flags))

/src/sys/external/bsd/drm2/include/drm/

bus_dma_hacks.h
    149  struct page **pgs, bus_size_t size, int flags)
    184  page = &pgs[seg]->p_vmp;
    212  int nsegs, struct page **pgs, unsigned npgs)
    226  pgs[pg++] = container_of(PHYS_TO_VM_PAGE(paddr),
    241  int nsegs, int *rsegs, struct page *const *pgs, unsigned npgs)
    248  paddr_t paddr = VM_PAGE_TO_PHYS(&pgs[i]->p_vmp);

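These drm2 helpers convert between bus_dma(9) segments and Linux-style struct page arrays; line 226 shows the key trick, recovering the struct page for a physical address via PHYS_TO_VM_PAGE() and container_of() over the embedded p_vmp member. A condensed single-segment sketch:

    #include <sys/bus.h>
    #include <sys/container_of.h>
    #include <uvm/uvm.h>
    #include <linux/mm_types.h> /* drm2's struct page wraps vm_page as p_vmp */

    static unsigned
    seg_to_pages(const bus_dma_segment_t *seg, struct page **pgs,
        unsigned npgs)
    {
        bus_addr_t paddr;
        unsigned pg = 0;

        for (paddr = seg->ds_addr;
             paddr < seg->ds_addr + seg->ds_len && pg < npgs;
             paddr += PAGE_SIZE)
            pgs[pg++] = container_of(PHYS_TO_VM_PAGE(paddr),
                struct page, p_vmp);
        return pg;
    }
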
/src/tests/sys/uvm/

t_uvm_physseg.c
    479  struct vm_page *pgs, *slab = malloc(sizeof(struct vm_page) * (npages1  local
    518  pgs = uvm_physseg_get_pg(upm3, 0);
    519  ATF_REQUIRE(pgs > slab && pgs < (slab + npages1 + npages2 + npages3));
    524  pgs = uvm_physseg_get_pg(upm4, 0);
    525  ATF_REQUIRE(pgs < slab || pgs >= (slab + npages1
    780  struct vm_page *slab, *pgs;  local
    793  ATF_REQUIRE_EQ(0, extent_alloc(seg->ext, sizeof(*slab), 1, 0, EX_BOUNDZERO, (void *)&pgs));
    794  err = extent_free(seg->ext, (u_long) pgs, sizeof(*slab), EX_BOUNDZERO)
    835  struct vm_page *slab, *pgs;  local
    873  struct vm_page *slab, *pgs;  local
    895  uvm_physseg_init_seg(PHYSSEG_NODE_TO_HANDLE(seg), pgs);  local
    1500 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);  local
    1649 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);  local
    1706 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);  local
    1750 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);  local
    1930 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);  local
    2035 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);  local
    2209 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages);  local
    [all...]

/src/sys/rump/librump/rumpkern/

vm.c
    563  uvm_pagermapin(struct vm_page **pgs, int npages, int flags)
    587  memcpy((void*)curkva, pgs[i]->uanon, PAGE_SIZE);
    588  pgri->pgr_pgs[i] = pgs[i];
    688  uvm_page_unbusy(struct vm_page **pgs, int npgs)
    697  pg = pgs[i];
    1350 struct vm_page **pgs;  local
    1357 pgs = kmem_alloc(maxpages * sizeof(pgs), KM_SLEEP);
    1361 memset(pgs, 0, npages * sizeof(struct vm_page *));
    1363 pgs, &npages, 0, VM_PROT_READ | VM_PROT_WRITE
    1403 struct vm_page **pgs;  local
    [all...]

/src/sys/nfs/

nfs_bio.c
    946  struct vm_page **pgs, *spgs[UBC_MAX_PAGES];  local
    958  pgs = spgs;
    960  if ((pgs = kmem_alloc(sizeof(*pgs) * npages, KM_NOSLEEP)) ==
    977  pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
    978  if (pgs[i]->uobject == uobj &&
    979  pgs[i]->offset == uiop->uio_offset + (i << PAGE_SHIFT)) {
    980  KASSERT(pgs[i]->flags & PG_BUSY);
    993  if (pgs[i]->flags & (PG_RELEASED|PG_PAGEOUT))
    999  if ((pgs[i]->flags & PG_NEEDCOMMIT) == 0
    1250 struct vm_page *pg, **pgs, **opgs, *spgs[UBC_MAX_PAGES];  local
    [all...]

nfs_node.c
    275  nfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
    281  pmap_page_protect(pgs[i], VM_PROT_READ);
    285  return genfs_gop_write(vp, pgs, npages, flags);

/src/sys/external/bsd/drm2/linux/

linux_sgt.c
    57   __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pgs,
    72   sgt->sgl->sg_pgs[i] = pgs[i];
    78   sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pgs,
    82   return __sg_alloc_table_from_pages(sgt, pgs, npgs, offset, size,

/src/sys/kern/

subr_physmap.c
    190  physmap_create_pagelist(struct vm_page **pgs, size_t npgs)
    199  paddr_t lastaddr = VM_PAGE_TO_PHYS(pgs[0]);
    203  for (pgs++; npgs-- > 1; pgs++) {
    208  paddr_t curaddr = VM_PAGE_TO_PHYS(*pgs);

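physmap_create_pagelist() makes a single pass over the pgs array, comparing each page's physical address against the previous one so physically contiguous pages coalesce into one run. A sketch that keeps the same pointer walk but only counts the runs instead of recording them:

    #include <sys/param.h>
    #include <uvm/uvm.h>

    static size_t
    count_runs(struct vm_page **pgs, size_t npgs)
    {
        paddr_t lastaddr = VM_PAGE_TO_PHYS(pgs[0]);
        size_t runs = 1;

        for (pgs++; npgs-- > 1; pgs++) {
            paddr_t curaddr = VM_PAGE_TO_PHYS(*pgs);
            if (curaddr != lastaddr + PAGE_SIZE)
                runs++;     /* a discontiguity starts a new run */
            lastaddr = curaddr;
        }
        return runs;
    }
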
/src/sys/dev/isa/

if_ntwoc_isa.c
    392  int i, pgs, rv;  local
    532  sca->scu_npages = pgs = i;  /* final count of 16K pages */
    536  for (i = 0; i <= pgs; addr += sca->scu_pagesize, i++) {
    546  pgs * (sca->scu_pagesize / 1024), sca->sc_numports,
    550  device_xname(sc->sc_dev), (u_long)pgs * (sca->scu_pagesize / 1024),
    571  if (pgs < 2 * sca->sc_numports) {
    574  pgs, 2 * sca->sc_numports);

/src/sys/fs/tmpfs/

tmpfs_vnops.c
    1204 struct vm_page **pgs = ap->a_m;  local
    1269 error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, centeridx,
    1272 if (!error && pgs) {
    1273 KASSERT(pgs[centeridx] != NULL);

/src/sys/fs/puffs/

puffs_vnops.c
    2993 struct vm_page **pgs;  local
    3003 pgs = ap->a_m;
    3077 if (pgs[i] == NULL || pgs[i] == PGO_DONTCARE) {
    3081 = trunc_page(pgs[i]->offset) + PAGE_MASK;
    3088 pcrun[si].pcache_runstart = pgs[i]->offset;
    3092 pgs[i]->flags |= PG_RDONLY;
    3097 = trunc_page(pgs[i-1]->offset) + PAGE_MASK;

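The puffs getpages code turns its pgs array into cache-notification runs: consecutive present pages extend the current run, a NULL or PGO_DONTCARE slot ends it, and every cached page is made PG_RDONLY so a later write faults again. A sketch with the puffs-private pcache_run replaced by hypothetical start/end arrays:

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <uvm/uvm.h>

    static int
    find_runs(struct vm_page **pgs, int npages, voff_t *starts, voff_t *ends,
        int maxruns)
    {
        int i, si = -1, nruns = 0;

        for (i = 0; i < npages; i++) {
            if (pgs[i] == NULL || pgs[i] == PGO_DONTCARE) {
                si = -1;    /* a hole ends the current run */
                continue;
            }
            if (si == -1) {
                si = nruns++;
                KASSERT(si < maxruns);
                starts[si] = pgs[i]->offset;
            }
            ends[si] = trunc_page(pgs[i]->offset) + PAGE_MASK;
            pgs[i]->flags |= PG_RDONLY; /* force a refault on write */
        }
        return nruns;
    }
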
/src/sys/arch/x86/x86/

bus_dma.c
    536  const struct vm_page * const *pgs;  local
    569  pgs = (const struct vm_page * const *)
    581  pg = *pgs++;

/src/sys/arch/arm/arm32/

bus_dma.c
    635  const struct vm_page * const *pgs;  local
    668  pgs = (const struct vm_page * const *)
    679  pg = *pgs++;