Home | Sort by: relevance | last modified time | path
    Searched defs:pgs (Results 1 - 18 of 18) sorted by relevance

  /src/sys/rump/librump/rumpvfs/
vm_vfs.c 40 uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
42 struct uvm_object *uobj = pgs[0]->uobject;
48 pg = pgs[i];
62 uvm_page_unbusy(pgs, npages);
74 struct vm_page **pgs; local in function:uvm_aio_aiodone
83 pgs = kmem_alloc(npages * sizeof(*pgs), KM_SLEEP);
86 pgs[i] = uvm_pageratop(va);
89 uobj = pgs[i]->uobject;
92 KASSERT(uobj == pgs[i]->uobject)
    [all...]
  /src/sys/ufs/lfs/
ulfs_inode.c 174 off_t pagestart; /* starting offset of range covered by pgs */
181 struct vm_page **pgs; local in function:ulfs_balloc_range
203 pgs = kmem_zalloc(pgssize, KM_SLEEP);
215 error = VOP_GETPAGES(vp, pagestart, pgs, &npages, 0,
243 KASSERT((pgs[i]->flags & PG_RELEASED) == 0);
247 pgs[i]->flags &= ~PG_RDONLY;
249 uvm_pagemarkdirty(pgs[i], UVM_PAGE_STATUS_DIRTY);
251 uvm_pagelock(pgs[i]);
252 uvm_pageactivate(pgs[i]);
253 uvm_pageunlock(pgs[i])
    [all...]
lfs_pages.c 245 struct vm_page *pgs[MAXBSIZE / MIN_PAGE_SIZE], *pg; local in function:check_dirty
273 pgs[i] = pg = uvm_pagelookup(&vp->v_uobj, off);
289 uvm_page_unbusy(pgs, i);
301 uvm_page_unbusy(pgs, i);
333 pg = pgs[i];
  /src/sys/ufs/ufs/
ufs_inode.c 205 off_t pagestart; /* starting offset of range covered by pgs */
212 struct vm_page **pgs; local in function:ufs_balloc_range
234 pgs = kmem_zalloc(pgssize, KM_SLEEP);
246 error = VOP_GETPAGES(vp, pagestart, pgs, &npages, 0,
274 KASSERT((pgs[i]->flags & PG_RELEASED) == 0);
278 pgs[i]->flags &= ~PG_RDONLY;
280 uvm_pagemarkdirty(pgs[i], UVM_PAGE_STATUS_DIRTY);
282 uvm_pagelock(pgs[i]);
283 uvm_pageactivate(pgs[i]);
284 uvm_pageunlock(pgs[i])
    [all...]
  /src/sys/uvm/
uvm_object.c 135 struct vm_page *pgs[FETCH_PAGECOUNT], *pg = NULL; local in function:uvm_obj_wirepages
146 memset(pgs, 0, sizeof(pgs));
147 error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
157 KASSERT(pgs[i] != NULL);
158 KASSERT(!(pgs[i]->flags & PG_RELEASED));
163 if (pgs[i]->loan_count) {
164 while (pgs[i]->loan_count) {
165 pg = uvm_loanbreak(pgs[i]);
173 pgs[i] = pg
    [all...]
uvm_pager.c 327 uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
342 pg = pgs[0];
370 pg = pgs[i];
478 uvm_page_unbusy(pgs, npages);
507 struct vm_page *pgs[howmany(MAXPHYS, MIN_PAGE_SIZE)]; local in function:uvm_aio_aiodone
514 KASSERT(npages <= __arraycount(pgs));
520 pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
521 UVMHIST_LOG(ubchist, "pgs[%jd] = %#jx", i,
522 (uintptr_t)pgs[i], 0, 0);
526 uvm_aio_aiodone_pages(pgs, npages, write, error)
    [all...]
uvm_bio.c 314 struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)]; local in function:ubc_fault
378 memset(pgs, 0, sizeof (pgs));
386 error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
428 UVMHIST_LOG(ubchist, "pgs[%jd] = %#jx", i, (uintptr_t)pgs[i],
430 pg = pgs[i];
483 int flags, struct vm_page **pgs, int *npagesp)
590 memset(pgs, 0, *npagesp * sizeof(pgs[0]))
744 struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)]; local in function:ubc_uiomove
809 struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)]; local in function:ubc_zerorange
986 struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)]; local in function:ubc_uiomove_direct
1053 struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)]; local in function:ubc_zerorange_direct
    [all...]
uvm_physseg.c 96 struct vm_page *pgs; /* vm_page structures (from start) */ member in struct:uvm_physseg
101 struct extent *ext; /* extent(9) structure to manage pgs[] */
118 /* returns a pgs array */
238 struct vm_page *slab = NULL, *pgs = NULL;
287 if (current_ps->pgs != NULL) {
290 pgs = uvm_physseg_seg_alloc_from_slab(current_ps, pages);
292 if (pgs != NULL) {
304 if (pgs == NULL) { /* Brand new */
307 slab = kmem_zalloc(sizeof *pgs * (long unsigned int)slabpages, KM_NOSLEEP);
319 pgs = uvm_physseg_seg_alloc_from_slab(ps, pages)
748 struct vm_page *pgs; local in function:uvm_physseg_plug
1194 struct vm_page *pgs = NULL; local in function:uvm_physseg_seg_alloc_from_slab
    [all...]
  /src/sys/dev/isa/
if_ntwoc_isa.c 392 int i, pgs, rv; local in function:ntwoc_isa_attach
532 sca->scu_npages = pgs = i; /* final count of 16K pages */
536 for (i = 0; i <= pgs; addr += sca->scu_pagesize, i++) {
546 pgs * (sca->scu_pagesize / 1024), sca->sc_numports,
550 device_xname(sc->sc_dev), (u_long)pgs * (sca->scu_pagesize / 1024),
571 if (pgs < 2 * sca->sc_numports) {
574 pgs, 2 * sca->sc_numports);
  /src/sys/fs/tmpfs/
tmpfs_vnops.c 1204 struct vm_page **pgs = ap->a_m; local in function:tmpfs_getpages
1269 error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, centeridx,
1272 if (!error && pgs) {
1273 KASSERT(pgs[centeridx] != NULL);
  /src/sys/nfs/
nfs_bio.c 946 struct vm_page **pgs, *spgs[UBC_MAX_PAGES]; local in function:nfs_doio_write
958 pgs = spgs;
960 if ((pgs = kmem_alloc(sizeof(*pgs) * npages, KM_NOSLEEP)) ==
977 pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
978 if (pgs[i]->uobject == uobj &&
979 pgs[i]->offset == uiop->uio_offset + (i << PAGE_SHIFT)) {
980 KASSERT(pgs[i]->flags & PG_BUSY);
993 if (pgs[i]->flags & (PG_RELEASED|PG_PAGEOUT))
999 if ((pgs[i]->flags & PG_NEEDCOMMIT) == 0
1250 struct vm_page *pg, **pgs, **opgs, *spgs[UBC_MAX_PAGES]; local in function:nfs_getpages
    [all...]
  /src/tests/sys/uvm/
t_uvm_physseg.c 479 struct vm_page *pgs, *slab = malloc(sizeof(struct vm_page) * (npages1 local in function:ATF_TC_BODY
518 pgs = uvm_physseg_get_pg(upm3, 0);
519 ATF_REQUIRE(pgs > slab && pgs < (slab + npages1 + npages2 + npages3));
524 pgs = uvm_physseg_get_pg(upm4, 0);
525 ATF_REQUIRE(pgs < slab || pgs >= (slab + npages1
780 struct vm_page *slab, *pgs; local in function:ATF_TC_BODY
793 ATF_REQUIRE_EQ(0, extent_alloc(seg->ext, sizeof(*slab), 1, 0, EX_BOUNDZERO, (void *)&pgs));
794 err = extent_free(seg->ext, (u_long) pgs, sizeof(*slab), EX_BOUNDZERO)
835 struct vm_page *slab, *pgs; local in function:ATF_TC_BODY
873 struct vm_page *slab, *pgs; local in function:ATF_TC_BODY
1500 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages); local in function:ATF_TC_BODY
1649 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages); local in function:ATF_TC_BODY
1706 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages); local in function:ATF_TC_BODY
1750 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages); local in function:ATF_TC_BODY
1930 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages); local in function:ATF_TC_BODY
2035 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages); local in function:ATF_TC_BODY
2209 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages); local in function:ATF_TC_BODY
    [all...]
  /src/sys/arch/x86/x86/
bus_dma.c 536 const struct vm_page * const *pgs; local in function:_bus_dmamap_load_mbuf
569 pgs = (const struct vm_page * const *)
581 pg = *pgs++;
  /src/sys/rump/librump/rumpkern/
vm.c 563 uvm_pagermapin(struct vm_page **pgs, int npages, int flags)
587 memcpy((void*)curkva, pgs[i]->uanon, PAGE_SIZE);
588 pgri->pgr_pgs[i] = pgs[i];
688 uvm_page_unbusy(struct vm_page **pgs, int npgs)
697 pg = pgs[i];
1350 struct vm_page **pgs; local in function:ubc_zerorange
1357 pgs = kmem_alloc(maxpages * sizeof(pgs), KM_SLEEP);
1361 memset(pgs, 0, npages * sizeof(struct vm_page *));
1363 pgs, &npages, 0, VM_PROT_READ | VM_PROT_WRITE
1403 struct vm_page **pgs; local in function:ubc_uiomove
    [all...]
  /src/sys/miscfs/genfs/
genfs_io.c 69 genfs_rel_pages(struct vm_page **pgs, unsigned int npages)
74 struct vm_page *pg = pgs[i];
83 uvm_page_unbusy(pgs, npages);
310 struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES]; local in function:genfs_getpages
313 pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
314 if (pgs == NULL) {
315 pgs = pgs_onstack;
320 pgs = pgs_onstack;
321 (void)memset(pgs, 0, pgs_size);
369 if (pgs != pgs_onstack
875 struct vm_page *pgs[MAXPHYS \/ MIN_PAGE_SIZE]; local in function:genfs_do_putpages
1612 struct vm_page *pg, **pgs; local in function:genfs_compat_getpages
    [all...]
  /src/sys/arch/riscv/riscv/
bus_dma.c 629 const struct vm_page * const *pgs; local in function:_bus_dmamap_load_mbuf
662 pgs = (const struct vm_page * const *)
673 pg = *pgs++;
  /src/sys/arch/arm/arm32/
bus_dma.c 635 const struct vm_page * const *pgs; local in function:_bus_dmamap_load_mbuf
668 pgs = (const struct vm_page * const *)
679 pg = *pgs++;
  /src/sys/fs/puffs/
puffs_vnops.c 2993 struct vm_page **pgs; local in function:puffs_vnop_getpages
3003 pgs = ap->a_m;
3077 if (pgs[i] == NULL || pgs[i] == PGO_DONTCARE) {
3081 = trunc_page(pgs[i]->offset) + PAGE_MASK;
3088 pcrun[si].pcache_runstart = pgs[i]->offset;
3092 pgs[i]->flags |= PG_RDONLY;
3097 = trunc_page(pgs[i-1]->offset) + PAGE_MASK;

Completed in 33 milliseconds