Home | Sort by: relevance | last modified time | path
    Searched refs:pgs (Results 1 - 25 of 48) sorted by relevancy

1 2

  /src/sys/rump/librump/rumpvfs/
vm_vfs.c 40 uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
42 struct uvm_object *uobj = pgs[0]->uobject;
48 pg = pgs[i];
62 uvm_page_unbusy(pgs, npages);
74 struct vm_page **pgs; local
83 pgs = kmem_alloc(npages * sizeof(*pgs), KM_SLEEP);
86 pgs[i] = uvm_pageratop(va);
89 uobj = pgs[i]->uobject;
92 KASSERT(uobj == pgs[i]->uobject)
    [all...]
  /src/external/bsd/jemalloc/dist/test/unit/
sc.c 12 expect_zu_lt((size_t)tiny->pgs, pgs_too_big, "Allowed excessive pages");
20 expect_d_eq(sc->pgs, 1, "Ignored valid page size hint");
23 sc->pgs, 1, "Allowed invalid page size hint");
  /src/external/bsd/jemalloc.old/dist/test/unit/
sc.c 12 expect_zu_lt((size_t)tiny->pgs, pgs_too_big, "Allowed excessive pages");
20 expect_d_eq(sc->pgs, 1, "Ignored valid page size hint");
22 expect_d_gt(sc->pgs, 1,
  /src/sys/uvm/
uvm_object.c 135 struct vm_page *pgs[FETCH_PAGECOUNT], *pg = NULL; local
146 memset(pgs, 0, sizeof(pgs));
147 error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
157 KASSERT(pgs[i] != NULL);
158 KASSERT(!(pgs[i]->flags & PG_RELEASED));
163 if (pgs[i]->loan_count) {
164 while (pgs[i]->loan_count) {
165 pg = uvm_loanbreak(pgs[i]);
173 pgs[i] = pg
    [all...]
uvm_bio.c 314 struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)]; local
378 memset(pgs, 0, sizeof (pgs));
386 error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
428 UVMHIST_LOG(ubchist, "pgs[%jd] = %#jx", i, (uintptr_t)pgs[i],
430 pg = pgs[i];
483 int flags, struct vm_page **pgs, int *npagesp)
590 memset(pgs, 0, *npagesp * sizeof(pgs[0]))
744 struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)]; local
809 struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)]; local
986 struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)]; local
1053 struct vm_page *pgs[howmany(ubc_winsize, MIN_PAGE_SIZE)]; local
    [all...]
uvm_physseg.c 96 struct vm_page *pgs; /* vm_page structures (from start) */ member in struct:uvm_physseg
101 struct extent *ext; /* extent(9) structure to manage pgs[] */
118 /* returns a pgs array */
238 struct vm_page *slab = NULL, *pgs = NULL; local
287 if (current_ps->pgs != NULL) {
290 pgs = uvm_physseg_seg_alloc_from_slab(current_ps, pages);
292 if (pgs != NULL) {
304 if (pgs == NULL) { /* Brand new */
307 slab = kmem_zalloc(sizeof *pgs * (long unsigned int)slabpages, KM_NOSLEEP);
319 pgs = uvm_physseg_seg_alloc_from_slab(ps, pages)
748 struct vm_page *pgs; local
1194 struct vm_page *pgs = NULL; local
    [all...]
uvm_pager.c 342 uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
357 pg = pgs[0];
379 pg = pgs[i];
494 uvm_page_unbusy(pgs, npages);
532 struct vm_page *pgs[howmany(MAXPHYS, MIN_PAGE_SIZE)]; local
539 KASSERT(npages <= __arraycount(pgs));
545 pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
546 UVMHIST_LOG(ubchist, "pgs[%jd] = %#jx", i,
547 (uintptr_t)pgs[i], 0, 0);
552 ((pgs[0]->flags & PG_SWAPBACKED) != 0))
    [all...]
  /src/sys/ufs/lfs/
ulfs_inode.c 174 off_t pagestart; /* starting offset of range covered by pgs */
181 struct vm_page **pgs; local
203 pgs = kmem_zalloc(pgssize, KM_SLEEP);
215 error = VOP_GETPAGES(vp, pagestart, pgs, &npages, 0,
243 KASSERT((pgs[i]->flags & PG_RELEASED) == 0);
247 pgs[i]->flags &= ~PG_RDONLY;
249 uvm_pagemarkdirty(pgs[i], UVM_PAGE_STATUS_DIRTY);
251 uvm_pagelock(pgs[i]);
252 uvm_pageactivate(pgs[i]);
253 uvm_pageunlock(pgs[i])
    [all...]
  /src/sys/ufs/ufs/
ufs_inode.c 207 off_t pagestart; /* starting offset of range covered by pgs */
214 struct vm_page **pgs; local
236 pgs = kmem_zalloc(pgssize, KM_SLEEP);
248 error = VOP_GETPAGES(vp, pagestart, pgs, &npages, 0,
276 KASSERT((pgs[i]->flags & PG_RELEASED) == 0);
280 pgs[i]->flags &= ~PG_RDONLY;
282 uvm_pagemarkdirty(pgs[i], UVM_PAGE_STATUS_DIRTY);
284 uvm_pagelock(pgs[i]);
285 uvm_pageactivate(pgs[i]);
286 uvm_pageunlock(pgs[i])
    [all...]
  /src/sys/miscfs/genfs/
genfs_io.c 69 genfs_rel_pages(struct vm_page **pgs, unsigned int npages)
74 struct vm_page *pg = pgs[i];
83 uvm_page_unbusy(pgs, npages);
310 struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES]; local
313 pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
314 if (pgs == NULL) {
315 pgs = pgs_onstack;
320 pgs = pgs_onstack;
321 (void)memset(pgs, 0, pgs_size);
369 if (pgs != pgs_onstack
875 struct vm_page *pgs[MAXPHYS \/ MIN_PAGE_SIZE]; local
1612 struct vm_page *pg, **pgs; local
    [all...]
genfs_node.h 53 #define GOP_WRITE(vp, pgs, npages, flags) \
54 (*VTOG(vp)->g_op->gop_write)((vp), (pgs), (npages), (flags))
  /src/external/bsd/jemalloc/dist/src/
sc.c 72 sc->pgs = slab_size(lg_page, lg_base, lg_delta, ndelta);
75 sc->pgs = 0;
279 sc->pgs = (int)min_pgs;
281 sc->pgs = (int)max_pgs;
283 sc->pgs = (int)pgs_guess;
288 sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end, int pgs) {
298 sc_data_update_sc_slab_size(sc, reg_size, pgs);
bin_info.c 16 bin_info->slab_size = (sc->pgs << LG_PAGE);
  /src/external/bsd/jemalloc.old/dist/src/
sc.c 72 sc->pgs = slab_size(lg_page, lg_base, lg_delta, ndelta);
75 sc->pgs = 0;
279 sc->pgs = (int)min_pgs;
281 sc->pgs = (int)max_pgs;
283 sc->pgs = (int)pgs_guess;
288 sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end, int pgs) {
298 sc_data_update_sc_slab_size(sc, reg_size, pgs);
bin_info.c 16 bin_info->slab_size = (sc->pgs << LG_PAGE);
  /src/external/bsd/jemalloc/dist/include/jemalloc/internal/
sc.h 329 int pgs;
369 * Updates slab sizes in [begin, end] to be pgs pages in length, if possible.
373 sc_data_t *data, size_t begin, size_t end, int pgs);
310 int pgs; member in struct:sc_s
  /src/external/bsd/jemalloc/include/jemalloc/internal/
sc.h 329 int pgs;
369 * Updates slab sizes in [begin, end] to be pgs pages in length, if possible.
373 sc_data_t *data, size_t begin, size_t end, int pgs);
310 int pgs; member in struct:sc_s
  /src/external/bsd/jemalloc.old/dist/include/jemalloc/internal/
sc.h 310 int pgs; member in struct:sc_s
350 * Updates slab sizes in [begin, end] to be pgs pages in length, if possible.
354 int pgs);
  /src/external/bsd/jemalloc.old/include/jemalloc/internal/
sc.h 310 int pgs; member in struct:sc_s
350 * Updates slab sizes in [begin, end] to be pgs pages in length, if possible.
354 int pgs);
  /src/sys/external/bsd/drm2/include/drm/
bus_dma_hacks.h 149 struct page **pgs, bus_size_t size, int flags)
184 page = &pgs[seg]->p_vmp;
212 int nsegs, struct page **pgs, unsigned npgs)
226 pgs[pg++] = container_of(PHYS_TO_VM_PAGE(paddr),
241 int nsegs, int *rsegs, struct page *const *pgs, unsigned npgs)
248 paddr_t paddr = VM_PAGE_TO_PHYS(&pgs[i]->p_vmp);
  /src/tests/sys/uvm/
t_uvm_physseg.c 479 struct vm_page *pgs, *slab = malloc(sizeof(struct vm_page) * (npages1 local
518 pgs = uvm_physseg_get_pg(upm3, 0);
519 ATF_REQUIRE(pgs > slab && pgs < (slab + npages1 + npages2 + npages3));
524 pgs = uvm_physseg_get_pg(upm4, 0);
525 ATF_REQUIRE(pgs < slab || pgs >= (slab + npages1
780 struct vm_page *slab, *pgs; local
793 ATF_REQUIRE_EQ(0, extent_alloc(seg->ext, sizeof(*slab), 1, 0, EX_BOUNDZERO, (void *)&pgs));
794 err = extent_free(seg->ext, (u_long) pgs, sizeof(*slab), EX_BOUNDZERO)
835 struct vm_page *slab, *pgs; local
873 struct vm_page *slab, *pgs; local
895 uvm_physseg_init_seg(PHYSSEG_NODE_TO_HANDLE(seg), pgs); local
1500 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages); local
1649 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages); local
1706 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages); local
1750 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages); local
1930 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages); local
2035 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages); local
2209 struct vm_page *pgs = malloc(sizeof(struct vm_page) * npages); local
    [all...]
  /src/sys/rump/librump/rumpkern/
vm.c 563 uvm_pagermapin(struct vm_page **pgs, int npages, int flags)
587 memcpy((void*)curkva, pgs[i]->uanon, PAGE_SIZE);
588 pgri->pgr_pgs[i] = pgs[i];
688 uvm_page_unbusy(struct vm_page **pgs, int npgs)
697 pg = pgs[i];
1350 struct vm_page **pgs; local
1357 pgs = kmem_alloc(maxpages * sizeof(pgs), KM_SLEEP);
1361 memset(pgs, 0, npages * sizeof(struct vm_page *));
1363 pgs, &npages, 0, VM_PROT_READ | VM_PROT_WRITE
1403 struct vm_page **pgs; local
    [all...]
  /src/sys/nfs/
nfs_bio.c 946 struct vm_page **pgs, *spgs[UBC_MAX_PAGES]; local
958 pgs = spgs;
960 if ((pgs = kmem_alloc(sizeof(*pgs) * npages, KM_NOSLEEP)) ==
977 pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
978 if (pgs[i]->uobject == uobj &&
979 pgs[i]->offset == uiop->uio_offset + (i << PAGE_SHIFT)) {
980 KASSERT(pgs[i]->flags & PG_BUSY);
993 if (pgs[i]->flags & (PG_RELEASED|PG_PAGEOUT))
999 if ((pgs[i]->flags & PG_NEEDCOMMIT) == 0
1250 struct vm_page *pg, **pgs, **opgs, *spgs[UBC_MAX_PAGES]; local
    [all...]
  /src/sys/external/bsd/drm2/linux/
linux_sgt.c 57 __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pgs,
72 sgt->sgl->sg_pgs[i] = pgs[i];
78 sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pgs,
82 return __sg_alloc_table_from_pages(sgt, pgs, npgs, offset, size,
  /src/sys/kern/
subr_physmap.c 190 physmap_create_pagelist(struct vm_page **pgs, size_t npgs)
199 paddr_t lastaddr = VM_PAGE_TO_PHYS(pgs[0]);
203 for (pgs++; npgs-- > 1; pgs++) {
208 paddr_t curaddr = VM_PAGE_TO_PHYS(*pgs);

Completed in 76 milliseconds

1 2