Lines Matching defs:pgs (numbered source lines that reference the pgs page-pointer array)
69 genfs_rel_pages(struct vm_page **pgs, unsigned int npages)
74 struct vm_page *pg = pgs[i];
83 uvm_page_unbusy(pgs, npages);
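
The matches at 69-83 appear to be the page-release helper in NetBSD's genfs I/O code: walk the array, mark not-yet-filled (PG_FAKE) pages as released, then unbusy the whole batch in one call. A minimal sketch of that pattern follows, assuming the standard uvm(9) page flags and uvm_page_unbusy(); the function name is a placeholder and this is not the verbatim kernel code.

#include <sys/param.h>
#include <uvm/uvm.h>

/*
 * Illustrative sketch: the caller holds the pages busy.  Pages that were
 * freshly allocated for this request (PG_FAKE) are tagged PG_RELEASED so
 * the final uvm_page_unbusy() frees them instead of handing back
 * half-filled pages; everything else is simply unbusied.
 */
static void
rel_pages_sketch(struct vm_page **pgs, unsigned int npages)
{
        unsigned int i;

        for (i = 0; i < npages; i++) {
                struct vm_page *pg = pgs[i];

                if (pg == NULL || pg == PGO_DONTCARE)
                        continue;
                if (pg->flags & PG_FAKE)
                        pg->flags |= PG_RELEASED;
        }
        uvm_page_unbusy(pgs, npages);
}
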
310 struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES];
313 pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
314 if (pgs == NULL) {
315 pgs = pgs_onstack;
320 pgs = pgs_onstack;
321 (void)memset(pgs, 0, pgs_size);
369 if (pgs != pgs_onstack)
370 kmem_free(pgs, pgs_size);
374 if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx], NULL,
380 genfs_rel_pages(&pgs[ridx], orignmempages);
397 struct vm_page *pg = pgs[ridx + i];
426 struct vm_page *pg = pgs[ridx + i];
458 genfs_rel_pages(&pgs[ridx], orignmempages);
459 memset(pgs, 0, pgs_size);
464 if (uvn_findpages(uobj, startoffset, &npgs, pgs, NULL,
470 genfs_rel_pages(pgs, npages);
478 error = genfs_getpages_read(vp, pgs, npages, startoffset, diskeof,
495 genfs_rel_pages(pgs, npages);
505 struct vm_page *pg = pgs[i];
552 memcpy(ap->a_m, &pgs[ridx],
557 if (pgs != NULL && pgs != pgs_onstack)
558 kmem_free(pgs, pgs_size);
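
Lines 310-321 together with 369-370 and 557-558 show how the getpages path sizes its page-pointer array: up to UBC_MAX_PAGES pointers live on the stack, larger requests get a kmem_zalloc()'d array (KM_NOSLEEP when the caller is asynchronous and must not block), and only the heap copy is freed on the way out. A hedged sketch of that allocate-or-fall-back pattern, with a placeholder function name and the actual page lookup and I/O elided:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <uvm/uvm.h>

static int
getpages_array_sketch(int npages, bool async)
{
        struct vm_page *pgs_onstack[UBC_MAX_PAGES];
        struct vm_page **pgs;
        const size_t pgs_size = npages * sizeof(struct vm_page *);
        int error = 0;

        if (pgs_size > sizeof(pgs_onstack)) {
                /* Large request: heap array; async callers must not sleep. */
                pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
                if (pgs == NULL) {
                        pgs = pgs_onstack;      /* keep the cleanup path uniform */
                        error = ENOMEM;
                        goto out;
                }
        } else {
                pgs = pgs_onstack;
                memset(pgs, 0, pgs_size);
        }

        /* ... uvn_findpages(), the read itself, copy-out to the caller ... */

out:
        if (pgs != pgs_onstack)
                kmem_free(pgs, pgs_size);
        return error;
}
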
576 genfs_getpages_read(struct vnode *vp, struct vm_page **pgs, int npages,
604 kva = uvm_pagermapin(pgs, npages,
637 if ((pgs[tailstart >> PAGE_SHIFT]->flags & PG_FAKE) != 0) {
665 while ((pgs[pidx]->flags & PG_FAKE) == 0) {
669 if ((pgs[pidx]->flags & PG_RDONLY)) {
713 pgs[pidx + pcount]->flags & PG_FAKE) {
739 pgs[pidx + i]->flags |= PG_RDONLY;
802 struct vm_page *pg = pgs[i];
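
The genfs_getpages_read matches (576-802) show the read path mapping the busy pages with uvm_pagermapin() and then steering the I/O by page flags: pages that are not PG_FAKE already hold valid data and are skipped (665), runs of PG_FAKE pages are read together (713), and PG_RDONLY is set where the backing blocks must not be overwritten in place (669, 739). A simplified sketch of that flag-driven scan, with the actual buffer setup left out and the function name invented here:

#include <sys/param.h>
#include <uvm/uvm.h>

/*
 * Illustrative only: skip pages that already hold valid data and batch
 * each run of missing (PG_FAKE) pages into a single read request.
 */
static void
read_runs_sketch(struct vm_page **pgs, int npages)
{
        int pidx, pcount;

        pidx = 0;
        while (pidx < npages) {
                /* Already-resident pages need no I/O. */
                while (pidx < npages &&
                    (pgs[pidx]->flags & PG_FAKE) == 0)
                        pidx++;

                /* Count the following run of missing pages. */
                pcount = 0;
                while (pidx + pcount < npages &&
                    (pgs[pidx + pcount]->flags & PG_FAKE) != 0)
                        pcount++;

                /* ... issue one read covering pgs[pidx .. pidx + pcount - 1] ... */
                pidx += pcount;
        }
}
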
875 struct vm_page *pgs[MAXPHYS / MIN_PAGE_SIZE];
1146 memset(pgs, 0, sizeof(pgs));
1153 * it fits in the "pgs" pages array.
1176 &pgs[0], NULL,
1179 memmove(&pgs[0], &pgs[npages - nback],
1180 nback * sizeof(pgs[0]));
1182 memset(&pgs[nback], 0,
1183 (npages - nback) * sizeof(pgs[0]));
1185 memset(&pgs[npages - nback], 0,
1186 nback * sizeof(pgs[0]));
1193 pgs[nback] = pg;
1218 &pgs[nback + 1], &a,
1222 pgs[0] = pg;
1232 tpg = pgs[i];
1235 pgs[i-1]->offset + PAGE_SIZE == tpg->offset);
1236 KASSERT(!needs_clean || uvm_pagegetdirty(pgs[i]) !=
1243 uvm_obj_page_set_writeback(pgs[i]);
1285 KASSERT(pgs[nback] == pg);
1286 KASSERT(nextoff == pgs[npages - 1]->offset + PAGE_SIZE);
1292 error = GOP_WRITE(vp, pgs, npages, flags);
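
In the putpages matches (875-1292), a cluster of pages is gathered around the starting page (the memmove/memset at 1179-1186 slides the backward neighbours to the front of the array), contiguity is asserted, each page is tagged as under writeback, and the whole array is handed to GOP_WRITE() in one call (1232-1292). A sketch of that final hand-off, assuming the GOP_WRITE() macro and uvm_obj_page_set_writeback() seen in the listing; the function name is a placeholder:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <uvm/uvm.h>
#include <miscfs/genfs/genfs_node.h>

/*
 * Illustrative hand-off of a built cluster: the pages must be
 * offset-contiguous, each one is tagged as under writeback, and the
 * whole array goes to the filesystem's gop_write in a single call.
 */
static int
write_cluster_sketch(struct vnode *vp, struct vm_page **pgs, int npages,
    int flags)
{
        int i;

        for (i = 0; i < npages; i++) {
                KASSERT(i == 0 ||
                    pgs[i - 1]->offset + PAGE_SIZE == pgs[i]->offset);
                uvm_obj_page_set_writeback(pgs[i]);
        }
        return GOP_WRITE(vp, pgs, npages, flags);
}
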
1374 genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
1382 UVMHIST_LOG(ubchist, "vp %#jx pgs %#jx npages %jd flags 0x%jx",
1383 (uintptr_t)vp, (uintptr_t)pgs, npages, flags);
1385 off = pgs[0]->offset;
1386 kva = uvm_pagermapin(pgs, npages,
1405 genfs_gop_write_rwmap(struct vnode *vp, struct vm_page **pgs, int npages,
1414 UVMHIST_LOG(ubchist, "vp %#jx pgs %#jx npages %jd flags 0x%jx",
1415 (uintptr_t)vp, (uintptr_t)pgs, npages, flags);
1417 off = pgs[0]->offset;
1418 kva = uvm_pagermapin(pgs, npages,
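
genfs_gop_write() and genfs_gop_write_rwmap() (1374-1418) share the same prologue: the byte offset of the write is taken from the first page and the array is mapped into kernel VA with uvm_pagermapin(), the _rwmap variant differing only in requesting a writable mapping. A sketch of that prologue under those assumptions; the real code then wraps the mapping in a struct buf and starts the I/O, unmapping from the completion path, which is elided here:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <uvm/uvm.h>

static int
gop_write_prologue_sketch(struct vnode *vp, struct vm_page **pgs,
    int npages, int flags)
{
        off_t off;
        vaddr_t kva;

        /* The I/O starts at the byte offset of the first page. */
        off = pgs[0]->offset;
        KASSERT((off & PAGE_MASK) == 0);

        /* Map the busy pages into kernel VA for the transfer. */
        kva = uvm_pagermapin(pgs, npages,
            UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);

        /*
         * The real code starts a write of npages << PAGE_SHIFT bytes at
         * "off" over this mapping and tears it down on I/O completion.
         * Here we just unmap again and pretend the write happened.
         */
        uvm_pagermapout(kva, npages);
        return 0;
}
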
1612 struct vm_page *pg, **pgs;
1623 pgs = ap->a_m;
1641 uvn_findpages(uobj, origoffset, &npages, pgs, NULL, UFP_ALL);
1643 kva = uvm_pagermapin(pgs, npages,
1646 pg = pgs[i];
1670 pg = pgs[i];
1681 uvm_page_unbusy(pgs, npages);
1688 genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
1699 offset = pgs[0]->offset;
1700 kva = uvm_pagermapin(pgs, npages,
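
The compat matches (1612-1700) cover the fallback for filesystems without native getpages/putpages: pages are looked up, mapped, and pushed through plain VOP_READ()/VOP_WRITE() over the pager mapping, then unbusied. A rough sketch of the write side under that reading, with locking, partial-write handling and error accounting omitted and the function name invented:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <uvm/uvm.h>

static int
compat_gop_write_sketch(struct vnode *vp, struct vm_page **pgs, int npages,
    int flags)
{
        struct iovec iov;
        struct uio uio;
        off_t offset;
        vaddr_t kva;
        size_t len;
        int error;

        offset = pgs[0]->offset;
        len = (size_t)npages << PAGE_SHIFT;

        /* Map the busy pages and write them out with a plain VOP_WRITE(). */
        kva = uvm_pagermapin(pgs, npages,
            UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);

        iov.iov_base = (void *)kva;
        iov.iov_len = len;
        uio.uio_iov = &iov;
        uio.uio_iovcnt = 1;
        uio.uio_offset = offset;
        uio.uio_resid = len;
        uio.uio_rw = UIO_WRITE;
        UIO_SETUP_SYSSPACE(&uio);

        error = VOP_WRITE(vp, &uio, 0, curlwp->l_cred);

        uvm_pagermapout(kva, npages);
        return error;
}
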