Home | Sort by: relevance | last modified time | path
    Searched refs:pg (Results 1 - 25 of 406) sorted by relevancy

1 2 3 4 5 6 7 8 9 10 11 >>

  /src/sys/uvm/
uvm_page_status.c 60 uvm_pagegetdirty(struct vm_page *pg)
62 struct uvm_object * const uobj __diagused = pg->uobject;
64 KASSERT((~pg->flags & (PG_CLEAN|PG_DIRTY)) != 0);
65 KASSERT(uvm_page_owner_locked_p(pg, false));
66 KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
67 uvm_obj_page_dirty_p(pg));
68 return pg->flags & (PG_CLEAN|PG_DIRTY);
84 uvm_pagemarkdirty(struct vm_page *pg, unsigned int newstatus)
86 struct uvm_object * const uobj = pg->uobject;
87 const unsigned int oldstatus = uvm_pagegetdirty(pg);
    [all...]
uvm_pdpolicy_clockpro.c 147 clockpro_setq(struct vm_page *pg, int qidx)
152 pg->pqflags = (pg->pqflags & ~PQ_QMASK) | (qidx * PQ_QFACTOR);
156 clockpro_getq(struct vm_page *pg)
160 qidx = (pg->pqflags & PQ_QMASK) / PQ_QFACTOR;
236 pageq_insert_tail(pageq_t *q, struct vm_page *pg)
239 TAILQ_INSERT_TAIL(&q->q_q, pg, pdqueue);
245 pageq_insert_head(pageq_t *q, struct vm_page *pg)
248 TAILQ_INSERT_HEAD(&q->q_q, pg, pdqueue);
254 pageq_remove(pageq_t *q, struct vm_page *pg)
268 struct vm_page *pg; local
700 struct vm_page *pg; local
882 pageq_remove(clockpro_queue(s, qidx), pg); local
909 struct vm_page *pg; local
1018 struct vm_page *pg; local
1340 struct vm_page *pg; local
1381 struct vm_page *pg; local
1507 struct vm_page *pg; local
1606 struct vm_page *pg; local
    [all...]
uvm_page.c 202 * => call should have already set pg's object and offset pointers
207 uvm_pageinsert_object(struct uvm_object *uobj, struct vm_page *pg)
210 KASSERT(uobj == pg->uobject);
212 KASSERT((pg->flags & PG_TABLED) == 0);
214 if ((pg->flags & PG_STAT) != 0) {
216 const unsigned int status = pg->flags & (PG_CLEAN | PG_DIRTY);
218 if ((pg->flags & PG_FILE) != 0) {
235 pg->flags |= PG_TABLED;
240 uvm_pageinsert_tree(struct uvm_object *uobj, struct vm_page *pg)
242 const uint64_t idx = pg->offset >> PAGE_SHIFT
737 struct vm_page *pg; local
1008 struct vm_page *pg; local
1089 struct vm_page *pg; local
1137 struct vm_page *pg; local
1583 struct vm_page *pg; local
1737 struct vm_page *pg; local
2078 struct vm_page *pg; local
2203 struct vm_page *pg; local
    [all...]
uvm_anon.c 107 struct vm_page *pg = anon->an_page, *pg2 __diagused; local
119 if (__predict_true(pg != NULL)) {
128 if (__predict_false(pg->loan_count != 0)) {
130 KASSERT(pg2 == pg);
139 if (__predict_false(pg->uobject != NULL)) {
140 mutex_enter(&pg->interlock);
141 KASSERT(pg->loan_count > 0);
142 pg->loan_count--;
143 pg->uanon = NULL;
144 mutex_exit(&pg->interlock)
212 struct vm_page *pg; local
277 struct vm_page *pg; local
362 struct vm_page *pg = anon->an_page; local
    [all...]
uvm_object.c 135 struct vm_page *pgs[FETCH_PAGECOUNT], *pg = NULL; local
165 pg = uvm_loanbreak(pgs[i]);
166 if (!pg) {
173 pgs[i] = pg;
219 struct vm_page *pg; local
224 pg = uvm_pagelookup(uobj, offset);
226 KASSERT(pg != NULL);
227 KASSERT(!(pg->flags & PG_RELEASED));
229 uvm_pagelock(pg);
230 uvm_pageunwire(pg);
343 struct vm_page *pg; local
    [all...]
uvm_loan.c 61 * loans are tracked by pg->loan_count. an O->A page will have both
65 * each loan of a page to the kernel bumps the pg->wire_count. the
80 * locking: to read pg->loan_count either the owner or pg->interlock
81 * must be locked. to modify pg->loan_count, both the owner of the page
82 * and pg->interlock must be locked. pg->flags is (as always) locked by
88 * from dying pg->interlock should be locked. this forces us to sometimes
346 struct vm_page *pg; local
359 pg = anon->an_page
457 struct vm_page *pg = pgpp[i]; local
602 struct vm_page *pg; local
824 struct vm_page *pg; local
952 struct vm_page *pg; local
1035 struct vm_page *pg; local
1098 struct vm_page *pg; local
    [all...]
uvm_pdpolicy_clock.c 248 struct vm_page *pg; local
256 pg = TAILQ_NEXT(&ss->ss_marker, pdqueue);
257 if (pg == NULL) {
260 KASSERT((pg->flags & PG_MARKER) == 0);
268 mutex_enter(&pg->interlock);
269 if (uvmpdpol_pagerealize_locked(pg)) {
270 mutex_exit(&pg->interlock);
279 TAILQ_INSERT_AFTER(&pdpol_state.s_inactiveq, pg,
289 anon = pg->uanon;
290 uobj = pg->uobject
729 struct vm_page *pg; local
796 struct vm_page *pg; local
    [all...]
  /src/regress/sys/uvm/pdsim/
pdsim.c 61 struct vm_page *pg; local
63 pg = TAILQ_FIRST(&freeq);
64 if (pg == NULL) {
67 TAILQ_REMOVE(&freeq, pg, pageq);
68 pg->offset = idx << PAGE_SHIFT;
69 pg->uanon = NULL;
70 pg->uobject = obj;
71 pg->pqflags = 0;
72 obj->pages[idx] = pg;
76 return pg;
112 struct vm_page *pg; local
146 struct vm_page *pg; local
165 struct vm_page *pg; local
182 struct vm_page *pg; local
    [all...]
  /src/external/cddl/dtracetoolkit/dist/Bin/
pgpginbyproc.d 10 vminfo:::pgpgin { @pg[execname] = sum(arg0); }
pgpginbypid.d 46 @pg[pid, execname] = sum(arg0);
52 printa("%6d %-16s %@16d\n", @pg);
  /src/external/cddl/dtracetoolkit/dist/Mem/
pgpginbyproc.d 10 vminfo:::pgpgin { @pg[execname] = sum(arg0); }
pgpginbypid.d 46 @pg[pid, execname] = sum(arg0);
52 printa("%6d %-16s %@16d\n", @pg);
  /src/sys/arch/dreamcast/include/
kloader.h 40 #define PG_VADDR(pg) SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg))
  /src/sys/arch/evbsh3/include/
kloader.h 38 #define PG_VADDR(pg) SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg))
  /src/sys/arch/hpcarm/include/
kloader.h 39 #define PG_VADDR(pg) kloader_phystov(VM_PAGE_TO_PHYS(pg))
  /src/sys/arch/hpcmips/include/
kloader.h 39 #define PG_VADDR(pg) MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(pg))
  /src/sys/arch/hpcsh/include/
kloader.h 40 #define PG_VADDR(pg) SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg))
  /src/sys/arch/landisk/include/
kloader.h 38 #define PG_VADDR(pg) SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg))
  /src/sys/arch/playstation2/include/
kloader.h 39 #define PG_VADDR(pg) MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(pg))
  /src/sys/arch/zaurus/include/
kloader.h 38 #define PG_VADDR(pg) kloader_phystov(VM_PAGE_TO_PHYS(pg))
  /src/sys/compat/common/
tty_60.c 55 ptmget_to_ptmget60(struct ptmget *pg, struct compat_60_ptmget *pg60)
58 pg60->cfd = pg->cfd;
59 pg60->sfd = pg->sfd;
60 strlcpy(pg60->cn, pg->cn, sizeof(pg60->cn));
61 strlcpy(pg60->sn, pg->sn, sizeof(pg60->sn));
62 if (strlen(pg->cn) >= sizeof(pg60->cn)
63 || strlen(pg->sn) >= sizeof(pg60->sn))
75 struct ptmget *pg; local
87 pg = kmem_alloc(sizeof(*pg), KM_SLEEP)
    [all...]
  /src/sys/rump/librump/rumpvfs/
vm_vfs.c 43 struct vm_page *pg; local
48 pg = pgs[i];
49 KASSERT((pg->flags & PG_PAGEOUT) == 0 ||
50 (pg->flags & PG_FAKE) == 0);
52 if (pg->flags & PG_FAKE) {
54 pg->flags &= ~PG_FAKE;
55 KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN);
56 uvm_pagelock(pg);
57 uvm_pageenqueue(pg);
58 uvm_pageunlock(pg);
    [all...]
  /src/sys/arch/xen/x86/
xen_bus_dma.c 83 struct vm_page *pg, *pgnext; local
104 for (pg = mlistp->tqh_first; pg != NULL; pg = pg->pageq.queue.tqe_next) {
105 pa = VM_PAGE_TO_PHYS(pg);
140 pg = NULL;
145 for (pg = mlistp->tqh_first, i = 0; pg != NULL; pg = pgnext, i++)
    [all...]
  /src/sys/arch/m68k/include/
pmap_68k.h 350 #define VM_MDPAGE_INIT(pg) \
352 (pg)->mdpage.pvh_listx = 0; \
355 #define VM_MDPAGE_PVS(pg) \
356 ((struct pv_entry *)((pg)->mdpage.pvh_listx & (uintptr_t)PVH_PV_MASK))
358 #define VM_MDPAGE_HEAD_PVP(pg) \
359 ((struct pv_entry **)&(pg)->mdpage.pvh_listx)
375 #define VM_MDPAGE_UM(pg) \
376 (((pg)->mdpage.pvh_listx & PVH_UM_MASK) << PVH_UM_SHIFT)
378 #define VM_MDPAGE_ADD_UM(pg, a) \
380 (pg)->mdpage.pvh_listx |=
    [all...]
  /src/sys/rump/librump/rumpkern/
vm.c 37 * XXX: we abuse pg->uanon for the virtual address of the storage
148 struct vm_page *pg = obj; local
150 memset(pg, 0, sizeof(*pg));
151 pg->uanon = rump_hypermalloc(PAGE_SIZE, PAGE_SIZE,
153 return pg->uanon == NULL;
159 struct vm_page *pg = obj; local
161 rump_hyperfree(pg->uanon, PAGE_SIZE);
177 struct vm_page *pg; local
182 pg = pool_cache_get(&pagecache, PR_NOWAIT)
643 struct vm_page *pg = NULL; local
673 struct vm_page *pg; local
690 struct vm_page *pg; local
1175 struct vm_page *pg; local
1369 struct vm_page *pg; local
1433 struct vm_page *pg; local
    [all...]

Completed in 59 milliseconds

1 2 3 4 5 6 7 8 9 10 11 >>