    Searched defs:pg (Results 1 - 25 of 123) sorted by relevance


  /src/lib/libc/db/recno/
rec_search.c 73 pgno_t pg; local in function:__rec_search
79 for (pg = P_ROOT, total = 0;;) {
80 if ((h = mpool_get(t->bt_mp, pg, 0)) == NULL)
94 BT_PUSH(t, pg, idx - 1);
96 pg = r->pgno;
rec_put.c 199 pgno_t pg; local in function:__rec_iput
211 if (__ovfl_put(t, data, &pg) == RET_ERROR)
215 memcpy(db, &pg, sizeof(pg));
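
The rec_search.c hits above show the usual descent loop: start at P_ROOT, fetch each page with mpool_get(), remember the path taken with BT_PUSH(), and follow the child page number. A minimal standalone sketch of that pattern, with an invented page layout and a plain array standing in for the mpool (none of the names below are the libc ones):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t pgno_t;
    #define P_ROOT   1u
    #define MAXDEPTH 8

    struct page {                       /* stand-in for an internal page */
        int      nkeys;                 /* number of children */
        uint32_t nrecs[4];              /* records reachable under each child */
        pgno_t   child[4];              /* child page numbers, 0 marks a leaf */
    };

    static struct page pool[8];         /* stand-in for the mpool */

    static struct page *page_get(pgno_t pg) { return &pool[pg]; }

    /* Find the leaf holding record "recno", recording the path of parents. */
    static pgno_t
    descend(uint32_t recno, pgno_t path[], int *depth)
    {
        pgno_t pg;

        *depth = 0;
        for (pg = P_ROOT;;) {
            struct page *h = page_get(pg);
            if (h->child[0] == 0)               /* reached a leaf page */
                return pg;
            int idx = 0;
            uint32_t total = 0;
            while (idx < h->nkeys - 1 && recno >= total + h->nrecs[idx])
                total += h->nrecs[idx++];
            if (*depth < MAXDEPTH)
                path[(*depth)++] = pg;          /* BT_PUSH() analogue */
            pg = h->child[idx];
        }
    }

    int
    main(void)
    {
        /* Root page with two leaf children of ten records each. */
        pool[1] = (struct page){ .nkeys = 2, .nrecs = { 10, 10 }, .child = { 2, 3 } };
        pgno_t path[MAXDEPTH];
        int depth;
        pgno_t leaf = descend(14, path, &depth);
        printf("recno 14 lives on page %u (depth %d)\n", leaf, depth);
        return 0;
    }
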
  /src/sys/compat/common/
tty_60.c 55 ptmget_to_ptmget60(struct ptmget *pg, struct compat_60_ptmget *pg60)
58 pg60->cfd = pg->cfd;
59 pg60->sfd = pg->sfd;
60 strlcpy(pg60->cn, pg->cn, sizeof(pg60->cn));
61 strlcpy(pg60->sn, pg->sn, sizeof(pg60->sn));
62 if (strlen(pg->cn) >= sizeof(pg60->cn)
63 || strlen(pg->sn) >= sizeof(pg60->sn))
75 struct ptmget *pg; local in function:compat_60_ptmget_ioctl
87 pg = kmem_alloc(sizeof(*pg), KM_SLEEP)
    [all...]
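
The tty_60.c hit is the compat conversion pattern: copy the plain fields, strlcpy() the names into the smaller 6.0-era buffers, and fail rather than silently truncate. A hedged standalone sketch of the same shape, with invented struct definitions in place of struct ptmget and struct compat_60_ptmget (strlcpy() is assumed available, as it is on NetBSD):

    #include <errno.h>
    #include <string.h>

    struct new_ptm {                    /* stand-in for struct ptmget */
        int  cfd, sfd;
        char cn[1024], sn[1024];
    };

    struct old_ptm {                    /* stand-in for struct compat_60_ptmget */
        int  cfd, sfd;
        char cn[16], sn[16];
    };

    static int
    new_to_old(const struct new_ptm *pg, struct old_ptm *pg60)
    {
        pg60->cfd = pg->cfd;
        pg60->sfd = pg->sfd;
        strlcpy(pg60->cn, pg->cn, sizeof(pg60->cn));
        strlcpy(pg60->sn, pg->sn, sizeof(pg60->sn));

        /* Reject names that did not fit instead of silently truncating. */
        if (strlen(pg->cn) >= sizeof(pg60->cn) ||
            strlen(pg->sn) >= sizeof(pg60->sn))
            return E2BIG;
        return 0;
    }
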
  /src/sys/rump/librump/rumpvfs/
vm_vfs.c 43 struct vm_page *pg; local in function:uvm_aio_aiodone_pages
48 pg = pgs[i];
49 KASSERT((pg->flags & PG_PAGEOUT) == 0 ||
50 (pg->flags & PG_FAKE) == 0);
52 if (pg->flags & PG_FAKE) {
54 pg->flags &= ~PG_FAKE;
55 KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN);
56 uvm_pagelock(pg);
57 uvm_pageenqueue(pg);
58 uvm_pageunlock(pg);
    [all...]
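
The rump vm_vfs.c hit shows what happens to a freshly faulted-in ("fake") page once its read I/O completes: the PG_FAKE flag is cleared and the page goes onto the paging queues under the page lock. A simplified standalone sketch of that per-page step, with a pthread mutex standing in for uvm_pagelock()/uvm_pageunlock() and a plain tail queue standing in for uvm_pageenqueue() (a real implementation would also serialize access to the queue itself):

    #include <pthread.h>
    #include <sys/queue.h>

    #define PG_FAKE_BIT 0x01

    struct page {
        int flags;
        pthread_mutex_t interlock;
        TAILQ_ENTRY(page) pageq;
    };

    static TAILQ_HEAD(, page) activeq = TAILQ_HEAD_INITIALIZER(activeq);

    static void
    aiodone_pages(struct page **pgs, int npages)
    {
        for (int i = 0; i < npages; i++) {
            struct page *pg = pgs[i];
            if (pg->flags & PG_FAKE_BIT) {
                pg->flags &= ~PG_FAKE_BIT;      /* page now holds real data */
                pthread_mutex_lock(&pg->interlock);
                TAILQ_INSERT_TAIL(&activeq, pg, pageq);  /* uvm_pageenqueue() analogue */
                pthread_mutex_unlock(&pg->interlock);
            }
        }
    }
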
  /src/lib/libc/db/btree/
bt_overflow.c 86 pgno_t pg; local in function:__ovfl_get
90 memmove(&pg, p, sizeof(pg));
95 if (pg == P_INVALID || sz == 0)
114 for (p = *buf;; p = (char *)p + nb, pg = h->nextpg) {
115 if ((h = mpool_get(t->bt_mp, pg, 0)) == NULL)
140 __ovfl_put(BTREE *t, const DBT *dbt, pgno_t *pg)
176 *pg = h->pgno;
200 pgno_t pg; local in function:__ovfl_delete
204 (void)memmove(&pg, p, sizeof(pg))
    [all...]
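
The bt_overflow.c hits are the overflow-chain machinery: when a key or datum did not fit on a leaf, only a {page number, size} pair is stored in place, and __ovfl_get() reassembles the bytes by walking the chain of overflow pages. A standalone sketch of that read path with an invented page layout (the pool and constants below are not the libc ones):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    typedef uint32_t pgno_t;
    #define P_INVALID  0u
    #define OVFL_SPACE 4096u            /* usable bytes per overflow page */

    struct ovfl_page {
        pgno_t nextpg;                  /* next page in the chain, or P_INVALID */
        char   data[OVFL_SPACE];
    };

    static struct ovfl_page pool[16];   /* stand-in for the mpool */

    static struct ovfl_page *page_get(pgno_t pg) { return &pool[pg]; }

    /* "p" points at the {pgno, size} pair stored in place of the real bytes. */
    static void *
    ovfl_get(const void *p, size_t *szp)
    {
        pgno_t pg;
        uint32_t sz;

        memmove(&pg, p, sizeof(pg));                    /* may be unaligned */
        memmove(&sz, (const char *)p + sizeof(pg), sizeof(sz));
        if (pg == P_INVALID || sz == 0)
            return NULL;

        char *buf = malloc(sz), *dst = buf;
        if (buf == NULL)
            return NULL;
        for (uint32_t left = sz; left > 0; pg = page_get(pg)->nextpg) {
            uint32_t nb = left < OVFL_SPACE ? left : OVFL_SPACE;
            memcpy(dst, page_get(pg)->data, nb);
            dst += nb;
            left -= nb;
        }
        *szp = sz;
        return buf;
    }
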
bt_search.c 73 pgno_t pg; local in function:__bt_search
77 for (pg = P_ROOT;;) {
78 if ((h = mpool_get(t->bt_mp, pg, 0)) == NULL)
132 pg = GETBINTERNAL(h, idx)->pgno;
bt_seq.c 159 pgno_t pg; local in function:__bt_seqset
183 for (pg = P_ROOT;;) {
184 if ((h = mpool_get(t->bt_mp, pg, 0)) == NULL)
195 pg = GETBINTERNAL(h, 0)->pgno;
207 for (pg = P_ROOT;;) {
208 if ((h = mpool_get(t->bt_mp, pg, 0)) == NULL)
219 pg = GETBINTERNAL(h, NEXTINDEX(h) - 1)->pgno;
251 pgno_t pg; local in function:__bt_seqadv
296 c->pg.pgno = ep->page->pgno;
297 c->pg.index = ep->index
380 pgno_t pg; local in function:__bt_rseq_next
419 pgno_t pg; local in function:__bt_rseq_prev
468 pgno_t pg; local in function:__bt_first
    [all...]
bt_put.c 77 pgno_t pg; local in function:__bt_put
126 storekey: if (__ovfl_put(t, key, &pg) == RET_ERROR)
130 memmove(kb, &pg, sizeof(pg));
137 if (__ovfl_put(t, data, &pg) == RET_ERROR)
141 memmove(db, &pg, sizeof(pg));
155 if ((h = mpool_get(t->bt_mp, t->bt_cursor.pg.pgno, 0)) == NULL)
157 idx = t->bt_cursor.pg.index;
224 t->bt_cursor.pg.pgno == h->pgno && t->bt_cursor.pg.index >= idx
    [all...]
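
The bt_put.c hit is the write side of the same indirection: an oversized key or datum is handed to __ovfl_put(), and the leaf entry keeps just the size, the first overflow page number, and a "big" flag. A simplified sketch of that decision with an invented on-page layout and a dummy overflow writer (the real encoding lives in btree.h):

    #include <stdint.h>
    #include <string.h>

    typedef uint32_t pgno_t;
    #define B_BIGDATA 0x01

    struct leaf_item {                  /* stand-in for the on-page leaf entry */
        uint8_t  flags;
        uint32_t size;
        char     bytes[8];              /* inline data, or the pgno when big */
    };

    /* Pretend overflow writer: returns the page number the data landed on. */
    static int
    ovfl_put(const void *data, uint32_t size, pgno_t *pg)
    {
        (void)data; (void)size;
        *pg = 42;                       /* a real implementation allocates pages */
        return 0;
    }

    static int
    store_data(struct leaf_item *it, const void *data, uint32_t size)
    {
        if (size <= sizeof(it->bytes)) {        /* fits inline */
            it->flags = 0;
            it->size = size;
            memcpy(it->bytes, data, size);
            return 0;
        }
        pgno_t pg;
        if (ovfl_put(data, size, &pg) != 0)
            return -1;
        it->flags = B_BIGDATA;                  /* caller must follow the chain */
        it->size = size;
        memmove(it->bytes, &pg, sizeof(pg));    /* only the pgno is stored inline */
        return 0;
    }
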
bt_delete.c 99 if ((h = mpool_get(t->bt_mp, c->pg.pgno, 0)) == NULL)
110 status = __bt_dleaf(t, NULL, h, (u_int)c->pg.index);
166 if (h->pgno == c->pg.pgno)
175 while (h->pgno != c->pg.pgno) {
215 if (h->pgno == c->pg.pgno)
230 while (h->pgno != c->pg.pgno) {
273 return ((*hp = mpool_get(t->bt_mp, c->pg.pgno, 0)) == NULL);
376 PAGE *pg; local in function:__bt_pdelete
396 if ((pg = mpool_get(t->bt_mp, parent->pgno, 0)) == NULL)
400 bi = GETBINTERNAL(pg, idx)
533 PAGE *pg; local in function:__bt_curdel
626 PAGE *pg; local in function:__bt_relink
    [all...]
  /src/sys/external/bsd/drm2/linux/
linux_io_mapping.c 118 bus_size_t pg, npgs = size >> PAGE_SHIFT; local in function:io_mapping_map_wc
130 for (pg = 0; pg < npgs; pg++) {
132 offset + pg*PAGE_SIZE,
137 pmap_kenter_pa(va + pg*PAGE_SIZE, pmap_phys_address(cookie),
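
The linux_io_mapping.c hit maps a device range one page at a time, advancing both the source offset and the destination virtual address by PAGE_SIZE per iteration. A minimal sketch of that loop, with map_one_page() as a made-up stand-in for the bus_space/pmap_kenter_pa() work in the real io_mapping_map_wc() (size is assumed page-aligned):

    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    typedef uint64_t  bus_addr_t;
    typedef uint64_t  bus_size_t;
    typedef uintptr_t vaddr_t;

    static void map_one_page(vaddr_t va, bus_addr_t addr) { (void)va; (void)addr; }

    static void
    map_range(vaddr_t va, bus_addr_t base, bus_size_t offset, bus_size_t size)
    {
        bus_size_t pg, npgs = size >> PAGE_SHIFT;

        for (pg = 0; pg < npgs; pg++)
            map_one_page(va + pg * PAGE_SIZE, base + offset + pg * PAGE_SIZE);
    }
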
  /src/sys/uvm/
uvm_anon.c 107 struct vm_page *pg = anon->an_page, *pg2 __diagused; local in function:uvm_anfree
119 if (__predict_true(pg != NULL)) {
128 if (__predict_false(pg->loan_count != 0)) {
130 KASSERT(pg2 == pg);
139 if (__predict_false(pg->uobject != NULL)) {
140 mutex_enter(&pg->interlock);
141 KASSERT(pg->loan_count > 0);
142 pg->loan_count--;
143 pg->uanon = NULL;
144 mutex_exit(&pg->interlock)
212 struct vm_page *pg; local in function:uvm_anon_lockloanpg
277 struct vm_page *pg; local in function:uvm_anon_pagein
362 struct vm_page *pg = anon->an_page; local in function:uvm_anon_release
    [all...]
uvm_page_array.c 197 struct vm_page * const pg = ar->ar_pages[i]; local in function:uvm_page_array_fill
199 KASSERT(pg != NULL);
200 KDASSERT(pg->uobject == uobj);
202 KDASSERT(pg->offset <= off);
204 pg->offset < ar->ar_pages[i - 1]->offset);
206 KDASSERT(pg->offset >= off);
208 pg->offset > ar->ar_pages[i - 1]->offset);
  /src/sys/arch/vax/vax/
multicpu.c 84 struct vm_page *pg; local in function:cpu_slavesetup
96 pg = uvm_pagealloc(NULL, 0, NULL, 0);
97 if (pg == NULL)
100 istackbase = VM_PAGE_TO_PHYS(pg) | KERNBASE;
  /src/sys/arch/x86/x86/
vga_post.c 142 struct vm_page *pg; local in function:vga_post_init
183 TAILQ_FOREACH(pg, &sc->ram_backing, pageq.queue) {
184 pmap_kenter_pa(sc->sys_image + iter, VM_PAGE_TO_PHYS(pg),
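
The vga_post.c hit walks a tail queue of backing pages and maps each one at the next virtual address. A cut-down sketch of that TAILQ_FOREACH loop, with an invented page type and enter_mapping() standing in for struct vm_page, VM_PAGE_TO_PHYS() and pmap_kenter_pa():

    #include <sys/queue.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096u

    typedef uintptr_t vaddr_t;
    typedef uint64_t  paddr_t;

    struct page {
        paddr_t phys_addr;
        TAILQ_ENTRY(page) pageq;
    };
    TAILQ_HEAD(pglist, page);

    static void enter_mapping(vaddr_t va, paddr_t pa) { (void)va; (void)pa; }

    static void
    map_backing(vaddr_t base, struct pglist *ram_backing)
    {
        struct page *pg;
        vaddr_t iter = 0;

        TAILQ_FOREACH(pg, ram_backing, pageq) {
            enter_mapping(base + iter, pg->phys_addr);
            iter += PAGE_SIZE;
        }
    }
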
  /src/sys/dev/dtv/
dtv_scatter.c 129 size_t pg; local in function:dtv_scatter_buf_map
132 pg = off >> PAGE_SHIFT;
134 if (pg >= sb->sb_npages)
136 else if (!pmap_extract(pmap_kernel(), (vaddr_t)sb->sb_page_ary[pg], &pa))
169 size_t pg, pgo; local in function:dtv_scatter_io_next
174 pg = sio->sio_offset >> PAGE_SHIFT;
178 *p = sio->sio_buf->sb_page_ary[pg] + pgo;
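
The dtv_scatter.c hits boil down to address arithmetic: a byte offset into the scatter buffer is split into a page index (off >> PAGE_SHIFT) and an offset within that page (off & PAGE_MASK), and the per-page pointer array does the rest. A small standalone sketch with a simplified buffer structure:

    #include <stddef.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1u << PAGE_SHIFT)
    #define PAGE_MASK  (PAGE_SIZE - 1)

    struct scatter_buf {
        size_t    sb_npages;
        uint8_t **sb_page_ary;          /* one kernel-virtual pointer per page */
    };

    /* Return a pointer to the byte at "off", or NULL if it is out of range. */
    static uint8_t *
    scatter_ptr(const struct scatter_buf *sb, size_t off)
    {
        size_t pg  = off >> PAGE_SHIFT; /* which page */
        size_t pgo = off & PAGE_MASK;   /* where inside that page */

        if (pg >= sb->sb_npages)
            return NULL;
        return sb->sb_page_ary[pg] + pgo;
    }
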
  /src/sys/external/bsd/drm2/include/drm/
bus_dma_hacks.h 206 unsigned pg; local in function:bus_dmamem_export_pages
208 pg = 0;
216 KASSERT(pg < npgs);
217 pgs[pg++] = container_of(PHYS_TO_VM_PAGE(paddr),
225 KASSERT(pg == npgs);
  /src/sys/external/bsd/drm2/include/linux/
mm.h 152 struct vm_page *pg = &page->p_vmp; local in function:set_page_dirty
155 if (pg->uobject != NULL) {
156 rw_enter(pg->uobject->vmobjlock, RW_WRITER);
157 uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
158 rw_exit(pg->uobject->vmobjlock);
160 uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
  /src/usr.bin/whatis/
whatis.c 77 glob_t pg; local in function:main
123 &pg)) != 0) {
129 if (pg.gl_pathc)
130 for (p = pg.gl_pathv; *p; p++)
132 globfree(&pg);
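
In whatis.c the pg is simply a glob_t: expand a pattern, walk gl_pathv when something matched, and globfree() the result. A runnable sketch of that glob(3) pattern (the pattern string here is only an example, not the one whatis builds):

    #include <glob.h>
    #include <stdio.h>

    int
    main(void)
    {
        glob_t pg;
        int error = glob("/usr/share/man/man*/whatis.db", GLOB_NOSORT, NULL, &pg);

        if (error != 0 && error != GLOB_NOMATCH) {
            fprintf(stderr, "glob failed: %d\n", error);
            return 1;
        }
        if (pg.gl_pathc > 0)
            for (char **p = pg.gl_pathv; *p != NULL; p++)
                printf("%s\n", *p);
        globfree(&pg);
        return 0;
    }
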
  /src/sys/arch/alpha/alpha/
vm_machdep.c 279 const struct vm_page * const pg = TAILQ_FIRST(&pglist); local in function:cpu_uarea_alloc
280 KASSERT(pg != NULL);
281 const paddr_t pa = VM_PAGE_TO_PHYS(pg);
304 struct vm_page *pg = PHYS_TO_VM_PAGE(PMAP_UNMAP_POOLPAGE(va)); local in function:cpu_uarea_free
305 KASSERT(pg != NULL);
306 for (size_t i = 0; i < UPAGES; i++, pg++) {
307 uvm_pagefree(pg);
  /src/sys/arch/powerpc/powerpc/
vm_machdep.c 334 const struct vm_page * const pg = TAILQ_FIRST(&pglist); local in function:cpu_uarea_alloc
335 KASSERT(pg != NULL);
336 const paddr_t pa = VM_PAGE_TO_PHYS(pg);
363 struct vm_page *pg = PHYS_TO_VM_PAGE(PMAP_UNMAP_POOLPAGE(va)); local in function:cpu_uarea_free
364 KASSERT(pg != NULL);
365 for (size_t i = 0; i < UPAGES; i++, pg++) {
366 uvm_pagefree(pg);
  /src/sys/arch/xen/x86/
xen_bus_dma.c 83 struct vm_page *pg, *pgnext; local in function:_xen_alloc_contig
104 for (pg = mlistp->tqh_first; pg != NULL; pg = pg->pageq.queue.tqe_next) {
105 pa = VM_PAGE_TO_PHYS(pg);
140 pg = NULL;
145 for (pg = mlistp->tqh_first, i = 0; pg != NULL; pg = pgnext, i++)
    [all...]
  /src/sys/arch/riscv/riscv/
vm_machdep.c 153 const struct vm_page * const pg = TAILQ_FIRST(&pglist); local in function:cpu_uarea_alloc
154 KASSERT(pg != NULL);
155 const paddr_t pa = VM_PAGE_TO_PHYS(pg);
181 struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); local in function:cpu_uarea_free
182 KASSERT(pg != NULL);
183 uvm_pagefree(pg);
  /src/sys/dev/
mm.c 111 vaddr_t pg; local in function:mm_init
116 pg = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
117 KASSERT(pg != 0);
118 pmap_protect(pmap_kernel(), pg, pg + PAGE_SIZE, VM_PROT_READ);
120 dev_zero_page = (void *)pg;
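
The mm.c hit builds the kernel's permanent zero page for /dev/zero: allocate one wired, zero-filled page, then take write permission away. A userland analogue, under the assumption that mmap()/mprotect() may stand in for uvm_km_alloc() and pmap_protect():

    #include <sys/mman.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        void *pg = mmap(NULL, (size_t)pagesz, PROT_READ | PROT_WRITE,
            MAP_ANON | MAP_PRIVATE, -1, 0);

        if (pg == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        /* The page is already zero-filled; now make it read-only. */
        if (mprotect(pg, (size_t)pagesz, PROT_READ) == -1) {
            perror("mprotect");
            return 1;
        }
        printf("read-only zero page at %p\n", pg);
        return 0;
    }
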
  /src/regress/sys/uvm/pdsim/
pdsim.c 61 struct vm_page *pg; local in function:pdsim_pagealloc
63 pg = TAILQ_FIRST(&freeq);
64 if (pg == NULL) {
67 TAILQ_REMOVE(&freeq, pg, pageq);
68 pg->offset = idx << PAGE_SHIFT;
69 pg->uanon = NULL;
70 pg->uobject = obj;
71 pg->pqflags = 0;
72 obj->pages[idx] = pg;
76 return pg;
112 struct vm_page *pg; local in function:pdsim_pagelookup
146 struct vm_page *pg; local in function:pdsim_init
165 struct vm_page *pg; local in function:pdsim_reclaimone
182 struct vm_page *pg; local in function:fault
    [all...]
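
The pdsim hits show the simulator's page allocator: pop a page off the free queue, set up its identity fields, and install it in the owning object's page array. A trimmed-down standalone sketch of pdsim_pagealloc() using the same sys/queue.h TAILQ operations (the structures are invented for the example):

    #include <sys/queue.h>
    #include <stddef.h>

    #define PAGE_SHIFT 12
    #define NPAGES     64

    struct object;

    struct page {
        struct object *uobject;
        size_t         offset;
        TAILQ_ENTRY(page) pageq;
    };

    struct object {
        struct page *pages[NPAGES];
    };

    static TAILQ_HEAD(, page) freeq = TAILQ_HEAD_INITIALIZER(freeq);

    static struct page *
    pagealloc(struct object *obj, size_t idx)
    {
        struct page *pg = TAILQ_FIRST(&freeq);

        if (pg == NULL)
            return NULL;                /* out of free pages */
        TAILQ_REMOVE(&freeq, pg, pageq);
        pg->offset = idx << PAGE_SHIFT;
        pg->uobject = obj;
        obj->pages[idx] = pg;
        return pg;
    }
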
  /src/sys/arch/amd64/amd64/
gdt.c 148 struct vm_page *pg; local in function:gdt_init
156 pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
157 if (pg == NULL) {
160 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
187 struct vm_page *pg;
194 while ((pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO))
198 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),

