/src/sys/external/bsd/drm2/dist/drm/nouveau/nvkm/subdev/mmu/
nouveau_nvkm_subdev_mmu_vmmnv44.c
    33: dma_addr_t *list, u32 ptei, u32 ptes)
    43: while (ptes--) {
    79: u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
    84: const u32 pten = min(ptes, 4 - (ptei & 3));
    89: ptes -= pten;
    92: while (ptes >= 4) {
    99: ptes -= 4;
    102: if (ptes) {
    103: for (i = 0; i < ptes; i++, addr += 0x1000)
    105: nv44_vmm_pgt_fill(vmm, pt, tmp, ptei, ptes);
    [all...]
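
The vmmnv44.c matches above trace a head/body/tail loop: nv44 packs four 32-bit PTEs per group, so the routine first fills up to the next 4-entry boundary (line 84), then whole groups of four (lines 92-99), then the remainder (lines 102-103). A minimal standalone sketch of that shape; pte_write() and its encoding are hypothetical stand-ins, not the driver's code:

#include <stdint.h>

/* Hypothetical per-entry encoder standing in for the hardware format. */
static void pte_write(uint32_t *pt, uint32_t i, uint64_t addr)
{
    pt[i] = (uint32_t)(addr >> 12);
}

static void pgt_pte_sketch(uint32_t *pt, uint32_t ptei, uint32_t ptes,
    uint64_t addr)
{
    /* Head: fill up to the next 4-entry boundary. */
    uint32_t pten = 4 - (ptei & 3);
    if (pten > ptes)
        pten = ptes;
    for (uint32_t i = 0; i < pten; i++, addr += 0x1000)
        pte_write(pt, ptei++, addr);
    ptes -= pten;

    /* Body: whole groups of four entries at a time. */
    while (ptes >= 4) {
        for (uint32_t i = 0; i < 4; i++, addr += 0x1000)
            pte_write(pt, ptei++, addr);
        ptes -= 4;
    }

    /* Tail: whatever remains past the last full group. */
    for (uint32_t i = 0; i < ptes; i++, addr += 0x1000)
        pte_write(pt, ptei++, addr);
}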

nouveau_nvkm_subdev_mmu_vmmnv41.c
    33: u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
    36: while (ptes--) {
    45: u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
    47: VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);
    53: u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
    57: while (ptes--) {
    63: VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);
    69: struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
    71: VMM_FO032(pt, vmm, ptei * 4, 0, ptes);
nouveau_nvkm_subdev_mmu_vmmnv04.c
    34: u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
    37: while (ptes--) {
    46: u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
    48: VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);
    54: u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
    58: while (ptes--)
    62: VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);
    68: struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
    70: VMM_FO032(pt, vmm, 8 + (ptei * 4), 0, ptes);
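
nv04 and nv41 share the simplest shape above: an iterator macro (VMM_MAP_ITER_DMA / VMM_MAP_ITER_SGL) walks the address list and hands pages to a per-PTE encoder, and note from nv04's line 70 that the table starts 8 bytes into the object. A hedged sketch of that walk, simplified to one callback per page, with invented names and encoding bits:

#include <stdint.h>

typedef void (*pgt_pte_fn)(uint32_t *pt, uint32_t ptei, uint64_t addr);

/* Per-PTE encoder: page address plus hypothetical PRESENT|RW low bits;
 * the '2 +' mirrors nv04's table starting 8 bytes (two u32s) in. */
static void pgt_pte_nv04ish(uint32_t *pt, uint32_t ptei, uint64_t addr)
{
    pt[2 + ptei] = (uint32_t)addr | 0x3;
}

/* Simplified VMM_MAP_ITER_DMA: hand each page of the DMA list to fn. */
static void map_iter_dma_sketch(uint32_t *pt, uint32_t ptei, uint32_t ptes,
    const uint64_t *list, pgt_pte_fn fn)
{
    while (ptes--)
        fn(pt, ptei++, *list++);
}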
nouveau_nvkm_subdev_mmu_vmmgp100.c
    40: struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
    46: while (ptes--) {
    66: struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
    70: while (ptes--) {
    86: u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
    92: while (ptes--) {
    125: u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
    129: map->type += ptes * map->ctag;
    131: while (ptes--) {
    140: u32 ptei, u32 ptes, struct nvkm_vmm_map *map [all...]
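
The gp100 matches show the compression-tag accounting: map->type is bumped past the whole run up front (line 129), and each written entry then steps by a per-page increment. A loose sketch of that bookkeeping; the field layout and names are invented, not the hardware's:

#include <stdint.h>

struct map_sketch {
    uint64_t type;  /* running PTE type word, carries the comptag */
    uint64_t ctag;  /* compression-tag step per page */
    uint64_t next;  /* per-entry data increment */
};

static void pgt_pte_ctag_sketch(uint64_t *pt, uint32_t ptei, uint32_t ptes,
    uint64_t addr, struct map_sketch *map)
{
    uint64_t data = (addr >> 4) | map->type;    /* layout invented */
    map->type += ptes * map->ctag;  /* reserve tags for the whole run */
    while (ptes--) {
        pt[ptei++] = data;
        data += map->next;          /* step per page */
    }
}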

nouveau_nvkm_subdev_mmu_vmmnv50.c
    38: u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
    44: map->type += ptes * map->ctag;
    46: while (ptes) {
    49: if (ptes >= pten && IS_ALIGNED(ptei, pten))
    55: ptes -= pten;
    65: u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
    67: VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
    73: u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
    76: VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
    78: while (ptes--) [all...]
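
nv50 adds a fast path at line 49: when the remaining run covers a whole, naturally aligned group of pten entries, one block-sized mapping stands in for pten small ones. A hedged sketch of the alignment test and fallback; the encoding bits are invented and the real driver keeps block entries elsewhere:

#include <stdint.h>

static void pgt_pte_blocks_sketch(uint64_t *pt, uint32_t ptei, uint32_t ptes,
    uint64_t addr, uint32_t pten /* small PTEs per block */)
{
    while (ptes) {
        if (ptes >= pten && (ptei % pten) == 0) {
            /* Aligned and long enough: one entry covers pten pages. */
            pt[ptei] = addr | 0x2;          /* hypothetical LARGE bit */
            ptei += pten;
            ptes -= pten;
            addr += (uint64_t)pten << 12;
        } else {
            /* Unaligned head or short tail: single small entries. */
            pt[ptei++] = addr | 0x1;        /* hypothetical VALID bit */
            ptes--;
            addr += 1 << 12;
        }
    }
}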

nouveau_nvkm_subdev_mmu_vmmgk104.c
    31: struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
    34: VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(1) /* PRIV. */, ptes);
nouveau_nvkm_subdev_mmu_vmmgf100.c
    40: u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
    46: while (ptes--) {
    55: map->type += ptes * map->ctag;
    57: while (ptes--) {
    67: u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
    69: VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
    75: u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
    78: VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
    80: while (ptes--) {
    89: VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte) [all...]

nouveau_nvkm_subdev_mmu_vmm.c
    205: const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes)
    216: for (lpti = ptei >> sptb; ptes; spti = 0, lpti++) {
    217: const u32 pten = min(sptn - spti, ptes);
    219: ptes -= pten;
    229: for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
    243: for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
    250: TRA(it, "LPTE %05x: U -> S %d PTEs", pteb, ptes);
    430: u32 pteb, ptei, ptes;  (local in function nvkm_vmm_ref_hwpt)
    543: const u32 ptes = min_t(u64, it.cnt, pten - ptei);  (local in function nvkm_vmm_iter)
    [all...]
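
The vmm.c loops at lines 229 and 243 count a run of same-state PTEs so that one bookkeeping update, like the "U -> S %d PTEs" trace at line 250, can cover the whole run at once. A guess at that idiom in isolation; the state array and its encoding are hypothetical:

#include <stdint.h>

/* Count how many consecutive entries starting at ptei share the first
 * entry's state, stopping at lpti. */
static uint32_t count_run_sketch(const uint32_t *state, uint32_t ptei,
    uint32_t lpti)
{
    uint32_t ptes, first = state[ptei];
    for (ptes = 1, ptei++; ptei < lpti && state[ptei] == first;
        ptes++, ptei++)
        ;
    return ptes;
}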

nouveau_nvkm_subdev_mmu_vmmgm200.c
    34: struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
    37: VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(32) /* VOL. */, ptes);
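
gk104 and gm200 unmap by filling: VMM_FO064 stores one 64-bit constant into every slot of the range, keeping only a chip-specific bit (PRIV on gk104 at line 34 above it, VOL on gm200 here). Stripped of the accessor macro, the operation reduces to roughly this; the parameter names are ours:

#include <stdint.h>

static void pgt_unmap_sketch(uint64_t *pt, uint32_t ptei, uint32_t ptes,
    uint64_t keep /* e.g. bit 1 (PRIV) or bit 32 (VOL) */)
{
    while (ptes--)
        pt[ptei++] = keep;
}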
vmm.h
    56: struct nvkm_mmu_pt *, u32 ptei, u32 ptes);
    60: u32 ptei, u32 ptes, struct nvkm_vmm_map *);
    74: bool (*pfn_clear)(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32 ptei, u32 ptes);
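
The vmm.h declarations above define the per-chip dispatch surface: unmap, map, and pfn_clear callbacks all take a (ptei, ptes) window into one page table, and each GPU generation fills in its own table of them. A self-contained sketch of such an ops table; struct and field names are hypothetical stand-ins for the nvkm types:

#include <stdbool.h>
#include <stdint.h>

struct mmu_pt;   /* opaque, stands in for struct nvkm_mmu_pt */
struct vmm;      /* opaque, stands in for struct nvkm_vmm */
struct vmm_map;  /* opaque, stands in for struct nvkm_vmm_map */

struct vmm_desc_func_sketch {
    void (*unmap)(struct vmm *, struct mmu_pt *,
        uint32_t ptei, uint32_t ptes);
    void (*mem)(struct vmm *, struct mmu_pt *,
        uint32_t ptei, uint32_t ptes, struct vmm_map *);
    bool (*pfn_clear)(struct vmm *, struct mmu_pt *,
        uint32_t ptei, uint32_t ptes);
};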

/src/lib/libkvm/
kvm_sparc.c
    183: int *ptes;  (local in function _kvm_kvatop44c)
    196: * ptes[cpup->npmegs];
    199: ptes = (int *)((int)kd->cpu_data + cpup->pmegoffset);
    211: pte = ptes[sp->sg_pmeg * nptesg + VA_VPG(va)];
    287: int64_t *ptes;  (local in function _kvm_kvatop4u)
    328: ptes = (int64_t *)(intptr_t)_kvm_pa2off(kd,
    330: pte = ptes[sparc64_va_to_pte(va)];
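
_kvm_kvatop44c resolves a kernel VA against a crash dump: the dump carries a flat PTE array grouped by pmeg, and the segment's pmeg number plus the page index within the segment select the entry (line 211). A standalone sketch of that arithmetic; the constants and VA_VPG definition here are invented for illustration:

#include <stdint.h>

#define NPTESG 64                                 /* hypothetical PTEs/pmeg */
#define VA_VPG(va) (((va) >> 12) & (NPTESG - 1))  /* hypothetical page idx */

static int32_t kvatop_sketch(const int32_t *ptes, uint32_t pmeg, uint64_t va)
{
    int32_t pte = ptes[pmeg * NPTESG + VA_VPG(va)];
    /* The caller would check the valid bit and combine the PFN with the
     * page offset (va & 0xfff) to form the physical address. */
    return pte;
}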

/src/sys/arch/xen/x86/
xen_pmap.c
    211: pt_entry_t *ptes, pte;  (local in function pmap_extract_ma)
    220: pmap_map_ptes(pmap, &pmap2, &ptes, &pdes);
    230: pte = ptes[pl1_i(va)];
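
pmap_extract_ma follows the usual extract shape: map the pmap's PTEs, index with pl1_i(va), and recover the address from a present entry. A self-contained approximation; the bit layout matches x86, but pl1_i() is reduced to a plain in-page-table shift:

#include <stdbool.h>
#include <stdint.h>

#define PTE_P     0x1ULL                   /* present bit */
#define PTE_FRAME 0x000ffffffffff000ULL    /* frame mask */

static bool extract_sketch(const uint64_t *ptes, uint64_t va, uint64_t *pap)
{
    uint64_t pte = ptes[(va >> 12) & 0x1ff]; /* pl1_i(va), simplified */
    if ((pte & PTE_P) == 0)
        return false;
    *pap = (pte & PTE_FRAME) | (va & 0xfff);
    return true;
}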

/src/sys/arch/x86/x86/
pmap.c
    253: * non-kernel PDEs in the PDP, the PTEs, and PTPs and connected data
    254: * structures. For modifying unmanaged kernel PTEs it is not needed as
    256: * consistent (and the lock can't be taken for unmanaged kernel PTEs,
    414: * Special VAs and the PTEs that map them
    739: * pmap_ptp_range_clip: abuse ptp->uanon to clip range of PTEs to remove
    757: * pmap_map_ptes: map a pmap's PTEs into KVM and lock them in
    762: * our caller wants to access this pmap's PTEs.
    1640: * Nothing to do, the PTEs will be entered via
    2479: * (intolerable right now because the PTEs are likely mapped in).
    2488: pt_entry_t *ptes, pd_entry_t * const *pdes
    3251: pt_entry_t *ptes;  (local in function pmap_remove_all)
    3901: pt_entry_t *ptes, pte;  (local in function pmap_extract)
    4286: pt_entry_t *ptes;  (local in function pmap_remove_locked)
    4481: pt_entry_t *ptes;  (local in function pmap_pp_remove_ent)
    4831: pt_entry_t *ptes;  (local in function pmap_write_protect)
    4915: pt_entry_t *ptes, *ptep, opte;  (local in function pmap_unwire)
    4989: pt_entry_t *ptes, opte, npte;  (local in function pmap_enter_ma)
    5359: pt_entry_t *ptes, opte;  (local in function pmap_enter_gnt)
    5533: pt_entry_t *ptes;  (local in function pmap_remove_gnt)
    5865: pt_entry_t *ptes, *pte;  (local in function pmap_dump)
    6289: pt_entry_t *ptes, opte, npte;  (local in function pmap_ept_enter)
    6529: pt_entry_t *ptes, pte;  (local in function pmap_ept_extract)
    6676: pt_entry_t *ptes;  (local in function pmap_ept_remove)
    6820: pt_entry_t *ptes, *spte;  (local in function pmap_ept_write_protect)
    6875: pt_entry_t *ptes, *ptep, opte;  (local in function pmap_ept_unwire)
    [all...]
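
The comments at lines 757-762 describe the discipline all those locals follow: pmap_map_ptes() maps a pmap's PTEs into KVM and locks them, and every PTE access is bracketed by a matching unmap. A toy, self-contained rendering of that bracket; the real code uses struct pmap, pt_entry_t, the pmap mutex, and a recursive PTE mapping rather than these stand-ins:

#include <stdint.h>

struct pmap_sketch {
    uint64_t *pt;   /* stands in for this pmap's level-1 entries */
    int locked;     /* stands in for the pmap's mutex */
};

static uint64_t *map_ptes_sketch(struct pmap_sketch *p)
{
    p->locked = 1;  /* real code: lock the pmap and install its
                     * recursive PTE mapping so 'ptes' indexes it */
    return p->pt;
}

static void unmap_ptes_sketch(struct pmap_sketch *p)
{
    p->locked = 0;  /* real code: tear down the mapping, unlock */
}

/* Every access is bracketed, per the pmap_map_ptes comment above. */
static uint64_t read_pte_sketch(struct pmap_sketch *p, uint64_t idx)
{
    uint64_t *ptes = map_ptes_sketch(p);
    uint64_t pte = ptes[idx];
    unmap_ptes_sketch(p);
    return pte;
}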

/src/sys/external/bsd/drm2/dist/drm/i915/gem/
i915_gem_pages.c
    371: pte_t **ptes = mem;
    374: **ptes++ = mk_pte(page, pgprot);
    378: pte_t **ptes = mem;
    385: **ptes++ = iomap_pte(iomap, addr, pgprot);
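
The i915 matches show the apply-callback idiom: 'mem' carries a cursor to the next PTE slot, and each call writes one entry and advances the cursor. A runnable approximation with mk_pte() reduced to a plain shift and the types renamed to mark them as stand-ins:

#include <stdint.h>

typedef uint64_t pte_t_sketch;

/* Write one entry at the cursor passed through 'mem', then advance it,
 * mirroring '**ptes++ = mk_pte(page, pgprot)' above. */
static int fill_pte_sketch(uint64_t pfn, void *mem)
{
    pte_t_sketch **ptes = mem;
    *(*ptes)++ = pfn << 12;
    return 0;
}

/* Usage: the caller owns the cursor and passes its address. */
static void fill_four(pte_t_sketch table[4])
{
    pte_t_sketch *cursor = table;
    for (uint64_t pfn = 0; pfn < 4; pfn++)
        fill_pte_sketch(pfn, &cursor);
}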

/src/sys/arch/mips/mips/
mipsX_subr.S
    480: ld k0, 0(k1)  # 0f: load both ptes
    532: * Note that we do not support the full size of the PTEs, relying
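
The comment on line 480 ("load both ptes") reflects the refill trick: a MIPS TLB entry maps an even/odd page pair (EntryLo0/EntryLo1), so with 32-bit PTEs a single 64-bit ld fetches both at once. An illustrative C rendering of the index math and the split, assuming a little-endian layout; this is not the handler's actual code:

#include <stdint.h>
#include <string.h>

static void tlb_pair_sketch(const uint32_t *pt, uint64_t va,
    uint32_t *lo0, uint32_t *lo1)
{
    uint64_t idx = (va >> 12) & ~(uint64_t)1; /* even page of the pair */
    uint64_t both;
    memcpy(&both, &pt[idx], sizeof(both));    /* the single 'ld' */
    *lo0 = (uint32_t)both;                    /* even page's PTE */
    *lo1 = (uint32_t)(both >> 32);            /* odd page's PTE */
}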