    Searched refs:sva (Results 1 - 25 of 45) sorted by relevance

  /src/sys/arch/vax/include/
pmap.h 154 int *pte, sva; local in function:pmap_extract
167 sva = PG_PFNUM(va);
169 if (sva >= (pmap->pm_p0lr & ~AST_MASK))
173 if (sva < pmap->pm_p1lr)
181 if (kvtopte(&pte[sva])->pg_v && (pte[sva] & PG_FRAME)) {
183 *pap = (pte[sva] & PG_FRAME) << VAX_PGSHIFT;
  /src/sys/rump/librump/rumpkern/arch/x86/
rump_x86_pmap.c 92 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
107 pmap_write_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
  /src/sys/arch/powerpc/booke/
booke_pmap.c 141 pmap_kvptefill(vaddr_t sva, vaddr_t eva, pt_entry_t pt_entry)
144 KASSERT(sva == trunc_page(sva));
145 pt_entry_t *ptep = pmap_kvtopte(stb, sva);
146 for (; sva < eva; sva += NBPG) {
147 *ptep++ = pt_entry ? (sva | pt_entry) : 0;
149 return sva;
326 const vaddr_t sva = (vaddr_t) pa; local in function:pmap_md_map_poolpage
328 const vaddr_t eva = sva + size
    [all...]
  /src/sys/arch/x86/acpi/
acpi_machdep.c 423 vaddr_t sva, eva; local in function:acpi_md_OsReadable
426 sva = trunc_page((vaddr_t) Pointer);
429 if (sva < VM_MIN_KERNEL_ADDRESS)
432 for (; sva < eva; sva += PAGE_SIZE) {
433 pte = kvtopte(sva);
447 vaddr_t sva, eva; local in function:acpi_md_OsWritable
450 sva = trunc_page((vaddr_t) Pointer);
453 if (sva < VM_MIN_KERNEL_ADDRESS)
456 for (; sva < eva; sva += PAGE_SIZE)
    [all...]
  /src/sys/arch/hppa/include/
pmap.h 161 void pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva);
198 pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
202 pmap_write_protect(pmap, sva, eva, prot);
204 pmap_remove(pmap, sva, eva);
  /src/sys/rump/librump/rumpkern/arch/generic/
rump_generic_pmap.c 64 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
  /src/lib/libpuffs/
subr.c 155 puffs_setvattr(struct vattr *vap, const struct vattr *sva)
158 #define SETIFVAL(a, t) if (sva->a != (t)PUFFS_VNOVAL) vap->a = sva->a
159 if (sva->va_type != VNON)
160 vap->va_type = sva->va_type;
  /src/sys/arch/sun2/sun2/
pmap.c 696 pmeg_mon_init(vaddr_t sva, vaddr_t eva, int keep)
705 sva, eva, keep);
708 sva &= ~(NBSG - 1);
710 while (sva < eva) {
711 sme = get_segmap(sva);
714 endseg = sva + NBSG;
715 for (pgva = sva; pgva < endseg; pgva += PAGE_SIZE) {
723 prom_printf(" sva=0x%x seg=0x%x valid=%d\n",
724 sva, sme, valid);
729 set_segmap(sva, SEGINV)
    [all...]
  /src/sys/arch/sun3/sun3/
pmap.c 705 pmeg_mon_init(vaddr_t sva, vaddr_t eva, int keep)
714 sva, eva, keep);
717 sva &= ~(NBSG - 1);
719 while (sva < eva) {
720 sme = get_segmap(sva);
723 endseg = sva + NBSG;
724 for (pgva = sva; pgva < endseg; pgva += PAGE_SIZE) {
732 mon_printf(" sva=0x%x seg=0x%x valid=%d\n",
733 sva, sme, valid);
738 set_segmap(sva, SEGINV)
    [all...]
  /src/sys/arch/x86/include/
pmap.h 182 pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
186 pmap_write_protect(pmap, sva, eva, prot);
188 pmap_remove(pmap, sva, eva);
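
Note: the hppa and x86 pmap.h hits above show the same header-level dispatch for pmap_protect(): a range that stays readable is downgraded via pmap_write_protect(), while revoking all access falls through to pmap_remove(). The conditionals themselves are elided from the hits, so what follows is only a minimal compile-in-isolation sketch of that pattern; the typedefs, VM_PROT_* values, and prototypes are local stand-ins for the kernel's own declarations, and sketch_pmap_protect() is a hypothetical name, not the tree's inline.

#include <stdint.h>

typedef uintptr_t vaddr_t;		/* stand-in for the MD vaddr_t */
typedef unsigned int vm_prot_t;		/* stand-in for the UVM type */
#define VM_PROT_NONE	0x00
#define VM_PROT_READ	0x01
#define VM_PROT_WRITE	0x02

struct pmap;
/* Stand-in prototypes for the kernel routines named in the hits above. */
void pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void pmap_remove(struct pmap *, vaddr_t, vaddr_t);

/* Restrict the mappings covering the page-aligned range [sva, eva). */
static inline void
sketch_pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if (prot == VM_PROT_NONE) {
		/* All access revoked: same effect as removing the range. */
		pmap_remove(pmap, sva, eva);
	} else if ((prot & VM_PROT_WRITE) == 0) {
		/* Read/execute retained: just clear write permission. */
		pmap_write_protect(pmap, sva, eva, prot);
	}
	/* Granting new access is left to pmap_enter(), not handled here. */
}
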
  /src/sys/arch/arm/acpi/
acpi_machdep.c 230 vaddr_t sva, eva; local in function:acpi_md_OsReadable
233 sva = trunc_page((vaddr_t)va);
236 if (sva < VM_MIN_KERNEL_ADDRESS)
239 for (; sva < eva; sva += PAGE_SIZE) {
240 pte = kvtopte(sva);
251 vaddr_t sva, eva; local in function:acpi_md_OsWritable
254 sva = trunc_page((vaddr_t)va);
257 if (sva < VM_MIN_KERNEL_ADDRESS)
260 for (; sva < eva; sva += PAGE_SIZE)
    [all...]
  /src/sys/uvm/
uvm_glue.c 129 vaddr_t sva, eva; local in function:uvm_chgkprot
133 for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
137 if (pmap_extract(pmap_kernel(), sva, &pa) == false)
139 pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
  /src/sys/uvm/pmap/
pmap.c 394 pmap_addr_range_check(pmap_t pmap, vaddr_t sva, vaddr_t eva, const char *func)
398 if (sva < VM_MIN_KERNEL_ADDRESS)
400 func, sva);
1109 pmap_pte_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
1117 (uintptr_t)pmap, (is_kernel_pmap_p ? 1 : 0), sva, eva);
1123 for (; sva < eva; sva += NBPG, ptep++) {
1137 pmap_remove_pv(pmap, sva, pg, pte_modified_p(pte));
1145 pmap_tlb_invalidate_addr(pmap, sva);
1156 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva
1686 const vaddr_t sva = trunc_page(va); local in function:pmap_kremove
    [all...]
  /src/sys/arch/hppa/hppa/
pmap.c 484 pmap_dump_table(pa_space_t space, vaddr_t sva)
488 vaddr_t va = sva;
1478 * pmap_remove(pmap, sva, eva)
1480 * range determined by [sva, eva) and pmap.
1481 * sva and eva must be on machine independent page boundaries and
1482 * sva must be less than or equal to eva.
1485 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1488 UVMHIST_CALLARGS(maphist, "sva %#jx eva %#jx", sva, eva, 0, 0);
1499 for (batch = 0; sva < eva; sva += PAGE_SIZE)
    [all...]
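
Note: the comment block in the hppa pmap.c hit above states the pmap_remove() contract: sva and eva are page-aligned, sva is less than or equal to eva, and every mapping in [sva, eva) is torn down. Below is a minimal sketch of that page-at-a-time walk; PAGE_SIZE, the types, and the sketch_* names are assumptions for illustration, and the per-page helper is an empty stub rather than any port's real PTE teardown. The alpha and m68k hits elsewhere in this listing show what the real loops add on top: skipping whole invalid segments and batching TLB shootdowns.

#include <stdint.h>

typedef uintptr_t vaddr_t;	/* stand-in for the MD vaddr_t */
#define PAGE_SIZE 4096		/* stand-in; ports use their MD page size */

struct pmap;

/* Hypothetical helper: drop whatever mapping exists at va, if any. */
static void
sketch_remove_page(struct pmap *pmap, vaddr_t va)
{
	(void)pmap;
	(void)va;
	/* Port-specific PTE invalidation and pv tracking would go here. */
}

/* Tear down every mapping in the page-aligned range [sva, eva). */
static void
sketch_pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva)
{
	for (vaddr_t va = sva; va < eva; va += PAGE_SIZE)
		sketch_remove_page(pmap, va);
}
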
  /src/sys/arch/alpha/alpha/
pmap.c 1733 pmap_remove_internal(pmap_t pmap, vaddr_t sva, vaddr_t eva,
1743 printf("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva);
1758 while (sva < eva) {
1759 l3pte = PMAP_KERNEL_PTE(sva);
1761 pte_bits = pmap_remove_mapping(pmap, sva,
1763 pmap_tlb_shootdown(pmap, sva, pte_bits,
1766 sva += PAGE_SIZE;
1783 KASSERT(sva < VM_MAXUSER_ADDRESS);
1790 l1pte = pmap_l1pte(lev1map, sva);
1792 for (; sva < eva; sva = l1eva, l1pte++)
    [all...]
  /src/sys/arch/amd64/stand/prekern/
mm.c 207 vaddr_t sva, eva; local in function:mm_randva_kregion
224 sva = bootspace.segs[i].va;
225 eva = sva + bootspace.segs[i].sz;
227 if ((sva <= randva) && (randva < eva)) {
231 if ((sva < randva + size) && (randva + size <= eva)) {
235 if (randva < sva && eva < (randva + size)) {
  /src/sys/arch/sun3/sun3x/
pmap.c 2967 pmap_remove_kernel(vaddr_t sva, vaddr_t eva)
2972 if ((sva & PGOFSET) || (eva & PGOFSET))
2976 idx = m68k_btop(sva - KERNBASE3X);
2981 TBIS(sva);
2982 sva += PAGE_SIZE;
2992 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
2996 pmap_remove_kernel(sva, eva);
3014 if (pmap_remove_a(pmap->pm_a_tmgr, sva, eva)) {
3050 pmap_remove_a(a_tmgr_t *a_tbl, vaddr_t sva, vaddr_t eva)
3089 nstart = MMU_ROUND_UP_A(sva);
    [all...]
  /src/sys/arch/hpcmips/hpcmips/
bus_space.c 228 mips_pte_cachechange(struct pmap *pmap, vaddr_t sva, vaddr_t eva,
231 mips_dcache_wbinv_range(sva, eva - sva);
233 for (; sva < eva; sva += PAGE_SIZE) {
239 tlb_update_addr(sva, KERNEL_PID, pte, 0);
250 const vaddr_t sva = mips_trunc_page(bpa); local in function:__hpcmips_cacheable
252 pmap_pte_process(pmap_kernel(), sva, eva,
  /src/sys/arch/riscv/riscv/
pmap_machdep.c 476 vaddr_t sva = MEGAPAGE_TRUNC(va); local in function:pmap_kenter_range
483 while (sva < eva) {
484 const size_t sidx = (sva >> vshift) & pdetab_mask;
489 sva += NBSEG;
  /src/sys/arch/sparc/sparc/
memreg.c 179 u_int ser, u_int sva, u_int aer, u_int ava,
186 printf("%ssync mem arr: ser=%s sva=0x%x ",
187 issync ? "" : "a", bits, sva);
191 pte = getpte4(sva);
  /src/sys/arch/m68k/m68k/
pmap_motorola.c 862 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
872 ("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva));
875 while (sva < eva) {
876 nssva = m68k_trunc_seg(sva) + NBSEG;
884 pte = pmap_pte(pmap, sva);
885 while (sva < nssva) {
892 if (!pmap_ste_v(pmap, sva)) {
893 sva = nssva;
923 pmap_remove_mapping(pmap, sva, pte, flags, NULL);
926 sva += PAGE_SIZE
    [all...]
  /src/sys/external/bsd/drm2/dist/drm/i915/gem/
i915_gem_phys.c 72 vaddr_t va, sva, eva; local in function:bus_dmamem_kunmap
77 sva = (vaddr_t)kva;
78 eva = sva + size;
83 for (va = sva; va < eva; va += PAGE_SIZE) {
  /src/sys/arch/x86/x86/
pmap.c 1109 pmap_kremove1(vaddr_t sva, vsize_t len, bool localonly)
1114 eva = sva + len;
1117 for (va = sva; va < eva; va += PAGE_SIZE) {
1136 pmap_kremove(vaddr_t sva, vsize_t len)
1139 pmap_kremove1(sva, len, false);
1149 pmap_kremove_local(vaddr_t sva, vsize_t len)
1152 pmap_kremove1(sva, len, true);
4284 pmap_remove_locked(struct pmap *pmap, vaddr_t sva, vaddr_t eva)
4290 vaddr_t blkendva, va = sva;
4369 pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva
    [all...]
  /src/sys/arch/sh3/sh3/
pmap.c 514 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
520 KDASSERT((sva & PGOFSET) == 0);
522 for (va = sva; va < eva; va += PAGE_SIZE) {
655 pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
661 sva = trunc_page(sva);
664 pmap_remove(pmap, sva, eva);
682 for (va = sva; va < eva; va += PAGE_SIZE) {
  /src/sys/arch/aarch64/aarch64/
pmap.c 873 pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva)
879 KASSERT_PM_ADDR(pm, sva);
883 for (va = sva; va < eva; va = (va + blocksize) & ~(blocksize - 1)) {
929 pmap_procwr(struct proc *p, vaddr_t sva, int len)
933 cpu_icache_sync_range(sva, len);
940 for (va = sva; len > 0; va = eva, len -= tlen) {
1266 pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1276 UVMHIST_CALLARGS(pmaphist, "pm=%p, sva=%016lx, eva=%016lx, prot=%08x",
1277 pm, sva, eva, prot);
1279 KASSERT_PM_ADDR(pm, sva);
    [all...]
