| /src/sys/uvm/ |
| uvm_prot.h | 82 #define VM_PROT_NONE ((vm_prot_t) 0x00)
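A note on the definition: VM_PROT_NONE is the zero value of the vm_prot_t bit mask, so it serves both as "no access allowed" and as the empty set of permission bits, which is why so many of the tests below compare masked expressions against it. A minimal sketch of the flag family, assuming the conventional NetBSD values for the sibling bits:

    /* Sketch; the real typedef and definitions live in uvm_prot.h. */
    typedef int vm_prot_t;

    #define VM_PROT_NONE	((vm_prot_t) 0x00)	/* no access */
    #define VM_PROT_READ	((vm_prot_t) 0x01)	/* one bit per permission */
    #define VM_PROT_WRITE	((vm_prot_t) 0x02)
    #define VM_PROT_EXECUTE	((vm_prot_t) 0x04)
    #define VM_PROT_ALL	(VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE)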
|
| uvm_anon.c | 154 pmap_page_protect(pg, VM_PROT_NONE);
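This call, and the matching ones in uvm_aobj.c, uvm_loan.c, uvm_pdaemon.c, lfs_pages.c and genfs_io.c below, are all the same idiom: pmap_page_protect() with VM_PROT_NONE strips every low-level mapping of the page so nothing can reach it while UVM frees it or pages it out. A hedged sketch of the idiom; revoke_all_mappings is a hypothetical wrapper, not a kernel function:

    #include <sys/param.h>
    #include <uvm/uvm.h>

    /* Hypothetical helper: make a managed page unreachable from every pmap. */
    static void
    revoke_all_mappings(struct vm_page *pg)
    {
    	/*
    	 * Downgrading to VM_PROT_NONE leaves no permissible access,
    	 * so pmap_page_protect() removes all mappings of the page
    	 * rather than merely write-protecting them.
    	 */
    	pmap_page_protect(pg, VM_PROT_NONE);
    }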
|
| uvm_mremap.c | 58 reserved_entry->protection != VM_PROT_NONE) {
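The check above is mremap() validating an address-space reservation: an entry that merely reserves a range should grant no access rights, so any protection other than VM_PROT_NONE disqualifies it. A sketch of that shape; the exact surrounding condition in uvm_mremap.c may differ, and the error path is assumed:

    /* Sketch: accept only a bare reservation (no rights granted). */
    if (reserved_entry->protection != VM_PROT_NONE) {
    	return EINVAL;	/* range is live, not a reservation */
    }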
|
| uvm_aobj.c | 632 pmap_page_protect(pg, VM_PROT_NONE);
|            | 765 pmap_page_protect(pg, VM_PROT_NONE);
|
| uvm_loan.c | 68 * and thus will never be pmap_page_protect()'d with VM_PROT_NONE. a
|            | 1120 pmap_page_protect(uobjpage, VM_PROT_NONE);
|            | 1182 pmap_page_protect(oldpg, VM_PROT_NONE);
|
| uvm_map.c | 2552 * map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
|           | 3260 * wire this entry now if the old protection was VM_PROT_NONE
|           | 3261 * and the new protection is not VM_PROT_NONE.
|           | 3266 old_prot == VM_PROT_NONE &&
|           | 3267 new_prot != VM_PROT_NONE) {
|           | 3612 if (entry->protection == VM_PROT_NONE ||
|           | 3816 if (entry->protection != VM_PROT_NONE &&
|           | 3839 if (entry->protection == VM_PROT_NONE)
|           | 3911 * Skip VM_PROT_NONE entries like we did above.
|           | 3917 if (entry->protection == VM_PROT_NONE)
|           | [all...]
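Lines 3260-3267 of uvm_map.c encode a subtle mprotect() rule: an entry whose old protection was VM_PROT_NONE can never have been wired, since there was nothing legal to fault in, so a map that wires its mappings must wire the entry the moment access is first granted. A sketch of that rule, assuming NetBSD's VM_MAP_WIREFUTURE map flag and with the actual wiring call elided:

    /*
     * Sketch of the wiring rule in uvm_map_protect(): the first
     * upgrade away from VM_PROT_NONE in a wire-future map must
     * fault the entry's pages in and wire them now.
     */
    if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
        old_prot == VM_PROT_NONE &&
        new_prot != VM_PROT_NONE) {
    	/* ... fault in and wire the entry's pages ... */
    }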
|
| uvm_mmap.c | 567 if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) {
|            | 959 if (prot == VM_PROT_NONE) {
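Line 959 is the kernel side of a common userland pattern: mmap() with PROT_NONE (translated to VM_PROT_NONE on the way in) reserves address space that faults on any access until a later mprotect() grants rights. A runnable userland sketch; reserve_region is a hypothetical helper name:

    #include <sys/mman.h>
    #include <stddef.h>

    /*
     * Hypothetical helper: reserve len bytes of address space with
     * no access rights.  Every load, store, or instruction fetch in
     * the range faults until mprotect() upgrades it.
     */
    static void *
    reserve_region(size_t len)
    {
    	return mmap(NULL, len, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
    }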
|
| uvm_pdaemon.c | 764 pmap_page_protect(p, VM_PROT_NONE);
|
| /src/sys/compat/linux/arch/i386/ |
| linux_exec_machdep.c | 117 noaccess_linear_min, NULLVP, 0, VM_PROT_NONE, VMCMD_STACK);
|
| /src/sys/compat/linux32/arch/aarch64/ |
| linux32_exec_machdep.c | 128 noaccess_linear_min, NULLVP, 0, VM_PROT_NONE, VMCMD_STACK);
|
| /src/sys/arch/m68k/m68k/ |
| pmap_motorola.c | 386 protection_codes[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
|                 | 387 protection_codes[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
|                 | 388 protection_codes[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
|                 | 389 protection_codes[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
|                 | 390 protection_codes[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW
|                 | [all...]
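The repeated VM_PROT_NONE terms in this table are deliberate: each VM_PROT_* permission is a single bit, so OR-ing one term per read/write/execute position yields a table index from 0 to 7, with VM_PROT_NONE (zero) standing in for each absent permission purely for readability. A sketch of the matching lookup; pte_protection is a placeholder name:

    /*
     * Sketch: the OR of the three permission positions is an index
     * 0..7; VM_PROT_NONE contributes nothing to it.  The table maps
     * that index to m68k PTE bits such as PG_RO and PG_RW.
     */
    int pte_protection = protection_codes[prot & VM_PROT_ALL];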
|
| /src/sys/kern/
| exec_subr.c | 457 epp->ep_maxsaddr, NULL, 0, VM_PROT_NONE);
|             | 460 epp->ep_maxsaddr - user_stack_guard_size, NULL, 0, VM_PROT_NONE);
|             | 466 VM_PROT_NONE | PROT_MPROTECT(VM_PROT_READ | VM_PROT_WRITE),
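Here exec_subr.c sets up the stack guard: the guard pages are entered with VM_PROT_NONE so any access faults immediately, and line 466 additionally records, via NetBSD's PROT_MPROTECT() maximum-protection extension, that the region may later be upgraded to read/write if the stack is allowed to grow into it. A hedged sketch of such a vmcmd; guard_base and guard_size are placeholder names:

    /*
     * Sketch: map an inaccessible stack guard.  VM_PROT_NONE makes
     * any touch fault now; PROT_MPROTECT(...) caps what a later
     * mprotect() may grant, here read/write for stack growth.
     */
    NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, guard_size, guard_base,
        NULL, 0,
        VM_PROT_NONE | PROT_MPROTECT(VM_PROT_READ | VM_PROT_WRITE),
        VMCMD_STACK);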
|
| /src/sys/arch/sparc64/dev/ |
| mkclock.c | 286 if (prot == VM_PROT_NONE) {
|
| /src/sys/compat/linux/arch/amd64/ |
| linux_exec_machdep.c | 114 noaccess_linear_min, NULLVP, 0, VM_PROT_NONE, VMCMD_STACK);
|
| /src/sys/arch/usermode/usermode/ |
| pmap.c | 723 cur_prot = VM_PROT_NONE;
|        | 1063 if (prot == VM_PROT_NONE) {
|        | 1295 if (prot == VM_PROT_NONE) {
|
| /src/sys/arch/sun3/sun3x/ |
| pmap.c | 2229 case VM_PROT_NONE:
|        | 2253 * is discovered, we are effectively applying a protection of VM_PROT_NONE,
|        | 2285 case VM_PROT_NONE:
|        | 2288 * 'VM_PROT_NONE' is a synonym for pmap_remove().
|        | 2738 case VM_PROT_NONE:
|        | 2778 if (prot == VM_PROT_NONE)
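Line 2288 states the rule that several pmap_protect() implementations in this list rely on: lowering a range to VM_PROT_NONE leaves no permissible access, so it is equivalent to unmapping, and the VM_PROT_NONE case simply forwards to pmap_remove(). A skeleton of that shape, with the ordinary downgrade path elided:

    void
    pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
    {
    	switch (prot) {
    	case VM_PROT_NONE:
    		/* No rights left: same effect as removing the mappings. */
    		pmap_remove(pmap, sva, eva);
    		return;
    	default:
    		/* ... downgrade the PTEs covering [sva, eva) ... */
    		break;
    	}
    }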
|
| /src/sys/arch/aarch64/aarch64/ |
| pmap.c | 1286 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
|        | 1659 * VM_PROT_NONE. it is not correct because without considering
|        | 1813 KASSERT((prot & VM_PROT_ALL) != VM_PROT_NONE);
|        | 2307 KASSERT(prot == VM_PROT_NONE);
|        | 2334 VM_PROT_NONE) {
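The test on line 1286 (and its twins at uvm/pmap/pmap.c line 1321 and ia64 pmap.c line 1887) leans on VM_PROT_NONE being zero: masking out one permission bit and comparing against VM_PROT_NONE is simply "bit not set", spelled in the protection vocabulary:

    /*
     * Equivalent spellings: VM_PROT_NONE is 0, so the masked compare
     * just asks whether the read bit is clear.
     */
    if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
    	/* same as: if ((prot & VM_PROT_READ) == 0) */
    }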
|
| /src/sys/arch/ia64/ia64/ |
| pmap.c | 1166 PTE_AR_R, /* VM_PROT_NONE */
|        | 1174 pte->pte |= (prot == VM_PROT_NONE || pm == kernel_pmap)
|        | 1887 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
|        | 2832 pmap_protect(pmap_kernel(), va, va+1, VM_PROT_NONE);
|        | 2882 pmap_page_protect(pg, VM_PROT_NONE);
|
| /src/sys/ufs/lfs/ |
| lfs_pages.c | 308 pmap_page_protect(pg, VM_PROT_NONE);
|
| /src/sys/arch/powerpc/ibm4xx/ |
| pmap.c | 1762 pmap_protect(pmap_kernel(), va, va + PAGE_SIZE, VM_PROT_NONE);
|        | 1835 pmap_page_protect(pg, VM_PROT_NONE);
|
| /src/sys/arch/vax/vax/ |
| pmap.c | 1333 if (prot == VM_PROT_NONE) {
|        | 1530 if (prot == VM_PROT_NONE) {
|
| /src/sys/external/bsd/drm2/dist/drm/ttm/ |
| ttm_bo.c | 1852 pmap_pv_protect(pa, VM_PROT_NONE);
|          | 1859 VM_PROT_NONE);
|
| /src/sys/miscfs/genfs/ |
| genfs_io.c | 1106 pmap_page_protect(pg, VM_PROT_NONE);
|            | 1252 pmap_page_protect(tpg, VM_PROT_NONE);
|
| /src/sys/uvm/pmap/ |
| pmap.c | 1088 KASSERT(prot == VM_PROT_NONE);
|        | 1321 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
|
| /src/sys/arch/alpha/alpha/ |
| trap.c | 440 ftype = VM_PROT_NONE;
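trap() initializes the fault type to VM_PROT_NONE and then refines it from the cause of the fault before calling uvm_fault(). A hedged sketch of that classification; cause_is_write and cause_is_exec stand in for the machine-dependent decoding:

    vm_prot_t ftype = VM_PROT_NONE;	/* no access type determined yet */

    if (cause_is_write)			/* placeholder: MD fault decoding */
    	ftype = VM_PROT_WRITE;
    else if (cause_is_exec)		/* placeholder: MD fault decoding */
    	ftype = VM_PROT_EXECUTE;
    else
    	ftype = VM_PROT_READ;

    error = uvm_fault(map, trunc_page(va), ftype);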
|